diff --git a/.circleci/config.yml b/.circleci/config.yml index 409b90f4edd..5b3d40ae9db 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -865,6 +865,17 @@ jobs: echo "Skipping doc deploy (not on master)." fi + yellow-paper: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: large + steps: + - *checkout + - *setup_env + - run: + name: "Build yellow paper" + command: build yellow-paper + e2e-join: docker: - image: cimg/base:2023.09 @@ -905,7 +916,21 @@ name: "Deploy mainnet fork" command: | should_deploy || exit 0 - deploy_ecr mainnet-fork + deploy mainnet-fork + + deploy-contracts: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium + steps: + - *checkout + - *setup_env + - run: + name: "Deploy L1 contracts to mainnet fork" + working_directory: l1-contracts + command: | + should_deploy || exit 0 + ./scripts/ci_deploy_contracts.sh deploy-npm: machine: @@ -931,9 +956,34 @@ name: "Deploy to dockerhub" command: | should_deploy || exit 0 - deploy_dockerhub noir - deploy_dockerhub aztec-sandbox - deploy_dockerhub cli + deploy_dockerhub noir x86_64,arm64 + deploy_dockerhub aztec-sandbox x86_64,arm64 + deploy_dockerhub cli x86_64,arm64 + deploy_dockerhub aztec-faucet x86_64 + deploy_dockerhub mainnet-fork x86_64 + deploy_dockerhub l1-contracts x86_64 + + deploy-devnet: + machine: + image: ubuntu-2204:2023.07.2 + resource_class: medium + steps: + - *checkout + - *setup_env + - run: + name: "Deploy devnet to AWS" + command: | + should_deploy 0 || exit 0 + export TF_VAR_FAUCET_PRIVATE_KEY=$FAUCET_PRIVATE_KEY + export TF_VAR_BOOTNODE_1_PEER_ID=$BOOTNODE_1_PEER_ID + export TF_VAR_BOOTNODE_2_PEER_ID=$BOOTNODE_2_PEER_ID + export TF_VAR_BOOTNODE_1_PRIVATE_KEY=$BOOTNODE_1_PRIVATE_KEY + export TF_VAR_BOOTNODE_2_PRIVATE_KEY=$BOOTNODE_2_PRIVATE_KEY + export TF_VAR_SEQ_1_PUBLISHER_PRIVATE_KEY=$SEQ_1_PUBLISHER_PRIVATE_KEY + export TF_VAR_SEQ_2_PUBLISHER_PRIVATE_KEY=$SEQ_2_PUBLISHER_PRIVATE_KEY + deploy_terraform 
p2p-bootstrap yarn-project/p2p-bootstrap/terraform + deploy_terraform aztec-node yarn-project/aztec-node/terraform + deploy_terraform aztec-faucet yarn-project/aztec-faucet/terraform # Repeatable config for defining the workflow below. defaults: &defaults @@ -1030,6 +1080,8 @@ workflows: - mainnet-fork: *defaults + - yellow-paper: *defaults + # Yarn Project - yarn-project-base: requires: @@ -1143,6 +1195,7 @@ - guides-dapp-testing - guides-sample-dapp - guides-up-quick-start + - yellow-paper <<: *defaults # Benchmark jobs. @@ -1157,4 +1210,15 @@ # Production deployment - deploy-dockerhub: *defaults_deploy - deploy-npm: *defaults_deploy - - deploy-mainnet-fork: *defaults_deploy + - deploy-mainnet-fork: + requires: + - deploy-dockerhub + <<: *defaults_deploy + - deploy-contracts: + requires: + - deploy-mainnet-fork + <<: *defaults_deploy + - deploy-devnet: + requires: + - deploy-contracts + <<: *defaults_deploy diff --git a/barretenberg/cpp/.clangd b/barretenberg/cpp/.clangd index e09234d9e7a..bb22a6eed3e 100644 --- a/barretenberg/cpp/.clangd +++ b/barretenberg/cpp/.clangd @@ -61,6 +61,10 @@ Diagnostics: - google-explicit-constructor # Not honouring. - cppcoreguidelines-owning-memory + # "This check is deprecated since it’s no longer part of the CERT standard. It will be removed in clang-tidy version 19." + - cert-dc21-cpp + # Noisy. As we don't need to return error types or raw allocations, really unlikely we'd cause problems by ignoring a return type. 
+ - modernize-use-nodiscard --- # this divider is necessary # Disable some checks for Google Test/Bench diff --git a/barretenberg/cpp/pil/avm/avm_mini.pil b/barretenberg/cpp/pil/avm/avm_mini.pil index 80103c91057..f9b8f2fe23a 100644 --- a/barretenberg/cpp/pil/avm/avm_mini.pil +++ b/barretenberg/cpp/pil/avm/avm_mini.pil @@ -1,6 +1,7 @@ -constant %N = 256; -namespace avmMini(%N); +include "mem_trace.pil"; + +namespace avmMini(256); //===== CONSTANT POLYNOMIALS ================================================== pol constant clk(i) { i }; @@ -52,46 +53,4 @@ namespace avmMini(%N); // Relation for addition over the finite field subop * (ia + ib - ic) = 0; - // ========= Table MEM-TR ================= - pol commit m_clk; - pol commit m_sub_clk; - pol commit m_addr; - pol commit m_val; - pol commit m_lastAccess; // Boolean (1 when this row is the last of a given address) - pol commit m_rw; // Enum: 0 (read), 1 (write) - - // Type constraints - m_lastAccess * (1 - m_lastAccess) = 0; - m_rw * (1 - m_rw) = 0; - - // m_lastAccess == 0 ==> m_addr' == m_addr - (1 - first) * (1 - m_lastAccess) * (m_addr' - m_addr) = 0; - - // We need: m_lastAccess == 1 ==> m_addr' > m_addr - // The above implies: m_addr' == m_addr ==> m_lastAccess == 0 - // This condition does not apply on the last row. - // clk + 1 used as an expression for positive integers - // TODO: Uncomment when lookups are supported - // (1 - first) * (1 - last) * m_lastAccess { (m_addr' - m_addr) } in clk + 1; // Gated inclusion check. Is it supported? 
- - // TODO: following constraint - // m_addr' == m_addr && m_clk == m_clk' ==> m_sub_clk' - m_sub_clk > 0 - // Can be enforced with (1 - first) * (1 - last) * (1 - m_lastAccess) { 6 * (m_clk' - m_clk) + m_sub_clk' - m_sub_clk } in clk + 1 - - // Alternatively to the above, one could require - // that m_addr' - m_addr is 0 or 1 (needs to add placeholders m_addr values): - // (m_addr' - m_addr) * (m_addr' - m_addr) - (m_addr' - m_addr) = 0; - // if m_addr' - m_addr is 0 or 1, the following is equiv. to m_lastAccess - // (m_addr' - m_addr) - - // m_lastAccess == 0 && m_rw' == 0 ==> m_val == m_val' - // This condition does not apply on the last row. - // Note: in barretenberg, a shifted polynomial will be 0 on the last row (shift is not cyclic) - // Note2: in barretenberg, if a poynomial is shifted, its non-shifted equivalent must be 0 on the first row - - (1 - first) * (1 - last) * (1 - m_lastAccess) * (1 - m_rw') * (m_val' - m_val) = 0; - - // TODO: Constraint the first load from a given adress has value 0. (Consistency of memory initialization.) 
- // TODO: when introducing load/store as sub-operations, we will have to add consistency of intermediate - // register values ia, ib, ic \ No newline at end of file diff --git a/barretenberg/cpp/pil/avm/avm_mini_opt.pil b/barretenberg/cpp/pil/avm/avm_mini_opt.pil index 82a456a38ca..ffa516ca9ef 100644 --- a/barretenberg/cpp/pil/avm/avm_mini_opt.pil +++ b/barretenberg/cpp/pil/avm/avm_mini_opt.pil @@ -1,7 +1,16 @@ -constant %N = 256; +namespace memTrace(256); + col witness m_clk; + col witness m_sub_clk; + col witness m_addr; + col witness m_val; + col witness m_lastAccess; + col witness m_rw; + (memTrace.m_lastAccess * (1 - memTrace.m_lastAccess)) = 0; + (memTrace.m_rw * (1 - memTrace.m_rw)) = 0; + (((1 - avmMini.first) * (1 - memTrace.m_lastAccess)) * (memTrace.m_addr' - memTrace.m_addr)) = 0; + (((((1 - avmMini.first) * (1 - avmMini.last)) * (1 - memTrace.m_lastAccess)) * (1 - memTrace.m_rw')) * (memTrace.m_val' - memTrace.m_val)) = 0; namespace avmMini(256); col fixed clk(i) { i }; - col fixed positive(i) { (i + 1) }; col fixed first = [1] + [0]*; col witness subop; col witness ia; @@ -24,14 +33,4 @@ namespace avmMini(256); (avmMini.rwa * (1 - avmMini.rwa)) = 0; (avmMini.rwb * (1 - avmMini.rwb)) = 0; (avmMini.rwc * (1 - avmMini.rwc)) = 0; - (avmMini.subop * ((avmMini.ia + avmMini.ib) - avmMini.ic)) = 0; - col witness m_clk; - col witness m_sub_clk; - col witness m_addr; - col witness m_val; - col witness m_lastAccess; - col witness m_rw; - (avmMini.m_lastAccess * (1 - avmMini.m_lastAccess)) = 0; - (avmMini.m_rw * (1 - avmMini.m_rw)) = 0; - (((1 - avmMini.first) * (1 - avmMini.m_lastAccess)) * (avmMini.m_addr' - avmMini.m_addr)) = 0; - (((((1 - avmMini.first) * (1 - avmMini.last)) * (1 - avmMini.m_lastAccess)) * (1 - avmMini.m_rw')) * (avmMini.m_val' - avmMini.m_val)) = 0; + (avmMini.subop * ((avmMini.ia + avmMini.ib) - avmMini.ic)) = 0; \ No newline at end of file diff --git a/barretenberg/cpp/pil/avm/mem_trace.pil b/barretenberg/cpp/pil/avm/mem_trace.pil new 
file mode 100644 index 00000000000..38cc0813d2c --- /dev/null +++ b/barretenberg/cpp/pil/avm/mem_trace.pil @@ -0,0 +1,47 @@ + + +include "avm_mini.pil"; + +namespace memTrace(256); + // ========= Table MEM-TR ================= + pol commit m_clk; + pol commit m_sub_clk; + pol commit m_addr; + pol commit m_val; + pol commit m_lastAccess; // Boolean (1 when this row is the last of a given address) + pol commit m_rw; // Enum: 0 (read), 1 (write) + + // Type constraints + m_lastAccess * (1 - m_lastAccess) = 0; + m_rw * (1 - m_rw) = 0; + + // m_lastAccess == 0 ==> m_addr' == m_addr + (1 - avmMini.first) * (1 - m_lastAccess) * (m_addr' - m_addr) = 0; + + // We need: m_lastAccess == 1 ==> m_addr' > m_addr + // The above implies: m_addr' == m_addr ==> m_lastAccess == 0 + // This condition does not apply on the last row. + // clk + 1 used as an expression for positive integers + // TODO: Uncomment when lookups are supported + // (1 - first) * (1 - last) * m_lastAccess { (m_addr' - m_addr) } in clk + 1; // Gated inclusion check. Is it supported? + + // TODO: following constraint + // m_addr' == m_addr && m_clk == m_clk' ==> m_sub_clk' - m_sub_clk > 0 + // Can be enforced with (1 - first) * (1 - last) * (1 - m_lastAccess) { 6 * (m_clk' - m_clk) + m_sub_clk' - m_sub_clk } in clk + 1 + + // Alternatively to the above, one could require + // that m_addr' - m_addr is 0 or 1 (needs to add placeholders m_addr values): + // (m_addr' - m_addr) * (m_addr' - m_addr) - (m_addr' - m_addr) = 0; + // if m_addr' - m_addr is 0 or 1, the following is equiv. to m_lastAccess + // (m_addr' - m_addr) + + // m_lastAccess == 0 && m_rw' == 0 ==> m_val == m_val' + // This condition does not apply on the last row. 
+ // Note: in barretenberg, a shifted polynomial will be 0 on the last row (shift is not cyclic) + // Note2: in barretenberg, if a polynomial is shifted, its non-shifted equivalent must be 0 on the first row + + (1 - avmMini.first) * (1 - avmMini.last) * (1 - m_lastAccess) * (1 - m_rw') * (m_val' - m_val) = 0; + + // TODO: Constrain that the first load from a given address has value 0. (Consistency of memory initialization.) + // TODO: when introducing load/store as sub-operations, we will have to add consistency of intermediate + // register values ia, ib, ic \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp index d27dc35f9bc..616e85957be 100644 --- a/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp +++ b/barretenberg/cpp/src/barretenberg/benchmark/relations_bench/relations.bench.cpp @@ -24,7 +24,7 @@ template void execute_relation(::benchmark: auto params = proof_system::RelationParameters::get_random(); // Extract an array containing all the polynomial evaluations at a given row i - AllValues new_value; + AllValues new_value{}; // Define the appropriate SumcheckArrayOfValuesOverSubrelations type for this relation and initialize to zero SumcheckArrayOfValuesOverSubrelations accumulator; // Evaluate each constraint in the relation and check that each is satisfied diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp index 203ebb523e2..602cfe2fa78 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/gemini/gemini.test.cpp @@ -25,7 +25,7 @@ template class GeminiTest : public CommitmentTest { std::vector multilinear_commitments, std::vector multilinear_commitments_to_be_shifted) { - auto 
prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); const Fr rho = Fr::random_element(); @@ -79,7 +79,7 @@ template class GeminiTest : public CommitmentTest { // Check that the Fold polynomials have been evaluated correctly in the prover this->verify_batch_opening_pair(prover_output.opening_pairs, prover_output.witnesses); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); // Compute: // - Single opening pair: {r, \hat{a}_0} diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp index 7a46a3eb71e..758d21d805b 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.hpp @@ -35,7 +35,7 @@ template class IPA { static void compute_opening_proof(std::shared_ptr ck, const OpeningPair& opening_pair, const Polynomial& polynomial, - BaseTranscript& transcript) + BaseTranscript& transcript) { ASSERT(opening_pair.challenge != 0 && "The challenge point should not be zero"); auto poly_degree = static_cast(polynomial.size()); @@ -134,7 +134,7 @@ template class IPA { * * @return true/false depending on if the proof verifies */ - static bool verify(std::shared_ptr vk, const OpeningClaim& opening_claim, BaseTranscript& transcript) + static bool verify(std::shared_ptr vk, const OpeningClaim& opening_claim, BaseTranscript& transcript) { auto poly_degree = static_cast(transcript.template receive_from_prover("IPA:poly_degree")); Fr generator_challenge = transcript.get_challenge("IPA:generator_challenge"); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp index 7527aa2a1eb..315374defab 100644 --- 
a/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/ipa/ipa.test.cpp @@ -70,11 +70,11 @@ TEST_F(IPATest, Open) const OpeningClaim opening_claim{ opening_pair, commitment }; // initialize empty prover transcript - BaseTranscript prover_transcript; + BaseTranscript prover_transcript; IPA::compute_opening_proof(this->ck(), opening_pair, poly, prover_transcript); // initialize verifier transcript from proof data - BaseTranscript verifier_transcript{ prover_transcript.proof_data }; + BaseTranscript verifier_transcript{ prover_transcript.proof_data }; auto result = IPA::verify(this->vk(), opening_claim, verifier_transcript); EXPECT_TRUE(result); @@ -129,7 +129,7 @@ TEST_F(IPATest, GeminiShplonkIPAWithShift) batched_commitment_unshifted = commitment1 * rhos[0] + commitment2 * rhos[1]; batched_commitment_to_be_shifted = commitment2 * rhos[2]; - auto prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); auto gemini_polynomials = GeminiProver::compute_gemini_polynomials( mle_opening_point, std::move(batched_unshifted), std::move(batched_to_be_shifted)); @@ -162,7 +162,7 @@ TEST_F(IPATest, GeminiShplonkIPAWithShift) IPA::compute_opening_proof(this->ck(), shplonk_opening_pair, shplonk_witness, prover_transcript); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); auto gemini_verifier_claim = GeminiVerifier::reduce_verification(mle_opening_point, batched_evaluation, diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp index f56018b2963..ca024515717 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.hpp @@ -31,7 +31,7 @@ template class 
KZG { static void compute_opening_proof(std::shared_ptr ck, const OpeningPair& opening_pair, const Polynomial& polynomial, - BaseTranscript& prover_trancript) + BaseTranscript& prover_trancript) { Polynomial quotient(polynomial); quotient[0] -= opening_pair.evaluation; @@ -53,9 +53,7 @@ template class KZG { * - P₀ = C − v⋅[1]₁ + r⋅[x]₁ * - P₁ = [Q(x)]₁ */ - static bool verify(std::shared_ptr vk, - const OpeningClaim& claim, - BaseTranscript& verifier_transcript) + static bool verify(std::shared_ptr vk, const OpeningClaim& claim, BaseTranscript& verifier_transcript) { auto quotient_commitment = verifier_transcript.template receive_from_prover("KZG:W"); auto lhs = claim.commitment - (GroupElement::one() * claim.opening_pair.evaluation) + diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp index 9334ddaf482..f2f9f569b06 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/kzg/kzg.test.cpp @@ -39,11 +39,11 @@ TYPED_TEST(KZGTest, single) auto opening_pair = OpeningPair{ challenge, evaluation }; auto opening_claim = OpeningClaim{ opening_pair, commitment }; - auto prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); KZG::compute_opening_proof(this->ck(), opening_pair, witness, prover_transcript); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); bool verified = KZG::verify(this->vk(), opening_claim, verifier_transcript); EXPECT_EQ(verified, true); @@ -109,7 +109,7 @@ TYPED_TEST(KZGTest, GeminiShplonkKzgWithShift) batched_commitment_unshifted = commitment1 * rhos[0] + commitment2 * rhos[1]; batched_commitment_to_be_shifted = commitment2 * rhos[2]; - auto prover_transcript = 
BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); // Run the full prover PCS protocol: @@ -154,7 +154,7 @@ TYPED_TEST(KZGTest, GeminiShplonkKzgWithShift) // Run the full verifier PCS protocol with genuine opening claims (genuine commitment, genuine evaluation) - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); // Gemini verifier output: // - claim: d+1 commitments to Fold_{r}^(0), Fold_{-r}^(0), Fold^(l), d+1 evaluations a_0_pos, a_l, l = 0:d-1 diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp index 74cd152c21a..15cb7605b35 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/shplonk/shplonk.test.cpp @@ -28,7 +28,7 @@ TYPED_TEST(ShplonkTest, ShplonkSimple) const size_t n = 16; - auto prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); // Generate two random (unrelated) polynomials of two different sizes, as well as their evaluations at a (single but // different) random point and their commitments. 
@@ -64,7 +64,7 @@ TYPED_TEST(ShplonkTest, ShplonkSimple) opening_claims.emplace_back(OpeningClaim{ opening_pairs[0], commitment1 }); opening_claims.emplace_back(OpeningClaim{ opening_pairs[1], commitment2 }); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); // Execute the shplonk verifier functionality const auto verifier_claim = ShplonkVerifier::reduce_verification(this->vk(), opening_claims, verifier_transcript); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.hpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.hpp index 1cd359ebe69..1817a578440 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.hpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.hpp @@ -1,6 +1,9 @@ #pragma once +#include "barretenberg/common/ref_vector.hpp" #include "barretenberg/common/zip_view.hpp" #include "barretenberg/polynomials/polynomial.hpp" +#include "barretenberg/transcript/transcript.hpp" + namespace proof_system::honk::pcs::zeromorph { /** @@ -321,7 +324,8 @@ template class ZeroMorphProver_ { auto& transcript, const std::vector>& concatenated_polynomials = {}, const std::vector& concatenated_evaluations = {}, - const std::vector>>& concatenation_groups = {}) + // TODO(https://github.com/AztecProtocol/barretenberg/issues/743) remove span + const std::vector>>& concatenation_groups = {}) { // Generate batching challenge \rho and powers 1,...,\rho^{m-1} FF rho = transcript.get_challenge("rho"); @@ -394,7 +398,7 @@ template class ZeroMorphProver_ { } // Get challenge y - auto y_challenge = transcript.get_challenge("ZM:y"); + FF y_challenge = transcript.get_challenge("ZM:y"); // Compute the batched, lifted-degree quotient \hat{q} auto batched_quotient = compute_batched_lifted_degree_quotient(quotients, y_challenge, N); @@ -404,7 +408,7 @@ 
template class ZeroMorphProver_ { transcript.send_to_verifier("ZM:C_q", q_commitment); // Get challenges x and z - auto [x_challenge, z_challenge] = transcript.get_challenges("ZM:x", "ZM:z"); + auto [x_challenge, z_challenge] = challenges_to_field_elements(transcript.get_challenges("ZM:x", "ZM:z")); // Compute degree check polynomial \zeta partially evaluated at x auto zeta_x = @@ -513,14 +517,14 @@ template class ZeroMorphVerifier_ { * @param concatenation_groups_commitments * @return Commitment */ - static Commitment compute_C_Z_x(std::vector f_commitments, - std::vector g_commitments, + static Commitment compute_C_Z_x(const std::vector& f_commitments, + const std::vector& g_commitments, std::vector& C_q_k, FF rho, FF batched_evaluation, FF x_challenge, std::vector u_challenge, - const std::vector>& concatenation_groups_commitments = {}) + const std::vector>& concatenation_groups_commitments = {}) { size_t log_N = C_q_k.size(); size_t N = 1 << log_N; @@ -611,7 +615,7 @@ template class ZeroMorphVerifier_ { * @brief Utility for native batch multiplication of group elements * @note This is used only for native verification and is not optimized for efficiency */ - static Commitment batch_mul_native(std::vector points, std::vector scalars) + static Commitment batch_mul_native(const std::vector& points, const std::vector& scalars) { auto result = points[0] * scalars[0]; for (size_t idx = 1; idx < scalars.size(); ++idx) { @@ -637,7 +641,7 @@ template class ZeroMorphVerifier_ { auto&& shifted_evaluations, auto& multivariate_challenge, auto& transcript, - const std::vector>& concatenation_group_commitments = {}, + const std::vector>& concatenation_group_commitments = {}, const std::vector& concatenated_evaluations = {}) { size_t log_N = multivariate_challenge.size(); @@ -667,13 +671,13 @@ template class ZeroMorphVerifier_ { } // Challenge y - auto y_challenge = transcript.get_challenge("ZM:y"); + FF y_challenge = transcript.get_challenge("ZM:y"); // Receive commitment 
C_{q} auto C_q = transcript.template receive_from_prover("ZM:C_q"); // Challenges x, z - auto [x_challenge, z_challenge] = transcript.get_challenges("ZM:x", "ZM:z"); + auto [x_challenge, z_challenge] = challenges_to_field_elements(transcript.get_challenges("ZM:x", "ZM:z")); // Compute commitment C_{\zeta_x} auto C_zeta_x = compute_C_zeta_x(C_q, C_q_k, y_challenge, x_challenge); diff --git a/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.test.cpp b/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.test.cpp index 30876c73e50..d884e1b046d 100644 --- a/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.test.cpp +++ b/barretenberg/cpp/src/barretenberg/commitment_schemes/zeromorph/zeromorph.test.cpp @@ -77,7 +77,7 @@ template class ZeroMorphTest : public CommitmentTest { } // Initialize an empty BaseTranscript - auto prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); // Execute Prover protocol ZeroMorphProver::prove(f_polynomials, @@ -88,7 +88,7 @@ template class ZeroMorphTest : public CommitmentTest { this->commitment_key, prover_transcript); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); // Execute Verifier protocol auto pairing_points = ZeroMorphVerifier::verify( @@ -223,7 +223,7 @@ template class ZeroMorphWithConcatenationTest : public CommitmentT } // Initialize an empty BaseTranscript - auto prover_transcript = BaseTranscript::prover_init_empty(); + auto prover_transcript = BaseTranscript::prover_init_empty(); std::vector> concatenated_polynomials_views; for (auto& poly : concatenated_polynomials) { @@ -246,9 +246,9 @@ template class ZeroMorphWithConcatenationTest : public CommitmentT prover_transcript, concatenated_polynomials_views, c_evaluations, - concatenation_groups_views); + 
to_vector_of_ref_vectors(concatenation_groups_views)); - auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); + auto verifier_transcript = BaseTranscript::verifier_init_empty(prover_transcript); // Execute Verifier protocol auto pairing_points = ZeroMorphVerifier::verify(f_commitments, // unshifted @@ -257,7 +257,7 @@ template class ZeroMorphWithConcatenationTest : public CommitmentT w_evaluations, // shifted u_challenge, verifier_transcript, - concatenation_groups_commitments, + to_vector_of_ref_vectors(concatenation_groups_commitments), c_evaluations); verified = this->vk()->pairing_check(pairing_points[0], pairing_points[1]); diff --git a/barretenberg/cpp/src/barretenberg/common/constexpr_utils.hpp b/barretenberg/cpp/src/barretenberg/common/constexpr_utils.hpp index ed11246196f..29bbfc47387 100644 --- a/barretenberg/cpp/src/barretenberg/common/constexpr_utils.hpp +++ b/barretenberg/cpp/src/barretenberg/common/constexpr_utils.hpp @@ -12,8 +12,6 @@ * * constexpr_for : loop over a range , where the size_t iterator `i` is a constexpr variable * constexpr_find : find if an element is in an array - * concatenate_arrays : smoosh multiple std::array objects into a single std::array - * */ namespace barretenberg { @@ -121,31 +119,6 @@ template constexpr bool constexpr_find() return found; } -/** - * @brief merges multiple std::arrays into a single array. - * Array lengths can be different but array type must match - * Method is constexpr and should concat constexpr arrays at compile time - * - * @tparam Type the array type - * @tparam sizes template parameter pack of size_t value params - * @param arrays - * @return constexpr auto - * - * @details template params should be autodeducted. Example use case: - * - * ``` - * std::array a{1, 2}; - * std::array b{1,3, 5}; - * std::array c = concatenate(a, b); - * ``` - */ -template -constexpr auto concatenate_arrays(const std::array&... arrays) -{ - return std::apply([](auto... 
elems) -> std::array { return { { elems... } }; }, - std::tuple_cat(std::tuple_cat(arrays)...)); -} - /** * @brief Create a constexpr array object whose elements contain a default value * diff --git a/barretenberg/cpp/src/barretenberg/common/ref_array.hpp b/barretenberg/cpp/src/barretenberg/common/ref_array.hpp new file mode 100644 index 00000000000..f9c9fa11f3b --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/ref_array.hpp @@ -0,0 +1,140 @@ +#include "barretenberg/common/assert.hpp" +#include +#include +#include +#include +#include + +// TODO(https://github.com/AztecProtocol/barretenberg/issues/794) namespace this once convenient +/** + * @brief A template class for a reference array. Behaves as if std::array was possible. + * + * This class provides a fixed-size array of pointers to elements of type T, exposed as references. + * It offers random access to its elements and provides an iterator class + * for traversal. + * + * @tparam T The type of elements stored in the array. + * @tparam N The size of the array. + */ +template class RefArray { + public: + RefArray(const std::array& ptr_array) + { + std::size_t i = 0; + for (T& elem : ptr_array) { + storage[i++] = &elem; + } + } + RefArray(std::initializer_list init) + { + if (init.size() != N) { + throw std::invalid_argument("Initializer list size does not match RefArray size"); + } + std::size_t i = 0; + for (auto& elem : init) { + storage[i++] = &elem; + } + } + + T& operator[](std::size_t idx) const + { + ASSERT(idx < N); + return *storage[idx]; + } + + /** + * @brief Nested iterator class for RefArray, based on indexing into the pointer array. + * Provides semantics similar to what would be expected if std::array was possible. + */ + class iterator { + public: + /** + * @brief Constructs an iterator for a given RefArray object. + * + * @param array Pointer to the RefArray object. + * @param pos The starting position in the array. 
+ */ + iterator(RefArray const* array, std::size_t pos) + : array(array) + , pos(pos) + {} + + T& operator*() const { return (*array)[pos]; } + + iterator& operator++() + { + pos++; + return *this; + } + + iterator operator++(int) + { + iterator temp = *this; + ++(*this); + return temp; + } + + bool operator==(iterator const& other) const { return pos == other.pos; } + bool operator!=(iterator const& other) const { return pos != other.pos; } + + private: + RefArray const* array; + std::size_t pos; + }; + + /** + * @brief Returns an iterator to the beginning of the RefArray. + * + * @return An iterator to the first element. + */ + iterator begin() const { return iterator(this, 0); } + /** + * @brief Returns an iterator to the end of the RefArray. + * + * @return An iterator to the element following the last element. + */ + iterator end() const { return iterator(this, N); } + + private: + // We are making a high-level array, for simplicity having a C array as backing makes sense. + // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays) + T* storage[N]; +}; + +/** + * @brief Deduction guide for the RefArray class. + * Allows for RefArray {a, b, c} without explicit template params. + */ +template RefArray(T&, Ts&...) -> RefArray; + +/** + * @brief Concatenates multiple RefArray objects into a single RefArray. + * + * This function takes multiple RefArray objects as input and concatenates them into a single + * RefArray. + + * @tparam T The type of elements in the RefArray. + * @tparam Ns The sizes of the input RefArrays. + * @param ref_arrays The RefArray objects to be concatenated. + * @return RefArray object containing all elements from the input arrays. + */ +template RefArray concatenate(const RefArray&... 
ref_arrays) +{ + // Fold expression to calculate the total size of the new array using fold expression + constexpr std::size_t TotalSize = (Ns + ...); + std::array concatenated; + + std::size_t offset = 0; + // Copies elements from a given RefArray to the concatenated array + auto copy_into = [&](const auto& ref_array, std::size_t& offset) { + for (std::size_t i = 0; i < ref_array.size(); ++i) { + concatenated[offset + i] = &ref_array[i]; + } + offset += ref_array.size(); + }; + + // Fold expression to copy elements from each input RefArray to the concatenated array + (..., copy_into(ref_arrays, offset)); + + return RefArray{ concatenated }; +} \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp b/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp new file mode 100644 index 00000000000..fa47379ba83 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/ref_vector.hpp @@ -0,0 +1,149 @@ +#pragma once +#include "barretenberg/common/assert.hpp" +#include +#include +#include +#include +#include + +// TODO(https://github.com/AztecProtocol/barretenberg/issues/794) namespace this once convenient +/** + * @brief A template class for a reference vector. Behaves as if std::vector was possible. + * + * This class provides a dynamic-size vector of pointers to elements of type T, exposed as references. + * It offers random access to its elements and provides an iterator class for traversal. + * + * @tparam T The type of elements stored in the vector. + * @warning This should NOT be used for long-term storage, only for efficient passing. Any long-term sharing of values + * should use shared pointers. 
+ */
+template <typename T> class RefVector {
+  public:
+    RefVector() = default;
+    explicit RefVector(const std::vector<T*>& ptr_vector)
+        : storage(ptr_vector)
+    {}
+
+    explicit RefVector(std::vector<T>& vector)
+        : storage(vector.size())
+    {
+        for (size_t i = 0; i < vector.size(); i++) {
+            storage[i] = &vector[i];
+        }
+    }
+
+    template <typename... Ts> RefVector(T& ref, Ts&... rest)
+    {
+        storage.push_back(&ref);
+        (storage.push_back(&rest), ...);
+    }
+
+    T& operator[](std::size_t idx) const
+    {
+        ASSERT(idx < storage.size());
+        return *storage[idx];
+    }
+
+    /**
+     * @brief Nested iterator class for RefVector, based on indexing into the pointer vector.
+     * Provides semantics similar to what would be expected if std::vector<T&> was possible.
+     */
+    class iterator {
+      public:
+        /**
+         * @brief Constructs an iterator for a given RefVector object.
+         *
+         * @param vector Pointer to the RefVector object.
+         * @param pos The starting position in the vector.
+         */
+        iterator(RefVector const* vector, std::size_t pos)
+            : vector(vector)
+            , pos(pos)
+        {}
+
+        T& operator*() const { return (*vector)[pos]; }
+
+        iterator& operator++()
+        {
+            pos++;
+            return *this;
+        }
+
+        iterator operator++(int)
+        {
+            iterator temp = *this;
+            ++(*this);
+            return temp;
+        }
+
+        bool operator==(iterator const& other) const { return pos == other.pos; }
+        bool operator!=(iterator const& other) const { return pos != other.pos; }
+
+      private:
+        RefVector const* vector;
+        std::size_t pos;
+    };
+
+    [[nodiscard]] std::size_t size() const { return storage.size(); }
+
+    // NOTE(review): was `storage.push_back(element)` — storage holds T*, so the element's
+    // address must be stored; the unpatched form only compiled because the member was
+    // never instantiated.
+    void push_back(T& element) { storage.push_back(&element); }
+    iterator begin() const { return iterator(this, 0); }
+    iterator end() const { return iterator(this, storage.size()); }
+
+    template <typename U> operator std::vector<U>() const
+    {
+        std::vector<U> ret;
+        for (T* elem : storage) {
+            ret.push_back(*elem);
+        }
+        return ret;
+    }
+
+    std::vector<T*>& get_storage() { return storage; }
+    const std::vector<T*>& get_storage() const { return storage; }
+
+  private:
+    std::vector<T*> storage;
+};
+
+/**
+ * @brief 
Deduction guide for the RefVector class. + * Allows for RefVector {a, b, c} without explicit template params. + */ +template RefVector(T&, Ts&...) -> RefVector; + +/** + * @brief Concatenates multiple RefVector objects into a single RefVector. + * + * This function takes multiple RefVector objects as input and concatenates them into a single + * RefVector. + * + * @tparam T The type of elements in the RefVector. + * @param ref_vectors The RefVector objects to be concatenated. + * @return RefVector object containing all elements from the input vectors. + */ +template RefVector concatenate(const RefVector& ref_vector, const auto&... ref_vectors) +{ + RefVector concatenated; + // Reserve our final space + concatenated.get_storage().reserve(ref_vector.size() + (ref_vectors.size() + ...)); + + auto append = [&](const auto& vec) { + std::copy(vec.get_storage().begin(), vec.get_storage().end(), std::back_inserter(concatenated.get_storage())); + }; + + append(ref_vector); + // Unpack and append each RefVector's elements to concatenated + (append(ref_vectors), ...); + + return concatenated; +} + +template static std::vector> to_vector_of_ref_vectors(std::vector>& vec) +{ + std::vector> result; + for (std::vector& inner : vec) { + result.push_back(RefVector{ inner }); + } + return result; +} diff --git a/barretenberg/cpp/src/barretenberg/common/std_array.hpp b/barretenberg/cpp/src/barretenberg/common/std_array.hpp new file mode 100644 index 00000000000..850464ae36a --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/common/std_array.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include + +// TODO(https://github.com/AztecProtocol/barretenberg/issues/794) namespace this once convenient +/** + * @brief Concatenates multiple std::array objects into a single array. + * + * This function template takes a variadic number of std::array objects and concatenates + * their elements into a single std::array. The size of the resulting array is the sum of the sizes + * of the input arrays. 
+ * + * @tparam T The type of elements stored in the arrays. + * @tparam Ns The sizes of the input arrays. This is a variadic template parameter pack representing + * the sizes of each input array. + * @param arrays Variadic number of std::array objects to concatenate. Each array can have a + * different size but must contain the same type of elements. + * @return std::array A new std::array containing all elements from the input arrays + * concatenated in the order they were passed. + * + * Example usage: + * std::array a = {1, 2}; + * std::array b = {3, 4, 5}; + * auto result = concatenate(a, b); // result is std::array{1, 2, 3, 4, 5} + */ +template std::array concatenate(const std::array&... arrays) +{ + std::array result; + + std::size_t offset = 0; + auto copy_into = [&](const auto& array) { + std::copy(array.begin(), array.end(), result.begin() + offset); + offset += array.size(); + }; + + (copy_into(arrays), ...); + + return result; +} diff --git a/barretenberg/cpp/src/barretenberg/common/thread.cpp b/barretenberg/cpp/src/barretenberg/common/thread.cpp index 04a71f5746b..6d3007c8739 100644 --- a/barretenberg/cpp/src/barretenberg/common/thread.cpp +++ b/barretenberg/cpp/src/barretenberg/common/thread.cpp @@ -9,11 +9,11 @@ * The first implementation was `parallel_for_spawning`. You can read a description of each implementation in the * relevant source file, but parallel_for_spawning is the simplest approach imaginable. * Once WASM was working, I checked its performance in native code by running it against the polynomials benchmarks. - * In doing so, OMP outperformed it significantly (at least for FFT algorithims). This set me on a course to try + * In doing so, OMP outperformed it significantly (at least for FFT algorithms). This set me on a course to try * and understand why and to provide a suitable alternative. Ultimately I found solutions that compared to OMP with * "moody" and "atomic_pool" solutions, although they were not *quite* as fast as OMP. 
However interestingly, when it * comes to actual "real world" testing (with proof construction), rather than raw benchmarking, most of the solutions - * performaed about the same, with OMP *actually slightly worse*. So maybe all this effort was a bit redundant. + * performed about the same, with OMP *actually slightly worse*. So maybe all this effort was a bit redundant. * Remember to always do real world testing... * * My theory as to why OMP performs so much better in benchmarks is because it runs the tests in a very tight loop, @@ -85,4 +85,4 @@ void parallel_for(size_t num_iterations, const std::function& func // parallel_for_queued(num_iterations, func); #endif #endif -} \ No newline at end of file +} diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp index 286aed4c0cd..f8d874cc601 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element.hpp @@ -20,9 +20,9 @@ template class alignas(64) affine_el constexpr affine_element(const Fq& a, const Fq& b) noexcept; - constexpr affine_element(const affine_element& other) noexcept; + constexpr affine_element(const affine_element& other) noexcept = default; - constexpr affine_element(affine_element&& other) noexcept; + constexpr affine_element(affine_element&& other) noexcept = default; static constexpr affine_element one() noexcept { return { Params::one_x, Params::one_y }; }; @@ -52,9 +52,9 @@ template class alignas(64) affine_el typename CompileTimeEnabled = std::enable_if_t<(BaseField::modulus >> 255) == uint256_t(1), void>> static constexpr std::array from_compressed_unsafe(const uint256_t& compressed) noexcept; - constexpr affine_element& operator=(const affine_element& other) noexcept; + constexpr affine_element& operator=(const affine_element& other) noexcept = default; - constexpr affine_element& operator=(affine_element&& other) noexcept; 
+ constexpr affine_element& operator=(affine_element&& other) noexcept = default; constexpr affine_element operator+(const affine_element& other) const noexcept; diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp index a917dfebed7..21fa09e3f64 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/affine_element_impl.hpp @@ -10,18 +10,6 @@ constexpr affine_element::affine_element(const Fq& a, const Fq& b) no , y(b) {} -template -constexpr affine_element::affine_element(const affine_element& other) noexcept - : x(other.x) - , y(other.y) -{} - -template -constexpr affine_element::affine_element(affine_element&& other) noexcept - : x(other.x) - , y(other.y) -{} - template template constexpr affine_element affine_element::from_compressed(const uint256_t& compressed) noexcept @@ -80,25 +68,6 @@ constexpr affine_element affine_element::operator+( return affine_element(element(*this) + element(other)); } -template -constexpr affine_element& affine_element::operator=(const affine_element& other) noexcept -{ - if (this == &other) { - return *this; - } - x = other.x; - y = other.y; - return *this; -} - -template -constexpr affine_element& affine_element::operator=(affine_element&& other) noexcept -{ - x = other.x; - y = other.y; - return *this; -} - template template diff --git a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp index c6dbee10ead..846d11d64c0 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp +++ b/barretenberg/cpp/src/barretenberg/ecc/groups/wnaf.hpp @@ -239,12 +239,12 @@ inline uint64_t get_num_scalar_bits(const uint64_t* scalar) * * N.B. IN OUR STDLIB ALGORITHMS THE SKEW VALUE REPRESENTS AN ADDITION NOT A SUBTRACTION (i.e. 
we add +1 at the end of * the scalar mul algo we don't sub 1) (this is to eliminate situations which could produce the point at infinity as an - * output as our circuit logic cannot accomodate this edge case). + * output as our circuit logic cannot accommodate this edge case). * * Credits: Zac W. * * @param scalar Pointer to the 128-bit non-montgomery scalar that is supposed to be transformed into wnaf - * @param wnaf Pointer to output array that needs to accomodate enough 64-bit WNAF entries + * @param wnaf Pointer to output array that needs to accommodate enough 64-bit WNAF entries * @param skew_map Reference to output skew value, which if true shows that the point should be added once at the end of * computation * @param wnaf_round_counts Pointer to output array specifying the number of points participating in each round @@ -497,4 +497,4 @@ inline void fixed_wnaf_with_restricted_first_slice(uint64_t* scalar, // } } // namespace barretenberg::wnaf -// NOLINTEND(readability-implicit-bool-conversion) \ No newline at end of file +// NOLINTEND(readability-implicit-bool-conversion) diff --git a/barretenberg/cpp/src/barretenberg/ecc/pippenger.md b/barretenberg/cpp/src/barretenberg/ecc/pippenger.md index 18f8b85941f..a7463663216 100644 --- a/barretenberg/cpp/src/barretenberg/ecc/pippenger.md +++ b/barretenberg/cpp/src/barretenberg/ecc/pippenger.md @@ -26,7 +26,7 @@ So, for example, if the most significant 6 bits of a scalar are `011001` (25), w At the end of each round, we then 'concatenate' all of the buckets into a sum. Let's represent each bucket accumulator in an array `A[num_buckets]`. The concatenation phase will compute `A[0] + 2A[1] + 3A[2] + 4A[3] + 5A[4] + ... = Sum`. -Finally, we add each `Sum` point into an overall accumulator. For example, for a set of 254 bit scalars, if we evaluate the most 6 significant bits of each scalar and accumulate the resulting point into `Sum`, we actually need `(2^{248}).Sum` to accomodate for the bit shift. 
+Finally, we add each `Sum` point into an overall accumulator. For example, for a set of 254 bit scalars, if we evaluate the most 6 significant bits of each scalar and accumulate the resulting point into `Sum`, we actually need `(2^{248}).Sum` to accommodate for the bit shift. This final step is similar to the 'double and add' algorithm in a traditional scalar multiplication algorithm - we start at the most significant bit slice and work our way down to minimize the number of doublings. At each round, we multiply the overall accumulator by 2^{bit slice size} - by the time we iterate over every round, we will have performed the total required number of doublings for every 'bit slice' that we add into our accumulator. @@ -38,7 +38,7 @@ Total run time = 16 * 2^{18} + 16 * 2^{15} = 18 * 2^{18}. So the aggregate numbe ## The problem with Pippenger's algorithm -As it is currently implemented, each round will iterate over the points to be added, and add each point into one of the round's buckets. Whilst point access is sequential in memory, bucket access is very much not. In fact, if the points being multiplied are from a zero-knowlege proof, bucket access is literally uniformly randomly distributed and therefore presents the worst-case scenario. +As it is currently implemented, each round will iterate over the points to be added, and add each point into one of the round's buckets. Whilst point access is sequential in memory, bucket access is very much not. In fact, if the points being multiplied are from a zero-knowledge proof, bucket access is literally uniformly randomly distributed and therefore presents the worst-case scenario. This makes it difficult to parallelize. It is not possible to simply assign threads a section of points to iterate over, because of race conditions when two threads access the same bucket. 
@@ -69,8 +69,8 @@ Drawbacks of this approach:
 
 ## Summary
 
-By restructuring the memory heirarchy of our pippenger algorithm, we can create a parallelizable version of pippenger. This will significantly simplify the logic of our PLONK prover (instead of allocating threads for batches of multi-exponentations, we can multi-thread individual multi-exponentiations, simplifying our thread logic).
+By restructuring the memory hierarchy of our pippenger algorithm, we can create a parallelizable version of pippenger. This will significantly simplify the logic of our PLONK prover (instead of allocating threads for batches of multi-exponentiations, we can multi-thread individual multi-exponentiations, simplifying our thread logic).
 
 This will concretely reduce the number of pippenger rounds of our multi-exponentiations by approximately 1, giving a theoretical 15% speed-up. Some of this will be eaten by the run-time of the radix sort.
 
-Longer term, this parallelizable algorithm will be significantly easier to adapt for GPUs, using OpenCL.
\ No newline at end of file
+Longer term, this parallelizable algorithm will be significantly easier to adapt for GPUs, using OpenCL.
diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp
index d757c37c5ed..fff8ec8c0fd 100644
--- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp
+++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_prover.cpp
@@ -168,7 +168,8 @@ template <typename Flavor> void ECCVMProver_<Flavor>::execute_wire_commitment
 template <typename Flavor> void ECCVMProver_<Flavor>::execute_log_derivative_commitments_round()
 {
     // Compute and add beta to relation parameters
-    auto [beta, gamma] = transcript.get_challenges("beta", "gamma");
+    auto [beta, gamma] = challenges_to_field_elements(transcript.get_challenges("beta", "gamma"));
+    // TODO(#583)(@zac-williamson): fix Transcript to be able to generate more than 2 challenges per round! oof.
auto beta_sqr = beta * beta; relation_parameters.gamma = gamma; @@ -206,7 +207,7 @@ template void ECCVMProver_::execute_relation_check_ using Sumcheck = sumcheck::SumcheckProver; auto sumcheck = Sumcheck(key->circuit_size, transcript); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); sumcheck_output = sumcheck.prove(prover_polynomials, relation_parameters, alpha); } @@ -226,13 +227,17 @@ template void ECCVMProver_::execute_univariatizatio // Batch the unshifted polynomials and the to-be-shifted polynomials using ρ Polynomial batched_poly_unshifted(key->circuit_size); // batched unshifted polynomials size_t poly_idx = 0; // TODO(https://github.com/AztecProtocol/barretenberg/issues/391) zip + ASSERT(prover_polynomials.get_to_be_shifted().size() == prover_polynomials.get_shifted().size()); + for (auto& unshifted_poly : prover_polynomials.get_unshifted()) { + ASSERT(poly_idx < rhos.size()); batched_poly_unshifted.add_scaled(unshifted_poly, rhos[poly_idx]); ++poly_idx; } Polynomial batched_poly_to_be_shifted(key->circuit_size); // batched to-be-shifted polynomials for (auto& to_be_shifted_poly : prover_polynomials.get_to_be_shifted()) { + ASSERT(poly_idx < rhos.size()); batched_poly_to_be_shifted.add_scaled(to_be_shifted_poly, rhos[poly_idx]); ++poly_idx; }; diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp index 8af4c20cca0..c0964b3be39 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_transcript.test.cpp @@ -304,10 +304,11 @@ TYPED_TEST(ECCVMTranscriptTests, ChallengeGenerationTest) constexpr uint32_t random_val{ 17 }; // arbitrary transcript.send_to_verifier("random val", random_val); // test more challenges - auto [a, b, c] = transcript.get_challenges("a", "b", "c"); + auto [a, b, c] = 
challenges_to_field_elements(transcript.get_challenges("a", "b", "c")); + ASSERT_NE(a, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; + ASSERT_NE(b, 0) << "Challenge b is 0"; + ASSERT_NE(c, 0) << "Challenge c is 0"; } TYPED_TEST(ECCVMTranscriptTests, StructureTest) @@ -333,7 +334,7 @@ TYPED_TEST(ECCVMTranscriptTests, StructureTest) EXPECT_TRUE(verifier.verify_proof(prover.export_proof())); // we have changed nothing so proof is still valid typename Flavor::Commitment one_group_val = Flavor::Commitment::one(); - typename Flavor::FF rand_val = Flavor::FF::random_element(); + auto rand_val = Flavor::FF::random_element(); prover.transcript.transcript_Px_comm = one_group_val * rand_val; // choose random object to modify EXPECT_TRUE(verifier.verify_proof( prover.export_proof())); // we have not serialized it back to the proof so it should still be fine diff --git a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp index 01aea6e673a..3f61f75d571 100644 --- a/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/eccvm/eccvm_verifier.cpp @@ -142,7 +142,8 @@ template bool ECCVMVerifier_::verify_proof(const plonk commitments.lookup_read_counts_1 = receive_commitment(commitment_labels.lookup_read_counts_1); // Get challenge for sorted list batching and wire four memory records - auto [beta, gamma] = transcript.get_challenges("beta", "gamma"); + auto [beta, gamma] = challenges_to_field_elements(transcript.get_challenges("beta", "gamma")); + relation_parameters.gamma = gamma; auto beta_sqr = beta * beta; relation_parameters.beta = beta; @@ -158,7 +159,7 @@ template bool ECCVMVerifier_::verify_proof(const plonk // Execute Sumcheck Verifier auto sumcheck = SumcheckVerifier(circuit_size); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); auto 
[multivariate_challenge, purported_evaluations, sumcheck_verified] = sumcheck.verify(relation_parameters, alpha, transcript); diff --git a/barretenberg/cpp/src/barretenberg/flavor/ecc_vm.hpp b/barretenberg/cpp/src/barretenberg/flavor/ecc_vm.hpp index 0f1049f109f..a055891e8bd 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ecc_vm.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ecc_vm.hpp @@ -2,9 +2,11 @@ #include "barretenberg/commitment_schemes/commitment_key.hpp" #include "barretenberg/commitment_schemes/ipa/ipa.hpp" #include "barretenberg/commitment_schemes/kzg/kzg.hpp" +#include "barretenberg/common/std_array.hpp" #include "barretenberg/ecc/curves/bn254/bn254.hpp" #include "barretenberg/ecc/curves/grumpkin/grumpkin.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/relations/ecc_vm/ecc_lookup_relation.hpp" #include "barretenberg/relations/ecc_vm/ecc_msm_relation.hpp" @@ -85,715 +87,238 @@ template class ECCVMBa * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. 
*/ - template - class PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType lagrange_first; // column 0 - DataType lagrange_second; // column 1 - DataType lagrange_last; // column 2 - - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, &lagrange_first, &lagrange_second, &lagrange_last) + using DataType = DataType_; + DEFINE_FLAVOR_MEMBERS(DataType, + lagrange_first, // column 0 + lagrange_second, // column 1 + lagrange_last); // column 2 + + DataType get_selectors() { return get_all(); }; + RefVector get_sigma_polynomials() { return {}; }; + RefVector get_id_polynomials() { return {}; }; + RefVector get_table_polynomials() { return {}; }; + }; - std::vector get_selectors() override { return { lagrange_first, lagrange_second, lagrange_last }; }; - std::vector get_sigma_polynomials() override { return {}; }; - std::vector get_id_polynomials() override { return {}; }; - std::vector get_table_polynomials() { return {}; }; + /** + * @brief Container for all derived witness polynomials used/constructed by the prover. + * @details Shifts are not included here since they do not occupy their own memory. + */ + template struct DerivedWitnessEntities { + DEFINE_FLAVOR_MEMBERS(DataType, + z_perm, // column 0 + lookup_inverses); // column 1 }; /** * @brief Container for all witness polynomials used/constructed by the prover. * @details Shifts are not included here since they do not occupy their own memory. 
*/ - template - class WitnessEntities : public WitnessEntities_ { + template class WireEntities { public: - DataType transcript_add; // column 0 - DataType transcript_mul; // column 1 - DataType transcript_eq; // column 2 - DataType transcript_collision_check; // column 3 - DataType transcript_msm_transition; // column 4 - DataType transcript_pc; // column 5 - DataType transcript_msm_count; // column 6 - DataType transcript_Px; // column 7 - DataType transcript_Py; // column 8 - DataType transcript_z1; // column 9 - DataType transcript_z2; // column 10 - DataType transcript_z1zero; // column 11 - DataType transcript_z2zero; // column 12 - DataType transcript_op; // column 13 - DataType transcript_accumulator_x; // column 14 - DataType transcript_accumulator_y; // column 15 - DataType transcript_msm_x; // column 16 - DataType transcript_msm_y; // column 17 - DataType precompute_pc; // column 18 - DataType precompute_point_transition; // column 19 - DataType precompute_round; // column 20 - DataType precompute_scalar_sum; // column 21 - DataType precompute_s1hi; // column 22 - DataType precompute_s1lo; // column 23 - DataType precompute_s2hi; // column 24 - DataType precompute_s2lo; // column 25 - DataType precompute_s3hi; // column 26 - DataType precompute_s3lo; // column 27 - DataType precompute_s4hi; // column 28 - DataType precompute_s4lo; // column 29 - DataType precompute_skew; // column 30 - DataType precompute_dx; // column 31 - DataType precompute_dy; // column 32 - DataType precompute_tx; // column 33 - DataType precompute_ty; // column 34 - DataType msm_transition; // column 35 - DataType msm_add; // column 36 - DataType msm_double; // column 37 - DataType msm_skew; // column 38 - DataType msm_accumulator_x; // column 39 - DataType msm_accumulator_y; // column 40 - DataType msm_pc; // column 41 - DataType msm_size_of_msm; // column 42 - DataType msm_count; // column 43 - DataType msm_round; // column 44 - DataType msm_add1; // column 45 - DataType 
msm_add2; // column 46 - DataType msm_add3; // column 47 - DataType msm_add4; // column 48 - DataType msm_x1; // column 49 - DataType msm_y1; // column 50 - DataType msm_x2; // column 51 - DataType msm_y2; // column 52 - DataType msm_x3; // column 53 - DataType msm_y3; // column 54 - DataType msm_x4; // column 55 - DataType msm_y4; // column 56 - DataType msm_collision_x1; // column 57 - DataType msm_collision_x2; // column 58 - DataType msm_collision_x3; // column 59 - DataType msm_collision_x4; // column 60 - DataType msm_lambda1; // column 61 - DataType msm_lambda2; // column 62 - DataType msm_lambda3; // column 63 - DataType msm_lambda4; // column 64 - DataType msm_slice1; // column 65 - DataType msm_slice2; // column 66 - DataType msm_slice3; // column 67 - DataType msm_slice4; // column 68 - DataType transcript_accumulator_empty; // column 69 - DataType transcript_reset_accumulator; // column 70 - DataType precompute_select; // column 71 - DataType lookup_read_counts_0; // column 72 - DataType lookup_read_counts_1; // column 73 - DataType z_perm; // column 74 - DataType lookup_inverses; // column 75 + DEFINE_FLAVOR_MEMBERS(DataType, + transcript_add, // column 0 + transcript_mul, // column 1 + transcript_eq, // column 2 + transcript_collision_check, // column 3 + transcript_msm_transition, // column 4 + transcript_pc, // column 5 + transcript_msm_count, // column 6 + transcript_Px, // column 7 + transcript_Py, // column 8 + transcript_z1, // column 9 + transcript_z2, // column 10 + transcript_z1zero, // column 11 + transcript_z2zero, // column 12 + transcript_op, // column 13 + transcript_accumulator_x, // column 14 + transcript_accumulator_y, // column 15 + transcript_msm_x, // column 16 + transcript_msm_y, // column 17 + precompute_pc, // column 18 + precompute_point_transition, // column 19 + precompute_round, // column 20 + precompute_scalar_sum, // column 21 + precompute_s1hi, // column 22 + precompute_s1lo, // column 23 + precompute_s2hi, // column 24 + 
precompute_s2lo, // column 25 + precompute_s3hi, // column 26 + precompute_s3lo, // column 27 + precompute_s4hi, // column 28 + precompute_s4lo, // column 29 + precompute_skew, // column 30 + precompute_dx, // column 31 + precompute_dy, // column 32 + precompute_tx, // column 33 + precompute_ty, // column 34 + msm_transition, // column 35 + msm_add, // column 36 + msm_double, // column 37 + msm_skew, // column 38 + msm_accumulator_x, // column 39 + msm_accumulator_y, // column 40 + msm_pc, // column 41 + msm_size_of_msm, // column 42 + msm_count, // column 43 + msm_round, // column 44 + msm_add1, // column 45 + msm_add2, // column 46 + msm_add3, // column 47 + msm_add4, // column 48 + msm_x1, // column 49 + msm_y1, // column 50 + msm_x2, // column 51 + msm_y2, // column 52 + msm_x3, // column 53 + msm_y3, // column 54 + msm_x4, // column 55 + msm_y4, // column 56 + msm_collision_x1, // column 57 + msm_collision_x2, // column 58 + msm_collision_x3, // column 59 + msm_collision_x4, // column 60 + msm_lambda1, // column 61 + msm_lambda2, // column 62 + msm_lambda3, // column 63 + msm_lambda4, // column 64 + msm_slice1, // column 65 + msm_slice2, // column 66 + msm_slice3, // column 67 + msm_slice4, // column 68 + transcript_accumulator_empty, // column 69 + transcript_reset_accumulator, // column 70 + precompute_select, // column 71 + lookup_read_counts_0, // column 72 + lookup_read_counts_1); // column 73 + }; - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &transcript_add, - &transcript_mul, - &transcript_eq, - &transcript_collision_check, - &transcript_msm_transition, - &transcript_pc, - &transcript_msm_count, - &transcript_Px, - &transcript_Py, - &transcript_z1, - &transcript_z2, - &transcript_z1zero, - &transcript_z2zero, - &transcript_op, - &transcript_accumulator_x, - &transcript_accumulator_y, - &transcript_msm_x, - &transcript_msm_y, - &precompute_pc, - &precompute_point_transition, - &precompute_round, - &precompute_scalar_sum, - &precompute_s1hi, - 
&precompute_s1lo, - &precompute_s2hi, - &precompute_s2lo, - &precompute_s3hi, - &precompute_s3lo, - &precompute_s4hi, - &precompute_s4lo, - &precompute_skew, - &precompute_dx, - &precompute_dy, - &precompute_tx, - &precompute_ty, - &msm_transition, - &msm_add, - &msm_double, - &msm_skew, - &msm_accumulator_x, - &msm_accumulator_y, - &msm_pc, - &msm_size_of_msm, - &msm_count, - &msm_round, - &msm_add1, - &msm_add2, - &msm_add3, - &msm_add4, - &msm_x1, - &msm_y1, - &msm_x2, - &msm_y2, - &msm_x3, - &msm_y3, - &msm_x4, - &msm_y4, - &msm_collision_x1, - &msm_collision_x2, - &msm_collision_x3, - &msm_collision_x4, - &msm_lambda1, - &msm_lambda2, - &msm_lambda3, - &msm_lambda4, - &msm_slice1, - &msm_slice2, - &msm_slice3, - &msm_slice4, - &transcript_accumulator_empty, - &transcript_reset_accumulator, - &precompute_select, - &lookup_read_counts_0, - &lookup_read_counts_1, - &z_perm, - &lookup_inverses) - std::vector get_wires() override - { - return { - transcript_add, - transcript_mul, - transcript_eq, - transcript_collision_check, - transcript_msm_transition, - transcript_pc, - transcript_msm_count, - transcript_Px, - transcript_Py, - transcript_z1, - transcript_z2, - transcript_z1zero, - transcript_z2zero, - transcript_op, - transcript_accumulator_x, - transcript_accumulator_y, - transcript_msm_x, - transcript_msm_y, - precompute_pc, - precompute_point_transition, - precompute_round, - precompute_scalar_sum, - precompute_s1hi, - precompute_s1lo, - precompute_s2hi, - precompute_s2lo, - precompute_s3hi, - precompute_s3lo, - precompute_s4hi, - precompute_s4lo, - precompute_skew, - precompute_dx, - precompute_dy, - precompute_tx, - precompute_ty, - msm_transition, - msm_add, - msm_double, - msm_skew, - msm_accumulator_x, - msm_accumulator_y, - msm_pc, - msm_size_of_msm, - msm_count, - msm_round, - msm_add1, - msm_add2, - msm_add3, - msm_add4, - msm_x1, - msm_y1, - msm_x2, - msm_y2, - msm_x3, - msm_y3, - msm_x4, - msm_y4, - msm_collision_x1, - msm_collision_x2, - 
msm_collision_x3, - msm_collision_x4, - msm_lambda1, - msm_lambda2, - msm_lambda3, - msm_lambda4, - msm_slice1, - msm_slice2, - msm_slice3, - msm_slice4, - transcript_accumulator_empty, - transcript_reset_accumulator, - precompute_select, - lookup_read_counts_0, - lookup_read_counts_1, - }; - }; + /** + * @brief Container for all witness polynomials used/constructed by the prover. + * @details Shifts are not included here since they do not occupy their own memory. + */ + template + class WitnessEntities : public WireEntities, public DerivedWitnessEntities { + public: + DEFINE_COMPOUND_GET_ALL(WireEntities::get_all(), DerivedWitnessEntities::get_all()) + DEFINE_COMPOUND_POINTER_VIEW(WireEntities::pointer_view(), + DerivedWitnessEntities::pointer_view()) + RefVector get_wires() { return WireEntities::get_all(); }; // The sorted concatenations of table and witness data needed for plookup. - std::vector get_sorted_polynomials() { return {}; }; + RefVector get_sorted_polynomials() { return {}; }; }; + /** + * @brief Represents polynomials shifted by 1 or their evaluations, defined relative to WitnessEntities. 
+ */ + template class ShiftedEntities { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + transcript_mul_shift, // column 0 + transcript_msm_count_shift, // column 1 + transcript_accumulator_x_shift, // column 2 + transcript_accumulator_y_shift, // column 3 + precompute_scalar_sum_shift, // column 4 + precompute_s1hi_shift, // column 5 + precompute_dx_shift, // column 6 + precompute_dy_shift, // column 7 + precompute_tx_shift, // column 8 + precompute_ty_shift, // column 9 + msm_transition_shift, // column 10 + msm_add_shift, // column 11 + msm_double_shift, // column 12 + msm_skew_shift, // column 13 + msm_accumulator_x_shift, // column 14 + msm_accumulator_y_shift, // column 15 + msm_count_shift, // column 16 + msm_round_shift, // column 17 + msm_add1_shift, // column 18 + msm_pc_shift, // column 19 + precompute_pc_shift, // column 20 + transcript_pc_shift, // column 21 + precompute_round_shift, // column 22 + transcript_accumulator_empty_shift, // column 23 + precompute_select_shift, // column 24 + z_perm_shift); // column 25 + }; /** * @brief A base class labelling all entities (for instance, all of the polynomials used by the prover during * sumcheck) in this Honk variant along with particular subsets of interest * @details Used to build containers for: the prover's polynomial during sumcheck; the sumcheck's folded * polynomials; the univariates consturcted during during sumcheck; the evaluations produced by sumcheck. * - * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "ShiftedEntities". It could be - * implemented as such, but we have this now. + * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + ShiftedEntities. + * TODO(https://github.com/AztecProtocol/barretenberg/issues/788): Move to normal composition once comfortable + * updating usage sites. 
*/ - template - class AllEntities : public AllEntities_ { + template + class AllEntities : public PrecomputedEntities, + public WitnessEntities, + public ShiftedEntities { public: - DataType lagrange_first; // column 0 - DataType lagrange_second; // column 1 - DataType lagrange_last; // column 2 - DataType transcript_add; // column 3 - DataType transcript_mul; // column 4 - DataType transcript_eq; // column 5 - DataType transcript_collision_check; // column 6 - DataType transcript_msm_transition; // column 7 - DataType transcript_pc; // column 8 - DataType transcript_msm_count; // column 9 - DataType transcript_Px; // column 10 - DataType transcript_Py; // column 11 - DataType transcript_z1; // column 12 - DataType transcript_z2; // column 13 - DataType transcript_z1zero; // column 14 - DataType transcript_z2zero; // column 15 - DataType transcript_op; // column 16 - DataType transcript_accumulator_x; // column 17 - DataType transcript_accumulator_y; // column 18 - DataType transcript_msm_x; // column 19 - DataType transcript_msm_y; // column 20 - DataType precompute_pc; // column 21 - DataType precompute_point_transition; // column 22 - DataType precompute_round; // column 23 - DataType precompute_scalar_sum; // column 24 - DataType precompute_s1hi; // column 25 - DataType precompute_s1lo; // column 26 - DataType precompute_s2hi; // column 27 - DataType precompute_s2lo; // column 28 - DataType precompute_s3hi; // column 29 - DataType precompute_s3lo; // column 30 - DataType precompute_s4hi; // column 31 - DataType precompute_s4lo; // column 32 - DataType precompute_skew; // column 33 - DataType precompute_dx; // column 34 - DataType precompute_dy; // column 35 - DataType precompute_tx; // column 36 - DataType precompute_ty; // column 37 - DataType msm_transition; // column 38 - DataType msm_add; // column 39 - DataType msm_double; // column 40 - DataType msm_skew; // column 41 - DataType msm_accumulator_x; // column 42 - DataType msm_accumulator_y; // column 43 - 
DataType msm_pc; // column 44 - DataType msm_size_of_msm; // column 45 - DataType msm_count; // column 46 - DataType msm_round; // column 47 - DataType msm_add1; // column 48 - DataType msm_add2; // column 49 - DataType msm_add3; // column 50 - DataType msm_add4; // column 51 - DataType msm_x1; // column 52 - DataType msm_y1; // column 53 - DataType msm_x2; // column 54 - DataType msm_y2; // column 55 - DataType msm_x3; // column 56 - DataType msm_y3; // column 57 - DataType msm_x4; // column 58 - DataType msm_y4; // column 59 - DataType msm_collision_x1; // column 60 - DataType msm_collision_x2; // column 61 - DataType msm_collision_x3; // column 62 - DataType msm_collision_x4; // column 63 - DataType msm_lambda1; // column 64 - DataType msm_lambda2; // column 65 - DataType msm_lambda3; // column 66 - DataType msm_lambda4; // column 67 - DataType msm_slice1; // column 68 - DataType msm_slice2; // column 69 - DataType msm_slice3; // column 70 - DataType msm_slice4; // column 71 - DataType transcript_accumulator_empty; // column 72 - DataType transcript_reset_accumulator; // column 73 - DataType precompute_select; // column 74 - DataType lookup_read_counts_0; // column 75 - DataType lookup_read_counts_1; // column 76 - DataType z_perm; // column 77 - DataType lookup_inverses; // column 78 - DataType transcript_mul_shift; // column 79 - DataType transcript_msm_count_shift; // column 80 - DataType transcript_accumulator_x_shift; // column 81 - DataType transcript_accumulator_y_shift; // column 82 - DataType precompute_scalar_sum_shift; // column 83 - DataType precompute_s1hi_shift; // column 84 - DataType precompute_dx_shift; // column 85 - DataType precompute_dy_shift; // column 86 - DataType precompute_tx_shift; // column 87 - DataType precompute_ty_shift; // column 88 - DataType msm_transition_shift; // column 89 - DataType msm_add_shift; // column 90 - DataType msm_double_shift; // column 91 - DataType msm_skew_shift; // column 92 - DataType 
msm_accumulator_x_shift; // column 93 - DataType msm_accumulator_y_shift; // column 94 - DataType msm_count_shift; // column 95 - DataType msm_round_shift; // column 96 - DataType msm_add1_shift; // column 97 - DataType msm_pc_shift; // column 98 - DataType precompute_pc_shift; // column 99 - DataType transcript_pc_shift; // column 100 - DataType precompute_round_shift; // column 101 - DataType transcript_accumulator_empty_shift; // column 102 - DataType precompute_select_shift; // column 103 - DataType z_perm_shift; // column 104 - - template [[nodiscard]] const DataType& lookup_read_counts() const - { - if constexpr (index == 0) { - return lookup_read_counts_0; - } else { - static_assert(index == 1); - return lookup_read_counts_1; - } - } - - // defines a method pointer_view that returns the following, with const and non-const variants - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &lagrange_first, - &lagrange_second, - &lagrange_last, - &transcript_add, - &transcript_mul, - &transcript_eq, - &transcript_collision_check, - &transcript_msm_transition, - &transcript_pc, - &transcript_msm_count, - &transcript_Px, - &transcript_Py, - &transcript_z1, - &transcript_z2, - &transcript_z1zero, - &transcript_z2zero, - &transcript_op, - &transcript_accumulator_x, - &transcript_accumulator_y, - &transcript_msm_x, - &transcript_msm_y, - &precompute_pc, - &precompute_point_transition, - &precompute_round, - &precompute_scalar_sum, - &precompute_s1hi, - &precompute_s1lo, - &precompute_s2hi, - &precompute_s2lo, - &precompute_s3hi, - &precompute_s3lo, - &precompute_s4hi, - &precompute_s4lo, - &precompute_skew, - &precompute_dx, - &precompute_dy, - &precompute_tx, - &precompute_ty, - &msm_transition, - &msm_add, - &msm_double, - &msm_skew, - &msm_accumulator_x, - &msm_accumulator_y, - &msm_pc, - &msm_size_of_msm, - &msm_count, - &msm_round, - &msm_add1, - &msm_add2, - &msm_add3, - &msm_add4, - &msm_x1, - &msm_y1, - &msm_x2, - &msm_y2, - &msm_x3, - &msm_y3, - &msm_x4, - &msm_y4, - 
&msm_collision_x1, - &msm_collision_x2, - &msm_collision_x3, - &msm_collision_x4, - &msm_lambda1, - &msm_lambda2, - &msm_lambda3, - &msm_lambda4, - &msm_slice1, - &msm_slice2, - &msm_slice3, - &msm_slice4, - &transcript_accumulator_empty, - &transcript_reset_accumulator, - &precompute_select, - &lookup_read_counts_0, - &lookup_read_counts_1, - &z_perm, - &lookup_inverses, - &transcript_mul_shift, - &transcript_msm_count_shift, - &transcript_accumulator_x_shift, - &transcript_accumulator_y_shift, - &precompute_scalar_sum_shift, - &precompute_s1hi_shift, - &precompute_dx_shift, - &precompute_dy_shift, - &precompute_tx_shift, - &precompute_ty_shift, - &msm_transition_shift, - &msm_add_shift, - &msm_double_shift, - &msm_skew_shift, - &msm_accumulator_x_shift, - &msm_accumulator_y_shift, - &msm_count_shift, - &msm_round_shift, - &msm_add1_shift, - &msm_pc_shift, - &precompute_pc_shift, - &transcript_pc_shift, - &precompute_round_shift, - &transcript_accumulator_empty_shift, - &precompute_select_shift, - &z_perm_shift) - std::vector get_wires() override - { - return { - transcript_add, - transcript_mul, - transcript_eq, - transcript_collision_check, - transcript_msm_transition, - transcript_pc, - transcript_msm_count, - transcript_Px, - transcript_Py, - transcript_z1, - transcript_z2, - transcript_z1zero, - transcript_z2zero, - transcript_op, - transcript_accumulator_x, - transcript_accumulator_y, - transcript_msm_x, - transcript_msm_y, - precompute_pc, - precompute_point_transition, - precompute_round, - precompute_scalar_sum, - precompute_s1hi, - precompute_s1lo, - precompute_s2hi, - precompute_s2lo, - precompute_s3hi, - precompute_s3lo, - precompute_s4hi, - precompute_s4lo, - precompute_skew, - precompute_dx, - precompute_dy, - precompute_tx, - precompute_ty, - msm_transition, - msm_add, - msm_double, - msm_skew, - msm_accumulator_x, - msm_accumulator_y, - msm_pc, - msm_size_of_msm, - msm_count, - msm_round, - msm_add1, - msm_add2, - msm_add3, - msm_add4, - msm_x1, - 
msm_y1, - msm_x2, - msm_y2, - msm_x3, - msm_y3, - msm_x4, - msm_y4, - msm_collision_x1, - msm_collision_x2, - msm_collision_x3, - msm_collision_x4, - msm_lambda1, - msm_lambda2, - msm_lambda3, - msm_lambda4, - msm_slice1, - msm_slice2, - msm_slice3, - msm_slice4, - transcript_accumulator_empty, - transcript_reset_accumulator, - precompute_select, - lookup_read_counts_0, - lookup_read_counts_1, - }; - }; + // Initialize members + AllEntities() + : PrecomputedEntities{} + , WitnessEntities{} + , ShiftedEntities{} + {} + // get_wires is inherited + + DEFINE_COMPOUND_GET_ALL(PrecomputedEntities::get_all(), + WitnessEntities::get_all(), + ShiftedEntities::get_all()) + DEFINE_COMPOUND_POINTER_VIEW(PrecomputedEntities::pointer_view(), + WitnessEntities::pointer_view(), + ShiftedEntities::pointer_view()) // Gemini-specific getters. - std::vector get_unshifted() override + RefVector get_unshifted() { - return { - lagrange_first, - lagrange_second, - lagrange_last, - transcript_add, - transcript_eq, - transcript_collision_check, - transcript_msm_transition, - transcript_Px, - transcript_Py, - transcript_z1, - transcript_z2, - transcript_z1zero, - transcript_z2zero, - transcript_op, - transcript_msm_x, - transcript_msm_y, - precompute_point_transition, - precompute_s1hi, - precompute_s2hi, - precompute_s2lo, - precompute_s3hi, - precompute_s3lo, - precompute_s4hi, - precompute_s4lo, - precompute_skew, - msm_size_of_msm, - msm_add2, - msm_add3, - msm_add4, - msm_x1, - msm_y1, - msm_x2, - msm_y2, - msm_x3, - msm_y3, - msm_x4, - msm_y4, - msm_collision_x1, - msm_collision_x2, - msm_collision_x3, - msm_collision_x4, - msm_lambda1, - msm_lambda2, - msm_lambda3, - msm_lambda4, - msm_slice1, - msm_slice2, - msm_slice3, - msm_slice4, - transcript_reset_accumulator, - lookup_read_counts_0, - lookup_read_counts_1, - lookup_inverses, - }; + return concatenate(PrecomputedEntities::get_all(), WitnessEntities::get_all()); }; - std::vector get_to_be_shifted() override - { - return { - 
transcript_mul, - transcript_msm_count, - transcript_accumulator_x, - transcript_accumulator_y, - precompute_scalar_sum, - precompute_s1hi, - precompute_dx, - precompute_dy, - precompute_tx, - precompute_ty, - msm_transition, - msm_add, - msm_double, - msm_skew, - msm_accumulator_x, - msm_accumulator_y, - msm_count, - msm_round, - msm_add1, - msm_pc, - precompute_pc, - transcript_pc, - precompute_round, - transcript_accumulator_empty, - precompute_select, - z_perm, - }; - }; - std::vector get_shifted() override + RefVector get_to_be_shifted() { - return { - transcript_mul_shift, - transcript_msm_count_shift, - transcript_accumulator_x_shift, - transcript_accumulator_y_shift, - precompute_scalar_sum_shift, - precompute_s1hi_shift, - precompute_dx_shift, - precompute_dy_shift, - precompute_tx_shift, - precompute_ty_shift, - msm_transition_shift, - msm_add_shift, - msm_double_shift, - msm_skew_shift, - msm_accumulator_x_shift, - msm_accumulator_y_shift, - msm_count_shift, - msm_round_shift, - msm_add1_shift, - msm_pc_shift, - precompute_pc_shift, - transcript_pc_shift, - precompute_round_shift, - transcript_accumulator_empty_shift, - precompute_select_shift, - z_perm_shift, - }; - }; + return { this->transcript_mul, + this->transcript_msm_count, + this->transcript_accumulator_x, + this->transcript_accumulator_y, + this->precompute_scalar_sum, + this->precompute_s1hi, + this->precompute_dx, + this->precompute_dy, + this->precompute_tx, + this->precompute_ty, + this->msm_transition, + this->msm_add, + this->msm_double, + this->msm_skew, + this->msm_accumulator_x, + this->msm_accumulator_y, + this->msm_count, + this->msm_round, + this->msm_add1, + this->msm_pc, + this->precompute_pc, + this->transcript_pc, + this->precompute_round, + this->transcript_accumulator_empty, + this->precompute_select, + this->z_perm }; + } + RefVector get_shifted() { return ShiftedEntities::get_all(); }; }; public: /** * @brief The proving key is responsible for storing the polynomials used by 
the prover. - * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve inherit - * from ProvingKey. + * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve + * inherit from ProvingKey. */ - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public ProvingKey_, WitnessEntities> { public: // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; // The plookup wires that store plookup read data. @@ -804,25 +329,25 @@ template class ECCVMBa * @brief The verification key is responsible for storing the the commitments to the precomputed (non-witnessk) * polynomials used by the verifier. * - * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to resolve - * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our - * circuits. + * @note Note the discrepancy with what sort of data is stored here vs in the proving key. We may want to + * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for + * portability of our circuits. */ - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; /** * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. */ - using FoldedPolynomials = AllEntities, PolynomialHandle>; + using FoldedPolynomials = AllEntities>; /** - * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated - * at one point. + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials + * evaluated at one point. 
*/ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; AllValues(std::array _data_in) { this->_data = _data_in; } }; @@ -830,15 +355,15 @@ template class ECCVMBa /** * @brief An owning container of polynomials. * @warning When this was introduced it broke some of our design principles. - * - Execution trace builders don't handle "polynomials" because the interpretation of the execution trace columns - * as polynomials is a detail of the proving system, and trace builders are (sometimes in practice, always in - * principle) reusable for different proving protocols (e.g., Plonk and Honk). + * - Execution trace builders don't handle "polynomials" because the interpretation of the execution trace + * columns as polynomials is a detail of the proving system, and trace builders are (sometimes in practice, + * always in principle) reusable for different proving protocols (e.g., Plonk and Honk). * - Polynomial storage is handled by key classes. Polynomials aren't moved, but are accessed elsewhere by * std::spans. * * We will consider revising this data model: TODO(https://github.com/AztecProtocol/barretenberg/issues/743) */ - class AllPolynomials : public AllEntities { + class AllPolynomials : public AllEntities { public: [[nodiscard]] size_t get_polynomial_size() const { return this->lagrange_first.size(); } AllValues get_row(const size_t row_idx) const @@ -854,12 +379,12 @@ template class ECCVMBa * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. */ - using RowPolynomials = AllEntities; + using RowPolynomials = AllEntities; /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. 
*/ - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; @@ -875,8 +400,7 @@ template class ECCVMBa /** * @brief A container for univariates used during sumcheck. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. @@ -886,7 +410,7 @@ template class ECCVMBa /** * @brief A container for the prover polynomials handles; only stores spans. */ - class ProverPolynomials : public AllEntities { + class ProverPolynomials : public AllEntities { public: /** * @brief Returns the evaluations of all prover polynomials at one point on the boolean hypercube, which @@ -904,17 +428,17 @@ template class ECCVMBa /** * @brief A container for commitment labels. - * @note It's debatable whether this should inherit from AllEntities. since most entries are not strictly needed. It - * has, however, been useful during debugging to have these labels available. + * @note It's debatable whether this should inherit from AllEntities. since most entries are not strictly + * needed. It has, however, been useful during debugging to have these labels available. 
* */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: CommitmentLabels() - : AllEntities() + : AllEntities() { Base::transcript_add = "TRANSCRIPT_ADD"; Base::transcript_mul = "TRANSCRIPT_MUL"; @@ -999,13 +523,13 @@ template class ECCVMBa }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: VerifierCommitments(const std::shared_ptr& verification_key, - [[maybe_unused]] const BaseTranscript& transcript) + [[maybe_unused]] const BaseTranscript& transcript) { static_cast(transcript); Base::lagrange_first = verification_key->lagrange_first; @@ -1018,7 +542,7 @@ template class ECCVMBa * @brief Derived class that defines proof structure for ECCVM proofs, as well as supporting functions. * */ - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: uint32_t circuit_size; Commitment transcript_add_comm; @@ -1112,323 +636,312 @@ template class ECCVMBa Transcript() = default; Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} - void deserialize_full_transcript() override + void deserialize_full_transcript() { // take current proof and put them into the struct size_t num_bytes_read = 0; - circuit_size = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); + circuit_size = + BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); size_t log_n = numeric::get_msb(circuit_size); - transcript_add_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_mul_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_eq_comm = BaseTranscript::template 
deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_collision_check_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_msm_transition_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_pc_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_msm_count_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_Px_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_Py_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_z1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_z2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_z1zero_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_z2zero_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_op_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_accumulator_x_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_accumulator_y_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_msm_x_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_msm_y_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_pc_comm = BaseTranscript::template 
deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_point_transition_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_round_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_scalar_sum_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s1hi_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s1lo_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s2hi_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s2lo_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s3hi_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s3lo_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s4hi_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_s4lo_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_skew_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_dx_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_dy_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_tx_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_ty_comm = BaseTranscript::template deserialize_from_buffer( - 
BaseTranscript::proof_data, num_bytes_read); - msm_transition_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_add_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_double_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_skew_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_accumulator_x_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_accumulator_y_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_pc_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_size_of_msm_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_count_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_round_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_add1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_add2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_add3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_add4_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_x1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_y1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_x2_comm = BaseTranscript::template deserialize_from_buffer( - 
BaseTranscript::proof_data, num_bytes_read); - msm_y2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_x3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_y3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_x4_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_y4_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_collision_x1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_collision_x2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_collision_x3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_collision_x4_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_lambda1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_lambda2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_lambda3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_lambda4_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_slice1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_slice2_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_slice3_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - msm_slice4_comm = BaseTranscript::template deserialize_from_buffer( - 
BaseTranscript::proof_data, num_bytes_read); - transcript_accumulator_empty_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - transcript_reset_accumulator_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - precompute_select_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - lookup_read_counts_0_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - lookup_read_counts_1_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - lookup_inverses_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); - z_perm_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); + transcript_add_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_mul_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_eq_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_collision_check_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_msm_transition_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_pc_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_msm_count_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_Px_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_Py_comm = BaseTranscript::template deserialize_from_buffer( + 
BaseTranscript::proof_data, num_bytes_read); + transcript_z1_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_z2_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_z1zero_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_z2zero_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_op_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_accumulator_x_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_accumulator_y_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_msm_x_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_msm_y_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_pc_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_point_transition_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_round_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_scalar_sum_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s1hi_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s1lo_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s2hi_comm = BaseTranscript::template deserialize_from_buffer( + 
BaseTranscript::proof_data, num_bytes_read); + precompute_s2lo_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s3hi_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s3lo_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s4hi_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_s4lo_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_skew_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_dx_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_dy_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_tx_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_ty_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_transition_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_add_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_double_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_skew_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_accumulator_x_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_accumulator_y_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_pc_comm = 
BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_size_of_msm_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_count_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_round_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_add1_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_add2_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_add3_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_add4_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_x1_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_y1_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_x2_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_y2_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_x3_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_y3_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_x4_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_y4_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_collision_x1_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_collision_x2_comm = BaseTranscript::template deserialize_from_buffer( 
+ BaseTranscript::proof_data, num_bytes_read); + msm_collision_x3_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_collision_x4_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + msm_lambda1_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_lambda2_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_lambda3_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_lambda4_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_slice1_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_slice2_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_slice3_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + msm_slice4_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); + transcript_accumulator_empty_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + transcript_reset_accumulator_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + precompute_select_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + lookup_read_counts_0_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + lookup_read_counts_1_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + lookup_inverses_comm = BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read); + 
z_perm_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); for (size_t i = 0; i < log_n; ++i) { - sumcheck_univariates.emplace_back(BaseTranscript::template deserialize_from_buffer< + sumcheck_univariates.emplace_back(BaseTranscript::template deserialize_from_buffer< barretenberg::Univariate>( - BaseTranscript::proof_data, num_bytes_read)); + BaseTranscript::proof_data, num_bytes_read)); } - sumcheck_evaluations = - BaseTranscript::template deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read); + sumcheck_evaluations = BaseTranscript::template deserialize_from_buffer>( + BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n - 1; ++i) { - gemini_univariate_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read)); + gemini_univariate_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read)); } for (size_t i = 0; i < log_n; ++i) { - gemini_a_evals.emplace_back(BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read)); + gemini_a_evals.emplace_back( + BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); } - shplonk_q_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); + shplonk_q_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); if (std::is_same>::value) { - kzg_w_comm = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); + kzg_w_comm = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); } else if (std::is_same>::value) { - ipa_poly_degree = BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read); + ipa_poly_degree = BaseTranscript::template 
deserialize_from_buffer(BaseTranscript::proof_data, + num_bytes_read); auto log_poly_degree = static_cast(numeric::get_msb(ipa_poly_degree)); for (size_t i = 0; i < log_poly_degree; ++i) { - ipa_l_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read)); - ipa_r_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( - BaseTranscript::proof_data, num_bytes_read)); + ipa_l_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read)); + ipa_r_comms.emplace_back(BaseTranscript::template deserialize_from_buffer( + BaseTranscript::proof_data, num_bytes_read)); } - ipa_a_0_eval = BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, - num_bytes_read); + ipa_a_0_eval = + BaseTranscript::template deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); } else { throw_or_abort("Unsupported PCS"); } } - void serialize_full_transcript() override + void serialize_full_transcript() { - size_t old_proof_length = BaseTranscript::proof_data.size(); - BaseTranscript::proof_data.clear(); + size_t old_proof_length = BaseTranscript::proof_data.size(); + BaseTranscript::proof_data.clear(); size_t log_n = numeric::get_msb(circuit_size); - BaseTranscript::template serialize_to_buffer(circuit_size, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_add_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_mul_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_eq_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_collision_check_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_msm_transition_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_pc_comm, BaseTranscript::proof_data); - 
BaseTranscript::template serialize_to_buffer(transcript_msm_count_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_Px_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_Py_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_z1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_z2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_z1zero_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_z2zero_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_op_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_accumulator_x_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_accumulator_y_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_msm_x_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_msm_y_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_pc_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_point_transition_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_round_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_scalar_sum_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s1hi_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s1lo_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s2hi_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s2lo_comm, 
BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s3hi_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s3lo_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s4hi_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_s4lo_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_skew_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_dx_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_dy_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_tx_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_ty_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_transition_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_add_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_double_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_skew_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_accumulator_x_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_accumulator_y_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_pc_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_size_of_msm_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_count_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_round_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_add1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_add2_comm, 
BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_add3_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_add4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_x1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_y1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_x2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_y2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_x3_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_y3_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_x4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_y4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_collision_x1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_collision_x2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_collision_x3_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_collision_x4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_lambda1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_lambda2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_lambda3_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_lambda4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_slice1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_slice2_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(msm_slice3_comm, BaseTranscript::proof_data); - 
BaseTranscript::template serialize_to_buffer(msm_slice4_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_accumulator_empty_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(transcript_reset_accumulator_comm, - BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(precompute_select_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(lookup_read_counts_0_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(lookup_read_counts_1_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(lookup_inverses_comm, BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(circuit_size, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_add_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_mul_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_eq_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_collision_check_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_msm_transition_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_pc_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_msm_count_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_Px_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_Py_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_z1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_z2_comm, BaseTranscript::proof_data); + 
BaseTranscript::template serialize_to_buffer(transcript_z1zero_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_z2zero_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_op_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_accumulator_x_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_accumulator_y_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_msm_x_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_msm_y_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_pc_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_point_transition_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_round_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_scalar_sum_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s1hi_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s1lo_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s2hi_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s2lo_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s3hi_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s3lo_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s4hi_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_s4lo_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_skew_comm, BaseTranscript::proof_data); + 
BaseTranscript::template serialize_to_buffer(precompute_dx_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_dy_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_tx_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_ty_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_transition_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_add_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_double_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_skew_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_accumulator_x_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_accumulator_y_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_pc_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_size_of_msm_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_count_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_round_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_add1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_add2_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_add3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_add4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_x1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_y1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_x2_comm, BaseTranscript::proof_data); + BaseTranscript::template 
serialize_to_buffer(msm_y2_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_x3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_y3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_x4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_y4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_collision_x1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_collision_x2_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_collision_x3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_collision_x4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_lambda1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_lambda2_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_lambda3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_lambda4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_slice1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_slice2_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_slice3_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(msm_slice4_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_accumulator_empty_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(transcript_reset_accumulator_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(precompute_select_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(lookup_read_counts_0_comm, BaseTranscript::proof_data); + 
BaseTranscript::template serialize_to_buffer(lookup_read_counts_1_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(lookup_inverses_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - BaseTranscript::template serialize_to_buffer(sumcheck_univariates[i], - BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); } - BaseTranscript::template serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); for (size_t i = 0; i < log_n - 1; ++i) { - BaseTranscript::template serialize_to_buffer(gemini_univariate_comms[i], - BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(gemini_univariate_comms[i], BaseTranscript::proof_data); } for (size_t i = 0; i < log_n; ++i) { - BaseTranscript::template serialize_to_buffer(gemini_a_evals[i], BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(gemini_a_evals[i], BaseTranscript::proof_data); } - BaseTranscript::template serialize_to_buffer(shplonk_q_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(shplonk_q_comm, BaseTranscript::proof_data); if (std::is_same>::value) { - BaseTranscript::template serialize_to_buffer(kzg_w_comm, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(kzg_w_comm, BaseTranscript::proof_data); } else if (std::is_same>::value) { - BaseTranscript::template serialize_to_buffer(ipa_poly_degree, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(ipa_poly_degree, BaseTranscript::proof_data); auto log_poly_degree = static_cast(numeric::get_msb(ipa_poly_degree)); for (size_t i = 0; i < log_poly_degree; ++i) { - BaseTranscript::template serialize_to_buffer(ipa_l_comms[i], 
BaseTranscript::proof_data); - BaseTranscript::template serialize_to_buffer(ipa_r_comms[i], BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(ipa_l_comms[i], BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(ipa_r_comms[i], BaseTranscript::proof_data); } - BaseTranscript::template serialize_to_buffer(ipa_a_0_eval, BaseTranscript::proof_data); + BaseTranscript::template serialize_to_buffer(ipa_a_0_eval, BaseTranscript::proof_data); } - ASSERT(BaseTranscript::proof_data.size() == old_proof_length); + ASSERT(BaseTranscript::proof_data.size() == old_proof_length); } }; }; diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp index 4f9f1d515b9..4d29ca7ffb2 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.hpp @@ -64,6 +64,7 @@ */ #pragma once +#include "barretenberg/common/std_array.hpp" #include "barretenberg/common/zip_view.hpp" #include "barretenberg/polynomials/barycentric.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" @@ -75,63 +76,16 @@ namespace proof_system::honk::flavor { -#define DEFINE_POINTER_VIEW(ExpectedSize, ...) \ - [[nodiscard]] auto pointer_view() \ - { \ - std::array view{ __VA_ARGS__ }; \ - static_assert(view.size() == ExpectedSize, \ - "Expected array size to match given size (first parameter) in DEFINE_POINTER_VIEW"); \ - return view; \ - } \ - [[nodiscard]] auto pointer_view() const \ - { \ - std::array view{ __VA_ARGS__ }; \ - static_assert(view.size() == ExpectedSize, \ - "Expected array size to match given size (first parameter) in DEFINE_POINTER_VIEW"); \ - return view; \ - } - -/** - * @brief Base data class template, a wrapper for std::array, from which every flavor class ultimately derives. 
- * - * @tparam T The underlying data type stored in the array - * @tparam HandleType The type that will be used to - * @tparam NUM_ENTITIES The size of the underlying array. - */ -template class Entities_ { - public: - virtual ~Entities_() = default; - - constexpr size_t size() { return NUM_ENTITIES; }; -}; - /** * @brief Base class template containing circuit-specifying data. * */ -template -class PrecomputedEntities_ : public Entities_ { +class PrecomputedEntitiesBase { public: - using DataType = DataType_; - size_t circuit_size; size_t log_circuit_size; size_t num_public_inputs; CircuitType circuit_type; // TODO(#392) - - virtual std::vector get_selectors() = 0; - virtual std::vector get_sigma_polynomials() = 0; - virtual std::vector get_id_polynomials() = 0; -}; - -/** - * @brief Base class template containing witness (wires and derived witnesses). - * @details Shifts are not included here since they do not occupy their own memory. - */ -template -class WitnessEntities_ : public Entities_ { - public: - virtual std::vector get_wires() = 0; }; /** @@ -186,27 +140,10 @@ template class VerificationKey_ : public Preco }; }; -/** - * @brief Base class containing all entities (or handles on these) in one place. - * - * @tparam PrecomputedEntities An instance of PrecomputedEntities_ with affine_element data type and handle type. - */ -template -class AllEntities_ : public Entities_ { - public: - virtual std::vector get_wires() = 0; - virtual std::vector get_unshifted() = 0; - virtual std::vector get_to_be_shifted() = 0; - virtual std::vector get_shifted() = 0; - - // Because of how Gemini is written, is importat to put the polynomials out in this order. - std::vector get_unshifted_then_shifted() - { - std::vector result{ get_unshifted() }; - std::vector shifted{ get_shifted() }; - result.insert(result.end(), shifted.begin(), shifted.end()); - return result; - }; +// Because of how Gemini is written, is importat to put the polynomials out in this order. 
+auto get_unshifted_then_shifted(const auto& all_entities) +{ + return concatenate(all_entities.get_unshifted(), all_entities.get_shifted()); }; /** diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp index 8e99d56e397..4b2c8fc2752 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor.test.cpp @@ -28,15 +28,14 @@ TEST(Flavor, Getters) EXPECT_EQ(proving_key.id_2[0], FF(4)); EXPECT_EQ(proving_key.id_3[0], FF(8)); - Flavor::VerificationKey verification_key; Flavor::ProverPolynomials prover_polynomials; - Flavor::AllValues evals; Flavor::CommitmentLabels commitment_labels; // Globals are also available through STL container sizes - EXPECT_EQ(prover_polynomials.size(), Flavor::NUM_ALL_ENTITIES); + EXPECT_EQ(prover_polynomials.get_all().size(), Flavor::NUM_ALL_ENTITIES); // Shited polynomials have the righ tsize - EXPECT_EQ(prover_polynomials.size(), prover_polynomials.get_unshifted_then_shifted().size()); + EXPECT_EQ(prover_polynomials.get_all().size(), + prover_polynomials.get_shifted().size() + prover_polynomials.get_unshifted().size()); // Commitment lables are stored in the flavor. EXPECT_EQ(commitment_labels.w_r, "W_R"); diff --git a/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp b/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp new file mode 100644 index 00000000000..c378aa51489 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/flavor/flavor_macros.hpp @@ -0,0 +1,71 @@ +#pragma once + +// Macros for defining the flavor classes. +// These are used to derive iterator methods along with the body of a 'flavor' class. +// DEFINE_FLAVOR_MEMBERS lets you define a flavor entity as a collection of individual members, and derive an iterator. +// while DEFINE_COMPOUND_GET_ALL and DEFINE_COMPOUND_POINTER_VIEW let you combine the iterators of substructures or base +// classes. 
+ +#include "barretenberg/common/ref_vector.hpp" +#include "barretenberg/common/std_array.hpp" +#include +#include +#include + +template auto _refs_to_pointer_array(Refs&... refs) +{ + return std::array{ &refs... }; +} + +// @deprecated this was less natural than the ref view +#define DEFINE_POINTER_VIEW(...) \ + [[nodiscard]] auto pointer_view() \ + { \ + return _refs_to_pointer_array(__VA_ARGS__); \ + } \ + [[nodiscard]] auto pointer_view() const \ + { \ + return _refs_to_pointer_array(__VA_ARGS__); \ + } + +#define DEFINE_REF_VIEW(...) \ + [[nodiscard]] auto get_all() \ + { \ + return RefVector{ __VA_ARGS__ }; \ + } \ + [[nodiscard]] auto get_all() const \ + { \ + return RefVector{ __VA_ARGS__ }; \ + } + +/** + * @brief Define the body of a flavor class, included each member and a pointer view with which to iterate the struct. + * + * @tparam T The underlying data type stored in the array + * @tparam HandleType The type that will be used to + * @tparam NUM_ENTITIES The size of the underlying array. + */ +#define DEFINE_FLAVOR_MEMBERS(DataType, ...) \ + DataType __VA_ARGS__; \ + DEFINE_POINTER_VIEW(__VA_ARGS__) \ + DEFINE_REF_VIEW(__VA_ARGS__) + +#define DEFINE_COMPOUND_POINTER_VIEW(...) \ + [[nodiscard]] auto pointer_view() \ + { \ + return concatenate(__VA_ARGS__); \ + } \ + [[nodiscard]] auto pointer_view() const \ + { \ + return concatenate(__VA_ARGS__); \ + } + +#define DEFINE_COMPOUND_GET_ALL(...) 
\ + [[nodiscard]] auto get_all() \ + { \ + return concatenate(__VA_ARGS__); \ + } \ + [[nodiscard]] auto get_all() const \ + { \ + return concatenate(__VA_ARGS__); \ + } diff --git a/barretenberg/cpp/src/barretenberg/flavor/generated/AvmMini_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/generated/AvmMini_flavor.hpp index 9e0c6c9bc34..75573e5ba2b 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/generated/AvmMini_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/generated/AvmMini_flavor.hpp @@ -8,9 +8,11 @@ #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" -#include "barretenberg/relations/generated/AvmMini.hpp" +#include "barretenberg/relations/generated/AvmMini/avm_mini.hpp" +#include "barretenberg/relations/generated/AvmMini/mem_trace.hpp" #include "barretenberg/transcript/transcript.hpp" namespace proof_system::honk { @@ -31,14 +33,14 @@ class AvmMiniFlavor { using CommitmentKey = pcs::CommitmentKey; using VerifierCommitmentKey = pcs::VerifierCommitmentKey; - static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 3; + static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 2; static constexpr size_t NUM_WITNESS_ENTITIES = 20; static constexpr size_t NUM_WIRES = NUM_WITNESS_ENTITIES + NUM_PRECOMPUTED_ENTITIES; // We have two copies of the witness entities, so we subtract the number of fixed ones (they have no shift), one for // the unshifted and one for the shifted - static constexpr size_t NUM_ALL_ENTITIES = 26; + static constexpr size_t NUM_ALL_ENTITIES = 25; - using Relations = std::tuple>; + using Relations = std::tuple, AvmMini_vm::mem_trace>; static constexpr size_t MAX_PARTIAL_RELATION_LENGTH = compute_max_partial_relation_length(); @@ -57,223 +59,200 @@ class AvmMiniFlavor { static constexpr bool has_zero_row = true; private: - template - class 
PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType avmMini_clk; - DataType avmMini_positive; - DataType avmMini_first; + using DataType = DataType_; - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, &avmMini_clk, &avmMini_positive, &avmMini_first) + DEFINE_FLAVOR_MEMBERS(DataType, avmMini_clk, avmMini_first) - std::vector get_selectors() override + RefVector get_selectors() { return { avmMini_clk, - avmMini_positive, avmMini_first, }; }; - std::vector get_sigma_polynomials() override { return {}; }; - std::vector get_id_polynomials() override { return {}; }; - std::vector get_table_polynomials() { return {}; }; + RefVector get_sigma_polynomials() { return {}; }; + RefVector get_id_polynomials() { return {}; }; + RefVector get_table_polynomials() { return {}; }; }; - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType avmMini_subop; - DataType avmMini_ia; - DataType avmMini_ib; - DataType avmMini_ic; - DataType avmMini_mem_op_a; - DataType avmMini_mem_op_b; - DataType avmMini_mem_op_c; - DataType avmMini_rwa; - DataType avmMini_rwb; - DataType avmMini_rwc; - DataType avmMini_mem_idx_a; - DataType avmMini_mem_idx_b; - DataType avmMini_mem_idx_c; - DataType avmMini_last; - DataType avmMini_m_clk; - DataType avmMini_m_sub_clk; - DataType avmMini_m_addr; - DataType avmMini_m_val; - DataType avmMini_m_lastAccess; - DataType avmMini_m_rw; - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &avmMini_subop, - &avmMini_ia, - &avmMini_ib, - &avmMini_ic, - &avmMini_mem_op_a, - &avmMini_mem_op_b, - &avmMini_mem_op_c, - &avmMini_rwa, - &avmMini_rwb, - &avmMini_rwc, - &avmMini_mem_idx_a, - &avmMini_mem_idx_b, - &avmMini_mem_idx_c, - &avmMini_last, - &avmMini_m_clk, - &avmMini_m_sub_clk, - &avmMini_m_addr, - &avmMini_m_val, - &avmMini_m_lastAccess, - &avmMini_m_rw) - - std::vector get_wires() override + DEFINE_FLAVOR_MEMBERS(DataType, 
+ memTrace_m_clk, + memTrace_m_sub_clk, + memTrace_m_addr, + memTrace_m_val, + memTrace_m_lastAccess, + memTrace_m_rw, + avmMini_subop, + avmMini_ia, + avmMini_ib, + avmMini_ic, + avmMini_mem_op_a, + avmMini_mem_op_b, + avmMini_mem_op_c, + avmMini_rwa, + avmMini_rwb, + avmMini_rwc, + avmMini_mem_idx_a, + avmMini_mem_idx_b, + avmMini_mem_idx_c, + avmMini_last) + + RefVector get_wires() { return { - avmMini_subop, avmMini_ia, avmMini_ib, avmMini_ic, avmMini_mem_op_a, - avmMini_mem_op_b, avmMini_mem_op_c, avmMini_rwa, avmMini_rwb, avmMini_rwc, - avmMini_mem_idx_a, avmMini_mem_idx_b, avmMini_mem_idx_c, avmMini_last, avmMini_m_clk, - avmMini_m_sub_clk, avmMini_m_addr, avmMini_m_val, avmMini_m_lastAccess, avmMini_m_rw, + memTrace_m_clk, memTrace_m_sub_clk, memTrace_m_addr, memTrace_m_val, memTrace_m_lastAccess, + memTrace_m_rw, avmMini_subop, avmMini_ia, avmMini_ib, avmMini_ic, + avmMini_mem_op_a, avmMini_mem_op_b, avmMini_mem_op_c, avmMini_rwa, avmMini_rwb, + avmMini_rwc, avmMini_mem_idx_a, avmMini_mem_idx_b, avmMini_mem_idx_c, avmMini_last, }; }; - std::vector get_sorted_polynomials() { return {}; }; + RefVector get_sorted_polynomials() { return {}; }; }; - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType avmMini_clk; - DataType avmMini_positive; - DataType avmMini_first; - - DataType avmMini_subop; - DataType avmMini_ia; - DataType avmMini_ib; - DataType avmMini_ic; - DataType avmMini_mem_op_a; - DataType avmMini_mem_op_b; - DataType avmMini_mem_op_c; - DataType avmMini_rwa; - DataType avmMini_rwb; - DataType avmMini_rwc; - DataType avmMini_mem_idx_a; - DataType avmMini_mem_idx_b; - DataType avmMini_mem_idx_c; - DataType avmMini_last; - DataType avmMini_m_clk; - DataType avmMini_m_sub_clk; - DataType avmMini_m_addr; - DataType avmMini_m_val; - DataType avmMini_m_lastAccess; - DataType avmMini_m_rw; - - DataType avmMini_m_val_shift; - DataType avmMini_m_addr_shift; - DataType avmMini_m_rw_shift; - - 
DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &avmMini_clk, - &avmMini_positive, - &avmMini_first, - &avmMini_subop, - &avmMini_ia, - &avmMini_ib, - &avmMini_ic, - &avmMini_mem_op_a, - &avmMini_mem_op_b, - &avmMini_mem_op_c, - &avmMini_rwa, - &avmMini_rwb, - &avmMini_rwc, - &avmMini_mem_idx_a, - &avmMini_mem_idx_b, - &avmMini_mem_idx_c, - &avmMini_last, - &avmMini_m_clk, - &avmMini_m_sub_clk, - &avmMini_m_addr, - &avmMini_m_val, - &avmMini_m_lastAccess, - &avmMini_m_rw, - &avmMini_m_val_shift, - &avmMini_m_addr_shift, - &avmMini_m_rw_shift) - - std::vector get_wires() override + DEFINE_FLAVOR_MEMBERS(DataType, + avmMini_clk, + avmMini_first, + memTrace_m_clk, + memTrace_m_sub_clk, + memTrace_m_addr, + memTrace_m_val, + memTrace_m_lastAccess, + memTrace_m_rw, + avmMini_subop, + avmMini_ia, + avmMini_ib, + avmMini_ic, + avmMini_mem_op_a, + avmMini_mem_op_b, + avmMini_mem_op_c, + avmMini_rwa, + avmMini_rwb, + avmMini_rwc, + avmMini_mem_idx_a, + avmMini_mem_idx_b, + avmMini_mem_idx_c, + avmMini_last, + memTrace_m_rw_shift, + memTrace_m_addr_shift, + memTrace_m_val_shift) + + RefVector get_wires() { return { - avmMini_clk, avmMini_positive, avmMini_first, avmMini_subop, avmMini_ia, - avmMini_ib, avmMini_ic, avmMini_mem_op_a, avmMini_mem_op_b, avmMini_mem_op_c, - avmMini_rwa, avmMini_rwb, avmMini_rwc, avmMini_mem_idx_a, avmMini_mem_idx_b, - avmMini_mem_idx_c, avmMini_last, avmMini_m_clk, avmMini_m_sub_clk, avmMini_m_addr, - avmMini_m_val, avmMini_m_lastAccess, avmMini_m_rw, avmMini_m_val_shift, avmMini_m_addr_shift, - avmMini_m_rw_shift, + avmMini_clk, + avmMini_first, + memTrace_m_clk, + memTrace_m_sub_clk, + memTrace_m_addr, + memTrace_m_val, + memTrace_m_lastAccess, + memTrace_m_rw, + avmMini_subop, + avmMini_ia, + avmMini_ib, + avmMini_ic, + avmMini_mem_op_a, + avmMini_mem_op_b, + avmMini_mem_op_c, + avmMini_rwa, + avmMini_rwb, + avmMini_rwc, + avmMini_mem_idx_a, + avmMini_mem_idx_b, + avmMini_mem_idx_c, + avmMini_last, + memTrace_m_rw_shift, + memTrace_m_addr_shift, + 
memTrace_m_val_shift, }; }; - std::vector get_unshifted() override + RefVector get_unshifted() { return { - avmMini_clk, avmMini_positive, avmMini_first, avmMini_subop, avmMini_ia, - avmMini_ib, avmMini_ic, avmMini_mem_op_a, avmMini_mem_op_b, avmMini_mem_op_c, - avmMini_rwa, avmMini_rwb, avmMini_rwc, avmMini_mem_idx_a, avmMini_mem_idx_b, - avmMini_mem_idx_c, avmMini_last, avmMini_m_clk, avmMini_m_sub_clk, avmMini_m_addr, - avmMini_m_val, avmMini_m_lastAccess, avmMini_m_rw, + avmMini_clk, + avmMini_first, + memTrace_m_clk, + memTrace_m_sub_clk, + memTrace_m_addr, + memTrace_m_val, + memTrace_m_lastAccess, + memTrace_m_rw, + avmMini_subop, + avmMini_ia, + avmMini_ib, + avmMini_ic, + avmMini_mem_op_a, + avmMini_mem_op_b, + avmMini_mem_op_c, + avmMini_rwa, + avmMini_rwb, + avmMini_rwc, + avmMini_mem_idx_a, + avmMini_mem_idx_b, + avmMini_mem_idx_c, + avmMini_last, }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { - avmMini_m_val, - avmMini_m_addr, - avmMini_m_rw, + memTrace_m_rw, + memTrace_m_addr, + memTrace_m_val, }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { - avmMini_m_val_shift, - avmMini_m_addr_shift, - avmMini_m_rw_shift, + memTrace_m_rw_shift, + memTrace_m_addr_shift, + memTrace_m_val_shift, }; }; }; public: - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public ProvingKey_, WitnessEntities> { public: // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; // The plookup wires that store plookup read data. 
std::array get_table_column_wires() { return {}; }; }; - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; - using ProverPolynomials = AllEntities; + using ProverPolynomials = AllEntities; - using FoldedPolynomials = AllEntities, PolynomialHandle>; + using FoldedPolynomials = AllEntities>; - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; }; - class AllPolynomials : public AllEntities { + class AllPolynomials : public AllEntities { public: - [[nodiscard]] size_t get_polynomial_size() const { return this->avmMini_clk.size(); } + [[nodiscard]] size_t get_polynomial_size() const { return this->memTrace_m_clk.size(); } [[nodiscard]] AllValues get_row(const size_t row_idx) const { AllValues result; @@ -284,9 +263,9 @@ class AvmMiniFlavor { } }; - using RowPolynomials = AllEntities; + using RowPolynomials = AllEntities; - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; PartiallyEvaluatedMultivariates(const size_t circuit_size) @@ -302,25 +281,29 @@ class AvmMiniFlavor { * @brief A container for univariates used during Protogalaxy folding and sumcheck. * @details During folding and sumcheck, the prover evaluates the relations on these univariates. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. 
*/ using ExtendedEdges = ProverUnivariates; - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: CommitmentLabels() - : AllEntities() + : AllEntities() { Base::avmMini_clk = "avmMini_clk"; - Base::avmMini_positive = "avmMini_positive"; Base::avmMini_first = "avmMini_first"; + Base::memTrace_m_clk = "memTrace_m_clk"; + Base::memTrace_m_sub_clk = "memTrace_m_sub_clk"; + Base::memTrace_m_addr = "memTrace_m_addr"; + Base::memTrace_m_val = "memTrace_m_val"; + Base::memTrace_m_lastAccess = "memTrace_m_lastAccess"; + Base::memTrace_m_rw = "memTrace_m_rw"; Base::avmMini_subop = "avmMini_subop"; Base::avmMini_ia = "avmMini_ia"; Base::avmMini_ib = "avmMini_ib"; @@ -335,34 +318,32 @@ class AvmMiniFlavor { Base::avmMini_mem_idx_b = "avmMini_mem_idx_b"; Base::avmMini_mem_idx_c = "avmMini_mem_idx_c"; Base::avmMini_last = "avmMini_last"; - Base::avmMini_m_clk = "avmMini_m_clk"; - Base::avmMini_m_sub_clk = "avmMini_m_sub_clk"; - Base::avmMini_m_addr = "avmMini_m_addr"; - Base::avmMini_m_val = "avmMini_m_val"; - Base::avmMini_m_lastAccess = "avmMini_m_lastAccess"; - Base::avmMini_m_rw = "avmMini_m_rw"; }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: - VerifierCommitments(const std::shared_ptr& verification_key, - const BaseTranscript& transcript) + VerifierCommitments(const std::shared_ptr& verification_key, const BaseTranscript& transcript) { static_cast(transcript); avmMini_clk = verification_key->avmMini_clk; - avmMini_positive = verification_key->avmMini_positive; avmMini_first = verification_key->avmMini_first; } }; - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: uint32_t circuit_size; + Commitment memTrace_m_clk; + Commitment memTrace_m_sub_clk; + Commitment memTrace_m_addr; + 
Commitment memTrace_m_val; + Commitment memTrace_m_lastAccess; + Commitment memTrace_m_rw; Commitment avmMini_subop; Commitment avmMini_ia; Commitment avmMini_ib; @@ -377,12 +358,6 @@ class AvmMiniFlavor { Commitment avmMini_mem_idx_b; Commitment avmMini_mem_idx_c; Commitment avmMini_last; - Commitment avmMini_m_clk; - Commitment avmMini_m_sub_clk; - Commitment avmMini_m_addr; - Commitment avmMini_m_val; - Commitment avmMini_m_lastAccess; - Commitment avmMini_m_rw; std::vector> sumcheck_univariates; std::array sumcheck_evaluations; @@ -393,43 +368,43 @@ class AvmMiniFlavor { Transcript() = default; Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} - void deserialize_full_transcript() override + void deserialize_full_transcript() { size_t num_bytes_read = 0; circuit_size = deserialize_from_buffer(proof_data, num_bytes_read); size_t log_n = numeric::get_msb(circuit_size); - avmMini_subop = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_ia = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_ib = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_ic = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_op_a = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_op_b = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_op_c = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_rwa = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_rwb = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_rwc = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_idx_a = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_idx_b = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_mem_idx_c = 
deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_last = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_clk = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_sub_clk = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_addr = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_val = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_lastAccess = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - avmMini_m_rw = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_clk = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_sub_clk = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_addr = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_val = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_lastAccess = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + memTrace_m_rw = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_subop = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_ia = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_ib = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_ic = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_op_a = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_op_b = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_op_c = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_rwa = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_rwb = deserialize_from_buffer(BaseTranscript::proof_data, 
num_bytes_read); + avmMini_rwc = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_idx_a = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_idx_b = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_mem_idx_c = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + avmMini_last = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { sumcheck_univariates.emplace_back( deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read)); + BaseTranscript::proof_data, num_bytes_read)); } - sumcheck_evaluations = deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read); + sumcheck_evaluations = + deserialize_from_buffer>(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { zm_cq_comms.push_back(deserialize_from_buffer(proof_data, num_bytes_read)); } @@ -437,39 +412,39 @@ class AvmMiniFlavor { zm_pi_comm = deserialize_from_buffer(proof_data, num_bytes_read); } - void serialize_full_transcript() override + void serialize_full_transcript() { size_t old_proof_length = proof_data.size(); - BaseTranscript::proof_data.clear(); + BaseTranscript::proof_data.clear(); size_t log_n = numeric::get_msb(circuit_size); - serialize_to_buffer(circuit_size, BaseTranscript::proof_data); - - serialize_to_buffer(avmMini_subop, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_ia, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_ib, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_ic, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_op_a, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_op_b, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_op_c, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_rwa, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_rwb, BaseTranscript::proof_data); - 
serialize_to_buffer(avmMini_rwc, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_idx_a, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_idx_b, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_mem_idx_c, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_last, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_clk, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_sub_clk, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_addr, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_val, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_lastAccess, BaseTranscript::proof_data); - serialize_to_buffer(avmMini_m_rw, BaseTranscript::proof_data); + serialize_to_buffer(circuit_size, BaseTranscript::proof_data); + + serialize_to_buffer(memTrace_m_clk, BaseTranscript::proof_data); + serialize_to_buffer(memTrace_m_sub_clk, BaseTranscript::proof_data); + serialize_to_buffer(memTrace_m_addr, BaseTranscript::proof_data); + serialize_to_buffer(memTrace_m_val, BaseTranscript::proof_data); + serialize_to_buffer(memTrace_m_lastAccess, BaseTranscript::proof_data); + serialize_to_buffer(memTrace_m_rw, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_subop, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_ia, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_ib, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_ic, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_op_a, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_op_b, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_op_c, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_rwa, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_rwb, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_rwc, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_idx_a, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_idx_b, 
BaseTranscript::proof_data); + serialize_to_buffer(avmMini_mem_idx_c, BaseTranscript::proof_data); + serialize_to_buffer(avmMini_last, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); } - serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { serialize_to_buffer(zm_cq_comms[i], proof_data); } diff --git a/barretenberg/cpp/src/barretenberg/flavor/generated/Fib_flavor.hpp b/barretenberg/cpp/src/barretenberg/flavor/generated/Fib_flavor.hpp index 281587d0f42..4d39c4e6003 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/generated/Fib_flavor.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/generated/Fib_flavor.hpp @@ -8,6 +8,7 @@ #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include "barretenberg/relations/generated/Fib.hpp" @@ -57,15 +58,12 @@ class FibFlavor { static constexpr bool has_zero_row = true; private: - template - class PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType Fibonacci_LAST; - DataType Fibonacci_FIRST; + using DataType = DataType_; + DEFINE_FLAVOR_MEMBERS(DataType, Fibonacci_LAST, Fibonacci_FIRST) - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, &Fibonacci_LAST, &Fibonacci_FIRST) - - std::vector get_selectors() override + RefVector get_selectors() { return { Fibonacci_LAST, @@ -73,20 +71,16 @@ class FibFlavor { }; }; - std::vector get_sigma_polynomials() override { return {}; }; - std::vector get_id_polynomials() override { return {}; }; - std::vector 
get_table_polynomials() { return {}; }; + RefVector get_sigma_polynomials() { return {}; }; + RefVector get_id_polynomials() { return {}; }; + RefVector get_table_polynomials() { return {}; }; }; - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType Fibonacci_x; - DataType Fibonacci_y; - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, &Fibonacci_x, &Fibonacci_y) + DEFINE_FLAVOR_MEMBERS(DataType, Fibonacci_x, Fibonacci_y) - std::vector get_wires() override + RefVector get_wires() { return { Fibonacci_x, @@ -95,30 +89,15 @@ class FibFlavor { }; }; - std::vector get_sorted_polynomials() { return {}; }; + RefVector get_sorted_polynomials() { return {}; }; }; - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType Fibonacci_LAST; - DataType Fibonacci_FIRST; - - DataType Fibonacci_x; - DataType Fibonacci_y; - - DataType Fibonacci_x_shift; - DataType Fibonacci_y_shift; - - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &Fibonacci_LAST, - &Fibonacci_FIRST, - &Fibonacci_x, - &Fibonacci_y, - &Fibonacci_x_shift, - &Fibonacci_y_shift) + DEFINE_FLAVOR_MEMBERS( + DataType, Fibonacci_LAST, Fibonacci_FIRST, Fibonacci_x, Fibonacci_y, Fibonacci_x_shift, Fibonacci_y_shift) - std::vector get_wires() override + RefVector get_wires() { return { Fibonacci_LAST, Fibonacci_FIRST, Fibonacci_x, Fibonacci_y, Fibonacci_x_shift, Fibonacci_y_shift, @@ -126,7 +105,7 @@ class FibFlavor { }; }; - std::vector get_unshifted() override + RefVector get_unshifted() { return { Fibonacci_LAST, @@ -137,7 +116,7 @@ class FibFlavor { }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { Fibonacci_x, @@ -146,7 +125,7 @@ class FibFlavor { }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { Fibonacci_x_shift, @@ -157,31 +136,29 @@ class FibFlavor { }; public: - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public 
ProvingKey_, WitnessEntities> { public: // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; // The plookup wires that store plookup read data. std::array get_table_column_wires() { return {}; }; }; - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; - using ProverPolynomials = AllEntities; + using ProverPolynomials = AllEntities; - using FoldedPolynomials = AllEntities, PolynomialHandle>; + using FoldedPolynomials = AllEntities>; - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; }; - class AllPolynomials : public AllEntities { + class AllPolynomials : public AllEntities { public: [[nodiscard]] size_t get_polynomial_size() const { return this->Fibonacci_LAST.size(); } [[nodiscard]] AllValues get_row(const size_t row_idx) const @@ -194,9 +171,9 @@ class FibFlavor { } }; - using RowPolynomials = AllEntities; + using RowPolynomials = AllEntities; - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; PartiallyEvaluatedMultivariates(const size_t circuit_size) @@ -212,21 +189,20 @@ class FibFlavor { * @brief A container for univariates used during Protogalaxy folding and sumcheck. * @details During folding and sumcheck, the prover evaluates the relations on these univariates. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. 
*/ using ExtendedEdges = ProverUnivariates; - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: CommitmentLabels() - : AllEntities() + : AllEntities() { Base::Fibonacci_LAST = "Fibonacci_LAST"; Base::Fibonacci_FIRST = "Fibonacci_FIRST"; @@ -235,13 +211,12 @@ class FibFlavor { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { private: - using Base = AllEntities; + using Base = AllEntities; public: - VerifierCommitments(const std::shared_ptr& verification_key, - const BaseTranscript& transcript) + VerifierCommitments(const std::shared_ptr& verification_key, const BaseTranscript& transcript) { static_cast(transcript); Fibonacci_LAST = verification_key->Fibonacci_LAST; @@ -249,7 +224,7 @@ class FibFlavor { } }; - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: uint32_t circuit_size; @@ -265,25 +240,25 @@ class FibFlavor { Transcript() = default; Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} - void deserialize_full_transcript() override + void deserialize_full_transcript() { size_t num_bytes_read = 0; circuit_size = deserialize_from_buffer(proof_data, num_bytes_read); size_t log_n = numeric::get_msb(circuit_size); - Fibonacci_x = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - Fibonacci_y = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + Fibonacci_x = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + Fibonacci_y = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { sumcheck_univariates.emplace_back( deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read)); + BaseTranscript::proof_data, num_bytes_read)); } - sumcheck_evaluations = deserialize_from_buffer>( - 
BaseTranscript::proof_data, num_bytes_read); + sumcheck_evaluations = + deserialize_from_buffer>(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { zm_cq_comms.push_back(deserialize_from_buffer(proof_data, num_bytes_read)); } @@ -291,21 +266,21 @@ class FibFlavor { zm_pi_comm = deserialize_from_buffer(proof_data, num_bytes_read); } - void serialize_full_transcript() override + void serialize_full_transcript() { size_t old_proof_length = proof_data.size(); - BaseTranscript::proof_data.clear(); + BaseTranscript::proof_data.clear(); size_t log_n = numeric::get_msb(circuit_size); - serialize_to_buffer(circuit_size, BaseTranscript::proof_data); + serialize_to_buffer(circuit_size, BaseTranscript::proof_data); - serialize_to_buffer(Fibonacci_x, BaseTranscript::proof_data); - serialize_to_buffer(Fibonacci_y, BaseTranscript::proof_data); + serialize_to_buffer(Fibonacci_x, BaseTranscript::proof_data); + serialize_to_buffer(Fibonacci_y, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); } - serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { serialize_to_buffer(zm_cq_comms[i], proof_data); } diff --git a/barretenberg/cpp/src/barretenberg/flavor/goblin_translator.hpp b/barretenberg/cpp/src/barretenberg/flavor/goblin_translator.hpp index 454bbf85b67..4290dffe497 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/goblin_translator.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/goblin_translator.hpp @@ -1,7 +1,12 @@ #pragma once #include "barretenberg/commitment_schemes/commitment_key.hpp" #include "barretenberg/commitment_schemes/kzg/kzg.hpp" +#include "barretenberg/common/ref_vector.hpp" +#include "barretenberg/ecc/curves/bn254/bn254.hpp" #include 
"barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" +#include "barretenberg/polynomials/univariate.hpp" +#include "barretenberg/proof_system/arithmetization/arithmetization.hpp" #include "barretenberg/proof_system/circuit_builder/goblin_translator_circuit_builder.hpp" #include "barretenberg/relations/relation_parameters.hpp" #include "barretenberg/relations/translator_vm/translator_decomposition_relation.hpp" @@ -62,9 +67,9 @@ class GoblinTranslator { // This is not a configurable value. Relations are sepcifically designed for it to be 68 static constexpr size_t NUM_LIMB_BITS = CircuitBuilder::NUM_LIMB_BITS; - // The number of multivariate polynomials on which a sumcheck prover sumcheck operates (including shifts). We often - // need containers of this size to hold related data, so we choose a name more agnostic than `NUM_POLYNOMIALS`. - // Note: this number does not include the individual sorted list polynomials. + // The number of multivariate polynomials on which a sumcheck prover sumcheck operates (including shifts). We + // often need containers of this size to hold related data, so we choose a name more agnostic than + // `NUM_POLYNOMIALS`. Note: this number does not include the individual sorted list polynomials. static constexpr size_t NUM_ALL_ENTITIES = 184; // The number of polynomials precomputed to describe a circuit and to aid a prover in constructing a satisfying // assignment of witnesses. We again choose a neutral name. @@ -102,1295 +107,859 @@ class GoblinTranslator { using TupleOfArraysOfValues = decltype(create_tuple_of_arrays_of_values()); private: - template /** * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. 
*/ - class PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType lagrange_first; // column 0 - DataType lagrange_last; // column 1 - // TODO(#758): Check if one of these can be replaced by shifts - DataType lagrange_odd_in_minicircuit; // column 2 - DataType lagrange_even_in_minicircuit; // column 3 - DataType lagrange_second; // column 4 - DataType lagrange_second_to_last_in_minicircuit; // column 5 - DataType ordered_extra_range_constraints_numerator; // column 6 - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, - &lagrange_first, - &lagrange_last, - &lagrange_odd_in_minicircuit, - &lagrange_even_in_minicircuit, - &lagrange_second, - &lagrange_second_to_last_in_minicircuit, - &ordered_extra_range_constraints_numerator); - - std::vector get_selectors() { return {}; }; - std::vector get_sigma_polynomials() { return {}; }; - std::vector get_id_polynomials() { return {}; }; + using DataType = DataType_; + DEFINE_FLAVOR_MEMBERS(DataType, + lagrange_first, // column 0 + lagrange_last, // column 1 + // TODO(#758): Check if one of these can be replaced by shifts + lagrange_odd_in_minicircuit, // column 2 + lagrange_even_in_minicircuit, // column 3 + lagrange_second, // column 4 + lagrange_second_to_last_in_minicircuit, // column 5 + ordered_extra_range_constraints_numerator); // column 6 + RefVector get_selectors() { return {}; }; + RefVector get_sigma_polynomials() { return {}; }; + RefVector get_id_polynomials() { return {}; }; }; + template class ConcatenatedRangeConstraints { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + concatenated_range_constraints_0, // column 0 + concatenated_range_constraints_1, // column 1 + concatenated_range_constraints_2, // column 2 + concatenated_range_constraints_3) // column 3 + }; + // TODO(https://github.com/AztecProtocol/barretenberg/issues/790) dedupe with shifted? 
+ template class WireToBeShiftedEntities { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + x_lo_y_hi, // column 0 + x_hi_z_1, // column 1 + y_lo_z_2, // column 2 + p_x_low_limbs, // column 3 + p_x_low_limbs_range_constraint_0, // column 4 + p_x_low_limbs_range_constraint_1, // column 5 + p_x_low_limbs_range_constraint_2, // column 6 + p_x_low_limbs_range_constraint_3, // column 7 + p_x_low_limbs_range_constraint_4, // column 8 + p_x_low_limbs_range_constraint_tail, // column 9 + p_x_high_limbs, // column 10 + p_x_high_limbs_range_constraint_0, // column 11 + p_x_high_limbs_range_constraint_1, // column 12 + p_x_high_limbs_range_constraint_2, // column 13 + p_x_high_limbs_range_constraint_3, // column 14 + p_x_high_limbs_range_constraint_4, // column 15 + p_x_high_limbs_range_constraint_tail, // column 16 + p_y_low_limbs, // column 17 + p_y_low_limbs_range_constraint_0, // column 18 + p_y_low_limbs_range_constraint_1, // column 19 + p_y_low_limbs_range_constraint_2, // column 20 + p_y_low_limbs_range_constraint_3, // column 21 + p_y_low_limbs_range_constraint_4, // column 22 + p_y_low_limbs_range_constraint_tail, // column 23 + p_y_high_limbs, // column 24 + p_y_high_limbs_range_constraint_0, // column 25 + p_y_high_limbs_range_constraint_1, // column 26 + p_y_high_limbs_range_constraint_2, // column 27 + p_y_high_limbs_range_constraint_3, // column 28 + p_y_high_limbs_range_constraint_4, // column 29 + p_y_high_limbs_range_constraint_tail, // column 30 + z_low_limbs, // column 31 + z_low_limbs_range_constraint_0, // column 32 + z_low_limbs_range_constraint_1, // column 33 + z_low_limbs_range_constraint_2, // column 34 + z_low_limbs_range_constraint_3, // column 35 + z_low_limbs_range_constraint_4, // column 36 + z_low_limbs_range_constraint_tail, // column 37 + z_high_limbs, // column 38 + z_high_limbs_range_constraint_0, // column 39 + z_high_limbs_range_constraint_1, // column 40 + z_high_limbs_range_constraint_2, // column 41 + z_high_limbs_range_constraint_3, // 
column 42 + z_high_limbs_range_constraint_4, // column 43 + z_high_limbs_range_constraint_tail, // column 44 + accumulators_binary_limbs_0, // column 45 + accumulators_binary_limbs_1, // column 46 + accumulators_binary_limbs_2, // column 47 + accumulators_binary_limbs_3, // column 48 + accumulator_low_limbs_range_constraint_0, // column 49 + accumulator_low_limbs_range_constraint_1, // column 50 + accumulator_low_limbs_range_constraint_2, // column 51 + accumulator_low_limbs_range_constraint_3, // column 52 + accumulator_low_limbs_range_constraint_4, // column 53 + accumulator_low_limbs_range_constraint_tail, // column 54 + accumulator_high_limbs_range_constraint_0, // column 55 + accumulator_high_limbs_range_constraint_1, // column 56 + accumulator_high_limbs_range_constraint_2, // column 57 + accumulator_high_limbs_range_constraint_3, // column 58 + accumulator_high_limbs_range_constraint_4, // column 59 + accumulator_high_limbs_range_constraint_tail, // column 60 + quotient_low_binary_limbs, // column 61 + quotient_high_binary_limbs, // column 62 + quotient_low_limbs_range_constraint_0, // column 63 + quotient_low_limbs_range_constraint_1, // column 64 + quotient_low_limbs_range_constraint_2, // column 65 + quotient_low_limbs_range_constraint_3, // column 66 + quotient_low_limbs_range_constraint_4, // column 67 + quotient_low_limbs_range_constraint_tail, // column 68 + quotient_high_limbs_range_constraint_0, // column 69 + quotient_high_limbs_range_constraint_1, // column 70 + quotient_high_limbs_range_constraint_2, // column 71 + quotient_high_limbs_range_constraint_3, // column 72 + quotient_high_limbs_range_constraint_4, // column 73 + quotient_high_limbs_range_constraint_tail, // column 74 + relation_wide_limbs, // column 75 + relation_wide_limbs_range_constraint_0, // column 76 + relation_wide_limbs_range_constraint_1, // column 77 + relation_wide_limbs_range_constraint_2, // column 78 + relation_wide_limbs_range_constraint_3, // column 79 + 
ordered_range_constraints_0, // column 80 + ordered_range_constraints_1, // column 81 + ordered_range_constraints_2, // column 82 + ordered_range_constraints_3, // column 83 + ordered_range_constraints_4); // column 84 + }; + template class WireNonshiftedEntities { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + op // column 0 + ); + }; + template class DerivedWitnessEntities { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + z_perm); // column 0 + }; /** * @brief Container for all witness polynomials used/constructed by the prover. - * @details Shifts are not included here since they do not occupy their own memory. */ - template - class WitnessEntities : public WitnessEntities_ { + template + class WitnessEntities : public WireNonshiftedEntities, + public WireToBeShiftedEntities, + public DerivedWitnessEntities, + public ConcatenatedRangeConstraints { public: - DataType op; // column 0 - DataType x_lo_y_hi; // column 1 - DataType x_hi_z_1; // column 2 - DataType y_lo_z_2; // column 3 - DataType p_x_low_limbs; // column 4 - DataType p_x_low_limbs_range_constraint_0; // column 5 - DataType p_x_low_limbs_range_constraint_1; // column 6 - DataType p_x_low_limbs_range_constraint_2; // column 7 - DataType p_x_low_limbs_range_constraint_3; // column 8 - DataType p_x_low_limbs_range_constraint_4; // column 9 - DataType p_x_low_limbs_range_constraint_tail; // column 10 - DataType p_x_high_limbs; // column 11 - DataType p_x_high_limbs_range_constraint_0; // column 12 - DataType p_x_high_limbs_range_constraint_1; // column 13 - DataType p_x_high_limbs_range_constraint_2; // column 14 - DataType p_x_high_limbs_range_constraint_3; // column 15 - DataType p_x_high_limbs_range_constraint_4; // column 16 - DataType p_x_high_limbs_range_constraint_tail; // column 17 - DataType p_y_low_limbs; // column 18 - DataType p_y_low_limbs_range_constraint_0; // column 19 - DataType p_y_low_limbs_range_constraint_1; // column 20 - DataType p_y_low_limbs_range_constraint_2; // column 21 - DataType 
p_y_low_limbs_range_constraint_3; // column 22 - DataType p_y_low_limbs_range_constraint_4; // column 23 - DataType p_y_low_limbs_range_constraint_tail; // column 24 - DataType p_y_high_limbs; // column 25 - DataType p_y_high_limbs_range_constraint_0; // column 26 - DataType p_y_high_limbs_range_constraint_1; // column 27 - DataType p_y_high_limbs_range_constraint_2; // column 28 - DataType p_y_high_limbs_range_constraint_3; // column 29 - DataType p_y_high_limbs_range_constraint_4; // column 30 - DataType p_y_high_limbs_range_constraint_tail; // column 31 - DataType z_low_limbs; // column 32 - DataType z_low_limbs_range_constraint_0; // column 33 - DataType z_low_limbs_range_constraint_1; // column 34 - DataType z_low_limbs_range_constraint_2; // column 35 - DataType z_low_limbs_range_constraint_3; // column 36 - DataType z_low_limbs_range_constraint_4; // column 37 - DataType z_low_limbs_range_constraint_tail; // column 38 - DataType z_high_limbs; // column 39 - DataType z_high_limbs_range_constraint_0; // column 40 - DataType z_high_limbs_range_constraint_1; // column 41 - DataType z_high_limbs_range_constraint_2; // column 42 - DataType z_high_limbs_range_constraint_3; // column 43 - DataType z_high_limbs_range_constraint_4; // column 44 - DataType z_high_limbs_range_constraint_tail; // column 45 - DataType accumulators_binary_limbs_0; // column 46 - DataType accumulators_binary_limbs_1; // column 47 - DataType accumulators_binary_limbs_2; // column 48 - DataType accumulators_binary_limbs_3; // column 49 - DataType accumulator_low_limbs_range_constraint_0; // column 50 - DataType accumulator_low_limbs_range_constraint_1; // column 51 - DataType accumulator_low_limbs_range_constraint_2; // column 52 - DataType accumulator_low_limbs_range_constraint_3; // column 53 - DataType accumulator_low_limbs_range_constraint_4; // column 54 - DataType accumulator_low_limbs_range_constraint_tail; // column 55 - DataType accumulator_high_limbs_range_constraint_0; // column 56 
- DataType accumulator_high_limbs_range_constraint_1; // column 57 - DataType accumulator_high_limbs_range_constraint_2; // column 58 - DataType accumulator_high_limbs_range_constraint_3; // column 59 - DataType accumulator_high_limbs_range_constraint_4; // column 60 - DataType accumulator_high_limbs_range_constraint_tail; // column 61 - DataType quotient_low_binary_limbs; // column 62 - DataType quotient_high_binary_limbs; // column 63 - DataType quotient_low_limbs_range_constraint_0; // column 64 - DataType quotient_low_limbs_range_constraint_1; // column 65 - DataType quotient_low_limbs_range_constraint_2; // column 66 - DataType quotient_low_limbs_range_constraint_3; // column 67 - DataType quotient_low_limbs_range_constraint_4; // column 68 - DataType quotient_low_limbs_range_constraint_tail; // column 69 - DataType quotient_high_limbs_range_constraint_0; // column 70 - DataType quotient_high_limbs_range_constraint_1; // column 71 - DataType quotient_high_limbs_range_constraint_2; // column 72 - DataType quotient_high_limbs_range_constraint_3; // column 73 - DataType quotient_high_limbs_range_constraint_4; // column 74 - DataType quotient_high_limbs_range_constraint_tail; // column 75 - DataType relation_wide_limbs; // column 76 - DataType relation_wide_limbs_range_constraint_0; // column 77 - DataType relation_wide_limbs_range_constraint_1; // column 78 - DataType relation_wide_limbs_range_constraint_2; // column 79 - DataType relation_wide_limbs_range_constraint_3; // column 80 - DataType concatenated_range_constraints_0; // column 81 - DataType concatenated_range_constraints_1; // column 82 - DataType concatenated_range_constraints_2; // column 83 - DataType concatenated_range_constraints_3; // column 84 - DataType ordered_range_constraints_0; // column 85 - DataType ordered_range_constraints_1; // column 86 - DataType ordered_range_constraints_2; // column 87 - DataType ordered_range_constraints_3; // column 88 - DataType ordered_range_constraints_4; // 
column 89 - DataType z_perm; // column 90 + DEFINE_COMPOUND_GET_ALL(WireNonshiftedEntities::get_all(), + WireToBeShiftedEntities::get_all(), + DerivedWitnessEntities::get_all(), + ConcatenatedRangeConstraints::get_all()) + DEFINE_COMPOUND_POINTER_VIEW(WireNonshiftedEntities::pointer_view(), + WireToBeShiftedEntities::pointer_view(), + DerivedWitnessEntities::pointer_view(), + ConcatenatedRangeConstraints::pointer_view()) - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &op, - &x_lo_y_hi, - &x_hi_z_1, - &y_lo_z_2, - &p_x_low_limbs, - &p_x_low_limbs_range_constraint_0, - &p_x_low_limbs_range_constraint_1, - &p_x_low_limbs_range_constraint_2, - &p_x_low_limbs_range_constraint_3, - &p_x_low_limbs_range_constraint_4, - &p_x_low_limbs_range_constraint_tail, - &p_x_high_limbs, - &p_x_high_limbs_range_constraint_0, - &p_x_high_limbs_range_constraint_1, - &p_x_high_limbs_range_constraint_2, - &p_x_high_limbs_range_constraint_3, - &p_x_high_limbs_range_constraint_4, - &p_x_high_limbs_range_constraint_tail, - &p_y_low_limbs, - &p_y_low_limbs_range_constraint_0, - &p_y_low_limbs_range_constraint_1, - &p_y_low_limbs_range_constraint_2, - &p_y_low_limbs_range_constraint_3, - &p_y_low_limbs_range_constraint_4, - &p_y_low_limbs_range_constraint_tail, - &p_y_high_limbs, - &p_y_high_limbs_range_constraint_0, - &p_y_high_limbs_range_constraint_1, - &p_y_high_limbs_range_constraint_2, - &p_y_high_limbs_range_constraint_3, - &p_y_high_limbs_range_constraint_4, - &p_y_high_limbs_range_constraint_tail, - &z_low_limbs, - &z_low_limbs_range_constraint_0, - &z_low_limbs_range_constraint_1, - &z_low_limbs_range_constraint_2, - &z_low_limbs_range_constraint_3, - &z_low_limbs_range_constraint_4, - &z_low_limbs_range_constraint_tail, - &z_high_limbs, - &z_high_limbs_range_constraint_0, - &z_high_limbs_range_constraint_1, - &z_high_limbs_range_constraint_2, - &z_high_limbs_range_constraint_3, - &z_high_limbs_range_constraint_4, - &z_high_limbs_range_constraint_tail, - &accumulators_binary_limbs_0, 
- &accumulators_binary_limbs_1, - &accumulators_binary_limbs_2, - &accumulators_binary_limbs_3, - &accumulator_low_limbs_range_constraint_0, - &accumulator_low_limbs_range_constraint_1, - &accumulator_low_limbs_range_constraint_2, - &accumulator_low_limbs_range_constraint_3, - &accumulator_low_limbs_range_constraint_4, - &accumulator_low_limbs_range_constraint_tail, - &accumulator_high_limbs_range_constraint_0, - &accumulator_high_limbs_range_constraint_1, - &accumulator_high_limbs_range_constraint_2, - &accumulator_high_limbs_range_constraint_3, - &accumulator_high_limbs_range_constraint_4, - &accumulator_high_limbs_range_constraint_tail, - "ient_low_binary_limbs, - "ient_high_binary_limbs, - "ient_low_limbs_range_constraint_0, - "ient_low_limbs_range_constraint_1, - "ient_low_limbs_range_constraint_2, - "ient_low_limbs_range_constraint_3, - "ient_low_limbs_range_constraint_4, - "ient_low_limbs_range_constraint_tail, - "ient_high_limbs_range_constraint_0, - "ient_high_limbs_range_constraint_1, - "ient_high_limbs_range_constraint_2, - "ient_high_limbs_range_constraint_3, - "ient_high_limbs_range_constraint_4, - "ient_high_limbs_range_constraint_tail, - &relation_wide_limbs, - &relation_wide_limbs_range_constraint_0, - &relation_wide_limbs_range_constraint_1, - &relation_wide_limbs_range_constraint_2, - &relation_wide_limbs_range_constraint_3, - &concatenated_range_constraints_0, - &concatenated_range_constraints_1, - &concatenated_range_constraints_2, - &concatenated_range_constraints_3, - &ordered_range_constraints_0, - &ordered_range_constraints_1, - &ordered_range_constraints_2, - &ordered_range_constraints_3, - &ordered_range_constraints_4, - &z_perm) + RefVector get_wires() + { + return { this->op, + this->x_lo_y_hi, + this->x_hi_z_1, + this->y_lo_z_2, + this->p_x_low_limbs, + this->p_x_low_limbs_range_constraint_0, + this->p_x_low_limbs_range_constraint_1, + this->p_x_low_limbs_range_constraint_2, + this->p_x_low_limbs_range_constraint_3, + 
this->p_x_low_limbs_range_constraint_4, + this->p_x_low_limbs_range_constraint_tail, + this->p_x_high_limbs, + this->p_x_high_limbs_range_constraint_0, + this->p_x_high_limbs_range_constraint_1, + this->p_x_high_limbs_range_constraint_2, + this->p_x_high_limbs_range_constraint_3, + this->p_x_high_limbs_range_constraint_4, + this->p_x_high_limbs_range_constraint_tail, + this->p_y_low_limbs, + this->p_y_low_limbs_range_constraint_0, + this->p_y_low_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_2, + this->p_y_low_limbs_range_constraint_3, + this->p_y_low_limbs_range_constraint_4, + this->p_y_low_limbs_range_constraint_tail, + this->p_y_high_limbs, + this->p_y_high_limbs_range_constraint_0, + this->p_y_high_limbs_range_constraint_1, + this->p_y_high_limbs_range_constraint_2, + this->p_y_high_limbs_range_constraint_3, + this->p_y_high_limbs_range_constraint_4, + this->p_y_high_limbs_range_constraint_tail, + this->z_low_limbs, + this->z_low_limbs_range_constraint_0, + this->z_low_limbs_range_constraint_1, + this->z_low_limbs_range_constraint_2, + this->z_low_limbs_range_constraint_3, + this->z_low_limbs_range_constraint_4, + this->z_low_limbs_range_constraint_tail, + this->z_high_limbs, + this->z_high_limbs_range_constraint_0, + this->z_high_limbs_range_constraint_1, + this->z_high_limbs_range_constraint_2, + this->z_high_limbs_range_constraint_3, + this->z_high_limbs_range_constraint_4, + this->z_high_limbs_range_constraint_tail, + this->accumulators_binary_limbs_0, + this->accumulators_binary_limbs_1, + this->accumulators_binary_limbs_2, + this->accumulators_binary_limbs_3, + this->accumulator_low_limbs_range_constraint_0, + this->accumulator_low_limbs_range_constraint_1, + this->accumulator_low_limbs_range_constraint_2, + this->accumulator_low_limbs_range_constraint_3, + this->accumulator_low_limbs_range_constraint_4, + this->accumulator_low_limbs_range_constraint_tail, + this->accumulator_high_limbs_range_constraint_0, + 
this->accumulator_high_limbs_range_constraint_1, + this->accumulator_high_limbs_range_constraint_2, + this->accumulator_high_limbs_range_constraint_3, + this->accumulator_high_limbs_range_constraint_4, + this->accumulator_high_limbs_range_constraint_tail, + this->quotient_low_binary_limbs, + this->quotient_high_binary_limbs, + this->quotient_low_limbs_range_constraint_0, + this->quotient_low_limbs_range_constraint_1, + this->quotient_low_limbs_range_constraint_2, + this->quotient_low_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_4, + this->quotient_low_limbs_range_constraint_tail, + this->quotient_high_limbs_range_constraint_0, + this->quotient_high_limbs_range_constraint_1, + this->quotient_high_limbs_range_constraint_2, + this->quotient_high_limbs_range_constraint_3, + this->quotient_high_limbs_range_constraint_4, + this->quotient_high_limbs_range_constraint_tail, + this->relation_wide_limbs, + this->relation_wide_limbs_range_constraint_0, + this->relation_wide_limbs_range_constraint_1, + this->relation_wide_limbs_range_constraint_2, + this->relation_wide_limbs_range_constraint_3, + this->ordered_range_constraints_0, + this->ordered_range_constraints_1, + this->ordered_range_constraints_2, + this->ordered_range_constraints_3, + this->ordered_range_constraints_4 }; + }; - std::vector get_wires() override + // everything but ConcatenatedRangeConstraints + RefVector get_unshifted_wires() + { + return concatenate(WireNonshiftedEntities::get_all(), + WireToBeShiftedEntities::get_all(), + DerivedWitnessEntities::get_all()); + }; + RefVector get_to_be_shifted() { - return { op, - x_lo_y_hi, - x_hi_z_1, - y_lo_z_2, - p_x_low_limbs, - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - 
p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs, - p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - accumulators_binary_limbs_0, - accumulators_binary_limbs_1, - accumulators_binary_limbs_2, - accumulators_binary_limbs_3, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, - quotient_low_binary_limbs, - quotient_high_binary_limbs, - quotient_low_limbs_range_constraint_0, - quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - 
quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - quotient_high_limbs_range_constraint_tail, - relation_wide_limbs, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, - ordered_range_constraints_0, - ordered_range_constraints_1, - ordered_range_constraints_2, - ordered_range_constraints_3, - ordered_range_constraints_4 }; + return concatenate(WireToBeShiftedEntities::get_all(), + DerivedWitnessEntities::get_all()); }; /** * @brief Get the polynomials that need to be constructed from other polynomials by concatenation * - * @return std::vector + * @return RefVector */ - std::vector get_concatenated_constraints() - { - return { concatenated_range_constraints_0, - concatenated_range_constraints_1, - concatenated_range_constraints_2, - concatenated_range_constraints_3 }; - } + auto get_concatenated_constraints() { return ConcatenatedRangeConstraints::get_all(); } /** * @brief Get the polynomials that are concatenated for the permutation relation * - * @return std::vector> + * @return std::vector> */ - std::vector> get_concatenation_groups() + std::vector> get_concatenation_groups() { return { { - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - 
p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_0, + this->p_x_low_limbs_range_constraint_1, + this->p_x_low_limbs_range_constraint_2, + this->p_x_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_4, + this->p_x_low_limbs_range_constraint_tail, + this->p_x_high_limbs_range_constraint_0, + this->p_x_high_limbs_range_constraint_1, + this->p_x_high_limbs_range_constraint_2, + this->p_x_high_limbs_range_constraint_3, + this->p_x_high_limbs_range_constraint_4, + this->p_x_high_limbs_range_constraint_tail, + this->p_y_low_limbs_range_constraint_0, + this->p_y_low_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_2, + this->p_y_low_limbs_range_constraint_3, }, { - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_4, + this->p_y_low_limbs_range_constraint_tail, + this->p_y_high_limbs_range_constraint_0, + this->p_y_high_limbs_range_constraint_1, + this->p_y_high_limbs_range_constraint_2, + this->p_y_high_limbs_range_constraint_3, + this->p_y_high_limbs_range_constraint_4, + this->p_y_high_limbs_range_constraint_tail, + this->z_low_limbs_range_constraint_0, + this->z_low_limbs_range_constraint_1, + this->z_low_limbs_range_constraint_2, + this->z_low_limbs_range_constraint_3, + this->z_low_limbs_range_constraint_4, + this->z_low_limbs_range_constraint_tail, + this->z_high_limbs_range_constraint_0, + 
this->z_high_limbs_range_constraint_1, }, { - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, + this->z_high_limbs_range_constraint_2, + this->z_high_limbs_range_constraint_3, + this->z_high_limbs_range_constraint_4, + this->z_high_limbs_range_constraint_tail, + this->accumulator_low_limbs_range_constraint_0, + this->accumulator_low_limbs_range_constraint_1, + this->accumulator_low_limbs_range_constraint_2, + this->accumulator_low_limbs_range_constraint_3, + this->accumulator_low_limbs_range_constraint_4, + this->accumulator_low_limbs_range_constraint_tail, + this->accumulator_high_limbs_range_constraint_0, + this->accumulator_high_limbs_range_constraint_1, + this->accumulator_high_limbs_range_constraint_2, + this->accumulator_high_limbs_range_constraint_3, + this->accumulator_high_limbs_range_constraint_4, + this->accumulator_high_limbs_range_constraint_tail, }, { - quotient_low_limbs_range_constraint_0, - quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - 
quotient_high_limbs_range_constraint_tail, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_0, + this->quotient_low_limbs_range_constraint_1, + this->quotient_low_limbs_range_constraint_2, + this->quotient_low_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_4, + this->quotient_low_limbs_range_constraint_tail, + this->quotient_high_limbs_range_constraint_0, + this->quotient_high_limbs_range_constraint_1, + this->quotient_high_limbs_range_constraint_2, + this->quotient_high_limbs_range_constraint_3, + this->quotient_high_limbs_range_constraint_4, + this->quotient_high_limbs_range_constraint_tail, + this->relation_wide_limbs_range_constraint_0, + this->relation_wide_limbs_range_constraint_1, + this->relation_wide_limbs_range_constraint_2, + this->relation_wide_limbs_range_constraint_3, }, }; }; }; + /** + * @brief Represents polynomials shifted by 1 or their evaluations, defined relative to WireToBeShiftedEntities. 
+ */ + template class ShiftedEntities { + public: + DEFINE_FLAVOR_MEMBERS(DataType, + x_lo_y_hi_shift, // column 0 + x_hi_z_1_shift, // column 1 + y_lo_z_2_shift, // column 2 + p_x_low_limbs_shift, // column 3 + p_x_low_limbs_range_constraint_0_shift, // column 4 + p_x_low_limbs_range_constraint_1_shift, // column 5 + p_x_low_limbs_range_constraint_2_shift, // column 6 + p_x_low_limbs_range_constraint_3_shift, // column 7 + p_x_low_limbs_range_constraint_4_shift, // column 8 + p_x_low_limbs_range_constraint_tail_shift, // column 9 + p_x_high_limbs_shift, // column 10 + p_x_high_limbs_range_constraint_0_shift, // column 11 + p_x_high_limbs_range_constraint_1_shift, // column 12 + p_x_high_limbs_range_constraint_2_shift, // column 13 + p_x_high_limbs_range_constraint_3_shift, // column 14 + p_x_high_limbs_range_constraint_4_shift, // column 15 + p_x_high_limbs_range_constraint_tail_shift, // column 16 + p_y_low_limbs_shift, // column 17 + p_y_low_limbs_range_constraint_0_shift, // column 18 + p_y_low_limbs_range_constraint_1_shift, // column 19 + p_y_low_limbs_range_constraint_2_shift, // column 20 + p_y_low_limbs_range_constraint_3_shift, // column 21 + p_y_low_limbs_range_constraint_4_shift, // column 22 + p_y_low_limbs_range_constraint_tail_shift, // column 23 + p_y_high_limbs_shift, // column 24 + p_y_high_limbs_range_constraint_0_shift, // column 25 + p_y_high_limbs_range_constraint_1_shift, // column 26 + p_y_high_limbs_range_constraint_2_shift, // column 27 + p_y_high_limbs_range_constraint_3_shift, // column 28 + p_y_high_limbs_range_constraint_4_shift, // column 29 + p_y_high_limbs_range_constraint_tail_shift, // column 30 + z_low_limbs_shift, // column 31 + z_low_limbs_range_constraint_0_shift, // column 32 + z_low_limbs_range_constraint_1_shift, // column 33 + z_low_limbs_range_constraint_2_shift, // column 34 + z_low_limbs_range_constraint_3_shift, // column 35 + z_low_limbs_range_constraint_4_shift, // column 36 + z_low_limbs_range_constraint_tail_shift, 
// column 37 + z_high_limbs_shift, // column 38 + z_high_limbs_range_constraint_0_shift, // column 39 + z_high_limbs_range_constraint_1_shift, // column 40 + z_high_limbs_range_constraint_2_shift, // column 41 + z_high_limbs_range_constraint_3_shift, // column 42 + z_high_limbs_range_constraint_4_shift, // column 43 + z_high_limbs_range_constraint_tail_shift, // column 44 + accumulators_binary_limbs_0_shift, // column 45 + accumulators_binary_limbs_1_shift, // column 46 + accumulators_binary_limbs_2_shift, // column 47 + accumulators_binary_limbs_3_shift, // column 48 + accumulator_low_limbs_range_constraint_0_shift, // column 49 + accumulator_low_limbs_range_constraint_1_shift, // column 50 + accumulator_low_limbs_range_constraint_2_shift, // column 51 + accumulator_low_limbs_range_constraint_3_shift, // column 52 + accumulator_low_limbs_range_constraint_4_shift, // column 53 + accumulator_low_limbs_range_constraint_tail_shift, // column 54 + accumulator_high_limbs_range_constraint_0_shift, // column 55 + accumulator_high_limbs_range_constraint_1_shift, // column 56 + accumulator_high_limbs_range_constraint_2_shift, // column 57 + accumulator_high_limbs_range_constraint_3_shift, // column 58 + accumulator_high_limbs_range_constraint_4_shift, // column 59 + accumulator_high_limbs_range_constraint_tail_shift, // column 60 + quotient_low_binary_limbs_shift, // column 61 + quotient_high_binary_limbs_shift, // column 62 + quotient_low_limbs_range_constraint_0_shift, // column 63 + quotient_low_limbs_range_constraint_1_shift, // column 64 + quotient_low_limbs_range_constraint_2_shift, // column 65 + quotient_low_limbs_range_constraint_3_shift, // column 66 + quotient_low_limbs_range_constraint_4_shift, // column 67 + quotient_low_limbs_range_constraint_tail_shift, // column 68 + quotient_high_limbs_range_constraint_0_shift, // column 69 + quotient_high_limbs_range_constraint_1_shift, // column 70 + quotient_high_limbs_range_constraint_2_shift, // column 71 + 
quotient_high_limbs_range_constraint_3_shift, // column 72 + quotient_high_limbs_range_constraint_4_shift, // column 73 + quotient_high_limbs_range_constraint_tail_shift, // column 74 + relation_wide_limbs_shift, // column 75 + relation_wide_limbs_range_constraint_0_shift, // column 76 + relation_wide_limbs_range_constraint_1_shift, // column 77 + relation_wide_limbs_range_constraint_2_shift, // column 78 + relation_wide_limbs_range_constraint_3_shift, // column 79 + ordered_range_constraints_0_shift, // column 80 + ordered_range_constraints_1_shift, // column 81 + ordered_range_constraints_2_shift, // column 82 + ordered_range_constraints_3_shift, // column 83 + ordered_range_constraints_4_shift, // column 84 + z_perm_shift) // column 85 + }; /** * @brief A base class labelling all entities (for instance, all of the polynomials used by the prover during * sumcheck) in this Honk variant along with particular subsets of interest * @details Used to build containers for: the prover's polynomial during sumcheck; the sumcheck's folded * polynomials; the univariates consturcted during during sumcheck; the evaluations produced by sumcheck. * - * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "shiftEntities". It could be - * implemented as such, but we have this now. + * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + ShiftedEntities. 
*/ - template - class AllEntities : public AllEntities_ { + template + class AllEntities : public PrecomputedEntities, + public WitnessEntities, + public ShiftedEntities { public: - DataType op; // column 0 - DataType x_lo_y_hi; // column 1 - DataType x_hi_z_1; // column 2 - DataType y_lo_z_2; // column 3 - DataType p_x_low_limbs; // column 4 - DataType p_x_low_limbs_range_constraint_0; // column 5 - DataType p_x_low_limbs_range_constraint_1; // column 6 - DataType p_x_low_limbs_range_constraint_2; // column 7 - DataType p_x_low_limbs_range_constraint_3; // column 8 - DataType p_x_low_limbs_range_constraint_4; // column 9 - DataType p_x_low_limbs_range_constraint_tail; // column 10 - DataType p_x_high_limbs; // column 11 - DataType p_x_high_limbs_range_constraint_0; // column 12 - DataType p_x_high_limbs_range_constraint_1; // column 13 - DataType p_x_high_limbs_range_constraint_2; // column 14 - DataType p_x_high_limbs_range_constraint_3; // column 15 - DataType p_x_high_limbs_range_constraint_4; // column 16 - DataType p_x_high_limbs_range_constraint_tail; // column 17 - DataType p_y_low_limbs; // column 18 - DataType p_y_low_limbs_range_constraint_0; // column 19 - DataType p_y_low_limbs_range_constraint_1; // column 20 - DataType p_y_low_limbs_range_constraint_2; // column 21 - DataType p_y_low_limbs_range_constraint_3; // column 22 - DataType p_y_low_limbs_range_constraint_4; // column 23 - DataType p_y_low_limbs_range_constraint_tail; // column 24 - DataType p_y_high_limbs; // column 25 - DataType p_y_high_limbs_range_constraint_0; // column 26 - DataType p_y_high_limbs_range_constraint_1; // column 27 - DataType p_y_high_limbs_range_constraint_2; // column 28 - DataType p_y_high_limbs_range_constraint_3; // column 29 - DataType p_y_high_limbs_range_constraint_4; // column 30 - DataType p_y_high_limbs_range_constraint_tail; // column 31 - DataType z_low_limbs; // column 32 - DataType z_low_limbs_range_constraint_0; // column 33 - DataType 
z_low_limbs_range_constraint_1; // column 34 - DataType z_low_limbs_range_constraint_2; // column 35 - DataType z_low_limbs_range_constraint_3; // column 36 - DataType z_low_limbs_range_constraint_4; // column 37 - DataType z_low_limbs_range_constraint_tail; // column 38 - DataType z_high_limbs; // column 39 - DataType z_high_limbs_range_constraint_0; // column 40 - DataType z_high_limbs_range_constraint_1; // column 41 - DataType z_high_limbs_range_constraint_2; // column 42 - DataType z_high_limbs_range_constraint_3; // column 43 - DataType z_high_limbs_range_constraint_4; // column 44 - DataType z_high_limbs_range_constraint_tail; // column 45 - DataType accumulators_binary_limbs_0; // column 46 - DataType accumulators_binary_limbs_1; // column 47 - DataType accumulators_binary_limbs_2; // column 48 - DataType accumulators_binary_limbs_3; // column 49 - DataType accumulator_low_limbs_range_constraint_0; // column 50 - DataType accumulator_low_limbs_range_constraint_1; // column 51 - DataType accumulator_low_limbs_range_constraint_2; // column 52 - DataType accumulator_low_limbs_range_constraint_3; // column 53 - DataType accumulator_low_limbs_range_constraint_4; // column 54 - DataType accumulator_low_limbs_range_constraint_tail; // column 55 - DataType accumulator_high_limbs_range_constraint_0; // column 56 - DataType accumulator_high_limbs_range_constraint_1; // column 57 - DataType accumulator_high_limbs_range_constraint_2; // column 58 - DataType accumulator_high_limbs_range_constraint_3; // column 59 - DataType accumulator_high_limbs_range_constraint_4; // column 60 - DataType accumulator_high_limbs_range_constraint_tail; // column 61 - DataType quotient_low_binary_limbs; // column 62 - DataType quotient_high_binary_limbs; // column 63 - DataType quotient_low_limbs_range_constraint_0; // column 64 - DataType quotient_low_limbs_range_constraint_1; // column 65 - DataType quotient_low_limbs_range_constraint_2; // column 66 - DataType 
quotient_low_limbs_range_constraint_3; // column 67 - DataType quotient_low_limbs_range_constraint_4; // column 68 - DataType quotient_low_limbs_range_constraint_tail; // column 69 - DataType quotient_high_limbs_range_constraint_0; // column 70 - DataType quotient_high_limbs_range_constraint_1; // column 71 - DataType quotient_high_limbs_range_constraint_2; // column 72 - DataType quotient_high_limbs_range_constraint_3; // column 73 - DataType quotient_high_limbs_range_constraint_4; // column 74 - DataType quotient_high_limbs_range_constraint_tail; // column 75 - DataType relation_wide_limbs; // column 76 - DataType relation_wide_limbs_range_constraint_0; // column 77 - DataType relation_wide_limbs_range_constraint_1; // column 78 - DataType relation_wide_limbs_range_constraint_2; // column 79 - DataType relation_wide_limbs_range_constraint_3; // column 80 - DataType concatenated_range_constraints_0; // column 81 - DataType concatenated_range_constraints_1; // column 82 - DataType concatenated_range_constraints_2; // column 83 - DataType concatenated_range_constraints_3; // column 84 - DataType ordered_range_constraints_0; // column 85 - DataType ordered_range_constraints_1; // column 86 - DataType ordered_range_constraints_2; // column 87 - DataType ordered_range_constraints_3; // column 88 - DataType ordered_range_constraints_4; // column 89 - DataType z_perm; // column 90 - DataType x_lo_y_hi_shift; // column 91 - DataType x_hi_z_1_shift; // column 92 - DataType y_lo_z_2_shift; // column 93 - DataType p_x_low_limbs_shift; // column 94 - DataType p_x_low_limbs_range_constraint_0_shift; // column 95 - DataType p_x_low_limbs_range_constraint_1_shift; // column 96 - DataType p_x_low_limbs_range_constraint_2_shift; // column 97 - DataType p_x_low_limbs_range_constraint_3_shift; // column 98 - DataType p_x_low_limbs_range_constraint_4_shift; // column 99 - DataType p_x_low_limbs_range_constraint_tail_shift; // column 100 - DataType p_x_high_limbs_shift; // column 101 
- DataType p_x_high_limbs_range_constraint_0_shift; // column 102 - DataType p_x_high_limbs_range_constraint_1_shift; // column 103 - DataType p_x_high_limbs_range_constraint_2_shift; // column 104 - DataType p_x_high_limbs_range_constraint_3_shift; // column 105 - DataType p_x_high_limbs_range_constraint_4_shift; // column 106 - DataType p_x_high_limbs_range_constraint_tail_shift; // column 107 - DataType p_y_low_limbs_shift; // column 108 - DataType p_y_low_limbs_range_constraint_0_shift; // column 109 - DataType p_y_low_limbs_range_constraint_1_shift; // column 110 - DataType p_y_low_limbs_range_constraint_2_shift; // column 111 - DataType p_y_low_limbs_range_constraint_3_shift; // column 112 - DataType p_y_low_limbs_range_constraint_4_shift; // column 113 - DataType p_y_low_limbs_range_constraint_tail_shift; // column 114 - DataType p_y_high_limbs_shift; // column 115 - DataType p_y_high_limbs_range_constraint_0_shift; // column 116 - DataType p_y_high_limbs_range_constraint_1_shift; // column 117 - DataType p_y_high_limbs_range_constraint_2_shift; // column 118 - DataType p_y_high_limbs_range_constraint_3_shift; // column 119 - DataType p_y_high_limbs_range_constraint_4_shift; // column 120 - DataType p_y_high_limbs_range_constraint_tail_shift; // column 121 - DataType z_low_limbs_shift; // column 122 - DataType z_low_limbs_range_constraint_0_shift; // column 123 - DataType z_low_limbs_range_constraint_1_shift; // column 124 - DataType z_low_limbs_range_constraint_2_shift; // column 125 - DataType z_low_limbs_range_constraint_3_shift; // column 126 - DataType z_low_limbs_range_constraint_4_shift; // column 127 - DataType z_low_limbs_range_constraint_tail_shift; // column 128 - DataType z_high_limbs_shift; // column 129 - DataType z_high_limbs_range_constraint_0_shift; // column 130 - DataType z_high_limbs_range_constraint_1_shift; // column 131 - DataType z_high_limbs_range_constraint_2_shift; // column 132 - DataType z_high_limbs_range_constraint_3_shift; // 
column 133 - DataType z_high_limbs_range_constraint_4_shift; // column 134 - DataType z_high_limbs_range_constraint_tail_shift; // column 135 - DataType accumulators_binary_limbs_0_shift; // column 136 - DataType accumulators_binary_limbs_1_shift; // column 137 - DataType accumulators_binary_limbs_2_shift; // column 138 - DataType accumulators_binary_limbs_3_shift; // column 139 - DataType accumulator_low_limbs_range_constraint_0_shift; // column 140 - DataType accumulator_low_limbs_range_constraint_1_shift; // column 141 - DataType accumulator_low_limbs_range_constraint_2_shift; // column 142 - DataType accumulator_low_limbs_range_constraint_3_shift; // column 143 - DataType accumulator_low_limbs_range_constraint_4_shift; // column 144 - DataType accumulator_low_limbs_range_constraint_tail_shift; // column 145 - DataType accumulator_high_limbs_range_constraint_0_shift; // column 146 - DataType accumulator_high_limbs_range_constraint_1_shift; // column 147 - DataType accumulator_high_limbs_range_constraint_2_shift; // column 148 - DataType accumulator_high_limbs_range_constraint_3_shift; // column 149 - DataType accumulator_high_limbs_range_constraint_4_shift; // column 150 - DataType accumulator_high_limbs_range_constraint_tail_shift; // column 151 - DataType quotient_low_binary_limbs_shift; // column 152 - DataType quotient_high_binary_limbs_shift; // column 153 - DataType quotient_low_limbs_range_constraint_0_shift; // column 154 - DataType quotient_low_limbs_range_constraint_1_shift; // column 155 - DataType quotient_low_limbs_range_constraint_2_shift; // column 156 - DataType quotient_low_limbs_range_constraint_3_shift; // column 157 - DataType quotient_low_limbs_range_constraint_4_shift; // column 158 - DataType quotient_low_limbs_range_constraint_tail_shift; // column 159 - DataType quotient_high_limbs_range_constraint_0_shift; // column 160 - DataType quotient_high_limbs_range_constraint_1_shift; // column 161 - DataType 
quotient_high_limbs_range_constraint_2_shift; // column 162 - DataType quotient_high_limbs_range_constraint_3_shift; // column 163 - DataType quotient_high_limbs_range_constraint_4_shift; // column 164 - DataType quotient_high_limbs_range_constraint_tail_shift; // column 165 - DataType relation_wide_limbs_shift; // column 166 - DataType relation_wide_limbs_range_constraint_0_shift; // column 167 - DataType relation_wide_limbs_range_constraint_1_shift; // column 168 - DataType relation_wide_limbs_range_constraint_2_shift; // column 169 - DataType relation_wide_limbs_range_constraint_3_shift; // column 170 - DataType ordered_range_constraints_0_shift; // column 171 - DataType ordered_range_constraints_1_shift; // column 172 - DataType ordered_range_constraints_2_shift; // column 173 - DataType ordered_range_constraints_3_shift; // column 174 - DataType ordered_range_constraints_4_shift; // column 175 - DataType z_perm_shift; // column 176 - DataType lagrange_first; // column 177 - DataType lagrange_last; // column 178 - DataType lagrange_odd_in_minicircuit; // column 179 - DataType lagrange_even_in_minicircuit; // column 180 - DataType lagrange_second; // column 181 - DataType lagrange_second_to_last_in_minicircuit; // column 182 - DataType ordered_extra_range_constraints_numerator; // column 183 - // defines a method pointer_view that returns the following, with const and non-const variants - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &op, - &x_lo_y_hi, - &x_hi_z_1, - &y_lo_z_2, - &p_x_low_limbs, - &p_x_low_limbs_range_constraint_0, - &p_x_low_limbs_range_constraint_1, - &p_x_low_limbs_range_constraint_2, - &p_x_low_limbs_range_constraint_3, - &p_x_low_limbs_range_constraint_4, - &p_x_low_limbs_range_constraint_tail, - &p_x_high_limbs, - &p_x_high_limbs_range_constraint_0, - &p_x_high_limbs_range_constraint_1, - &p_x_high_limbs_range_constraint_2, - &p_x_high_limbs_range_constraint_3, - &p_x_high_limbs_range_constraint_4, - &p_x_high_limbs_range_constraint_tail, - 
&p_y_low_limbs, - &p_y_low_limbs_range_constraint_0, - &p_y_low_limbs_range_constraint_1, - &p_y_low_limbs_range_constraint_2, - &p_y_low_limbs_range_constraint_3, - &p_y_low_limbs_range_constraint_4, - &p_y_low_limbs_range_constraint_tail, - &p_y_high_limbs, - &p_y_high_limbs_range_constraint_0, - &p_y_high_limbs_range_constraint_1, - &p_y_high_limbs_range_constraint_2, - &p_y_high_limbs_range_constraint_3, - &p_y_high_limbs_range_constraint_4, - &p_y_high_limbs_range_constraint_tail, - &z_low_limbs, - &z_low_limbs_range_constraint_0, - &z_low_limbs_range_constraint_1, - &z_low_limbs_range_constraint_2, - &z_low_limbs_range_constraint_3, - &z_low_limbs_range_constraint_4, - &z_low_limbs_range_constraint_tail, - &z_high_limbs, - &z_high_limbs_range_constraint_0, - &z_high_limbs_range_constraint_1, - &z_high_limbs_range_constraint_2, - &z_high_limbs_range_constraint_3, - &z_high_limbs_range_constraint_4, - &z_high_limbs_range_constraint_tail, - &accumulators_binary_limbs_0, - &accumulators_binary_limbs_1, - &accumulators_binary_limbs_2, - &accumulators_binary_limbs_3, - &accumulator_low_limbs_range_constraint_0, - &accumulator_low_limbs_range_constraint_1, - &accumulator_low_limbs_range_constraint_2, - &accumulator_low_limbs_range_constraint_3, - &accumulator_low_limbs_range_constraint_4, - &accumulator_low_limbs_range_constraint_tail, - &accumulator_high_limbs_range_constraint_0, - &accumulator_high_limbs_range_constraint_1, - &accumulator_high_limbs_range_constraint_2, - &accumulator_high_limbs_range_constraint_3, - &accumulator_high_limbs_range_constraint_4, - &accumulator_high_limbs_range_constraint_tail, - "ient_low_binary_limbs, - "ient_high_binary_limbs, - "ient_low_limbs_range_constraint_0, - "ient_low_limbs_range_constraint_1, - "ient_low_limbs_range_constraint_2, - "ient_low_limbs_range_constraint_3, - "ient_low_limbs_range_constraint_4, - "ient_low_limbs_range_constraint_tail, - "ient_high_limbs_range_constraint_0, - "ient_high_limbs_range_constraint_1, - 
"ient_high_limbs_range_constraint_2, - "ient_high_limbs_range_constraint_3, - "ient_high_limbs_range_constraint_4, - "ient_high_limbs_range_constraint_tail, - &relation_wide_limbs, - &relation_wide_limbs_range_constraint_0, - &relation_wide_limbs_range_constraint_1, - &relation_wide_limbs_range_constraint_2, - &relation_wide_limbs_range_constraint_3, - &concatenated_range_constraints_0, - &concatenated_range_constraints_1, - &concatenated_range_constraints_2, - &concatenated_range_constraints_3, - &ordered_range_constraints_0, - &ordered_range_constraints_1, - &ordered_range_constraints_2, - &ordered_range_constraints_3, - &ordered_range_constraints_4, - &z_perm, - &x_lo_y_hi_shift, - &x_hi_z_1_shift, - &y_lo_z_2_shift, - &p_x_low_limbs_shift, - &p_x_low_limbs_range_constraint_0_shift, - &p_x_low_limbs_range_constraint_1_shift, - &p_x_low_limbs_range_constraint_2_shift, - &p_x_low_limbs_range_constraint_3_shift, - &p_x_low_limbs_range_constraint_4_shift, - &p_x_low_limbs_range_constraint_tail_shift, - &p_x_high_limbs_shift, - &p_x_high_limbs_range_constraint_0_shift, - &p_x_high_limbs_range_constraint_1_shift, - &p_x_high_limbs_range_constraint_2_shift, - &p_x_high_limbs_range_constraint_3_shift, - &p_x_high_limbs_range_constraint_4_shift, - &p_x_high_limbs_range_constraint_tail_shift, - &p_y_low_limbs_shift, - &p_y_low_limbs_range_constraint_0_shift, - &p_y_low_limbs_range_constraint_1_shift, - &p_y_low_limbs_range_constraint_2_shift, - &p_y_low_limbs_range_constraint_3_shift, - &p_y_low_limbs_range_constraint_4_shift, - &p_y_low_limbs_range_constraint_tail_shift, - &p_y_high_limbs_shift, - &p_y_high_limbs_range_constraint_0_shift, - &p_y_high_limbs_range_constraint_1_shift, - &p_y_high_limbs_range_constraint_2_shift, - &p_y_high_limbs_range_constraint_3_shift, - &p_y_high_limbs_range_constraint_4_shift, - &p_y_high_limbs_range_constraint_tail_shift, - &z_low_limbs_shift, - &z_low_limbs_range_constraint_0_shift, - &z_low_limbs_range_constraint_1_shift, - 
&z_low_limbs_range_constraint_2_shift, - &z_low_limbs_range_constraint_3_shift, - &z_low_limbs_range_constraint_4_shift, - &z_low_limbs_range_constraint_tail_shift, - &z_high_limbs_shift, - &z_high_limbs_range_constraint_0_shift, - &z_high_limbs_range_constraint_1_shift, - &z_high_limbs_range_constraint_2_shift, - &z_high_limbs_range_constraint_3_shift, - &z_high_limbs_range_constraint_4_shift, - &z_high_limbs_range_constraint_tail_shift, - &accumulators_binary_limbs_0_shift, - &accumulators_binary_limbs_1_shift, - &accumulators_binary_limbs_2_shift, - &accumulators_binary_limbs_3_shift, - &accumulator_low_limbs_range_constraint_0_shift, - &accumulator_low_limbs_range_constraint_1_shift, - &accumulator_low_limbs_range_constraint_2_shift, - &accumulator_low_limbs_range_constraint_3_shift, - &accumulator_low_limbs_range_constraint_4_shift, - &accumulator_low_limbs_range_constraint_tail_shift, - &accumulator_high_limbs_range_constraint_0_shift, - &accumulator_high_limbs_range_constraint_1_shift, - &accumulator_high_limbs_range_constraint_2_shift, - &accumulator_high_limbs_range_constraint_3_shift, - &accumulator_high_limbs_range_constraint_4_shift, - &accumulator_high_limbs_range_constraint_tail_shift, - "ient_low_binary_limbs_shift, - "ient_high_binary_limbs_shift, - "ient_low_limbs_range_constraint_0_shift, - "ient_low_limbs_range_constraint_1_shift, - "ient_low_limbs_range_constraint_2_shift, - "ient_low_limbs_range_constraint_3_shift, - "ient_low_limbs_range_constraint_4_shift, - "ient_low_limbs_range_constraint_tail_shift, - "ient_high_limbs_range_constraint_0_shift, - "ient_high_limbs_range_constraint_1_shift, - "ient_high_limbs_range_constraint_2_shift, - "ient_high_limbs_range_constraint_3_shift, - "ient_high_limbs_range_constraint_4_shift, - "ient_high_limbs_range_constraint_tail_shift, - &relation_wide_limbs_shift, - &relation_wide_limbs_range_constraint_0_shift, - &relation_wide_limbs_range_constraint_1_shift, - 
&relation_wide_limbs_range_constraint_2_shift, - &relation_wide_limbs_range_constraint_3_shift, - &ordered_range_constraints_0_shift, - &ordered_range_constraints_1_shift, - &ordered_range_constraints_2_shift, - &ordered_range_constraints_3_shift, - &ordered_range_constraints_4_shift, - &z_perm_shift, - &lagrange_first, - &lagrange_last, - &lagrange_odd_in_minicircuit, - &lagrange_even_in_minicircuit, - &lagrange_second, - &lagrange_second_to_last_in_minicircuit, - &ordered_extra_range_constraints_numerator) - std::vector get_wires() override + // Initialize members + AllEntities() + : PrecomputedEntities{} + , WitnessEntities{} + , ShiftedEntities{} + {} + RefVector get_wires() { + return { this->op, + this->x_lo_y_hi, + this->x_hi_z_1, + this->y_lo_z_2, + this->p_x_low_limbs, + this->p_x_low_limbs_range_constraint_0, + this->p_x_low_limbs_range_constraint_1, + this->p_x_low_limbs_range_constraint_2, + this->p_x_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_4, + this->p_x_low_limbs_range_constraint_tail, + this->p_x_high_limbs, + this->p_x_high_limbs_range_constraint_0, + this->p_x_high_limbs_range_constraint_1, + this->p_x_high_limbs_range_constraint_2, + this->p_x_high_limbs_range_constraint_3, + this->p_x_high_limbs_range_constraint_4, + this->p_x_high_limbs_range_constraint_tail, + this->p_y_low_limbs, + this->p_y_low_limbs_range_constraint_0, + this->p_y_low_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_2, + this->p_y_low_limbs_range_constraint_3, + this->p_y_low_limbs_range_constraint_4, + this->p_y_low_limbs_range_constraint_tail, + this->p_y_high_limbs, + this->p_y_high_limbs_range_constraint_0, + this->p_y_high_limbs_range_constraint_1, + this->p_y_high_limbs_range_constraint_2, + this->p_y_high_limbs_range_constraint_3, + this->p_y_high_limbs_range_constraint_4, + this->p_y_high_limbs_range_constraint_tail, + this->z_low_limbs, + this->z_low_limbs_range_constraint_0, + this->z_low_limbs_range_constraint_1, + 
this->z_low_limbs_range_constraint_2, + this->z_low_limbs_range_constraint_3, + this->z_low_limbs_range_constraint_4, + this->z_low_limbs_range_constraint_tail, + this->z_high_limbs, + this->z_high_limbs_range_constraint_0, + this->z_high_limbs_range_constraint_1, + this->z_high_limbs_range_constraint_2, + this->z_high_limbs_range_constraint_3, + this->z_high_limbs_range_constraint_4, + this->z_high_limbs_range_constraint_tail, + this->accumulators_binary_limbs_0, + this->accumulators_binary_limbs_1, + this->accumulators_binary_limbs_2, + this->accumulators_binary_limbs_3, + this->accumulator_low_limbs_range_constraint_0, + this->accumulator_low_limbs_range_constraint_1, + this->accumulator_low_limbs_range_constraint_2, + this->accumulator_low_limbs_range_constraint_3, + this->accumulator_low_limbs_range_constraint_4, + this->accumulator_low_limbs_range_constraint_tail, + this->accumulator_high_limbs_range_constraint_0, + this->accumulator_high_limbs_range_constraint_1, + this->accumulator_high_limbs_range_constraint_2, + this->accumulator_high_limbs_range_constraint_3, + this->accumulator_high_limbs_range_constraint_4, + this->accumulator_high_limbs_range_constraint_tail, + this->quotient_low_binary_limbs, + this->quotient_high_binary_limbs, + this->quotient_low_limbs_range_constraint_0, + this->quotient_low_limbs_range_constraint_1, + this->quotient_low_limbs_range_constraint_2, + this->quotient_low_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_4, + this->quotient_low_limbs_range_constraint_tail, + this->quotient_high_limbs_range_constraint_0, + this->quotient_high_limbs_range_constraint_1, + this->quotient_high_limbs_range_constraint_2, + this->quotient_high_limbs_range_constraint_3, + this->quotient_high_limbs_range_constraint_4, + this->quotient_high_limbs_range_constraint_tail, + this->relation_wide_limbs, + this->relation_wide_limbs_range_constraint_0, + this->relation_wide_limbs_range_constraint_1, + 
this->relation_wide_limbs_range_constraint_2, + this->relation_wide_limbs_range_constraint_3, + this->ordered_range_constraints_0, + this->ordered_range_constraints_1, + this->ordered_range_constraints_2, + this->ordered_range_constraints_3, + this->ordered_range_constraints_4 }; + } - return { op, - x_lo_y_hi, - x_hi_z_1, - y_lo_z_2, - p_x_low_limbs, - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs, - p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - accumulators_binary_limbs_0, - accumulators_binary_limbs_1, - accumulators_binary_limbs_2, - accumulators_binary_limbs_3, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - 
accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, - quotient_low_binary_limbs, - quotient_high_binary_limbs, - quotient_low_limbs_range_constraint_0, - quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - quotient_high_limbs_range_constraint_tail, - relation_wide_limbs, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, - ordered_range_constraints_0, - ordered_range_constraints_1, - ordered_range_constraints_2, - ordered_range_constraints_3, - ordered_range_constraints_4 }; - }; - + DEFINE_COMPOUND_GET_ALL(PrecomputedEntities::get_all(), + WitnessEntities::get_all(), + ShiftedEntities::get_all()) + DEFINE_COMPOUND_POINTER_VIEW(PrecomputedEntities::pointer_view(), + WitnessEntities::pointer_view(), + ShiftedEntities::pointer_view()) /** * @brief Get the polynomials that are concatenated for the permutation relation * - * @return std::vector> + * @return std::vector> */ - std::vector> get_concatenation_groups() + std::vector> get_concatenation_groups() { return { { - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - 
p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_0, + this->p_x_low_limbs_range_constraint_1, + this->p_x_low_limbs_range_constraint_2, + this->p_x_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_4, + this->p_x_low_limbs_range_constraint_tail, + this->p_x_high_limbs_range_constraint_0, + this->p_x_high_limbs_range_constraint_1, + this->p_x_high_limbs_range_constraint_2, + this->p_x_high_limbs_range_constraint_3, + this->p_x_high_limbs_range_constraint_4, + this->p_x_high_limbs_range_constraint_tail, + this->p_y_low_limbs_range_constraint_0, + this->p_y_low_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_2, + this->p_y_low_limbs_range_constraint_3, }, { - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_4, + this->p_y_low_limbs_range_constraint_tail, + this->p_y_high_limbs_range_constraint_0, + this->p_y_high_limbs_range_constraint_1, + this->p_y_high_limbs_range_constraint_2, + 
this->p_y_high_limbs_range_constraint_3, + this->p_y_high_limbs_range_constraint_4, + this->p_y_high_limbs_range_constraint_tail, + this->z_low_limbs_range_constraint_0, + this->z_low_limbs_range_constraint_1, + this->z_low_limbs_range_constraint_2, + this->z_low_limbs_range_constraint_3, + this->z_low_limbs_range_constraint_4, + this->z_low_limbs_range_constraint_tail, + this->z_high_limbs_range_constraint_0, + this->z_high_limbs_range_constraint_1, }, { - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, + this->z_high_limbs_range_constraint_2, + this->z_high_limbs_range_constraint_3, + this->z_high_limbs_range_constraint_4, + this->z_high_limbs_range_constraint_tail, + this->accumulator_low_limbs_range_constraint_0, + this->accumulator_low_limbs_range_constraint_1, + this->accumulator_low_limbs_range_constraint_2, + this->accumulator_low_limbs_range_constraint_3, + this->accumulator_low_limbs_range_constraint_4, + this->accumulator_low_limbs_range_constraint_tail, + this->accumulator_high_limbs_range_constraint_0, + this->accumulator_high_limbs_range_constraint_1, + this->accumulator_high_limbs_range_constraint_2, + this->accumulator_high_limbs_range_constraint_3, + this->accumulator_high_limbs_range_constraint_4, + this->accumulator_high_limbs_range_constraint_tail, }, { - quotient_low_limbs_range_constraint_0, - 
quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - quotient_high_limbs_range_constraint_tail, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_0, + this->quotient_low_limbs_range_constraint_1, + this->quotient_low_limbs_range_constraint_2, + this->quotient_low_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_4, + this->quotient_low_limbs_range_constraint_tail, + this->quotient_high_limbs_range_constraint_0, + this->quotient_high_limbs_range_constraint_1, + this->quotient_high_limbs_range_constraint_2, + this->quotient_high_limbs_range_constraint_3, + this->quotient_high_limbs_range_constraint_4, + this->quotient_high_limbs_range_constraint_tail, + this->relation_wide_limbs_range_constraint_0, + this->relation_wide_limbs_range_constraint_1, + this->relation_wide_limbs_range_constraint_2, + this->relation_wide_limbs_range_constraint_3, }, }; } /** * @brief Get the polynomials that need to be constructed from other polynomials by concatenation * - * @return std::vector + * @return RefVector */ - std::vector get_concatenated_constraints() + RefVector get_concatenated_constraints() { - return { concatenated_range_constraints_0, - concatenated_range_constraints_1, - concatenated_range_constraints_2, - concatenated_range_constraints_3 }; + return ConcatenatedRangeConstraints::get_all(); }; /** * @brief Get the polynomials from the grand product denominator * - * @return std::vector + * @return RefVector */ - std::vector 
get_ordered_constraints() + RefVector get_ordered_constraints() { - return { ordered_range_constraints_0, - ordered_range_constraints_1, - ordered_range_constraints_2, - ordered_range_constraints_3, - ordered_range_constraints_4 }; + return { this->ordered_range_constraints_0, + this->ordered_range_constraints_1, + this->ordered_range_constraints_2, + this->ordered_range_constraints_3, + this->ordered_range_constraints_4 }; }; // Gemini-specific getters. - std::vector get_unshifted() override + RefVector get_unshifted() { return { - op, - x_lo_y_hi, - x_hi_z_1, - y_lo_z_2, - p_x_low_limbs, - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs, - p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - 
accumulators_binary_limbs_0, - accumulators_binary_limbs_1, - accumulators_binary_limbs_2, - accumulators_binary_limbs_3, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, - quotient_low_binary_limbs, - quotient_high_binary_limbs, - quotient_low_limbs_range_constraint_0, - quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - quotient_high_limbs_range_constraint_tail, - relation_wide_limbs, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, - ordered_range_constraints_0, - ordered_range_constraints_1, - ordered_range_constraints_2, - ordered_range_constraints_3, - ordered_range_constraints_4, - z_perm, - - lagrange_first, - lagrange_last, - lagrange_odd_in_minicircuit, - lagrange_even_in_minicircuit, - lagrange_second, - lagrange_second_to_last_in_minicircuit, - ordered_extra_range_constraints_numerator, + this->x_lo_y_hi, + this->x_hi_z_1, + this->y_lo_z_2, + this->p_x_low_limbs, + this->p_x_low_limbs_range_constraint_0, + this->p_x_low_limbs_range_constraint_1, + this->p_x_low_limbs_range_constraint_2, + 
this->p_x_low_limbs_range_constraint_3, + this->p_x_low_limbs_range_constraint_4, + this->p_x_low_limbs_range_constraint_tail, + this->p_x_high_limbs, + this->p_x_high_limbs_range_constraint_0, + this->p_x_high_limbs_range_constraint_1, + this->p_x_high_limbs_range_constraint_2, + this->p_x_high_limbs_range_constraint_3, + this->p_x_high_limbs_range_constraint_4, + this->p_x_high_limbs_range_constraint_tail, + this->p_y_low_limbs, + this->p_y_low_limbs_range_constraint_0, + this->p_y_low_limbs_range_constraint_1, + this->p_y_low_limbs_range_constraint_2, + this->p_y_low_limbs_range_constraint_3, + this->p_y_low_limbs_range_constraint_4, + this->p_y_low_limbs_range_constraint_tail, + this->p_y_high_limbs, + this->p_y_high_limbs_range_constraint_0, + this->p_y_high_limbs_range_constraint_1, + this->p_y_high_limbs_range_constraint_2, + this->p_y_high_limbs_range_constraint_3, + this->p_y_high_limbs_range_constraint_4, + this->p_y_high_limbs_range_constraint_tail, + this->z_low_limbs, + this->z_low_limbs_range_constraint_0, + this->z_low_limbs_range_constraint_1, + this->z_low_limbs_range_constraint_2, + this->z_low_limbs_range_constraint_3, + this->z_low_limbs_range_constraint_4, + this->z_low_limbs_range_constraint_tail, + this->z_high_limbs, + this->z_high_limbs_range_constraint_0, + this->z_high_limbs_range_constraint_1, + this->z_high_limbs_range_constraint_2, + this->z_high_limbs_range_constraint_3, + this->z_high_limbs_range_constraint_4, + this->z_high_limbs_range_constraint_tail, + this->accumulators_binary_limbs_0, + this->accumulators_binary_limbs_1, + this->accumulators_binary_limbs_2, + this->accumulators_binary_limbs_3, + this->accumulator_low_limbs_range_constraint_0, + this->accumulator_low_limbs_range_constraint_1, + this->accumulator_low_limbs_range_constraint_2, + this->accumulator_low_limbs_range_constraint_3, + this->accumulator_low_limbs_range_constraint_4, + this->accumulator_low_limbs_range_constraint_tail, + 
this->accumulator_high_limbs_range_constraint_0, + this->accumulator_high_limbs_range_constraint_1, + this->accumulator_high_limbs_range_constraint_2, + this->accumulator_high_limbs_range_constraint_3, + this->accumulator_high_limbs_range_constraint_4, + this->accumulator_high_limbs_range_constraint_tail, + this->quotient_low_binary_limbs, + this->quotient_high_binary_limbs, + this->quotient_low_limbs_range_constraint_0, + this->quotient_low_limbs_range_constraint_1, + this->quotient_low_limbs_range_constraint_2, + this->quotient_low_limbs_range_constraint_3, + this->quotient_low_limbs_range_constraint_4, + this->quotient_low_limbs_range_constraint_tail, + this->quotient_high_limbs_range_constraint_0, + this->quotient_high_limbs_range_constraint_1, + this->quotient_high_limbs_range_constraint_2, + this->quotient_high_limbs_range_constraint_3, + this->quotient_high_limbs_range_constraint_4, + this->quotient_high_limbs_range_constraint_tail, + this->relation_wide_limbs, + this->relation_wide_limbs_range_constraint_0, + this->relation_wide_limbs_range_constraint_1, + this->relation_wide_limbs_range_constraint_2, + this->relation_wide_limbs_range_constraint_3, + this->ordered_range_constraints_0, + this->ordered_range_constraints_1, + this->ordered_range_constraints_2, + this->ordered_range_constraints_3, + this->ordered_range_constraints_4, + this->z_perm, }; - }; - std::vector get_to_be_shifted() override - { - return { - x_lo_y_hi, - x_hi_z_1, - y_lo_z_2, - p_x_low_limbs, - p_x_low_limbs_range_constraint_0, - p_x_low_limbs_range_constraint_1, - p_x_low_limbs_range_constraint_2, - p_x_low_limbs_range_constraint_3, - p_x_low_limbs_range_constraint_4, - p_x_low_limbs_range_constraint_tail, - p_x_high_limbs, - p_x_high_limbs_range_constraint_0, - p_x_high_limbs_range_constraint_1, - p_x_high_limbs_range_constraint_2, - p_x_high_limbs_range_constraint_3, - p_x_high_limbs_range_constraint_4, - p_x_high_limbs_range_constraint_tail, - p_y_low_limbs, - 
p_y_low_limbs_range_constraint_0, - p_y_low_limbs_range_constraint_1, - p_y_low_limbs_range_constraint_2, - p_y_low_limbs_range_constraint_3, - p_y_low_limbs_range_constraint_4, - p_y_low_limbs_range_constraint_tail, - p_y_high_limbs, - p_y_high_limbs_range_constraint_0, - p_y_high_limbs_range_constraint_1, - p_y_high_limbs_range_constraint_2, - p_y_high_limbs_range_constraint_3, - p_y_high_limbs_range_constraint_4, - p_y_high_limbs_range_constraint_tail, - z_low_limbs, - z_low_limbs_range_constraint_0, - z_low_limbs_range_constraint_1, - z_low_limbs_range_constraint_2, - z_low_limbs_range_constraint_3, - z_low_limbs_range_constraint_4, - z_low_limbs_range_constraint_tail, - z_high_limbs, - z_high_limbs_range_constraint_0, - z_high_limbs_range_constraint_1, - z_high_limbs_range_constraint_2, - z_high_limbs_range_constraint_3, - z_high_limbs_range_constraint_4, - z_high_limbs_range_constraint_tail, - accumulators_binary_limbs_0, - accumulators_binary_limbs_1, - accumulators_binary_limbs_2, - accumulators_binary_limbs_3, - accumulator_low_limbs_range_constraint_0, - accumulator_low_limbs_range_constraint_1, - accumulator_low_limbs_range_constraint_2, - accumulator_low_limbs_range_constraint_3, - accumulator_low_limbs_range_constraint_4, - accumulator_low_limbs_range_constraint_tail, - accumulator_high_limbs_range_constraint_0, - accumulator_high_limbs_range_constraint_1, - accumulator_high_limbs_range_constraint_2, - accumulator_high_limbs_range_constraint_3, - accumulator_high_limbs_range_constraint_4, - accumulator_high_limbs_range_constraint_tail, - quotient_low_binary_limbs, - quotient_high_binary_limbs, - quotient_low_limbs_range_constraint_0, - quotient_low_limbs_range_constraint_1, - quotient_low_limbs_range_constraint_2, - quotient_low_limbs_range_constraint_3, - quotient_low_limbs_range_constraint_4, - quotient_low_limbs_range_constraint_tail, - quotient_high_limbs_range_constraint_0, - quotient_high_limbs_range_constraint_1, - 
quotient_high_limbs_range_constraint_2, - quotient_high_limbs_range_constraint_3, - quotient_high_limbs_range_constraint_4, - quotient_high_limbs_range_constraint_tail, - relation_wide_limbs, - relation_wide_limbs_range_constraint_0, - relation_wide_limbs_range_constraint_1, - relation_wide_limbs_range_constraint_2, - relation_wide_limbs_range_constraint_3, - ordered_range_constraints_0, - ordered_range_constraints_1, - ordered_range_constraints_2, - ordered_range_constraints_3, - ordered_range_constraints_4, - - z_perm, - }; - }; - std::vector get_shifted() override + } + // get_to_be_shifted is inherited + RefVector get_shifted() { - return { - x_lo_y_hi_shift, - x_hi_z_1_shift, - y_lo_z_2_shift, - p_x_low_limbs_shift, - p_x_low_limbs_range_constraint_0_shift, - p_x_low_limbs_range_constraint_1_shift, - p_x_low_limbs_range_constraint_2_shift, - p_x_low_limbs_range_constraint_3_shift, - p_x_low_limbs_range_constraint_4_shift, - p_x_low_limbs_range_constraint_tail_shift, - p_x_high_limbs_shift, - p_x_high_limbs_range_constraint_0_shift, - p_x_high_limbs_range_constraint_1_shift, - p_x_high_limbs_range_constraint_2_shift, - p_x_high_limbs_range_constraint_3_shift, - p_x_high_limbs_range_constraint_4_shift, - p_x_high_limbs_range_constraint_tail_shift, - p_y_low_limbs_shift, - p_y_low_limbs_range_constraint_0_shift, - p_y_low_limbs_range_constraint_1_shift, - p_y_low_limbs_range_constraint_2_shift, - p_y_low_limbs_range_constraint_3_shift, - p_y_low_limbs_range_constraint_4_shift, - p_y_low_limbs_range_constraint_tail_shift, - p_y_high_limbs_shift, - p_y_high_limbs_range_constraint_0_shift, - p_y_high_limbs_range_constraint_1_shift, - p_y_high_limbs_range_constraint_2_shift, - p_y_high_limbs_range_constraint_3_shift, - p_y_high_limbs_range_constraint_4_shift, - p_y_high_limbs_range_constraint_tail_shift, - z_low_limbs_shift, - z_low_limbs_range_constraint_0_shift, - z_low_limbs_range_constraint_1_shift, - z_low_limbs_range_constraint_2_shift, - 
z_low_limbs_range_constraint_3_shift, - z_low_limbs_range_constraint_4_shift, - z_low_limbs_range_constraint_tail_shift, - z_high_limbs_shift, - z_high_limbs_range_constraint_0_shift, - z_high_limbs_range_constraint_1_shift, - z_high_limbs_range_constraint_2_shift, - z_high_limbs_range_constraint_3_shift, - z_high_limbs_range_constraint_4_shift, - z_high_limbs_range_constraint_tail_shift, - accumulators_binary_limbs_0_shift, - accumulators_binary_limbs_1_shift, - accumulators_binary_limbs_2_shift, - accumulators_binary_limbs_3_shift, - accumulator_low_limbs_range_constraint_0_shift, - accumulator_low_limbs_range_constraint_1_shift, - accumulator_low_limbs_range_constraint_2_shift, - accumulator_low_limbs_range_constraint_3_shift, - accumulator_low_limbs_range_constraint_4_shift, - accumulator_low_limbs_range_constraint_tail_shift, - accumulator_high_limbs_range_constraint_0_shift, - accumulator_high_limbs_range_constraint_1_shift, - accumulator_high_limbs_range_constraint_2_shift, - accumulator_high_limbs_range_constraint_3_shift, - accumulator_high_limbs_range_constraint_4_shift, - accumulator_high_limbs_range_constraint_tail_shift, - quotient_low_binary_limbs_shift, - quotient_high_binary_limbs_shift, - quotient_low_limbs_range_constraint_0_shift, - quotient_low_limbs_range_constraint_1_shift, - quotient_low_limbs_range_constraint_2_shift, - quotient_low_limbs_range_constraint_3_shift, - quotient_low_limbs_range_constraint_4_shift, - quotient_low_limbs_range_constraint_tail_shift, - quotient_high_limbs_range_constraint_0_shift, - quotient_high_limbs_range_constraint_1_shift, - quotient_high_limbs_range_constraint_2_shift, - quotient_high_limbs_range_constraint_3_shift, - quotient_high_limbs_range_constraint_4_shift, - quotient_high_limbs_range_constraint_tail_shift, - relation_wide_limbs_shift, - relation_wide_limbs_range_constraint_0_shift, - relation_wide_limbs_range_constraint_1_shift, - relation_wide_limbs_range_constraint_2_shift, - 
relation_wide_limbs_range_constraint_3_shift, - ordered_range_constraints_0_shift, - ordered_range_constraints_1_shift, - ordered_range_constraints_2_shift, - ordered_range_constraints_3_shift, - ordered_range_constraints_4_shift, + return { this->x_lo_y_hi_shift, + this->x_hi_z_1_shift, + this->y_lo_z_2_shift, + this->p_x_low_limbs_shift, + this->p_x_low_limbs_range_constraint_0_shift, + this->p_x_low_limbs_range_constraint_1_shift, + this->p_x_low_limbs_range_constraint_2_shift, + this->p_x_low_limbs_range_constraint_3_shift, + this->p_x_low_limbs_range_constraint_4_shift, + this->p_x_low_limbs_range_constraint_tail_shift, + this->p_x_high_limbs_shift, + this->p_x_high_limbs_range_constraint_0_shift, + this->p_x_high_limbs_range_constraint_1_shift, + this->p_x_high_limbs_range_constraint_2_shift, + this->p_x_high_limbs_range_constraint_3_shift, + this->p_x_high_limbs_range_constraint_4_shift, + this->p_x_high_limbs_range_constraint_tail_shift, + this->p_y_low_limbs_shift, + this->p_y_low_limbs_range_constraint_0_shift, + this->p_y_low_limbs_range_constraint_1_shift, + this->p_y_low_limbs_range_constraint_2_shift, + this->p_y_low_limbs_range_constraint_3_shift, + this->p_y_low_limbs_range_constraint_4_shift, + this->p_y_low_limbs_range_constraint_tail_shift, + this->p_y_high_limbs_shift, + this->p_y_high_limbs_range_constraint_0_shift, + this->p_y_high_limbs_range_constraint_1_shift, + this->p_y_high_limbs_range_constraint_2_shift, + this->p_y_high_limbs_range_constraint_3_shift, + this->p_y_high_limbs_range_constraint_4_shift, + this->p_y_high_limbs_range_constraint_tail_shift, + this->z_low_limbs_shift, + this->z_low_limbs_range_constraint_0_shift, + this->z_low_limbs_range_constraint_1_shift, + this->z_low_limbs_range_constraint_2_shift, + this->z_low_limbs_range_constraint_3_shift, + this->z_low_limbs_range_constraint_4_shift, + this->z_low_limbs_range_constraint_tail_shift, + this->z_high_limbs_shift, + this->z_high_limbs_range_constraint_0_shift, + 
this->z_high_limbs_range_constraint_1_shift, + this->z_high_limbs_range_constraint_2_shift, + this->z_high_limbs_range_constraint_3_shift, + this->z_high_limbs_range_constraint_4_shift, + this->z_high_limbs_range_constraint_tail_shift, + this->accumulators_binary_limbs_0_shift, + this->accumulators_binary_limbs_1_shift, + this->accumulators_binary_limbs_2_shift, + this->accumulators_binary_limbs_3_shift, + this->accumulator_low_limbs_range_constraint_0_shift, + this->accumulator_low_limbs_range_constraint_1_shift, + this->accumulator_low_limbs_range_constraint_2_shift, + this->accumulator_low_limbs_range_constraint_3_shift, + this->accumulator_low_limbs_range_constraint_4_shift, + this->accumulator_low_limbs_range_constraint_tail_shift, + this->accumulator_high_limbs_range_constraint_0_shift, + this->accumulator_high_limbs_range_constraint_1_shift, + this->accumulator_high_limbs_range_constraint_2_shift, + this->accumulator_high_limbs_range_constraint_3_shift, + this->accumulator_high_limbs_range_constraint_4_shift, + this->accumulator_high_limbs_range_constraint_tail_shift, + this->quotient_low_binary_limbs_shift, + this->quotient_high_binary_limbs_shift, + this->quotient_low_limbs_range_constraint_0_shift, + this->quotient_low_limbs_range_constraint_1_shift, + this->quotient_low_limbs_range_constraint_2_shift, + this->quotient_low_limbs_range_constraint_3_shift, + this->quotient_low_limbs_range_constraint_4_shift, + this->quotient_low_limbs_range_constraint_tail_shift, + this->quotient_high_limbs_range_constraint_0_shift, + this->quotient_high_limbs_range_constraint_1_shift, + this->quotient_high_limbs_range_constraint_2_shift, + this->quotient_high_limbs_range_constraint_3_shift, + this->quotient_high_limbs_range_constraint_4_shift, + this->quotient_high_limbs_range_constraint_tail_shift, + this->relation_wide_limbs_shift, + this->relation_wide_limbs_range_constraint_0_shift, + this->relation_wide_limbs_range_constraint_1_shift, + 
this->relation_wide_limbs_range_constraint_2_shift, + this->relation_wide_limbs_range_constraint_3_shift, + this->ordered_range_constraints_0_shift, + this->ordered_range_constraints_1_shift, + this->ordered_range_constraints_2_shift, + this->ordered_range_constraints_3_shift, + this->ordered_range_constraints_4_shift, - z_perm_shift, - }; + this->z_perm_shift }; }; /** * @brief Polynomials/commitments, that can be constructed only after the r challenge has been received from * gemini * - * @return std::vector + * @return RefVector */ - std::vector get_special() { return get_concatenated_constraints(); } + RefVector get_special() { return get_concatenated_constraints(); } - std::vector get_unshifted_then_shifted_then_special() + RefVector get_unshifted_then_shifted_then_special() { - std::vector result{ get_unshifted() }; - std::vector shifted{ get_shifted() }; - std::vector special{ get_special() }; + RefVector result{ this->get_unshifted() }; + RefVector shifted{ get_shifted() }; + RefVector special{ get_special() }; result.insert(result.end(), shifted.begin(), shifted.end()); result.insert(result.end(), special.begin(), special.end()); return result; @@ -1417,25 +986,21 @@ class GoblinTranslator { * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve * inherit from ProvingKey. 
*/ - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public ProvingKey_, WitnessEntities> { public: BF batching_challenge_v = { 0 }; BF evaluation_input_x = { 0 }; ProvingKey() = default; // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; ProvingKey(const size_t circuit_size) - : ProvingKey_, - WitnessEntities>(circuit_size, 0) + : ProvingKey_, WitnessEntities>(circuit_size, 0) , batching_challenge_v(0) , evaluation_input_x(0) - {} }; @@ -1447,21 +1012,21 @@ class GoblinTranslator { * resolve that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for * portability of our circuits. */ - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; /** - * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated - * at one point. + * @brief A field element for each entity of the flavor. These entities represent the prover polynomials + * evaluated at one point. */ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; }; /** * @brief A container for the prover polynomials handles; only stores spans. */ - class ProverPolynomials : public AllEntities { + class ProverPolynomials : public AllEntities { public: [[nodiscard]] size_t get_polynomial_size() const { return this->op.size(); } /** @@ -1481,20 +1046,20 @@ class GoblinTranslator { /** * @brief A container for easier mapping of polynomials */ - using ProverPolynomialIds = AllEntities; + using ProverPolynomialIds = AllEntities; /** * @brief An owning container of polynomials. * @warning When this was introduced it broke some of our design principles. 
- * - Execution trace builders don't handle "polynomials" because the interpretation of the execution trace columns - * as polynomials is a detail of the proving system, and trace builders are (sometimes in practice, always in - * principle) reusable for different proving protocols (e.g., Plonk and Honk). + * - Execution trace builders don't handle "polynomials" because the interpretation of the execution trace + * columns as polynomials is a detail of the proving system, and trace builders are (sometimes in practice, + * always in principle) reusable for different proving protocols (e.g., Plonk and Honk). * - Polynomial storage is handled by key classes. Polynomials aren't moved, but are accessed elsewhere by * std::spans. * * We will consider revising this data model: TODO(https://github.com/AztecProtocol/barretenberg/issues/743) */ - class AllPolynomials : public AllEntities { + class AllPolynomials : public AllEntities { public: [[nodiscard]] AllValues get_row(const size_t row_idx) const { @@ -1509,12 +1074,12 @@ class GoblinTranslator { * @brief A container for polynomials produced after the first round of sumcheck. * @todo TODO(#394) Use polynomial classes for guaranteed memory alignment. */ - using RowPolynomials = AllEntities; + using RowPolynomials = AllEntities; /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. */ - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; @@ -1530,8 +1095,7 @@ class GoblinTranslator { /** * @brief A container for univariates used during sumcheck. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. @@ -1544,7 +1108,7 @@ class GoblinTranslator { * needed. 
It has, however, been useful during debugging to have these labels available. * */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { public: CommitmentLabels() { @@ -1645,10 +1209,10 @@ class GoblinTranslator { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { public: VerifierCommitments([[maybe_unused]] std::shared_ptr verification_key, - [[maybe_unused]] const BaseTranscript& transcript) + [[maybe_unused]] const BaseTranscript& transcript) { this->lagrange_first = verification_key->lagrange_first; this->lagrange_last = verification_key->lagrange_last; @@ -1661,7 +1225,7 @@ class GoblinTranslator { } }; - using Transcript = BaseTranscript; + using Transcript = BaseTranscript; }; } // namespace proof_system::honk::flavor diff --git a/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra.hpp b/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra.hpp index 66127524328..e67ede73ba1 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra.hpp @@ -1,6 +1,8 @@ #pragma once #include "barretenberg/commitment_schemes/kzg/kzg.hpp" +#include "barretenberg/common/ref_vector.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/proof_system/circuit_builder/goblin_ultra_circuit_builder.hpp" #include "barretenberg/relations/auxiliary_relation.hpp" @@ -40,7 +42,7 @@ class GoblinUltra { // assignment of witnesses. We again choose a neutral name. static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 28; // The total number of witness entities not including shifts. 
- static constexpr size_t NUM_WITNESS_ENTITIES = 18; + static constexpr size_t NUM_WITNESS_ENTITIES = 14; using GrandProductRelations = std::tuple, proof_system::LookupRelation>; @@ -78,137 +80,81 @@ class GoblinUltra { static constexpr bool has_zero_row = true; private: - template /** * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. */ - class PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType q_m; // column 0 - DataType q_c; // column 1 - DataType q_l; // column 2 - DataType q_r; // column 3 - DataType q_o; // column 4 - DataType q_4; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType q_busread; // column 11 - DataType sigma_1; // column 12 - DataType sigma_2; // column 13 - DataType sigma_3; // column 14 - DataType sigma_4; // column 15 - DataType id_1; // column 16 - DataType id_2; // column 17 - DataType id_3; // column 18 - DataType id_4; // column 19 - DataType table_1; // column 20 - DataType table_2; // column 21 - DataType table_3; // column 22 - DataType table_4; // column 23 - DataType lagrange_first; // column 24 - DataType lagrange_last; // column 25 - DataType lagrange_ecc_op; // column 26 // indicator poly for ecc op gates - DataType databus_id; // column 27 // id polynomial, i.e. 
id_i = i - - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, - &q_m, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &q_busread, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &lagrange_ecc_op, - &databus_id) + using DataType = DataType_; + DEFINE_FLAVOR_MEMBERS(DataType, + q_m, // column 0 + q_c, // column 1 + q_l, // column 2 + q_r, // column 3 + q_o, // column 4 + q_4, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + q_busread, // column 11 + sigma_1, // column 12 + sigma_2, // column 13 + sigma_3, // column 14 + sigma_4, // column 15 + id_1, // column 16 + id_2, // column 17 + id_3, // column 18 + id_4, // column 19 + table_1, // column 20 + table_2, // column 21 + table_3, // column 22 + table_4, // column 23 + lagrange_first, // column 24 + lagrange_last, // column 25 + lagrange_ecc_op, // column 26 // indicator poly for ecc op gates + databus_id) // column 27 // id polynomial, i.e. 
id_i = i static constexpr CircuitType CIRCUIT_TYPE = CircuitBuilder::CIRCUIT_TYPE; - std::vector get_selectors() override + RefVector get_selectors() { return { q_m, q_c, q_l, q_r, q_o, q_4, q_arith, q_sort, q_elliptic, q_aux, q_lookup, q_busread }; }; - std::vector get_sigma_polynomials() override { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; - std::vector get_id_polynomials() override { return { id_1, id_2, id_3, id_4 }; }; - - std::vector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; + RefVector get_sigma_polynomials() { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; + RefVector get_id_polynomials() { return { id_1, id_2, id_3, id_4 }; }; + RefVector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; }; /** * @brief Container for all witness polynomials used/constructed by the prover. * @details Shifts are not included here since they do not occupy their own memory. */ - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType w_l; // column 0 - DataType w_r; // column 1 - DataType w_o; // column 2 - DataType w_4; // column 3 - DataType sorted_1; // column 4 - DataType sorted_2; // column 5 - DataType sorted_3; // column 6 - DataType sorted_4; // column 7 - DataType sorted_accum; // column 8 - DataType z_perm; // column 9 - DataType z_lookup; // column 10 - DataType ecc_op_wire_1; // column 11 - DataType ecc_op_wire_2; // column 12 - DataType ecc_op_wire_3; // column 13 - DataType ecc_op_wire_4; // column 14 - DataType calldata; // column 15 - DataType calldata_read_counts; // column 16 - DataType lookup_inverses; // column 17 - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_1, - &sorted_2, - &sorted_3, - &sorted_4, - &sorted_accum, - &z_perm, - &z_lookup, - &ecc_op_wire_1, - &ecc_op_wire_2, - &ecc_op_wire_3, - &ecc_op_wire_4, - &calldata, - &calldata_read_counts, - &lookup_inverses) - - std::vector 
get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - std::vector get_ecc_op_wires() + DEFINE_FLAVOR_MEMBERS(DataType, + w_l, // column 0 + w_r, // column 1 + w_o, // column 2 + w_4, // column 3 + sorted_accum, // column 4 + z_perm, // column 5 + z_lookup, // column 6 + ecc_op_wire_1, // column 7 + ecc_op_wire_2, // column 8 + ecc_op_wire_3, // column 9 + ecc_op_wire_4, // column 10 + calldata, // column 11 + calldata_read_counts, // column 12 + lookup_inverses // column 13 + ) + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; + RefVector get_ecc_op_wires() { return { ecc_op_wire_1, ecc_op_wire_2, ecc_op_wire_3, ecc_op_wire_4 }; }; - // The sorted concatenations of table and witness data needed for plookup. - std::vector get_sorted_polynomials() { return { sorted_1, sorted_2, sorted_3, sorted_4 }; }; }; /** @@ -220,126 +166,71 @@ class GoblinUltra { * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "ShiftedEntities". It could be * implemented as such, but we have this now. 
*/ - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType q_c; // column 0 - DataType q_l; // column 1 - DataType q_r; // column 2 - DataType q_o; // column 3 - DataType q_4; // column 4 - DataType q_m; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType q_busread; // column 11 - DataType sigma_1; // column 12 - DataType sigma_2; // column 13 - DataType sigma_3; // column 14 - DataType sigma_4; // column 15 - DataType id_1; // column 16 - DataType id_2; // column 17 - DataType id_3; // column 18 - DataType id_4; // column 19 - DataType table_1; // column 20 - DataType table_2; // column 21 - DataType table_3; // column 22 - DataType table_4; // column 23 - DataType lagrange_first; // column 24 - DataType lagrange_last; // column 25 - DataType lagrange_ecc_op; // column 26 - DataType databus_id; // column 27 - DataType w_l; // column 28 - DataType w_r; // column 29 - DataType w_o; // column 30 - DataType w_4; // column 31 - DataType sorted_accum; // column 32 - DataType z_perm; // column 33 - DataType z_lookup; // column 34 - DataType ecc_op_wire_1; // column 35 - DataType ecc_op_wire_2; // column 36 - DataType ecc_op_wire_3; // column 37 - DataType ecc_op_wire_4; // column 38 - DataType calldata; // column 39 - DataType calldata_read_counts; // column 40 - DataType lookup_inverses; // column 41 - DataType table_1_shift; // column 42 - DataType table_2_shift; // column 43 - DataType table_3_shift; // column 44 - DataType table_4_shift; // column 45 - DataType w_l_shift; // column 46 - DataType w_r_shift; // column 47 - DataType w_o_shift; // column 48 - DataType w_4_shift; // column 49 - DataType sorted_accum_shift; // column 50 - DataType z_perm_shift; // column 51 - DataType z_lookup_shift; // column 52 - - // defines a method pointer_view that returns the following, with const and 
non-const variants - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_m, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &q_busread, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &lagrange_ecc_op, - &databus_id, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_accum, - &z_perm, - &z_lookup, - &ecc_op_wire_1, - &ecc_op_wire_2, - &ecc_op_wire_3, - &ecc_op_wire_4, - &calldata, - &calldata_read_counts, - &lookup_inverses, - &table_1_shift, - &table_2_shift, - &table_3_shift, - &table_4_shift, - &w_l_shift, - &w_r_shift, - &w_o_shift, - &w_4_shift, - &sorted_accum_shift, - &z_perm_shift, - &z_lookup_shift); - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - std::vector get_ecc_op_wires() + DEFINE_FLAVOR_MEMBERS(DataType, + q_c, // column 0 + q_l, // column 1 + q_r, // column 2 + q_o, // column 3 + q_4, // column 4 + q_m, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + q_busread, // column 11 + sigma_1, // column 12 + sigma_2, // column 13 + sigma_3, // column 14 + sigma_4, // column 15 + id_1, // column 16 + id_2, // column 17 + id_3, // column 18 + id_4, // column 19 + table_1, // column 20 + table_2, // column 21 + table_3, // column 22 + table_4, // column 23 + lagrange_first, // column 24 + lagrange_last, // column 25 + lagrange_ecc_op, // column 26 + databus_id, // column 27 + w_l, // column 28 + w_r, // column 29 + w_o, // column 30 + w_4, // column 31 + sorted_accum, // column 32 + z_perm, // column 33 + z_lookup, // column 34 + ecc_op_wire_1, // column 35 + ecc_op_wire_2, // column 36 + ecc_op_wire_3, // column 37 + ecc_op_wire_4, // column 38 + calldata, // column 39 + calldata_read_counts, // column 40 + lookup_inverses, // column 41 + table_1_shift, // column 42 + table_2_shift, // column 
43 + table_3_shift, // column 44 + table_4_shift, // column 45 + w_l_shift, // column 46 + w_r_shift, // column 47 + w_o_shift, // column 48 + w_4_shift, // column 49 + sorted_accum_shift, // column 50 + z_perm_shift, // column 51 + z_lookup_shift // column 52 + ) + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; + RefVector get_ecc_op_wires() { return { ecc_op_wire_1, ecc_op_wire_2, ecc_op_wire_3, ecc_op_wire_4 }; }; // Gemini-specific getters. - std::vector get_unshifted() override + RefVector get_unshifted() { return { q_c, q_l, @@ -384,11 +275,11 @@ class GoblinUltra { calldata_read_counts, lookup_inverses }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { table_1, table_2, table_3, table_4, w_l, w_r, w_o, w_4, sorted_accum, z_perm, z_lookup }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { table_1_shift, table_2_shift, table_3_shift, table_4_shift, w_l_shift, w_r_shift, w_o_shift, w_4_shift, sorted_accum_shift, z_perm_shift, z_lookup_shift }; @@ -401,12 +292,10 @@ class GoblinUltra { * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve inherit * from ProvingKey. */ - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public ProvingKey_, WitnessEntities> { public: // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; std::vector memory_read_records; @@ -426,12 +315,12 @@ class GoblinUltra { * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our * circuits. */ - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. 
*/ - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; @@ -448,8 +337,7 @@ class GoblinUltra { * @brief A container for univariates used during Protogalaxy folding and sumcheck. * @details During folding and sumcheck, the prover evaluates the relations on these univariates. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. @@ -460,16 +348,16 @@ class GoblinUltra { * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated * at one point. */ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; }; /** * @brief A container for the prover polynomials handles; only stores spans. */ - class ProverPolynomials : public AllEntities { + class ProverPolynomials : public AllEntities { public: [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } [[nodiscard]] AllValues get_row(size_t row_idx) const @@ -482,13 +370,18 @@ class GoblinUltra { } }; + /** + * @brief A container for the witness commitments. + */ + using WitnessCommitments = WitnessEntities; + /** * @brief A container for commitment labels. * @note It's debatable whether this should inherit from AllEntities. since most entries are not strictly needed. It * has, however, been useful during debugging to have these labels available. 
* */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { public: CommitmentLabels() { @@ -538,10 +431,10 @@ class GoblinUltra { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { public: VerifierCommitments(std::shared_ptr verification_key, - [[maybe_unused]] const BaseTranscript& transcript) + [[maybe_unused]] const BaseTranscript& transcript) { static_cast(transcript); q_m = verification_key->q_m; @@ -585,7 +478,7 @@ class GoblinUltra { * @brief Derived class that defines proof structure for GoblinUltra proofs, as well as supporting functions. * */ - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: uint32_t circuit_size; uint32_t public_input_size; @@ -614,7 +507,7 @@ class GoblinUltra { Transcript() = default; Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} void deserialize_full_transcript() override diff --git a/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra_recursive.hpp b/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra_recursive.hpp index f37d26b3adc..dd2a5e7ac5f 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra_recursive.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/goblin_ultra_recursive.hpp @@ -6,6 +6,7 @@ #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/flavor/goblin_ultra.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" @@ -66,7 +67,7 @@ template class GoblinUltraRecursive_ { // assignment of witnesses. We again choose a neutral name. static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 28; // The total number of witness entities not including shifts. 
- static constexpr size_t NUM_WITNESS_ENTITIES = 18; + static constexpr size_t NUM_WITNESS_ENTITIES = 14; // define the tuple of Relations that comprise the Sumcheck relation using Relations = std::tuple, @@ -91,137 +92,84 @@ template class GoblinUltraRecursive_ { using TupleOfArraysOfValues = decltype(create_tuple_of_arrays_of_values()); private: - template + template /** * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. */ - class PrecomputedEntities : public PrecomputedEntities_ { + class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType q_m; // column 0 - DataType q_c; // column 1 - DataType q_l; // column 2 - DataType q_r; // column 3 - DataType q_o; // column 4 - DataType q_4; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType q_busread; // column 11 - DataType sigma_1; // column 12 - DataType sigma_2; // column 13 - DataType sigma_3; // column 14 - DataType sigma_4; // column 15 - DataType id_1; // column 16 - DataType id_2; // column 17 - DataType id_3; // column 18 - DataType id_4; // column 19 - DataType table_1; // column 20 - DataType table_2; // column 21 - DataType table_3; // column 22 - DataType table_4; // column 23 - DataType lagrange_first; // column 24 - DataType lagrange_last; // column 25 - DataType lagrange_ecc_op; // column 26 // indicator poly for ecc op gates - DataType databus_id; // column 27 // id polynomial, i.e. 
id_i = i - - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, - &q_m, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &q_busread, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &lagrange_ecc_op, - &databus_id) + DEFINE_FLAVOR_MEMBERS(DataType, + q_m, // column 0 + q_c, // column 1 + q_l, // column 2 + q_r, // column 3 + q_o, // column 4 + q_4, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + q_busread, // column 11 + sigma_1, // column 12 + sigma_2, // column 13 + sigma_3, // column 14 + sigma_4, // column 15 + id_1, // column 16 + id_2, // column 17 + id_3, // column 18 + id_4, // column 19 + table_1, // column 20 + table_2, // column 21 + table_3, // column 22 + table_4, // column 23 + lagrange_first, // column 24 + lagrange_last, // column 25 + lagrange_ecc_op, // column 26 // indicator poly for ecc op gates + databus_id // column 27 // id polynomial, i.e. id_i = i + ) static constexpr CircuitType CIRCUIT_TYPE = CircuitBuilder::CIRCUIT_TYPE; - std::vector get_selectors() override + RefVector get_selectors() { return { q_m, q_c, q_l, q_r, q_o, q_4, q_arith, q_sort, q_elliptic, q_aux, q_lookup, q_busread }; }; - std::vector get_sigma_polynomials() override { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; - std::vector get_id_polynomials() override { return { id_1, id_2, id_3, id_4 }; }; + RefVector get_sigma_polynomials() { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; + RefVector get_id_polynomials() { return { id_1, id_2, id_3, id_4 }; }; - std::vector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; + RefVector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; }; /** * @brief Container for all witness polynomials used/constructed by the prover. 
* @details Shifts are not included here since they do not occupy their own memory. */ - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType w_l; // column 0 - DataType w_r; // column 1 - DataType w_o; // column 2 - DataType w_4; // column 3 - DataType sorted_1; // column 4 - DataType sorted_2; // column 5 - DataType sorted_3; // column 6 - DataType sorted_4; // column 7 - DataType sorted_accum; // column 8 - DataType z_perm; // column 9 - DataType z_lookup; // column 10 - DataType ecc_op_wire_1; // column 11 - DataType ecc_op_wire_2; // column 12 - DataType ecc_op_wire_3; // column 13 - DataType ecc_op_wire_4; // column 14 - DataType calldata; // column 15 - DataType calldata_read_counts; // column 16 - DataType lookup_inverses; // column 17 - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_1, - &sorted_2, - &sorted_3, - &sorted_4, - &sorted_accum, - &z_perm, - &z_lookup, - &ecc_op_wire_1, - &ecc_op_wire_2, - &ecc_op_wire_3, - &ecc_op_wire_4, - &calldata, - &calldata_read_counts, - &lookup_inverses) - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - std::vector get_ecc_op_wires() + DEFINE_FLAVOR_MEMBERS(DataType, + w_l, // column 0 + w_r, // column 1 + w_o, // column 2 + w_4, // column 3 + sorted_accum, // column 4 + z_perm, // column 5 + z_lookup, // column 6 + ecc_op_wire_1, // column 7 + ecc_op_wire_2, // column 8 + ecc_op_wire_3, // column 9 + ecc_op_wire_4, // column 10 + calldata, // column 11 + calldata_read_counts, // column 12 + lookup_inverses // column 13 + ) + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; + RefVector get_ecc_op_wires() { return { ecc_op_wire_1, ecc_op_wire_2, ecc_op_wire_3, ecc_op_wire_4 }; }; - // The sorted concatenations of table and witness data needed for plookup. 
- std::vector get_sorted_polynomials() { return { sorted_1, sorted_2, sorted_3, sorted_4 }; }; }; /** @@ -233,126 +181,70 @@ template class GoblinUltraRecursive_ { * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "ShiftedEntities". It could be * implemented as such, but we have this now. */ - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType q_c; // column 0 - DataType q_l; // column 1 - DataType q_r; // column 2 - DataType q_o; // column 3 - DataType q_4; // column 4 - DataType q_m; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType q_busread; // column 11 - DataType sigma_1; // column 12 - DataType sigma_2; // column 13 - DataType sigma_3; // column 14 - DataType sigma_4; // column 15 - DataType id_1; // column 16 - DataType id_2; // column 17 - DataType id_3; // column 18 - DataType id_4; // column 19 - DataType table_1; // column 20 - DataType table_2; // column 21 - DataType table_3; // column 22 - DataType table_4; // column 23 - DataType lagrange_first; // column 24 - DataType lagrange_last; // column 25 - DataType lagrange_ecc_op; // column 26 - DataType databus_id; // column 27 - DataType w_l; // column 28 - DataType w_r; // column 29 - DataType w_o; // column 30 - DataType w_4; // column 31 - DataType sorted_accum; // column 32 - DataType z_perm; // column 33 - DataType z_lookup; // column 34 - DataType ecc_op_wire_1; // column 35 - DataType ecc_op_wire_2; // column 36 - DataType ecc_op_wire_3; // column 37 - DataType ecc_op_wire_4; // column 38 - DataType calldata; // column 39 - DataType calldata_read_counts; // column 40 - DataType lookup_inverses; // column 41 - DataType table_1_shift; // column 42 - DataType table_2_shift; // column 43 - DataType table_3_shift; // column 44 - DataType table_4_shift; // column 45 - DataType w_l_shift; // 
column 46 - DataType w_r_shift; // column 47 - DataType w_o_shift; // column 48 - DataType w_4_shift; // column 49 - DataType sorted_accum_shift; // column 50 - DataType z_perm_shift; // column 51 - DataType z_lookup_shift; // column 52 - - // defines a method pointer_view that returns the following, with const and non-const variants - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_m, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &q_busread, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &lagrange_ecc_op, - &databus_id, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_accum, - &z_perm, - &z_lookup, - &ecc_op_wire_1, - &ecc_op_wire_2, - &ecc_op_wire_3, - &ecc_op_wire_4, - &calldata, - &calldata_read_counts, - &lookup_inverses, - &table_1_shift, - &table_2_shift, - &table_3_shift, - &table_4_shift, - &w_l_shift, - &w_r_shift, - &w_o_shift, - &w_4_shift, - &sorted_accum_shift, - &z_perm_shift, - &z_lookup_shift); - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - std::vector get_ecc_op_wires() + DEFINE_FLAVOR_MEMBERS(DataType, + q_c, // column 0 + q_l, // column 1 + q_r, // column 2 + q_o, // column 3 + q_4, // column 4 + q_m, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + q_busread, // column 11 + sigma_1, // column 12 + sigma_2, // column 13 + sigma_3, // column 14 + sigma_4, // column 15 + id_1, // column 16 + id_2, // column 17 + id_3, // column 18 + id_4, // column 19 + table_1, // column 20 + table_2, // column 21 + table_3, // column 22 + table_4, // column 23 + lagrange_first, // column 24 + lagrange_last, // column 25 + lagrange_ecc_op, // column 26 + databus_id, // column 27 + w_l, // column 28 + w_r, // column 29 + w_o, // column 30 + w_4, // column 31 + sorted_accum, // column 32 + 
z_perm, // column 33 + z_lookup, // column 34 + ecc_op_wire_1, // column 35 + ecc_op_wire_2, // column 36 + ecc_op_wire_3, // column 37 + ecc_op_wire_4, // column 38 + calldata, // column 39 + calldata_read_counts, // column 40 + lookup_inverses, // column 41 + table_1_shift, // column 42 + table_2_shift, // column 43 + table_3_shift, // column 44 + table_4_shift, // column 45 + w_l_shift, // column 46 + w_r_shift, // column 47 + w_o_shift, // column 48 + w_4_shift, // column 49 + sorted_accum_shift, // column 50 + z_perm_shift, // column 51 + z_lookup_shift); // column 52 + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; + RefVector get_ecc_op_wires() { return { ecc_op_wire_1, ecc_op_wire_2, ecc_op_wire_3, ecc_op_wire_4 }; }; // Gemini-specific getters. - std::vector get_unshifted() override + RefVector get_unshifted() { return { q_c, q_l, @@ -397,11 +289,11 @@ template class GoblinUltraRecursive_ { calldata_read_counts, lookup_inverses }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { table_1, table_2, table_3, table_4, w_l, w_r, w_o, w_4, sorted_accum, z_perm, z_lookup }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { table_1_shift, table_2_shift, table_3_shift, table_4_shift, w_l_shift, w_r_shift, w_o_shift, w_4_shift, sorted_accum_shift, z_perm_shift, z_lookup_shift }; @@ -417,17 +309,17 @@ template class GoblinUltraRecursive_ { * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our * circuits. 
*/ - class VerificationKey : public VerificationKey_> { + class VerificationKey : public VerificationKey_> { public: /** - * @brief Construct a new Verification Key with stdlib types from a provided native verification key + * @brief Construct a new Verification Key with stdlib types from a provided native verification + * key * * @param builder * @param native_key Native verification key from which to extract the precomputed commitments */ VerificationKey(CircuitBuilder* builder, auto native_key) - : VerificationKey_>(native_key->circuit_size, - native_key->num_public_inputs) + : VerificationKey_>(native_key->circuit_size, native_key->num_public_inputs) { this->q_m = Commitment::from_witness(builder, native_key->q_m); this->q_l = Commitment::from_witness(builder, native_key->q_l); @@ -464,9 +356,9 @@ template class GoblinUltraRecursive_ { * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated * at one point. */ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; AllValues(std::array _data_in) { this->_data = _data_in; } }; @@ -477,7 +369,7 @@ template class GoblinUltraRecursive_ { * has, however, been useful during debugging to have these labels available. * */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { public: CommitmentLabels() { @@ -527,7 +419,7 @@ template class GoblinUltraRecursive_ { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { public: VerifierCommitments(std::shared_ptr verification_key) { @@ -567,7 +459,7 @@ template class GoblinUltraRecursive_ { * functions. 
* */ - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: uint32_t circuit_size; uint32_t public_input_size; @@ -596,97 +488,97 @@ template class GoblinUltraRecursive_ { Transcript() = default; Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} /** - * @brief Takes a FULL GoblinUltraRecursive proof and deserializes it into the public member variables that - * compose the structure. Must be called in order to access the structure of the proof. + * @brief Takes a FULL GoblinUltraRecursive proof and deserializes it into the public member + * variables that compose the structure. Must be called in order to access the structure of the + * proof. * */ - void deserialize_full_transcript() override + void deserialize_full_transcript() { // take current proof and put them into the struct size_t num_bytes_read = 0; - circuit_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + circuit_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); size_t log_n = numeric::get_msb(circuit_size); - public_input_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - pub_inputs_offset = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + public_input_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + pub_inputs_offset = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < public_input_size; ++i) { - public_inputs.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); + public_inputs.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); } - w_l_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_r_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_o_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - ecc_op_wire_1_comm = 
deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - ecc_op_wire_2_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - ecc_op_wire_3_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - ecc_op_wire_4_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - calldata_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - calldata_read_counts_comm = - deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - lookup_inverses_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - sorted_accum_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_4_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - z_perm_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - z_lookup_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_l_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_r_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_o_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + ecc_op_wire_1_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + ecc_op_wire_2_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + ecc_op_wire_3_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + ecc_op_wire_4_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + calldata_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + calldata_read_counts_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + lookup_inverses_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + sorted_accum_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_4_comm = 
deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + z_perm_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + z_lookup_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { sumcheck_univariates.push_back( deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read)); + BaseTranscript::proof_data, num_bytes_read)); } - sumcheck_evaluations = deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read); + sumcheck_evaluations = + deserialize_from_buffer>(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { - zm_cq_comms.push_back( - deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); + zm_cq_comms.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); } - zm_cq_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - zm_pi_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + zm_cq_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + zm_pi_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); } /** - * @brief Serializes the structure variables into a FULL GoblinUltraRecursive proof. Should be called only if - * deserialize_full_transcript() was called and some transcript variable was modified. + * @brief Serializes the structure variables into a FULL GoblinUltraRecursive proof. Should be + * called only if deserialize_full_transcript() was called and some transcript variable was + * modified. 
* */ - void serialize_full_transcript() override + void serialize_full_transcript() { - size_t old_proof_length = BaseTranscript::proof_data.size(); - BaseTranscript::proof_data.clear(); + size_t old_proof_length = BaseTranscript::proof_data.size(); + BaseTranscript::proof_data.clear(); size_t log_n = numeric::get_msb(circuit_size); - serialize_to_buffer(circuit_size, BaseTranscript::proof_data); - serialize_to_buffer(public_input_size, BaseTranscript::proof_data); - serialize_to_buffer(pub_inputs_offset, BaseTranscript::proof_data); + serialize_to_buffer(circuit_size, BaseTranscript::proof_data); + serialize_to_buffer(public_input_size, BaseTranscript::proof_data); + serialize_to_buffer(pub_inputs_offset, BaseTranscript::proof_data); for (size_t i = 0; i < public_input_size; ++i) { - serialize_to_buffer(public_inputs[i], BaseTranscript::proof_data); + serialize_to_buffer(public_inputs[i], BaseTranscript::proof_data); } - serialize_to_buffer(w_l_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_r_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_o_comm, BaseTranscript::proof_data); - serialize_to_buffer(ecc_op_wire_1_comm, BaseTranscript::proof_data); - serialize_to_buffer(ecc_op_wire_2_comm, BaseTranscript::proof_data); - serialize_to_buffer(ecc_op_wire_3_comm, BaseTranscript::proof_data); - serialize_to_buffer(ecc_op_wire_4_comm, BaseTranscript::proof_data); - serialize_to_buffer(calldata_comm, BaseTranscript::proof_data); - serialize_to_buffer(calldata_read_counts_comm, BaseTranscript::proof_data); - serialize_to_buffer(lookup_inverses_comm, BaseTranscript::proof_data); - serialize_to_buffer(sorted_accum_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_4_comm, BaseTranscript::proof_data); - serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); - serialize_to_buffer(z_lookup_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_l_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_r_comm, 
BaseTranscript::proof_data); + serialize_to_buffer(w_o_comm, BaseTranscript::proof_data); + serialize_to_buffer(ecc_op_wire_1_comm, BaseTranscript::proof_data); + serialize_to_buffer(ecc_op_wire_2_comm, BaseTranscript::proof_data); + serialize_to_buffer(ecc_op_wire_3_comm, BaseTranscript::proof_data); + serialize_to_buffer(ecc_op_wire_4_comm, BaseTranscript::proof_data); + serialize_to_buffer(calldata_comm, BaseTranscript::proof_data); + serialize_to_buffer(calldata_read_counts_comm, BaseTranscript::proof_data); + serialize_to_buffer(lookup_inverses_comm, BaseTranscript::proof_data); + serialize_to_buffer(sorted_accum_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_4_comm, BaseTranscript::proof_data); + serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); + serialize_to_buffer(z_lookup_comm, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); } - serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - serialize_to_buffer(zm_cq_comms[i], BaseTranscript::proof_data); + serialize_to_buffer(zm_cq_comms[i], BaseTranscript::proof_data); } - serialize_to_buffer(zm_cq_comm, BaseTranscript::proof_data); - serialize_to_buffer(zm_pi_comm, BaseTranscript::proof_data); + serialize_to_buffer(zm_cq_comm, BaseTranscript::proof_data); + serialize_to_buffer(zm_pi_comm, BaseTranscript::proof_data); // sanity check to make sure we generate the same length of proof as before. 
- ASSERT(BaseTranscript::proof_data.size() == old_proof_length); + ASSERT(BaseTranscript::proof_data.size() == old_proof_length); } }; }; diff --git a/barretenberg/cpp/src/barretenberg/flavor/relation_definitions_fwd.hpp b/barretenberg/cpp/src/barretenberg/flavor/relation_definitions_fwd.hpp index 8ebca000c94..f8a547cbf47 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/relation_definitions_fwd.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/relation_definitions_fwd.hpp @@ -4,7 +4,7 @@ #define ExtendedEdge(Flavor) Flavor::ExtendedEdges #define EvaluationEdge(Flavor) Flavor::AllValues -#define EntityEdge(Flavor) Flavor::AllEntities +#define EntityEdge(Flavor) Flavor::AllEntities #define ACCUMULATE(...) _ACCUMULATE(__VA_ARGS__) #define _ACCUMULATE(Preface, RelationImpl, Flavor, AccumulatorType, EdgeType) \ diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra.hpp index da12a3994ef..44c65bf9e78 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra.hpp @@ -2,6 +2,7 @@ #include "barretenberg/commitment_schemes/kzg/kzg.hpp" #include "barretenberg/ecc/curves/bn254/g1.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/polynomials/barycentric.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" @@ -40,7 +41,7 @@ class Ultra { // assignment of witnesses. We again choose a neutral name. static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 25; // The total number of witness entities not including shifts. 
- static constexpr size_t NUM_WITNESS_ENTITIES = 11; + static constexpr size_t NUM_WITNESS_ENTITIES = 7; using GrandProductRelations = std::tuple, proof_system::LookupRelation>; @@ -75,113 +76,68 @@ class Ultra { static constexpr bool has_zero_row = true; private: - template /** * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. */ - class PrecomputedEntities : public PrecomputedEntities_ { + template class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType q_m; // column 0 - DataType q_c; // column 1 - DataType q_l; // column 2 - DataType q_r; // column 3 - DataType q_o; // column 4 - DataType q_4; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType sigma_1; // column 11 - DataType sigma_2; // column 12 - DataType sigma_3; // column 13 - DataType sigma_4; // column 14 - DataType id_1; // column 15 - DataType id_2; // column 16 - DataType id_3; // column 17 - DataType id_4; // column 18 - DataType table_1; // column 19 - DataType table_2; // column 20 - DataType table_3; // column 21 - DataType table_4; // column 22 - DataType lagrange_first; // column 23 - DataType lagrange_last; // column 24 - - DEFINE_POINTER_VIEW(NUM_PRECOMPUTED_ENTITIES, - &q_m, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last) + using DataType = DataType_; + DEFINE_FLAVOR_MEMBERS(DataType, + q_m, // column 0 + q_c, // column 1 + q_l, // column 2 + q_r, // column 3 + q_o, // column 4 + q_4, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + 
sigma_1, // column 11 + sigma_2, // column 12 + sigma_3, // column 13 + sigma_4, // column 14 + id_1, // column 15 + id_2, // column 16 + id_3, // column 17 + id_4, // column 18 + table_1, // column 19 + table_2, // column 20 + table_3, // column 21 + table_4, // column 22 + lagrange_first, // column 23 + lagrange_last) // column 24 static constexpr CircuitType CIRCUIT_TYPE = CircuitBuilder::CIRCUIT_TYPE; - std::vector get_selectors() override + RefVector get_selectors() { return { q_m, q_c, q_l, q_r, q_o, q_4, q_arith, q_sort, q_elliptic, q_aux, q_lookup }; }; - std::vector get_sigma_polynomials() override { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; - std::vector get_id_polynomials() override { return { id_1, id_2, id_3, id_4 }; }; + RefVector get_sigma_polynomials() { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; + RefVector get_id_polynomials() { return { id_1, id_2, id_3, id_4 }; }; - std::vector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; + RefVector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; }; /** * @brief Container for all witness polynomials used/constructed by the prover. * @details Shifts are not included here since they do not occupy their own memory. 
*/ - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType w_l; // column 0 - DataType w_r; // column 1 - DataType w_o; // column 2 - DataType w_4; // column 3 - DataType sorted_1; // column 4 - DataType sorted_2; // column 5 - DataType sorted_3; // column 6 - DataType sorted_4; // column 7 - DataType sorted_accum; // column 8 - DataType z_perm; // column 9 - DataType z_lookup; // column 10 - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_1, - &sorted_2, - &sorted_3, - &sorted_4, - &sorted_accum, - &z_perm, - &z_lookup) - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - // The sorted concatenations of table and witness data needed for plookup. - std::vector get_sorted_polynomials() { return { sorted_1, sorted_2, sorted_3, sorted_4 }; }; + DEFINE_FLAVOR_MEMBERS(DataType, + w_l, // column 0 + w_r, // column 1 + w_o, // column 2 + w_4, // column 3 + sorted_accum, // column 4 + z_perm, // column 5 + z_lookup) // column 6 + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; }; /** @@ -193,101 +149,56 @@ class Ultra { * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "ShiftedEntities". It could be * implemented as such, but we have this now. 
*/ - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType q_c; // column 0 - DataType q_l; // column 1 - DataType q_r; // column 2 - DataType q_o; // column 3 - DataType q_4; // column 4 - DataType q_m; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType sigma_1; // column 11 - DataType sigma_2; // column 12 - DataType sigma_3; // column 13 - DataType sigma_4; // column 14 - DataType id_1; // column 15 - DataType id_2; // column 16 - DataType id_3; // column 17 - DataType id_4; // column 18 - DataType table_1; // column 19 - DataType table_2; // column 20 - DataType table_3; // column 21 - DataType table_4; // column 22 - DataType lagrange_first; // column 23 - DataType lagrange_last; // column 24 - DataType w_l; // column 25 - DataType w_r; // column 26 - DataType w_o; // column 27 - DataType w_4; // column 28 - DataType sorted_accum; // column 29 - DataType z_perm; // column 30 - DataType z_lookup; // column 31 - DataType table_1_shift; // column 32 - DataType table_2_shift; // column 33 - DataType table_3_shift; // column 34 - DataType table_4_shift; // column 35 - DataType w_l_shift; // column 36 - DataType w_r_shift; // column 37 - DataType w_o_shift; // column 38 - DataType w_4_shift; // column 39 - DataType sorted_accum_shift; // column 40 - DataType z_perm_shift; // column 41 - DataType z_lookup_shift; // column 42 - - // defines a method pointer_view that returns the following, with const and non-const variants - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_m, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_accum, - 
&z_perm, - &z_lookup, - &table_1_shift, - &table_2_shift, - &table_3_shift, - &table_4_shift, - &w_l_shift, - &w_r_shift, - &w_o_shift, - &w_4_shift, - &sorted_accum_shift, - &z_perm_shift, - &z_lookup_shift); - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; + DEFINE_FLAVOR_MEMBERS(DataType, + q_c, // column 0 + q_l, // column 1 + q_r, // column 2 + q_o, // column 3 + q_4, // column 4 + q_m, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + sigma_1, // column 11 + sigma_2, // column 12 + sigma_3, // column 13 + sigma_4, // column 14 + id_1, // column 15 + id_2, // column 16 + id_3, // column 17 + id_4, // column 18 + table_1, // column 19 + table_2, // column 20 + table_3, // column 21 + table_4, // column 22 + lagrange_first, // column 23 + lagrange_last, // column 24 + w_l, // column 25 + w_r, // column 26 + w_o, // column 27 + w_4, // column 28 + sorted_accum, // column 29 + z_perm, // column 30 + z_lookup, // column 31 + table_1_shift, // column 32 + table_2_shift, // column 33 + table_3_shift, // column 34 + table_4_shift, // column 35 + w_l_shift, // column 36 + w_r_shift, // column 37 + w_o_shift, // column 38 + w_4_shift, // column 39 + sorted_accum_shift, // column 40 + z_perm_shift, // column 41 + z_lookup_shift) // column 42 + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; // Gemini-specific getters. 
- std::vector get_unshifted() override + RefVector get_unshifted() { return { q_c, q_l, q_r, q_o, q_4, q_m, q_arith, q_sort, q_elliptic, q_aux, q_lookup, sigma_1, sigma_2, sigma_3, sigma_4, id_1, @@ -296,11 +207,11 @@ class Ultra { }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { table_1, table_2, table_3, table_4, w_l, w_r, w_o, w_4, sorted_accum, z_perm, z_lookup }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { table_1_shift, table_2_shift, table_3_shift, table_4_shift, w_l_shift, w_r_shift, w_o_shift, w_4_shift, sorted_accum_shift, z_perm_shift, z_lookup_shift }; @@ -313,12 +224,10 @@ class Ultra { * @note TODO(Cody): Maybe multiple inheritance is the right thing here. In that case, nothing should eve inherit * from ProvingKey. */ - class ProvingKey : public ProvingKey_, - WitnessEntities> { + class ProvingKey : public ProvingKey_, WitnessEntities> { public: // Expose constructors on the base class - using Base = ProvingKey_, - WitnessEntities>; + using Base = ProvingKey_, WitnessEntities>; using Base::Base; std::vector memory_read_records; @@ -336,22 +245,22 @@ class Ultra { * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our * circuits. */ - using VerificationKey = VerificationKey_>; + using VerificationKey = VerificationKey_>; /** * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated * at one point. */ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; }; /** * @brief A container for polynomials handles; only stores spans. 
*/ - class ProverPolynomials : public AllEntities { + class ProverPolynomials : public AllEntities { public: [[nodiscard]] size_t get_polynomial_size() const { return q_c.size(); } [[nodiscard]] AllValues get_row(const size_t row_idx) const @@ -367,7 +276,7 @@ class Ultra { /** * @brief A container for storing the partially evaluated multivariates produced by sumcheck. */ - class PartiallyEvaluatedMultivariates : public AllEntities { + class PartiallyEvaluatedMultivariates : public AllEntities { public: PartiallyEvaluatedMultivariates() = default; @@ -384,21 +293,25 @@ class Ultra { * @brief A container for univariates used during Protogalaxy folding and sumcheck. * @details During folding and sumcheck, the prover evaluates the relations on these univariates. */ - template - using ProverUnivariates = AllEntities, barretenberg::Univariate>; + template using ProverUnivariates = AllEntities>; /** * @brief A container for univariates produced during the hot loop in sumcheck. */ using ExtendedEdges = ProverUnivariates; + /** + * @brief A container for the witness commitments. + */ + using WitnessCommitments = WitnessEntities; + /** * @brief A container for commitment labels. * @note It's debatable whether this should inherit from AllEntities. since most entries are not strictly needed. It * has, however, been useful during debugging to have these labels available. 
* */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { public: CommitmentLabels() { @@ -439,10 +352,10 @@ class Ultra { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { public: VerifierCommitments(std::shared_ptr verification_key, - [[maybe_unused]] const BaseTranscript& transcript) + [[maybe_unused]] const BaseTranscript& transcript) { static_cast(transcript); q_m = verification_key->q_m; @@ -483,7 +396,7 @@ class Ultra { * @brief Derived class that defines proof structure for Ultra proofs, as well as supporting functions. * */ - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: // Transcript objects defined as public member variables for easy access and modification uint32_t circuit_size; @@ -507,7 +420,7 @@ class Ultra { // Used by verifier to initialize the transcript Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} static Transcript prover_init_empty() @@ -530,7 +443,7 @@ class Ultra { * structure. Must be called in order to access the structure of the proof. * */ - void deserialize_full_transcript() override + void deserialize_full_transcript() { // take current proof and put them into the struct size_t num_bytes_read = 0; @@ -567,7 +480,7 @@ class Ultra { * deserialize_full_transcript() was called and some transcript variable was modified. 
* */ - void serialize_full_transcript() override + void serialize_full_transcript() { size_t old_proof_length = proof_data.size(); proof_data.clear(); // clear proof_data so the rest of the function can replace it diff --git a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive.hpp b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive.hpp index e780007eb36..b9980e0d071 100644 --- a/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive.hpp +++ b/barretenberg/cpp/src/barretenberg/flavor/ultra_recursive.hpp @@ -6,6 +6,7 @@ #include "barretenberg/polynomials/univariate.hpp" #include "barretenberg/flavor/flavor.hpp" +#include "barretenberg/flavor/flavor_macros.hpp" #include "barretenberg/flavor/ultra.hpp" #include "barretenberg/polynomials/evaluation_domain.hpp" #include "barretenberg/polynomials/polynomial.hpp" @@ -66,7 +67,7 @@ template class UltraRecursive_ { // assignment of witnesses. We again choose a neutral name. static constexpr size_t NUM_PRECOMPUTED_ENTITIES = 25; // The total number of witness entities not including shifts. - static constexpr size_t NUM_WITNESS_ENTITIES = 11; + static constexpr size_t NUM_WITNESS_ENTITIES = 7; // define the tuple of Relations that comprise the Sumcheck relation using Relations = std::tuple, @@ -89,84 +90,68 @@ template class UltraRecursive_ { using TupleOfArraysOfValues = decltype(create_tuple_of_arrays_of_values()); private: - template + template /** * @brief A base class labelling precomputed entities and (ordered) subsets of interest. * @details Used to build the proving key and verification key. 
*/ - class PrecomputedEntities : public PrecomputedEntities_ { + class PrecomputedEntities : public PrecomputedEntitiesBase { public: - DataType q_m; // column 0 - DataType q_c; // column 1 - DataType q_l; // column 2 - DataType q_r; // column 3 - DataType q_o; // column 4 - DataType q_4; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType sigma_1; // column 11 - DataType sigma_2; // column 12 - DataType sigma_3; // column 13 - DataType sigma_4; // column 14 - DataType id_1; // column 15 - DataType id_2; // column 16 - DataType id_3; // column 17 - DataType id_4; // column 18 - DataType table_1; // column 19 - DataType table_2; // column 20 - DataType table_3; // column 21 - DataType table_4; // column 22 - DataType lagrange_first; // column 23 - DataType lagrange_last; // column 24 - - std::vector get_selectors() override + DEFINE_FLAVOR_MEMBERS(DataType, + q_m, // column 0 + q_c, // column 1 + q_l, // column 2 + q_r, // column 3 + q_o, // column 4 + q_4, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + sigma_1, // column 11 + sigma_2, // column 12 + sigma_3, // column 13 + sigma_4, // column 14 + id_1, // column 15 + id_2, // column 16 + id_3, // column 17 + id_4, // column 18 + table_1, // column 19 + table_2, // column 20 + table_3, // column 21 + table_4, // column 22 + lagrange_first, // column 23 + lagrange_last); // column 24 + + RefVector get_selectors() { return { q_m, q_c, q_l, q_r, q_o, q_4, q_arith, q_sort, q_elliptic, q_aux, q_lookup }; }; - std::vector get_sigma_polynomials() override { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; - std::vector get_id_polynomials() override { return { id_1, id_2, id_3, id_4 }; }; + RefVector get_sigma_polynomials() { return { sigma_1, sigma_2, sigma_3, sigma_4 }; }; + RefVector get_id_polynomials() { 
return { id_1, id_2, id_3, id_4 }; }; - std::vector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; + RefVector get_table_polynomials() { return { table_1, table_2, table_3, table_4 }; }; }; /** * @brief Container for all witness polynomials used/constructed by the prover. * @details Shifts are not included here since they do not occupy their own memory. */ - template - class WitnessEntities : public WitnessEntities_ { + template class WitnessEntities { public: - DataType w_l; // column 0 - DataType w_r; // column 1 - DataType w_o; // column 2 - DataType w_4; // column 3 - DataType sorted_1; // column 4 - DataType sorted_2; // column 5 - DataType sorted_3; // column 6 - DataType sorted_4; // column 7 - DataType sorted_accum; // column 8 - DataType z_perm; // column 9 - DataType z_lookup; // column 10 - - DEFINE_POINTER_VIEW(NUM_WITNESS_ENTITIES, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_1, - &sorted_2, - &sorted_3, - &sorted_4, - &sorted_accum, - &z_perm, - &z_lookup, ) - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; - // The sorted concatenations of table and witness data needed for plookup. - std::vector get_sorted_polynomials() { return { sorted_1, sorted_2, sorted_3, sorted_4 }; }; + DEFINE_FLAVOR_MEMBERS(DataType, + w_l, // column 0 + w_r, // column 1 + w_o, // column 2 + w_4, // column 3 + sorted_accum, // column 4 + z_perm, // column 5 + z_lookup // column 6 + + ); + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; }; /** @@ -178,101 +163,57 @@ template class UltraRecursive_ { * Symbolically we have: AllEntities = PrecomputedEntities + WitnessEntities + "ShiftedEntities". It could be * implemented as such, but we have this now. 
*/ - template - class AllEntities : public AllEntities_ { + template class AllEntities { public: - DataType q_c; // column 0 - DataType q_l; // column 1 - DataType q_r; // column 2 - DataType q_o; // column 3 - DataType q_4; // column 4 - DataType q_m; // column 5 - DataType q_arith; // column 6 - DataType q_sort; // column 7 - DataType q_elliptic; // column 8 - DataType q_aux; // column 9 - DataType q_lookup; // column 10 - DataType sigma_1; // column 11 - DataType sigma_2; // column 12 - DataType sigma_3; // column 13 - DataType sigma_4; // column 14 - DataType id_1; // column 15 - DataType id_2; // column 16 - DataType id_3; // column 17 - DataType id_4; // column 18 - DataType table_1; // column 19 - DataType table_2; // column 20 - DataType table_3; // column 21 - DataType table_4; // column 22 - DataType lagrange_first; // column 23 - DataType lagrange_last; // column 24 - DataType w_l; // column 25 - DataType w_r; // column 26 - DataType w_o; // column 27 - DataType w_4; // column 28 - DataType sorted_accum; // column 29 - DataType z_perm; // column 30 - DataType z_lookup; // column 31 - DataType table_1_shift; // column 32 - DataType table_2_shift; // column 33 - DataType table_3_shift; // column 34 - DataType table_4_shift; // column 35 - DataType w_l_shift; // column 36 - DataType w_r_shift; // column 37 - DataType w_o_shift; // column 38 - DataType w_4_shift; // column 39 - DataType sorted_accum_shift; // column 40 - DataType z_perm_shift; // column 41 - DataType z_lookup_shift; // column 42 - - DEFINE_POINTER_VIEW(NUM_ALL_ENTITIES, - &q_c, - &q_l, - &q_r, - &q_o, - &q_4, - &q_m, - &q_arith, - &q_sort, - &q_elliptic, - &q_aux, - &q_lookup, - &sigma_1, - &sigma_2, - &sigma_3, - &sigma_4, - &id_1, - &id_2, - &id_3, - &id_4, - &table_1, - &table_2, - &table_3, - &table_4, - &lagrange_first, - &lagrange_last, - &w_l, - &w_r, - &w_o, - &w_4, - &sorted_accum, - &z_perm, - &z_lookup, - &table_1_shift, - &table_2_shift, - &table_3_shift, - &table_4_shift, - 
&w_l_shift, - &w_r_shift, - &w_o_shift, - &w_4_shift, - &sorted_accum_shift, - &z_perm_shift, - &z_lookup_shift) - - std::vector get_wires() override { return { w_l, w_r, w_o, w_4 }; }; + DEFINE_FLAVOR_MEMBERS(DataType, + q_c, // column 0 + q_l, // column 1 + q_r, // column 2 + q_o, // column 3 + q_4, // column 4 + q_m, // column 5 + q_arith, // column 6 + q_sort, // column 7 + q_elliptic, // column 8 + q_aux, // column 9 + q_lookup, // column 10 + sigma_1, // column 11 + sigma_2, // column 12 + sigma_3, // column 13 + sigma_4, // column 14 + id_1, // column 15 + id_2, // column 16 + id_3, // column 17 + id_4, // column 18 + table_1, // column 19 + table_2, // column 20 + table_3, // column 21 + table_4, // column 22 + lagrange_first, // column 23 + lagrange_last, // column 24 + w_l, // column 25 + w_r, // column 26 + w_o, // column 27 + w_4, // column 28 + sorted_accum, // column 29 + z_perm, // column 30 + z_lookup, // column 31 + table_1_shift, // column 32 + table_2_shift, // column 33 + table_3_shift, // column 34 + table_4_shift, // column 35 + w_l_shift, // column 36 + w_r_shift, // column 37 + w_o_shift, // column 38 + w_4_shift, // column 39 + sorted_accum_shift, // column 40 + z_perm_shift, // column 41 + z_lookup_shift // column 42 + ); + + RefVector get_wires() { return { w_l, w_r, w_o, w_4 }; }; // Gemini-specific getters. 
- std::vector get_unshifted() override + RefVector get_unshifted() { return { q_c, q_l, q_r, q_o, q_4, q_m, q_arith, q_sort, q_elliptic, q_aux, q_lookup, sigma_1, sigma_2, sigma_3, sigma_4, id_1, @@ -281,11 +222,11 @@ template class UltraRecursive_ { }; }; - std::vector get_to_be_shifted() override + RefVector get_to_be_shifted() { return { table_1, table_2, table_3, table_4, w_l, w_r, w_o, w_4, sorted_accum, z_perm, z_lookup }; }; - std::vector get_shifted() override + RefVector get_shifted() { return { table_1_shift, table_2_shift, table_3_shift, table_4_shift, w_l_shift, w_r_shift, w_o_shift, w_4_shift, sorted_accum_shift, z_perm_shift, z_lookup_shift }; @@ -301,7 +242,7 @@ template class UltraRecursive_ { * that, and split out separate PrecomputedPolynomials/Commitments data for clarity but also for portability of our * circuits. */ - class VerificationKey : public VerificationKey_> { + class VerificationKey : public VerificationKey_> { public: /** * @brief Construct a new Verification Key with stdlib types from a provided native verification key @@ -310,8 +251,7 @@ template class UltraRecursive_ { * @param native_key Native verification key from which to extract the precomputed commitments */ VerificationKey(CircuitBuilder* builder, auto native_key) - : VerificationKey_>(native_key->circuit_size, - native_key->num_public_inputs) + : VerificationKey_>(native_key->circuit_size, native_key->num_public_inputs) { this->q_m = Commitment::from_witness(builder, native_key->q_m); this->q_l = Commitment::from_witness(builder, native_key->q_l); @@ -345,9 +285,9 @@ template class UltraRecursive_ { * @brief A field element for each entity of the flavor. These entities represent the prover polynomials evaluated * at one point. 
*/ - class AllValues : public AllEntities { + class AllValues : public AllEntities { public: - using Base = AllEntities; + using Base = AllEntities; using Base::Base; AllValues(std::array _data_in) { this->_data = _data_in; } }; @@ -358,7 +298,7 @@ template class UltraRecursive_ { * has, however, been useful during debugging to have these labels available. * */ - class CommitmentLabels : public AllEntities { + class CommitmentLabels : public AllEntities { public: CommitmentLabels() { @@ -399,7 +339,7 @@ template class UltraRecursive_ { }; }; - class VerifierCommitments : public AllEntities { + class VerifierCommitments : public AllEntities { public: VerifierCommitments(std::shared_ptr verification_key) { @@ -435,7 +375,7 @@ template class UltraRecursive_ { * @brief Derived class that defines proof structure for UltraRecursive proofs, as well as supporting functions. * */ - class Transcript : public BaseTranscript { + class Transcript : public BaseTranscript { public: // Transcript objects defined as public member variables for easy access and modification uint32_t circuit_size; @@ -459,7 +399,7 @@ template class UltraRecursive_ { // Used by verifier to initialize the transcript Transcript(const std::vector& proof) - : BaseTranscript(proof) + : BaseTranscript(proof) {} static Transcript prover_init_empty() @@ -482,74 +422,73 @@ template class UltraRecursive_ { * the structure. Must be called in order to access the structure of the proof. 
* */ - void deserialize_full_transcript() override + void deserialize_full_transcript() { // take current proof and put them into the struct size_t num_bytes_read = 0; - circuit_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + circuit_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); size_t log_n = numeric::get_msb(circuit_size); - public_input_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - pub_inputs_offset = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + public_input_size = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + pub_inputs_offset = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < public_input_size; ++i) { - public_inputs.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); + public_inputs.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); } - w_l_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_r_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_o_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - sorted_accum_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - w_4_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - z_perm_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - z_lookup_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_l_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_r_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_o_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + sorted_accum_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + w_4_comm = deserialize_from_buffer(BaseTranscript::proof_data, 
num_bytes_read); + z_perm_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + z_lookup_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { sumcheck_univariates.push_back( deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read)); + BaseTranscript::proof_data, num_bytes_read)); } - sumcheck_evaluations = deserialize_from_buffer>( - BaseTranscript::proof_data, num_bytes_read); + sumcheck_evaluations = + deserialize_from_buffer>(BaseTranscript::proof_data, num_bytes_read); for (size_t i = 0; i < log_n; ++i) { - zm_cq_comms.push_back( - deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); + zm_cq_comms.push_back(deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read)); } - zm_cq_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); - zm_pi_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + zm_cq_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); + zm_pi_comm = deserialize_from_buffer(BaseTranscript::proof_data, num_bytes_read); } /** * @brief Serializes the structure variables into a FULL UltraRecursive proof. Should be called only if * deserialize_full_transcript() was called and some transcript variable was modified. 
* */ - void serialize_full_transcript() override + void serialize_full_transcript() { - size_t old_proof_length = BaseTranscript::proof_data.size(); - BaseTranscript::proof_data.clear(); // clear proof_data so the rest of the function can replace it + size_t old_proof_length = BaseTranscript::proof_data.size(); + BaseTranscript::proof_data.clear(); // clear proof_data so the rest of the function can replace it size_t log_n = numeric::get_msb(circuit_size); - serialize_to_buffer(circuit_size, BaseTranscript::proof_data); - serialize_to_buffer(public_input_size, BaseTranscript::proof_data); - serialize_to_buffer(pub_inputs_offset, BaseTranscript::proof_data); + serialize_to_buffer(circuit_size, BaseTranscript::proof_data); + serialize_to_buffer(public_input_size, BaseTranscript::proof_data); + serialize_to_buffer(pub_inputs_offset, BaseTranscript::proof_data); for (size_t i = 0; i < public_input_size; ++i) { - serialize_to_buffer(public_inputs[i], BaseTranscript::proof_data); + serialize_to_buffer(public_inputs[i], BaseTranscript::proof_data); } - serialize_to_buffer(w_l_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_r_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_o_comm, BaseTranscript::proof_data); - serialize_to_buffer(sorted_accum_comm, BaseTranscript::proof_data); - serialize_to_buffer(w_4_comm, BaseTranscript::proof_data); - serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); - serialize_to_buffer(z_lookup_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_l_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_r_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_o_comm, BaseTranscript::proof_data); + serialize_to_buffer(sorted_accum_comm, BaseTranscript::proof_data); + serialize_to_buffer(w_4_comm, BaseTranscript::proof_data); + serialize_to_buffer(z_perm_comm, BaseTranscript::proof_data); + serialize_to_buffer(z_lookup_comm, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - 
serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_univariates[i], BaseTranscript::proof_data); } - serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); + serialize_to_buffer(sumcheck_evaluations, BaseTranscript::proof_data); for (size_t i = 0; i < log_n; ++i) { - serialize_to_buffer(zm_cq_comms[i], BaseTranscript::proof_data); + serialize_to_buffer(zm_cq_comms[i], BaseTranscript::proof_data); } - serialize_to_buffer(zm_cq_comm, BaseTranscript::proof_data); - serialize_to_buffer(zm_pi_comm, BaseTranscript::proof_data); + serialize_to_buffer(zm_cq_comm, BaseTranscript::proof_data); + serialize_to_buffer(zm_pi_comm, BaseTranscript::proof_data); // sanity check to make sure we generate the same length of proof as before. - ASSERT(BaseTranscript::proof_data.size() == old_proof_length); + ASSERT(BaseTranscript::proof_data.size() == old_proof_length); } }; }; diff --git a/barretenberg/cpp/src/barretenberg/goblin/full_goblin_composer.test.cpp b/barretenberg/cpp/src/barretenberg/goblin/full_goblin_composer.test.cpp index e263cb329a4..09563987010 100644 --- a/barretenberg/cpp/src/barretenberg/goblin/full_goblin_composer.test.cpp +++ b/barretenberg/cpp/src/barretenberg/goblin/full_goblin_composer.test.cpp @@ -30,14 +30,9 @@ class FullGoblinComposerTests : public ::testing::Test { using Point = Curve::AffineElement; using CommitmentKey = pcs::CommitmentKey; using OpQueue = proof_system::ECCOpQueue; - using GoblinUltraBuilder = proof_system::GoblinUltraCircuitBuilder; using ECCVMFlavor = flavor::ECCVM; using ECCVMBuilder = proof_system::ECCVMCircuitBuilder; using ECCVMComposer = ECCVMComposer_; - using TranslatorFlavor = flavor::GoblinTranslator; - using TranslatorBuilder = proof_system::GoblinTranslatorCircuitBuilder; - using TranslatorComposer = GoblinTranslatorComposer; - using TranslatorConsistencyData = barretenberg::TranslationEvaluations; static constexpr size_t NUM_OP_QUEUE_COLUMNS = 
flavor::GoblinUltra::NUM_WIRES; @@ -46,7 +41,7 @@ class FullGoblinComposerTests : public ::testing::Test { * * @param builder */ - static void generate_test_circuit(GoblinUltraBuilder& builder) + static void generate_test_circuit(proof_system::GoblinUltraCircuitBuilder& builder) { // Add some arbitrary ecc op gates for (size_t i = 0; i < 3; ++i) { @@ -88,7 +83,7 @@ class FullGoblinComposerTests : public ::testing::Test { static void perform_op_queue_interactions_for_mock_first_circuit( std::shared_ptr& op_queue) { - auto builder = GoblinUltraBuilder{ op_queue }; + proof_system::GoblinUltraCircuitBuilder builder{ op_queue }; // Add a mul accum op and an equality op auto point = Point::one() * FF::random_element(); @@ -114,7 +109,8 @@ class FullGoblinComposerTests : public ::testing::Test { * @brief Construct and a verify a Honk proof * */ - static bool construct_and_verify_honk_proof(GoblinUltraComposer& composer, GoblinUltraBuilder& builder) + static bool construct_and_verify_honk_proof(GoblinUltraComposer& composer, + proof_system::GoblinUltraCircuitBuilder& builder) { auto instance = composer.create_instance(builder); auto prover = composer.create_prover(instance); @@ -157,12 +153,12 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) // Construct a series of simple Goblin circuits; generate and verify their proofs size_t NUM_CIRCUITS = 3; for (size_t circuit_idx = 0; circuit_idx < NUM_CIRCUITS; ++circuit_idx) { - auto builder = GoblinUltraBuilder{ op_queue }; + proof_system::GoblinUltraCircuitBuilder builder{ op_queue }; generate_test_circuit(builder); // The same composer is used to manage Honk and Merge prover/verifier - auto composer = GoblinUltraComposer(); + proof_system::honk::GoblinUltraComposer composer; // Construct and verify Ultra Goblin Honk proof bool honk_verified = construct_and_verify_honk_proof(composer, builder); @@ -187,11 +183,11 @@ TEST_F(FullGoblinComposerTests, SimpleCircuit) // TODO(https://github.com/AztecProtocol/barretenberg/issues/786) 
Properly derive batching_challenge auto batching_challenge = Fbase::random_element(); auto evaluation_input = eccvm_prover.evaluation_challenge_x; - auto translator_builder = TranslatorBuilder(batching_challenge, evaluation_input, op_queue); - auto translator_composer = TranslatorComposer(); - auto translator_prover = translator_composer.create_prover(translator_builder); - auto translator_verifier = translator_composer.create_verifier(translator_builder); - auto translator_proof = translator_prover.construct_proof(); + proof_system::GoblinTranslatorCircuitBuilder translator_builder{ batching_challenge, evaluation_input, op_queue }; + GoblinTranslatorComposer translator_composer; + GoblinTranslatorProver translator_prover = translator_composer.create_prover(translator_builder); + GoblinTranslatorVerifier translator_verifier = translator_composer.create_verifier(translator_builder); + proof_system::plonk::proof translator_proof = translator_prover.construct_proof(); bool accumulator_construction_verified = translator_verifier.verify_proof(translator_proof); bool translation_verified = translator_verifier.verify_translation(eccvm_prover.translation_evaluations); EXPECT_TRUE(accumulator_construction_verified && translation_verified); diff --git a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp index b4f4573c6f8..de07bda2534 100644 --- a/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp +++ b/barretenberg/cpp/src/barretenberg/honk/proof_system/permutation_library.hpp @@ -1,4 +1,5 @@ #pragma once +#include "barretenberg/common/ref_vector.hpp" #include "barretenberg/plonk/proof_system/proving_key/proving_key.hpp" #include "barretenberg/polynomials/polynomial.hpp" #include @@ -184,12 +185,14 @@ void compute_permutation_grand_products(std::shared_ptr void compute_concatenated_polynomials(StorageHandle* proving_key) { - using PolynomialHandle = 
typename Flavor::PolynomialHandle; + // TODO(AD): use RefVector here, see https://github.com/AztecProtocol/barretenberg/issues/743 + // RefVector makes PolynomialHandle now redundant. Can scale back use of auto then too. + // using PolynomialHandle = typename Flavor::PolynomialHandle; // Concatenation groups are vectors of polynomials that are concatenated together - std::vector> concatenation_groups = proving_key->get_concatenation_groups(); + auto concatenation_groups = proving_key->get_concatenation_groups(); // Resulting concatenated polynomials - std::vector targets = proving_key->get_concatenated_constraints(); + auto targets = proving_key->get_concatenated_constraints(); // A function that produces 1 concatenated polynomial // TODO(#756): This can be rewritten to use more cores. Currently uses at maximum the number of concatenated diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/generalized_permutation.hpp b/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/generalized_permutation.hpp index fbf08b9c102..7741d113b06 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/generalized_permutation.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/generalized_permutation.hpp @@ -27,7 +27,7 @@ inline void compute_gen_permutation_lagrange_base_single(barretenberg::polynomia // here, 'index' refers to an element of our subgroup H // we can almost use permutation[i] to directly index our `roots` array, which contains our subgroup elements // we first have to mask off the 2 high bits, which describe which wire polynomial our permutation maps to (we'll - // deal with that in a bit) we then have to accomodate for the fact that, `roots` only contains *half* of our + // deal with that in a bit) we then have to accommodate for the fact that, `roots` only contains *half* of our // subgroup elements. 
this is because w^{n/2} = -w and we don't want to perform redundant work computing roots of // unity diff --git a/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/permutation.hpp b/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/permutation.hpp index 5f3a78d1a11..7d5565251b9 100644 --- a/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/permutation.hpp +++ b/barretenberg/cpp/src/barretenberg/plonk/proof_system/utils/permutation.hpp @@ -65,7 +65,7 @@ inline void compute_permutation_lagrange_base_single(barretenberg::polynomial& o // `permutation[i]` will specify the 'index' that this wire value will map to. // Here, 'index' refers to an element of our subgroup H. // We can almost use `permutation[i]` to directly index our `roots` array, which contains our subgroup elements. - // We first have to accomodate for the fact that `roots` only contains *half* of our subgroup elements. This is + // We first have to accommodate for the fact that `roots` only contains *half* of our subgroup elements. This is // because ω^{n/2} = -ω and we don't want to perform redundant work computing roots of unity. 
size_t raw_idx = permutation[i].subgroup_index; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.cpp index cb6e7125760..401ad709c43 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.cpp @@ -1,10 +1,9 @@ #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp" -#include "barretenberg/flavor/generated/AvmMini_flavor.hpp" -#include "barretenberg/relations/generated/AvmMini.hpp" - #include "./AvmMini_helper.hpp" +#include "barretenberg/flavor/generated/AvmMini_flavor.hpp" +#include "barretenberg/relations/generated/AvmMini/avm_mini.hpp" namespace proof_system { @@ -24,13 +23,13 @@ void log_avmMini_trace(std::vector const& trace, size_t beg, size_t end) info("== ROW ", i); info("================================================================================"); - info("m_addr: ", trace.at(i).avmMini_m_addr); - info("m_clk: ", trace.at(i).avmMini_m_clk); - info("m_sub_clk: ", trace.at(i).avmMini_m_sub_clk); - info("m_val: ", trace.at(i).avmMini_m_val); - info("m_lastAccess: ", trace.at(i).avmMini_m_lastAccess); - info("m_rw: ", trace.at(i).avmMini_m_rw); - info("m_val_shift: ", trace.at(i).avmMini_m_val_shift); + info("m_addr: ", trace.at(i).memTrace_m_addr); + info("m_clk: ", trace.at(i).memTrace_m_clk); + info("m_sub_clk: ", trace.at(i).memTrace_m_sub_clk); + info("m_val: ", trace.at(i).memTrace_m_val); + info("m_lastAccess: ", trace.at(i).memTrace_m_lastAccess); + info("m_rw: ", trace.at(i).memTrace_m_rw); + info("m_val_shift: ", trace.at(i).memTrace_m_val_shift); info("first: ", trace.at(i).avmMini_first); info("last: ", trace.at(i).avmMini_last); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.hpp 
b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.hpp index c84b65b1dc6..491d597fa26 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_helper.hpp @@ -4,13 +4,13 @@ #include "barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp" #include "barretenberg/flavor/generated/AvmMini_flavor.hpp" -#include "barretenberg/relations/generated/AvmMini.hpp" +#include "barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp" namespace proof_system { using Flavor = proof_system::honk::flavor::AvmMiniFlavor; using FF = Flavor::FF; -using Row = proof_system::AvmMini_vm::Row; +using Row = proof_system::AvmMiniFullRow; void log_avmMini_trace(std::vector const& trace, size_t beg, size_t end); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.cpp index 8e44891850a..01089a78395 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.cpp @@ -9,8 +9,8 @@ #include #include "./AvmMini_trace.hpp" +#include "./generated/AvmMini_circuit_builder.hpp" -#include "barretenberg/relations/generated/AvmMini.hpp" namespace proof_system { /** @@ -82,10 +82,9 @@ void AvmMiniTraceBuilder::insertInMemTrace(uint32_t m_clk, uint32_t m_sub_clk, u } // Memory operations need to be performed before the addition of the corresponding row in -// mainTrace, otherwise the m_clk value will be wrong. 
This applies to: -// loadAInMemTrace, loadBInMemTrace, loadCInMemTrace +// ainTrace, otherwise the m_clk value will be wrong.This applies to : loadAInMemTrace, loadBInMemTrace, +// loadCInMemTrace // storeAInMemTrace, storeBInMemTrace, storeCInMemTrace - /** * @brief Add a memory trace entry corresponding to a memory load into the intermediate * register Ia. @@ -420,17 +419,17 @@ std::vector AvmMiniTraceBuilder::finalize() auto const& src = memTrace.at(i); auto& dest = mainTrace.at(i); - dest.avmMini_m_clk = FF(src.m_clk); - dest.avmMini_m_sub_clk = FF(src.m_sub_clk); - dest.avmMini_m_addr = FF(src.m_addr); - dest.avmMini_m_val = src.m_val; - dest.avmMini_m_rw = FF(static_cast(src.m_rw)); + dest.memTrace_m_clk = FF(src.m_clk); + dest.memTrace_m_sub_clk = FF(src.m_sub_clk); + dest.memTrace_m_addr = FF(src.m_addr); + dest.memTrace_m_val = src.m_val; + dest.memTrace_m_rw = FF(static_cast(src.m_rw)); if (i + 1 < memTraceSize) { auto const& next = memTrace.at(i + 1); - dest.avmMini_m_lastAccess = FF(static_cast(src.m_addr != next.m_addr)); + dest.memTrace_m_lastAccess = FF(static_cast(src.m_addr != next.m_addr)); } else { - dest.avmMini_m_lastAccess = FF(1); + dest.memTrace_m_lastAccess = FF(1); } } diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.hpp index b1fc703544e..781d798cd6e 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/AvmMini_trace.hpp @@ -3,13 +3,15 @@ #include "barretenberg/common/throw_or_abort.hpp" #include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp" +#include "barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp" #include "barretenberg/flavor/generated/AvmMini_flavor.hpp" -#include 
"barretenberg/relations/generated/AvmMini.hpp" + +#include "barretenberg/relations/generated/AvmMini/avm_mini.hpp" using Flavor = proof_system::honk::flavor::AvmMiniFlavor; using FF = Flavor::FF; -using Row = proof_system::AvmMini_vm::Row; +using Row = proof_system::AvmMiniFullRow; namespace proof_system { diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp index 6bee55c0d4c..9468df3aa6e 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/eccvm/eccvm_circuit_builder.hpp @@ -390,7 +390,7 @@ template class ECCVMCircuitBuilder { } } for (size_t i = 0; i < precompute_table_state.size(); ++i) { - // first row is always an empty row (to accomodate shifted polynomials which must have 0 as 1st + // first row is always an empty row (to accommodate shifted polynomials which must have 0 as 1st // coefficient). All other rows in the precompute_table_state represent active wnaf gates (i.e. // precompute_select = 1) polys.precompute_select[i] = (i != 0) ? 
1 : 0; diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp index 1bf6093681e..336331b3e69 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/generated/AvmMini_circuit_builder.hpp @@ -8,24 +8,53 @@ #include "barretenberg/proof_system/circuit_builder/circuit_builder_base.hpp" #include "barretenberg/flavor/generated/AvmMini_flavor.hpp" -#include "barretenberg/relations/generated/AvmMini.hpp" +#include "barretenberg/relations/generated/AvmMini/avm_mini.hpp" +#include "barretenberg/relations/generated/AvmMini/mem_trace.hpp" using namespace barretenberg; namespace proof_system { +template struct AvmMiniFullRow { + FF avmMini_clk{}; + FF avmMini_first{}; + FF memTrace_m_clk{}; + FF memTrace_m_sub_clk{}; + FF memTrace_m_addr{}; + FF memTrace_m_val{}; + FF memTrace_m_lastAccess{}; + FF memTrace_m_rw{}; + FF avmMini_subop{}; + FF avmMini_ia{}; + FF avmMini_ib{}; + FF avmMini_ic{}; + FF avmMini_mem_op_a{}; + FF avmMini_mem_op_b{}; + FF avmMini_mem_op_c{}; + FF avmMini_rwa{}; + FF avmMini_rwb{}; + FF avmMini_rwc{}; + FF avmMini_mem_idx_a{}; + FF avmMini_mem_idx_b{}; + FF avmMini_mem_idx_c{}; + FF avmMini_last{}; + FF memTrace_m_addr_shift{}; + FF memTrace_m_rw_shift{}; + FF memTrace_m_val_shift{}; +}; + class AvmMiniCircuitBuilder { public: using Flavor = proof_system::honk::flavor::AvmMiniFlavor; using FF = Flavor::FF; - using Row = AvmMini_vm::Row; + using Row = AvmMiniFullRow; // TODO: template using Polynomial = Flavor::Polynomial; using AllPolynomials = Flavor::AllPolynomials; - static constexpr size_t num_fixed_columns = 26; - static constexpr size_t num_polys = 23; + static constexpr size_t num_fixed_columns = 25; + static constexpr size_t num_polys = 22; std::vector rows; void 
set_trace(std::vector&& trace) { rows = std::move(trace); } @@ -42,8 +71,13 @@ class AvmMiniCircuitBuilder { for (size_t i = 0; i < rows.size(); i++) { polys.avmMini_clk[i] = rows[i].avmMini_clk; - polys.avmMini_positive[i] = rows[i].avmMini_positive; polys.avmMini_first[i] = rows[i].avmMini_first; + polys.memTrace_m_clk[i] = rows[i].memTrace_m_clk; + polys.memTrace_m_sub_clk[i] = rows[i].memTrace_m_sub_clk; + polys.memTrace_m_addr[i] = rows[i].memTrace_m_addr; + polys.memTrace_m_val[i] = rows[i].memTrace_m_val; + polys.memTrace_m_lastAccess[i] = rows[i].memTrace_m_lastAccess; + polys.memTrace_m_rw[i] = rows[i].memTrace_m_rw; polys.avmMini_subop[i] = rows[i].avmMini_subop; polys.avmMini_ia[i] = rows[i].avmMini_ia; polys.avmMini_ib[i] = rows[i].avmMini_ib; @@ -58,17 +92,11 @@ class AvmMiniCircuitBuilder { polys.avmMini_mem_idx_b[i] = rows[i].avmMini_mem_idx_b; polys.avmMini_mem_idx_c[i] = rows[i].avmMini_mem_idx_c; polys.avmMini_last[i] = rows[i].avmMini_last; - polys.avmMini_m_clk[i] = rows[i].avmMini_m_clk; - polys.avmMini_m_sub_clk[i] = rows[i].avmMini_m_sub_clk; - polys.avmMini_m_addr[i] = rows[i].avmMini_m_addr; - polys.avmMini_m_val[i] = rows[i].avmMini_m_val; - polys.avmMini_m_lastAccess[i] = rows[i].avmMini_m_lastAccess; - polys.avmMini_m_rw[i] = rows[i].avmMini_m_rw; } - polys.avmMini_m_val_shift = Polynomial(polys.avmMini_m_val.shifted()); - polys.avmMini_m_addr_shift = Polynomial(polys.avmMini_m_addr.shifted()); - polys.avmMini_m_rw_shift = Polynomial(polys.avmMini_m_rw.shifted()); + polys.memTrace_m_addr_shift = Polynomial(polys.memTrace_m_addr.shifted()); + polys.memTrace_m_rw_shift = Polynomial(polys.memTrace_m_rw.shifted()); + polys.memTrace_m_val_shift = Polynomial(polys.memTrace_m_val.shifted()); return polys; } @@ -103,7 +131,14 @@ class AvmMiniCircuitBuilder { return true; }; - return evaluate_relation.template operator()>("AvmMini"); + if (!evaluate_relation.template operator()>("mem_trace")) { + return false; + } + if 
(!evaluate_relation.template operator()>("avm_mini")) { + return false; + } + + return true; } [[nodiscard]] size_t get_num_gates() const { return rows.size(); } diff --git a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp index 047e3e8489e..42ef656b807 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/circuit_builder/ultra_circuit_builder.cpp @@ -701,7 +701,7 @@ std::vector UltraCircuitBuilder_::decompose_into_defa * This is not strictly required iff num_bits <= target_range_bitnum. * However, this produces an edge-case where a variable is range-constrained but NOT present in an arithmetic gate. * This in turn produces an unsatisfiable circuit (see `create_new_range_constraint`). We would need to check for - * and accomodate/reject this edge case to support not adding addition gates here if not reqiured + * and accommodate/reject this edge case to support not adding addition gates here if not required * if (num_bits <= target_range_bitnum) { * const uint64_t expected_range = (1ULL << num_bits) - 1ULL; * create_new_range_constraint(variable_index, expected_range); @@ -3454,4 +3454,4 @@ template class UltraCircuitBuilder_ // To enable this we need to template plookup // template class UltraCircuitBuilder_; -} // namespace proof_system \ No newline at end of file +} // namespace proof_system diff --git a/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp b/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp index 0726a1ad5bf..90a7f0152f0 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/composer/permutation_lib.hpp @@ -7,6 +7,7 @@ */ #pragma once +#include "barretenberg/common/ref_vector.hpp" 
#include "barretenberg/ecc/curves/bn254/fr.hpp" #include "barretenberg/flavor/flavor.hpp" #include "barretenberg/plonk/proof_system/proving_key/proving_key.hpp" @@ -280,7 +281,7 @@ PermutationMapping compute_permutation_mapping( */ template void compute_honk_style_permutation_lagrange_polynomials_from_mapping( - std::vector permutation_polynomials, // sigma or ID poly + const RefVector& permutation_polynomials, // sigma or ID poly std::array, Flavor::NUM_WIRES>& permutation_mappings, typename Flavor::ProvingKey* proving_key) { @@ -346,7 +347,7 @@ inline void compute_standard_plonk_lagrange_polynomial(barretenberg::polynomial& // `permutation[i]` will specify the 'index' that this wire value will map to. // Here, 'index' refers to an element of our subgroup H. // We can almost use `permutation[i]` to directly index our `roots` array, which contains our subgroup elements. - // We first have to accomodate for the fact that `roots` only contains *half* of our subgroup elements. This is + // We first have to accommodate for the fact that `roots` only contains *half* of our subgroup elements. This is // because ω^{n/2} = -ω and we don't want to perform redundant work computing roots of unity. 
size_t raw_idx = permutation[i].row_index; @@ -469,7 +470,7 @@ void compute_standard_plonk_sigma_permutations(const typename Flavor::CircuitBui * * @param key Proving key where we will save the polynomials */ -template inline void compute_first_and_last_lagrange_polynomials(auto proving_key) +template inline void compute_first_and_last_lagrange_polynomials(const auto& proving_key) { const size_t n = proving_key->circuit_size; typename Flavor::Polynomial lagrange_polynomial_0(n); diff --git a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp index d29f4009b05..158c6119d61 100644 --- a/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp +++ b/barretenberg/cpp/src/barretenberg/proof_system/plookup_tables/keccak/keccak_rho.hpp @@ -40,7 +40,7 @@ namespace keccak_tables { * * We need multiple Rho tables in order to efficiently range-constrain our input slices. * - * The maximum number of bits we can accomodate in this lookup table is MAXIMUM_MULTITABLE_BITS (assume this is 8) + * The maximum number of bits we can accommodate in this lookup table is MAXIMUM_MULTITABLE_BITS (assume this is 8) * For example take a left-rotation by 1 bit. The right-slice will be a 63-bit integer. * 63 does not evenly divide 8. i.e. an 8-bit table cannot correctly range-constrain the input slice and we would need * additional range constraints. 
@@ -49,7 +49,7 @@ namespace keccak_tables { * We can stitch together a lookup table sequence that correctly range constrains the left/right slices for any of our * 25 rotation values * - * @tparam TABLE_BITS The number of bits each lookup table can accomodate + * @tparam TABLE_BITS The number of bits each lookup table can accommodate * @tparam LANE_INDEX Required by get_rho_output_table to produce the correct MultiTable */ template class Rho { diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp index d52af9f2a84..e332819a036 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_prover.cpp @@ -23,8 +23,9 @@ template void ProtoGalaxyProver_::prepa transcript.send_to_verifier(domain_separator + "_public_input_" + std::to_string(i), public_input_i); } - auto [eta, beta, gamma] = transcript.get_challenges( - domain_separator + "_eta", domain_separator + "_beta", domain_separator + "_gamma"); + auto [eta, beta, gamma] = challenges_to_field_elements(transcript.get_challenges( + domain_separator + "_eta", domain_separator + "_beta", domain_separator + "_gamma")); + instance->compute_sorted_accumulator_polynomials(eta); instance->compute_grand_product_polynomials(beta, gamma); instance->alpha = transcript.get_challenge(domain_separator + "_alpha"); @@ -42,7 +43,7 @@ ProverFoldingResult ProtoGalaxyProver_prover_polynomials.get_polynomial_size(); const auto log_instance_size = static_cast(numeric::get_msb(instance_size)); @@ -53,7 +54,7 @@ ProverFoldingResult ProtoGalaxyProver_ betas_star(log_instance_size); betas_star[0] = 1; @@ -69,7 +70,7 @@ ProverFoldingResult ProtoGalaxyProver_ class ProtoGalaxyProver_ { using RelationEvaluations = typename Flavor::TupleOfArraysOfValues; ProverInstances instances; - BaseTranscript transcript; + BaseTranscript transcript; ProtoGalaxyProver_() = 
default; ProtoGalaxyProver_(ProverInstances insts) diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp index ed6441dae39..360ef286887 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.cpp @@ -5,7 +5,7 @@ namespace proof_system::honk { template void ProtoGalaxyVerifier_::prepare_for_folding(std::vector fold_data) { - transcript = BaseTranscript{ fold_data }; + transcript = BaseTranscript{ fold_data }; auto index = 0; for (auto it = verifier_instances.begin(); it != verifier_instances.end(); it++, index++) { auto inst = *it; @@ -21,8 +21,9 @@ void ProtoGalaxyVerifier_::prepare_for_folding(std::vector(domain_separator + "_public_input_" + std::to_string(i)); inst->public_inputs.emplace_back(public_input_i); } - auto [eta, beta, gamma] = transcript.get_challenges( - domain_separator + "_eta", domain_separator + "_beta", domain_separator + "_gamma"); + auto [eta, beta, gamma] = challenges_to_field_elements(transcript.get_challenges( + domain_separator + "_eta", domain_separator + "_beta", domain_separator + "_gamma")); + const FF public_input_delta = compute_public_input_delta( inst->public_inputs, beta, gamma, inst->instance_size, inst->pub_inputs_offset); const FF lookup_grand_product_delta = compute_lookup_grand_product_delta(beta, gamma, inst->instance_size); @@ -39,7 +40,7 @@ VerifierFoldingResult ProtoGalaxyVerifier_< using Flavor = typename VerifierInstances::Flavor; prepare_for_folding(fold_data); - auto delta = transcript.get_challenge("delta"); + FF delta = transcript.get_challenge("delta"); auto accumulator = get_accumulator(); auto log_instance_size = static_cast(numeric::get_msb(accumulator->instance_size)); auto deltas = compute_round_challenge_pows(log_instance_size, delta); @@ -48,7 +49,7 @@ VerifierFoldingResult ProtoGalaxyVerifier_< 
perturbator_coeffs[idx] = transcript.template receive_from_prover("perturbator_" + std::to_string(idx)); } auto perturbator = Polynomial(perturbator_coeffs); - auto perturbator_challenge = transcript.get_challenge("perturbator_challenge"); + FF perturbator_challenge = transcript.get_challenge("perturbator_challenge"); auto perturbator_at_challenge = perturbator.evaluate(perturbator_challenge); // Thed degree of K(X) is dk - k - 1 = k(d - 1) - 1. Hence we need k(d - 1) evaluations to represent it. @@ -59,7 +60,7 @@ VerifierFoldingResult ProtoGalaxyVerifier_< } Univariate combiner_quotient( combiner_quotient_evals); - auto combiner_challenge = transcript.get_challenge("combiner_quotient_challenge"); + FF combiner_challenge = transcript.get_challenge("combiner_quotient_challenge"); auto combiner_quotient_at_challenge = combiner_quotient.evaluate(combiner_challenge); auto vanishing_polynomial_at_challenge = combiner_challenge * (combiner_challenge - FF(1)); diff --git a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp index e8f7032cb30..028710983dd 100644 --- a/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/protogalaxy/protogalaxy_verifier.hpp @@ -14,7 +14,7 @@ template class ProtoGalaxyVerifier_ { using Instance = typename VerifierInstances::Instance; using VerificationKey = typename Flavor::VerificationKey; VerifierInstances verifier_instances; - BaseTranscript transcript; + BaseTranscript transcript; ProtoGalaxyVerifier_(VerifierInstances insts) : verifier_instances(insts){}; diff --git a/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini.hpp b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini.hpp deleted file mode 100644 index 056d6464cf1..00000000000 --- a/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini.hpp +++ /dev/null @@ -1,185 +0,0 @@ - -#pragma once -#include 
"../relation_parameters.hpp" -#include "../relation_types.hpp" - -namespace proof_system::AvmMini_vm { - -template struct Row { - FF avmMini_clk{}; - FF avmMini_positive{}; - FF avmMini_first{}; - FF avmMini_subop{}; - FF avmMini_ia{}; - FF avmMini_ib{}; - FF avmMini_ic{}; - FF avmMini_mem_op_a{}; - FF avmMini_mem_op_b{}; - FF avmMini_mem_op_c{}; - FF avmMini_rwa{}; - FF avmMini_rwb{}; - FF avmMini_rwc{}; - FF avmMini_mem_idx_a{}; - FF avmMini_mem_idx_b{}; - FF avmMini_mem_idx_c{}; - FF avmMini_last{}; - FF avmMini_m_clk{}; - FF avmMini_m_sub_clk{}; - FF avmMini_m_addr{}; - FF avmMini_m_val{}; - FF avmMini_m_lastAccess{}; - FF avmMini_m_rw{}; - FF avmMini_m_val_shift{}; - FF avmMini_m_addr_shift{}; - FF avmMini_m_rw_shift{}; -}; - -#define DECLARE_VIEWS(index) \ - using View = typename std::tuple_element::type; \ - [[maybe_unused]] auto avmMini_clk = View(new_term.avmMini_clk); \ - [[maybe_unused]] auto avmMini_positive = View(new_term.avmMini_positive); \ - [[maybe_unused]] auto avmMini_first = View(new_term.avmMini_first); \ - [[maybe_unused]] auto avmMini_subop = View(new_term.avmMini_subop); \ - [[maybe_unused]] auto avmMini_ia = View(new_term.avmMini_ia); \ - [[maybe_unused]] auto avmMini_ib = View(new_term.avmMini_ib); \ - [[maybe_unused]] auto avmMini_ic = View(new_term.avmMini_ic); \ - [[maybe_unused]] auto avmMini_mem_op_a = View(new_term.avmMini_mem_op_a); \ - [[maybe_unused]] auto avmMini_mem_op_b = View(new_term.avmMini_mem_op_b); \ - [[maybe_unused]] auto avmMini_mem_op_c = View(new_term.avmMini_mem_op_c); \ - [[maybe_unused]] auto avmMini_rwa = View(new_term.avmMini_rwa); \ - [[maybe_unused]] auto avmMini_rwb = View(new_term.avmMini_rwb); \ - [[maybe_unused]] auto avmMini_rwc = View(new_term.avmMini_rwc); \ - [[maybe_unused]] auto avmMini_mem_idx_a = View(new_term.avmMini_mem_idx_a); \ - [[maybe_unused]] auto avmMini_mem_idx_b = View(new_term.avmMini_mem_idx_b); \ - [[maybe_unused]] auto avmMini_mem_idx_c = View(new_term.avmMini_mem_idx_c); \ - 
[[maybe_unused]] auto avmMini_last = View(new_term.avmMini_last); \ - [[maybe_unused]] auto avmMini_m_clk = View(new_term.avmMini_m_clk); \ - [[maybe_unused]] auto avmMini_m_sub_clk = View(new_term.avmMini_m_sub_clk); \ - [[maybe_unused]] auto avmMini_m_addr = View(new_term.avmMini_m_addr); \ - [[maybe_unused]] auto avmMini_m_val = View(new_term.avmMini_m_val); \ - [[maybe_unused]] auto avmMini_m_lastAccess = View(new_term.avmMini_m_lastAccess); \ - [[maybe_unused]] auto avmMini_m_rw = View(new_term.avmMini_m_rw); \ - [[maybe_unused]] auto avmMini_m_val_shift = View(new_term.avmMini_m_val_shift); \ - [[maybe_unused]] auto avmMini_m_addr_shift = View(new_term.avmMini_m_addr_shift); \ - [[maybe_unused]] auto avmMini_m_rw_shift = View(new_term.avmMini_m_rw_shift); - -template class AvmMiniImpl { - public: - using FF = FF_; - - static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ - 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - }; - - template - void static accumulate(ContainerOverSubrelations& evals, - const AllEntities& new_term, - [[maybe_unused]] const RelationParameters&, - [[maybe_unused]] const FF& scaling_factor) - { - - // Contribution 0 - { - DECLARE_VIEWS(0); - - auto tmp = (avmMini_subop * (-avmMini_subop + FF(1))); - tmp *= scaling_factor; - std::get<0>(evals) += tmp; - } - // Contribution 1 - { - DECLARE_VIEWS(1); - - auto tmp = (avmMini_mem_op_a * (-avmMini_mem_op_a + FF(1))); - tmp *= scaling_factor; - std::get<1>(evals) += tmp; - } - // Contribution 2 - { - DECLARE_VIEWS(2); - - auto tmp = (avmMini_mem_op_b * (-avmMini_mem_op_b + FF(1))); - tmp *= scaling_factor; - std::get<2>(evals) += tmp; - } - // Contribution 3 - { - DECLARE_VIEWS(3); - - auto tmp = (avmMini_mem_op_c * (-avmMini_mem_op_c + FF(1))); - tmp *= scaling_factor; - std::get<3>(evals) += tmp; - } - // Contribution 4 - { - DECLARE_VIEWS(4); - - auto tmp = (avmMini_rwa * (-avmMini_rwa + FF(1))); - tmp *= scaling_factor; - std::get<4>(evals) += tmp; - } - // Contribution 5 - { - DECLARE_VIEWS(5); 
- - auto tmp = (avmMini_rwb * (-avmMini_rwb + FF(1))); - tmp *= scaling_factor; - std::get<5>(evals) += tmp; - } - // Contribution 6 - { - DECLARE_VIEWS(6); - - auto tmp = (avmMini_rwc * (-avmMini_rwc + FF(1))); - tmp *= scaling_factor; - std::get<6>(evals) += tmp; - } - // Contribution 7 - { - DECLARE_VIEWS(7); - - auto tmp = (avmMini_subop * ((avmMini_ia + avmMini_ib) - avmMini_ic)); - tmp *= scaling_factor; - std::get<7>(evals) += tmp; - } - // Contribution 8 - { - DECLARE_VIEWS(8); - - auto tmp = (avmMini_m_lastAccess * (-avmMini_m_lastAccess + FF(1))); - tmp *= scaling_factor; - std::get<8>(evals) += tmp; - } - // Contribution 9 - { - DECLARE_VIEWS(9); - - auto tmp = (avmMini_m_rw * (-avmMini_m_rw + FF(1))); - tmp *= scaling_factor; - std::get<9>(evals) += tmp; - } - // Contribution 10 - { - DECLARE_VIEWS(10); - - auto tmp = (((-avmMini_first + FF(1)) * (-avmMini_m_lastAccess + FF(1))) * - (avmMini_m_addr_shift - avmMini_m_addr)); - tmp *= scaling_factor; - std::get<10>(evals) += tmp; - } - // Contribution 11 - { - DECLARE_VIEWS(11); - - auto tmp = (((((-avmMini_first + FF(1)) * (-avmMini_last + FF(1))) * (-avmMini_m_lastAccess + FF(1))) * - (-avmMini_m_rw_shift + FF(1))) * - (avmMini_m_val_shift - avmMini_m_val)); - tmp *= scaling_factor; - std::get<11>(evals) += tmp; - } - } -}; - -template using AvmMini = Relation>; - -} // namespace proof_system::AvmMini_vm \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/avm_mini.hpp b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/avm_mini.hpp new file mode 100644 index 00000000000..14909b98be3 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/avm_mini.hpp @@ -0,0 +1,106 @@ + +#pragma once +#include "../../relation_parameters.hpp" +#include "../../relation_types.hpp" +#include "./declare_views.hpp" + +namespace proof_system::AvmMini_vm { + +template struct Avm_miniRow { + FF avmMini_rwc{}; + FF avmMini_rwa{}; + FF 
avmMini_mem_op_b{}; + FF avmMini_ib{}; + FF avmMini_rwb{}; + FF avmMini_subop{}; + FF avmMini_mem_op_c{}; + FF avmMini_ia{}; + FF avmMini_ic{}; + FF avmMini_mem_op_a{}; +}; + +template class avm_miniImpl { + public: + using FF = FF_; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 3, 3, 3, 3, 3, 3, 3, 3, + }; + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& new_term, + [[maybe_unused]] const RelationParameters&, + [[maybe_unused]] const FF& scaling_factor) + { + + // Contribution 0 + { + DECLARE_VIEWS(0); + + auto tmp = (avmMini_subop * (-avmMini_subop + FF(1))); + tmp *= scaling_factor; + std::get<0>(evals) += tmp; + } + // Contribution 1 + { + DECLARE_VIEWS(1); + + auto tmp = (avmMini_mem_op_a * (-avmMini_mem_op_a + FF(1))); + tmp *= scaling_factor; + std::get<1>(evals) += tmp; + } + // Contribution 2 + { + DECLARE_VIEWS(2); + + auto tmp = (avmMini_mem_op_b * (-avmMini_mem_op_b + FF(1))); + tmp *= scaling_factor; + std::get<2>(evals) += tmp; + } + // Contribution 3 + { + DECLARE_VIEWS(3); + + auto tmp = (avmMini_mem_op_c * (-avmMini_mem_op_c + FF(1))); + tmp *= scaling_factor; + std::get<3>(evals) += tmp; + } + // Contribution 4 + { + DECLARE_VIEWS(4); + + auto tmp = (avmMini_rwa * (-avmMini_rwa + FF(1))); + tmp *= scaling_factor; + std::get<4>(evals) += tmp; + } + // Contribution 5 + { + DECLARE_VIEWS(5); + + auto tmp = (avmMini_rwb * (-avmMini_rwb + FF(1))); + tmp *= scaling_factor; + std::get<5>(evals) += tmp; + } + // Contribution 6 + { + DECLARE_VIEWS(6); + + auto tmp = (avmMini_rwc * (-avmMini_rwc + FF(1))); + tmp *= scaling_factor; + std::get<6>(evals) += tmp; + } + // Contribution 7 + { + DECLARE_VIEWS(7); + + auto tmp = (avmMini_subop * ((avmMini_ia + avmMini_ib) - avmMini_ic)); + tmp *= scaling_factor; + std::get<7>(evals) += tmp; + } + } +}; + +template using avm_mini = Relation>; + +} // namespace proof_system::AvmMini_vm \ No newline at end of file diff --git 
a/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/declare_views.hpp b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/declare_views.hpp new file mode 100644 index 00000000000..cad793a7b5f --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/declare_views.hpp @@ -0,0 +1,29 @@ + +#define DECLARE_VIEWS(index) \ + using Accumulator = typename std::tuple_element::type; \ + using View = typename Accumulator::View; \ + [[maybe_unused]] auto avmMini_clk = View(new_term.avmMini_clk); \ + [[maybe_unused]] auto avmMini_first = View(new_term.avmMini_first); \ + [[maybe_unused]] auto memTrace_m_clk = View(new_term.memTrace_m_clk); \ + [[maybe_unused]] auto memTrace_m_sub_clk = View(new_term.memTrace_m_sub_clk); \ + [[maybe_unused]] auto memTrace_m_addr = View(new_term.memTrace_m_addr); \ + [[maybe_unused]] auto memTrace_m_val = View(new_term.memTrace_m_val); \ + [[maybe_unused]] auto memTrace_m_lastAccess = View(new_term.memTrace_m_lastAccess); \ + [[maybe_unused]] auto memTrace_m_rw = View(new_term.memTrace_m_rw); \ + [[maybe_unused]] auto avmMini_subop = View(new_term.avmMini_subop); \ + [[maybe_unused]] auto avmMini_ia = View(new_term.avmMini_ia); \ + [[maybe_unused]] auto avmMini_ib = View(new_term.avmMini_ib); \ + [[maybe_unused]] auto avmMini_ic = View(new_term.avmMini_ic); \ + [[maybe_unused]] auto avmMini_mem_op_a = View(new_term.avmMini_mem_op_a); \ + [[maybe_unused]] auto avmMini_mem_op_b = View(new_term.avmMini_mem_op_b); \ + [[maybe_unused]] auto avmMini_mem_op_c = View(new_term.avmMini_mem_op_c); \ + [[maybe_unused]] auto avmMini_rwa = View(new_term.avmMini_rwa); \ + [[maybe_unused]] auto avmMini_rwb = View(new_term.avmMini_rwb); \ + [[maybe_unused]] auto avmMini_rwc = View(new_term.avmMini_rwc); \ + [[maybe_unused]] auto avmMini_mem_idx_a = View(new_term.avmMini_mem_idx_a); \ + [[maybe_unused]] auto avmMini_mem_idx_b = View(new_term.avmMini_mem_idx_b); \ + [[maybe_unused]] auto avmMini_mem_idx_c = 
View(new_term.avmMini_mem_idx_c); \ + [[maybe_unused]] auto avmMini_last = View(new_term.avmMini_last); \ + [[maybe_unused]] auto memTrace_m_addr_shift = View(new_term.memTrace_m_addr_shift); \ + [[maybe_unused]] auto memTrace_m_rw_shift = View(new_term.memTrace_m_rw_shift); \ + [[maybe_unused]] auto memTrace_m_val_shift = View(new_term.memTrace_m_val_shift); diff --git a/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/mem_trace.hpp b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/mem_trace.hpp new file mode 100644 index 00000000000..2a7960abd72 --- /dev/null +++ b/barretenberg/cpp/src/barretenberg/relations/generated/AvmMini/mem_trace.hpp @@ -0,0 +1,79 @@ + +#pragma once +#include "../../relation_parameters.hpp" +#include "../../relation_types.hpp" +#include "./declare_views.hpp" + +namespace proof_system::AvmMini_vm { + +template struct Mem_traceRow { + FF avmMini_last{}; + FF memTrace_m_addr{}; + FF memTrace_m_val{}; + FF avmMini_first{}; + FF memTrace_m_addr_shift{}; + FF memTrace_m_rw{}; + FF memTrace_m_rw_shift{}; + FF memTrace_m_val_shift{}; + FF memTrace_m_lastAccess{}; +}; + +template class mem_traceImpl { + public: + using FF = FF_; + + static constexpr std::array SUBRELATION_PARTIAL_LENGTHS{ + 3, + 3, + 4, + 6, + }; + + template + void static accumulate(ContainerOverSubrelations& evals, + const AllEntities& new_term, + [[maybe_unused]] const RelationParameters&, + [[maybe_unused]] const FF& scaling_factor) + { + + // Contribution 0 + { + DECLARE_VIEWS(0); + + auto tmp = (memTrace_m_lastAccess * (-memTrace_m_lastAccess + FF(1))); + tmp *= scaling_factor; + std::get<0>(evals) += tmp; + } + // Contribution 1 + { + DECLARE_VIEWS(1); + + auto tmp = (memTrace_m_rw * (-memTrace_m_rw + FF(1))); + tmp *= scaling_factor; + std::get<1>(evals) += tmp; + } + // Contribution 2 + { + DECLARE_VIEWS(2); + + auto tmp = (((-avmMini_first + FF(1)) * (-memTrace_m_lastAccess + FF(1))) * + (memTrace_m_addr_shift - memTrace_m_addr)); + tmp *= 
scaling_factor; + std::get<2>(evals) += tmp; + } + // Contribution 3 + { + DECLARE_VIEWS(3); + + auto tmp = (((((-avmMini_first + FF(1)) * (-avmMini_last + FF(1))) * (-memTrace_m_lastAccess + FF(1))) * + (-memTrace_m_rw_shift + FF(1))) * + (memTrace_m_val_shift - memTrace_m_val)); + tmp *= scaling_factor; + std::get<3>(evals) += tmp; + } + } +}; + +template using mem_trace = Relation>; + +} // namespace proof_system::AvmMini_vm \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/relations/translator_vm/goblin_translator_relation_consistency.test.cpp b/barretenberg/cpp/src/barretenberg/relations/translator_vm/goblin_translator_relation_consistency.test.cpp index 4752810a600..af65f0a8aa6 100644 --- a/barretenberg/cpp/src/barretenberg/relations/translator_vm/goblin_translator_relation_consistency.test.cpp +++ b/barretenberg/cpp/src/barretenberg/relations/translator_vm/goblin_translator_relation_consistency.test.cpp @@ -25,8 +25,8 @@ using InputElements = typename Flavor::AllValues; InputElements get_random_input() { InputElements result; - for (FF* element : result.pointer_view()) { - *element = FF::random_element(); + for (FF& element : result.get_all()) { + element = FF::random_element(); } return result; } @@ -35,9 +35,9 @@ InputElements get_special_input() // use non-random values { InputElements result; FF idx = 0; - for (FF* element : result.pointer_view()) { + for (FF& element : result.get_all()) { idx += FF(1); - *element = idx; + element = idx; } return result; } diff --git a/barretenberg/cpp/src/barretenberg/smt_verification/README.md b/barretenberg/cpp/src/barretenberg/smt_verification/README.md index fdb6d1c2072..8dfe2fc1548 100644 --- a/barretenberg/cpp/src/barretenberg/smt_verification/README.md +++ b/barretenberg/cpp/src/barretenberg/smt_verification/README.md @@ -71,7 +71,7 @@ To store it on the disk just do `FFTerm` - the symbolic value that simulates finite field elements. 
- `FFTerm` - the symbolic value that simulates integer elements which behave like finite field ones. Usefull, when you want to create range constraints or perform operations like XOR. + `FFTerm` - the symbolic value that simulates integer elements which behave like finite field ones. Useful, when you want to create range constraints or perform operations like XOR. `Bool` - simulates the boolean values and mostly will be used only to simulate complex `if` statements if needed. @@ -250,4 +250,4 @@ void model_variables(Circuit& c, Solver* s, FFTerm& evaluatio } ``` -More examples can be found in *.test.cpp files \ No newline at end of file +More examples can be found in *.test.cpp files diff --git a/barretenberg/cpp/src/barretenberg/stdlib/primitives/uint/logic.cpp b/barretenberg/cpp/src/barretenberg/stdlib/primitives/uint/logic.cpp index 72cc1dee046..0effaca5ed4 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/primitives/uint/logic.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/primitives/uint/logic.cpp @@ -110,7 +110,7 @@ uint uint::operator>>(const size_t shift) cons * * We have a special selector configuration in our arithmetic widget that extracts 6.b_x from given the two * relevant accumulators. The factor of 6 is for efficiency reasons. We need to scale our other gate - * coefficients by 6 to accomodate this. + * coefficients by 6 to accommodate this. 
**/ if ((shift & 1) == 0) { diff --git a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.hpp b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.hpp index 49fcdd6bf92..e9943fa2a13 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.hpp @@ -18,7 +18,7 @@ template class Transcript { public: using field_ct = field_t; using FF = barretenberg::fr; - using BaseTranscript = proof_system::honk::BaseTranscript; + using BaseTranscript = proof_system::honk::BaseTranscript; using StdlibTypes = utility::StdlibTypesUtility; static constexpr size_t HASH_OUTPUT_SIZE = BaseTranscript::HASH_OUTPUT_SIZE; @@ -49,7 +49,7 @@ template class Transcript { { // Compute the indicated challenges from the native transcript constexpr size_t num_challenges = sizeof...(Strings); - std::array native_challenges{}; + std::array native_challenges{}; native_challenges = native_transcript.get_challenges(labels...); /* @@ -60,7 +60,7 @@ template class Transcript { */ std::array challenges; for (size_t i = 0; i < num_challenges; ++i) { - challenges[i] = field_ct::from_witness(builder, native_challenges[i]); + challenges[i] = field_ct::from_witness(builder, static_cast(native_challenges[i])); } return challenges; diff --git a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp index bed3ccee7cb..2bea11a75da 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/transcript/transcript.test.cpp @@ -14,7 +14,7 @@ using Builder = UltraCircuitBuilder; using UltraFlavor = ::proof_system::honk::flavor::Ultra; using UltraRecursiveFlavor = ::proof_system::honk::flavor::UltraRecursive_; using FF = 
barretenberg::fr; -using BaseTranscript = ::proof_system::honk::BaseTranscript; +using BaseTranscript = ::proof_system::honk::BaseTranscript; /** * @brief Create some mock data; add it to the provided prover transcript in various mock rounds @@ -172,7 +172,8 @@ TEST(RecursiveHonkTranscript, ReturnValuesMatch) for (size_t i = 0; i < LENGTH; ++i) { EXPECT_EQ(native_evaluations[i], stdlib_evaluations[i].get_value()); } - EXPECT_EQ(native_alpha, stdlib_alpha.get_value()); - EXPECT_EQ(native_beta, stdlib_beta.get_value()); + + EXPECT_EQ(static_cast(native_alpha), stdlib_alpha.get_value()); + EXPECT_EQ(static_cast(native_beta), stdlib_beta.get_value()); } } // namespace proof_system::plonk::stdlib::recursion::honk \ No newline at end of file diff --git a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/goblin_verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/goblin_verifier.test.cpp index 4dbdf3bc178..d1957a6d4ff 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/goblin_verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/goblin_verifier.test.cpp @@ -135,21 +135,20 @@ template class GoblinRecursiveVerifierTest : public testi InnerComposer inner_composer; auto instance = inner_composer.create_instance(inner_circuit); auto prover = inner_composer.create_prover(instance); // A prerequisite for computing VK - const auto native_verification_key = instance->compute_verification_key(); // Instantiate the recursive verification key from the native verification key OuterBuilder outer_circuit; - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); // Spot check some values in the recursive VK to ensure it was constructed correctly - EXPECT_EQ(verification_key->circuit_size, native_verification_key->circuit_size); - 
EXPECT_EQ(verification_key->log_circuit_size, native_verification_key->log_circuit_size); - EXPECT_EQ(verification_key->num_public_inputs, native_verification_key->num_public_inputs); - EXPECT_EQ(verification_key->q_m.get_value(), native_verification_key->q_m); - EXPECT_EQ(verification_key->q_r.get_value(), native_verification_key->q_r); - EXPECT_EQ(verification_key->sigma_1.get_value(), native_verification_key->sigma_1); - EXPECT_EQ(verification_key->id_3.get_value(), native_verification_key->id_3); - EXPECT_EQ(verification_key->lagrange_ecc_op.get_value(), native_verification_key->lagrange_ecc_op); + EXPECT_EQ(verification_key->circuit_size, instance->verification_key->circuit_size); + EXPECT_EQ(verification_key->log_circuit_size, instance->verification_key->log_circuit_size); + EXPECT_EQ(verification_key->num_public_inputs, instance->verification_key->num_public_inputs); + EXPECT_EQ(verification_key->q_m.get_value(), instance->verification_key->q_m); + EXPECT_EQ(verification_key->q_r.get_value(), instance->verification_key->q_r); + EXPECT_EQ(verification_key->sigma_1.get_value(), instance->verification_key->sigma_1); + EXPECT_EQ(verification_key->id_3.get_value(), instance->verification_key->id_3); + EXPECT_EQ(verification_key->lagrange_ecc_op.get_value(), instance->verification_key->lagrange_ecc_op); } /** @@ -166,11 +165,10 @@ template class GoblinRecursiveVerifierTest : public testi auto instance = inner_composer.create_instance(inner_circuit); auto inner_prover = inner_composer.create_prover(instance); auto inner_proof = inner_prover.construct_proof(); - const auto native_verification_key = instance->compute_verification_key(); // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); RecursiveVerifier verifier(&outer_circuit, 
verification_key); auto pairing_points = verifier.verify_proof(inner_proof); @@ -213,7 +211,6 @@ template class GoblinRecursiveVerifierTest : public testi auto instance = inner_composer.create_instance(inner_circuit); auto inner_prover = inner_composer.create_prover(instance); auto inner_proof = inner_prover.construct_proof(); - const auto native_verification_key = instance->compute_verification_key(); // Arbitrarily tamper with the proof to be verified inner_prover.transcript.deserialize_full_transcript(); @@ -223,7 +220,7 @@ template class GoblinRecursiveVerifierTest : public testi // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); RecursiveVerifier verifier(&outer_circuit, verification_key); verifier.verify_proof(inner_proof); diff --git a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/verifier.test.cpp b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/verifier.test.cpp index 29c5b9bd639..0852c45cc70 100644 --- a/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/verifier.test.cpp +++ b/barretenberg/cpp/src/barretenberg/stdlib/recursion/honk/verifier/verifier.test.cpp @@ -120,19 +120,18 @@ template class RecursiveVerifierTest : public testing::Te InnerComposer inner_composer; auto instance = inner_composer.create_instance(inner_circuit); auto prover = inner_composer.create_prover(instance); // A prerequisite for computing VK - const auto native_verification_key = instance->compute_verification_key(); // Instantiate the recursive verification key from the native verification key - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); // Spot check some values in the recursive 
VK to ensure it was constructed correctly - EXPECT_EQ(verification_key->circuit_size, native_verification_key->circuit_size); - EXPECT_EQ(verification_key->log_circuit_size, native_verification_key->log_circuit_size); - EXPECT_EQ(verification_key->num_public_inputs, native_verification_key->num_public_inputs); - EXPECT_EQ(verification_key->q_m.get_value(), native_verification_key->q_m); - EXPECT_EQ(verification_key->q_r.get_value(), native_verification_key->q_r); - EXPECT_EQ(verification_key->sigma_1.get_value(), native_verification_key->sigma_1); - EXPECT_EQ(verification_key->id_3.get_value(), native_verification_key->id_3); + EXPECT_EQ(verification_key->circuit_size, instance->verification_key->circuit_size); + EXPECT_EQ(verification_key->log_circuit_size, instance->verification_key->log_circuit_size); + EXPECT_EQ(verification_key->num_public_inputs, instance->verification_key->num_public_inputs); + EXPECT_EQ(verification_key->q_m.get_value(), instance->verification_key->q_m); + EXPECT_EQ(verification_key->q_r.get_value(), instance->verification_key->q_r); + EXPECT_EQ(verification_key->sigma_1.get_value(), instance->verification_key->sigma_1); + EXPECT_EQ(verification_key->id_3.get_value(), instance->verification_key->id_3); } /** @@ -150,11 +149,10 @@ template class RecursiveVerifierTest : public testing::Te auto instance = inner_composer.create_instance(inner_circuit); auto inner_prover = inner_composer.create_prover(instance); auto inner_proof = inner_prover.construct_proof(); - const auto native_verification_key = instance->compute_verification_key(); // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); RecursiveVerifier verifier(&outer_circuit, verification_key); auto pairing_points = verifier.verify_proof(inner_proof); @@ -198,7 +196,6 
@@ template class RecursiveVerifierTest : public testing::Te auto instance = inner_composer.create_instance(inner_circuit); auto inner_prover = inner_composer.create_prover(instance); auto inner_proof = inner_prover.construct_proof(); - const auto native_verification_key = instance->compute_verification_key(); // Arbitrarily tamper with the proof to be verified inner_prover.transcript.deserialize_full_transcript(); @@ -208,7 +205,7 @@ template class RecursiveVerifierTest : public testing::Te // Create a recursive verification circuit for the proof of the inner circuit OuterBuilder outer_circuit; - auto verification_key = std::make_shared(&outer_circuit, native_verification_key); + auto verification_key = std::make_shared(&outer_circuit, instance->verification_key); RecursiveVerifier verifier(&outer_circuit, verification_key); verifier.verify_proof(inner_proof); diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp index 34f2842eede..41924b71875 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.cpp @@ -66,11 +66,10 @@ template void ProverInstance_::compute_witness(Circuit& c construct_databus_polynomials(circuit); } - // Construct the sorted concatenated list polynomials for the lookup argument - polynomial s_1(dyadic_circuit_size); - polynomial s_2(dyadic_circuit_size); - polynomial s_3(dyadic_circuit_size); - polynomial s_4(dyadic_circuit_size); + // Initialise the sorted concatenated list polynomials for the lookup argument + for (auto& s_i : sorted_polynomials) { + s_i = Polynomial(dyadic_circuit_size); + } // The sorted list polynomials have (tables_size + lookups_size) populated entries. We define the index below so // that these entries are written into the last indices of the polynomials. 
The values on the first @@ -116,21 +115,14 @@ template void ProverInstance_::compute_witness(Circuit& c for (const auto& entry : lookup_gates) { const auto components = entry.to_sorted_list_components(table.use_twin_keys); - s_1[s_index] = components[0]; - s_2[s_index] = components[1]; - s_3[s_index] = components[2]; - s_4[s_index] = table_index; + sorted_polynomials[0][s_index] = components[0]; + sorted_polynomials[1][s_index] = components[1]; + sorted_polynomials[2][s_index] = components[2]; + sorted_polynomials[3][s_index] = table_index; ++s_index; } } - // Polynomial memory is zeroed out when constructed with size hint, so we don't have to initialize trailing - // space - proving_key->sorted_1 = s_1; - proving_key->sorted_2 = s_2; - proving_key->sorted_3 = s_3; - proving_key->sorted_4 = s_4; - // Copy memory read/write record data into proving key. Prover needs to know which gates contain a read/write // 'record' witness on the 4th wire. This wire value can only be fully computed once the first 3 wire // polynomials have been committed to. The 4th wire on these gates will be a random linear combination of the @@ -392,8 +384,6 @@ template void ProverInstance_::compute_sorted_list_accumu auto sorted_list_accumulator = Polynomial{ circuit_size }; - auto sorted_polynomials = proving_key->get_sorted_polynomials(); - // Construct s via Horner, i.e. s = s_1 + η(s_2 + η(s_3 + η*s_4)) for (size_t i = 0; i < circuit_size; ++i) { FF T0 = sorted_polynomials[3][i]; @@ -481,60 +471,6 @@ template void ProverInstance_::compute_grand_product_poly grand_product_library::compute_grand_products(proving_key, prover_polynomials, relation_parameters); } -/** - * Compute verification key consisting of selector precommitments. - * - * @return Pointer to the resulting verification key of the Instance. 
- * */ -template -std::shared_ptr ProverInstance_::compute_verification_key() -{ - if (verification_key) { - return verification_key; - } - - verification_key = - std::make_shared(proving_key->circuit_size, proving_key->num_public_inputs); - - // Compute and store commitments to all precomputed polynomials - verification_key->q_m = commitment_key->commit(proving_key->q_m); - verification_key->q_l = commitment_key->commit(proving_key->q_l); - verification_key->q_r = commitment_key->commit(proving_key->q_r); - verification_key->q_o = commitment_key->commit(proving_key->q_o); - verification_key->q_c = commitment_key->commit(proving_key->q_c); - verification_key->sigma_1 = commitment_key->commit(proving_key->sigma_1); - verification_key->sigma_2 = commitment_key->commit(proving_key->sigma_2); - verification_key->sigma_3 = commitment_key->commit(proving_key->sigma_3); - verification_key->id_1 = commitment_key->commit(proving_key->id_1); - verification_key->id_2 = commitment_key->commit(proving_key->id_2); - verification_key->id_3 = commitment_key->commit(proving_key->id_3); - verification_key->lagrange_first = commitment_key->commit(proving_key->lagrange_first); - verification_key->lagrange_last = commitment_key->commit(proving_key->lagrange_last); - - verification_key->q_4 = commitment_key->commit(proving_key->q_4); - verification_key->q_arith = commitment_key->commit(proving_key->q_arith); - verification_key->q_sort = commitment_key->commit(proving_key->q_sort); - verification_key->q_elliptic = commitment_key->commit(proving_key->q_elliptic); - verification_key->q_aux = commitment_key->commit(proving_key->q_aux); - verification_key->q_lookup = commitment_key->commit(proving_key->q_lookup); - verification_key->sigma_4 = commitment_key->commit(proving_key->sigma_4); - verification_key->id_4 = commitment_key->commit(proving_key->id_4); - verification_key->table_1 = commitment_key->commit(proving_key->table_1); - verification_key->table_2 = 
commitment_key->commit(proving_key->table_2); - verification_key->table_3 = commitment_key->commit(proving_key->table_3); - verification_key->table_4 = commitment_key->commit(proving_key->table_4); - - // TODO(luke): Similar to the lagrange_first/last polynomials, we dont really need to commit to these polynomials - // due to their simple structure. - if constexpr (IsGoblinFlavor) { - verification_key->lagrange_ecc_op = commitment_key->commit(proving_key->lagrange_ecc_op); - verification_key->q_busread = commitment_key->commit(proving_key->q_busread); - verification_key->databus_id = commitment_key->commit(proving_key->databus_id); - } - - return verification_key; -} - template class ProverInstance_; template class ProverInstance_; diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp index c72e940a206..5f12dc0c1dd 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.hpp @@ -27,13 +27,16 @@ template class ProverInstance_ { using FoldingParameters = typename Flavor::FoldingParameters; using ProverPolynomials = typename Flavor::ProverPolynomials; using Polynomial = typename Flavor::Polynomial; + using WitnessCommitments = typename Flavor::WitnessCommitments; public: std::shared_ptr proving_key; std::shared_ptr verification_key; - std::shared_ptr commitment_key; ProverPolynomials prover_polynomials; + WitnessCommitments witness_commitments; + + std::array sorted_polynomials; // The number of public inputs has to be the same for all instances because they are // folded element by element. 
@@ -64,8 +67,6 @@ template class ProverInstance_ { ProverInstance_() = default; ~ProverInstance_() = default; - std::shared_ptr compute_verification_key(); - void initialize_prover_polynomials(); void compute_sorted_accumulator_polynomials(FF); diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp index a4c2884904e..6fb7eeab3ed 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/instance/prover_instance.test.cpp @@ -60,14 +60,7 @@ template class InstanceTests : public testing::Test { // Get random challenge eta auto eta = FF::random_element(); - // Construct mock sorted list polynomials. - std::vector sorted_lists; - auto sorted_list_polynomials = instance.proving_key->get_sorted_polynomials(); - for (auto& sorted_list_poly : sorted_list_polynomials) { - Polynomial random_polynomial = get_random_polynomial(instance.proving_key->circuit_size); - sorted_lists.emplace_back(random_polynomial); - populate_span(sorted_list_poly, random_polynomial); - } + auto sorted_list_polynomials = instance.sorted_polynomials; // Method 1: computed sorted list accumulator polynomial using prover library method instance.compute_sorted_list_accumulator(eta); @@ -78,10 +71,11 @@ template class InstanceTests : public testing::Test { const FF eta_cube = eta_sqr * eta; // Compute s = s_1 + η*s_2 + η²*s_3 + η³*s_4 - Polynomial sorted_list_accumulator_expected{ sorted_lists[0] }; + Polynomial sorted_list_accumulator_expected{ sorted_list_polynomials[0] }; for (size_t i = 0; i < instance.proving_key->circuit_size; ++i) { - sorted_list_accumulator_expected[i] += - sorted_lists[1][i] * eta + sorted_lists[2][i] * eta_sqr + sorted_lists[3][i] * eta_cube; + sorted_list_accumulator_expected[i] += sorted_list_polynomials[1][i] * eta + + sorted_list_polynomials[2][i] * eta_sqr + + 
sorted_list_polynomials[3][i] * eta_cube; } EXPECT_EQ(sorted_list_accumulator, sorted_list_accumulator_expected); diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp index eb98b7898a7..ed0b2f52fd1 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.hpp @@ -73,7 +73,7 @@ template class SumcheckProver { const proof_system::RelationParameters& relation_parameters, FF alpha) // pass by value, not by reference { - auto zeta = transcript.get_challenge("Sumcheck:zeta"); + FF zeta = transcript.get_challenge("Sumcheck:zeta"); barretenberg::PowUnivariate pow_univariate(zeta); @@ -145,7 +145,7 @@ template class SumcheckProver { auto pep_view = partially_evaluated_polynomials.pointer_view(); auto poly_view = polynomials.pointer_view(); // after the first round, operate in place on partially_evaluated_polynomials - parallel_for(polynomials.size(), [&](size_t j) { + parallel_for(poly_view.size(), [&](size_t j) { for (size_t i = 0; i < round_size; i += 2) { (*pep_view[j])[i >> 1] = (*poly_view[j])[i] + round_challenge * ((*poly_view[j])[i + 1] - (*poly_view[j])[i]); @@ -203,7 +203,7 @@ template class SumcheckVerifier { { bool verified(true); - auto zeta = transcript.get_challenge("Sumcheck:zeta"); + FF zeta = transcript.get_challenge("Sumcheck:zeta"); barretenberg::PowUnivariate pow_univariate(zeta); // All but final round. 
diff --git a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.test.cpp b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.test.cpp index 33864bb8442..04e7f864fff 100644 --- a/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.test.cpp +++ b/barretenberg/cpp/src/barretenberg/sumcheck/sumcheck.test.cpp @@ -107,7 +107,7 @@ TEST_F(SumcheckTests, PolynomialNormalization) Flavor::Transcript transcript = Flavor::Transcript::prover_init_empty(); auto sumcheck = SumcheckProver(multivariate_n, transcript); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); auto output = sumcheck.prove(full_polynomials, {}, alpha); FF u_0 = output.challenge[0]; @@ -176,7 +176,7 @@ TEST_F(SumcheckTests, Prover) auto sumcheck = SumcheckProver(multivariate_n, transcript); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); auto output = sumcheck.prove(full_polynomials, {}, alpha); FF u_0 = output.challenge[0]; FF u_1 = output.challenge[1]; @@ -251,13 +251,13 @@ TEST_F(SumcheckTests, ProverAndVerifierSimple) Flavor::Transcript prover_transcript = Flavor::Transcript::prover_init_empty(); auto sumcheck_prover = SumcheckProver(multivariate_n, prover_transcript); - auto prover_alpha = prover_transcript.get_challenge("alpha"); + FF prover_alpha = prover_transcript.get_challenge("alpha"); auto output = sumcheck_prover.prove(full_polynomials, {}, prover_alpha); Flavor::Transcript verifier_transcript = Flavor::Transcript::verifier_init_empty(prover_transcript); auto sumcheck_verifier = SumcheckVerifier(multivariate_n); - auto verifier_alpha = verifier_transcript.get_challenge("alpha"); + FF verifier_alpha = verifier_transcript.get_challenge("alpha"); auto verifier_output = sumcheck_verifier.verify(relation_parameters, verifier_alpha, verifier_transcript); auto verified = verifier_output.verified.value(); diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp 
b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp index afdd70f3808..cd7175d4a3d 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.hpp @@ -50,10 +50,8 @@ class TranscriptManifest { /** * @brief Common transcript class for both parties. Stores the data for the current round, as well as the * manifest. - * - * @tparam FF Field from which we sample challenges. */ -template class BaseTranscript { +class BaseTranscript { public: BaseTranscript() = default; @@ -67,11 +65,13 @@ template class BaseTranscript { {} static constexpr size_t HASH_OUTPUT_SIZE = 32; + std::ptrdiff_t proof_start = 0; + size_t num_bytes_written = 0; // the number of bytes written to proof_data by the prover or the verifier + size_t num_bytes_read = 0; // the number of bytes read from proof_data by the verifier + size_t round_number = 0; // current round for manifest + private: static constexpr size_t MIN_BYTES_PER_CHALLENGE = 128 / 8; // 128 bit challenges - - size_t num_bytes_read = 0; // keeps track of number of bytes read from proof_data by the verifier - size_t round_number = 0; // current round for manifest bool is_first_challenge = true; // indicates if this is the first challenge this transcript is generating std::array previous_challenge_buffer{}; // default-initialized to zeros std::vector current_round_data; @@ -141,6 +141,8 @@ template class BaseTranscript { manifest.add_entry(round_number, label, element_bytes.size()); current_round_data.insert(current_round_data.end(), element_bytes.begin(), element_bytes.end()); + + num_bytes_written += element_bytes.size(); } /** @@ -181,6 +183,25 @@ template class BaseTranscript { public: // Contains the raw data sent by the prover. std::vector proof_data; + + /** + * @brief Return the proof data starting at proof_start + * @details This is useful for when two different provers share a transcript. 
+ */ + std::vector export_proof() + { + std::vector result(num_bytes_written); + std::copy_n(proof_data.begin() + proof_start, num_bytes_written, result.begin()); + proof_start += static_cast(num_bytes_written); + num_bytes_written = 0; + return result; + }; + + void load_proof(const std::vector& proof) + { + std::copy(proof.begin(), proof.end(), std::back_inserter(proof_data)); + } + /** * @brief After all the prover messages have been sent, finalize the round by hashing all the data and then create * the number of requested challenges. @@ -190,9 +211,9 @@ template class BaseTranscript { * multiple challenges. * * @param labels human-readable names for the challenges for the manifest - * @return std::array challenges for this round. + * @return std::array challenges for this round. */ - template std::array get_challenges(const Strings&... labels) + template std::array get_challenges(const Strings&... labels) { constexpr size_t num_challenges = sizeof...(Strings); @@ -202,19 +223,19 @@ template class BaseTranscript { // Compute the new challenge buffer from which we derive the challenges. // Create challenges from bytes. - std::array challenges{}; + std::array challenges{}; // Generate the challenges by iteratively hashing over the previous challenge. for (size_t i = 0; i < num_challenges; i++) { auto next_challenge_buffer = get_next_challenge_buffer(); // get next challenge buffer - std::array field_element_buffer{}; + std::array field_element_buffer{}; // copy half of the hash to lower 128 bits of challenge // Note: because of how read() from buffers to fields works (in field_declarations.hpp), // we use the later half of the buffer std::copy_n(next_challenge_buffer.begin(), HASH_OUTPUT_SIZE / 2, field_element_buffer.begin() + HASH_OUTPUT_SIZE / 2); - challenges[i] = from_buffer(field_element_buffer); + challenges[i] = from_buffer(field_element_buffer); } // Prepare for next round. 
@@ -245,7 +266,7 @@ template class BaseTranscript { auto element_bytes = to_buffer(element); proof_data.insert(proof_data.end(), element_bytes.begin(), element_bytes.end()); - BaseTranscript::consume_prover_element_bytes(label, element_bytes); + BaseTranscript::consume_prover_element_bytes(label, element_bytes); } /** @@ -262,7 +283,7 @@ template class BaseTranscript { auto element_bytes = std::span{ proof_data }.subspan(num_bytes_read, element_size); num_bytes_read += element_size; - BaseTranscript::consume_prover_element_bytes(label, element_bytes); + BaseTranscript::consume_prover_element_bytes(label, element_bytes); T element = from_buffer(element_bytes); @@ -275,9 +296,9 @@ template class BaseTranscript { * * @return BaseTranscript */ - static BaseTranscript prover_init_empty() + static BaseTranscript prover_init_empty() { - BaseTranscript transcript; + BaseTranscript transcript; constexpr uint32_t init{ 42 }; // arbitrary transcript.send_to_verifier("Init", init); return transcript; @@ -290,14 +311,14 @@ template class BaseTranscript { * @param transcript * @return BaseTranscript */ - static BaseTranscript verifier_init_empty(const BaseTranscript& transcript) + static BaseTranscript verifier_init_empty(const BaseTranscript& transcript) { - BaseTranscript verifier_transcript{ transcript.proof_data }; + BaseTranscript verifier_transcript{ transcript.proof_data }; [[maybe_unused]] auto _ = verifier_transcript.template receive_from_prover("Init"); return verifier_transcript; }; - FF get_challenge(const std::string& label) { return get_challenges(label)[0]; } + uint256_t get_challenge(const std::string& label) { return get_challenges(label)[0]; } [[nodiscard]] TranscriptManifest get_manifest() const { return manifest; }; @@ -317,4 +338,16 @@ template class BaseTranscript { */ virtual void serialize_full_transcript() { throw_or_abort("Cannot serialize transcript"); } }; + +/** + * @brief Convert an array of uint256_t's to an array of field elements + * @details The 
syntax `std::array [a, b] = transcript.get_challenges("a", "b")` is unfortunately not allowed + * (structured bindings must be defined with auto return type), so we need a workaround. + */ +template std::array challenges_to_field_elements(std::array&& arr) +{ + std::array result; + std::move(arr.begin(), arr.end(), result.begin()); + return result; +} } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/transcript/transcript.test.cpp b/barretenberg/cpp/src/barretenberg/transcript/transcript.test.cpp index 437b31b90c5..2f140e4b69a 100644 --- a/barretenberg/cpp/src/barretenberg/transcript/transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/transcript/transcript.test.cpp @@ -1,18 +1,51 @@ #include "barretenberg/transcript/transcript.hpp" -#include "barretenberg/ecc/curves/bn254/fr.hpp" #include namespace barretenberg::honk_transcript_tests { using FF = barretenberg::fr; -using Transcript = proof_system::honk::BaseTranscript; +using Fr = barretenberg::fr; +using Fq = barretenberg::fq; +using Transcript = proof_system::honk::BaseTranscript; -TEST(BaseTranscript, Basic) +/** + * @brief Test sending, receiving, and exporting proofs + * + */ +TEST(BaseTranscript, TwoProversTwoFields) { - Transcript transcript; - FF elt = 561; - transcript.send_to_verifier("something", elt); - auto received = transcript.template receive_from_prover("something"); - EXPECT_EQ(received, elt); + const auto EXPECT_STATE = [](const Transcript& transcript, size_t start, size_t written, size_t read) { + EXPECT_EQ(transcript.proof_start, static_cast(start)); + EXPECT_EQ(transcript.num_bytes_written, written); + EXPECT_EQ(transcript.num_bytes_read, read); + }; + + Transcript prover_transcript; + // state initializes to zero + EXPECT_STATE(prover_transcript, /*start*/ 0, /*written*/ 0, /*read*/ 0); + Fr elt_a = 1377; + prover_transcript.send_to_verifier("a", elt_a); + EXPECT_STATE(prover_transcript, /*start*/ 0, /*written*/ 32, /*read*/ 0); + Transcript 
verifier_transcript{ prover_transcript.export_proof() }; + // export resets read/write state and sets start in prep for next export + EXPECT_STATE(prover_transcript, /*start*/ 32, /*written*/ 0, /*read*/ 0); + // state initializes to zero + EXPECT_STATE(verifier_transcript, /*start*/ 0, /*written*/ 0, /*read*/ 0); + Fr received_a = verifier_transcript.receive_from_prover("a"); + // receiving is reading bytes input and writing them to an internal proof_data buffer + EXPECT_STATE(verifier_transcript, /*start*/ 0, /*written*/ 32, /*read*/ 32); + EXPECT_EQ(received_a, elt_a); + + Fq elt_b = 773; + prover_transcript.send_to_verifier("b", elt_b); + EXPECT_STATE(prover_transcript, /*start*/ 32, /*written*/ 32, /*read*/ 0); + verifier_transcript.load_proof(prover_transcript.export_proof()); + EXPECT_STATE(prover_transcript, /*start*/ 64, /*written*/ 0, /*read*/ 0); + // load proof is not an action by a prover or verifier, so it does not change read/write counts + EXPECT_STATE(verifier_transcript, /*start*/ 0, /*written*/ 32, /*read*/ 32); + Fq received_b = verifier_transcript.receive_from_prover("b"); + EXPECT_STATE(verifier_transcript, 0, 64, 64); + EXPECT_EQ(received_b, elt_b); } + } // namespace barretenberg::honk_transcript_tests diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp index 5ad81ecb1cb..98e29dc0113 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.cpp @@ -276,7 +276,7 @@ void GoblinTranslatorProver::execute_wire_and_sorted_constraints_commitments_rou void GoblinTranslatorProver::execute_grand_product_computation_round() { // Compute and store parameters required by relations in Sumcheck - auto [gamma] = transcript.get_challenges("gamma"); + FF gamma = transcript.get_challenge("gamma"); const size_t NUM_LIMB_BITS =
Flavor::NUM_LIMB_BITS; relation_parameters.beta = 0; relation_parameters.gamma = gamma; @@ -329,7 +329,7 @@ void GoblinTranslatorProver::execute_relation_check_rounds() auto sumcheck = Sumcheck(key->circuit_size, transcript); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); sumcheck_output = sumcheck.prove(prover_polynomials, relation_parameters, alpha); } diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp index 47772d1ca7b..8ac45653953 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_prover.hpp @@ -35,7 +35,7 @@ class GoblinTranslatorProver { plonk::proof& export_proof(); plonk::proof& construct_proof(); - BaseTranscript transcript; + BaseTranscript transcript; proof_system::RelationParameters relation_parameters; diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.cpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.cpp index d280143e2f2..5c95bd8b75e 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.cpp @@ -20,7 +20,7 @@ GoblinTranslatorVerifier::GoblinTranslatorVerifier(GoblinTranslatorVerifier&& ot GoblinTranslatorVerifier& GoblinTranslatorVerifier::operator=(GoblinTranslatorVerifier&& other) noexcept { - key = other.key; + key = std::move(other.key); pcs_verification_key = (std::move(other.pcs_verification_key)); commitments.clear(); pcs_fr_elements.clear(); @@ -65,15 +65,10 @@ void GoblinTranslatorVerifier::put_translation_data_in_relation_parameters(const */ bool GoblinTranslatorVerifier::verify_proof(const plonk::proof& proof) { - using Curve = typename Flavor::Curve; - using ZeroMorph = 
pcs::zeromorph::ZeroMorphVerifier_; - using VerifierCommitments = typename Flavor::VerifierCommitments; - using CommitmentLabels = typename Flavor::CommitmentLabels; + transcript = BaseTranscript{ proof.proof_data }; - transcript = BaseTranscript{ proof.proof_data }; - - auto commitments = VerifierCommitments(key, transcript); - auto commitment_labels = CommitmentLabels(); + Flavor::VerifierCommitments commitments{ key, transcript }; + Flavor::CommitmentLabels commitment_labels; // TODO(Adrian): Change the initialization of the transcript to take the VK hash? const auto circuit_size = transcript.template receive_from_prover("circuit_size"); @@ -235,7 +230,7 @@ bool GoblinTranslatorVerifier::verify_proof(const plonk::proof& proof) commitments.ordered_range_constraints_4 = receive_commitment(commitment_labels.ordered_range_constraints_4); // Get permutation challenges - auto [gamma] = transcript.get_challenges("gamma"); + FF gamma = transcript.get_challenge("gamma"); relation_parameters.beta = 0; relation_parameters.gamma = gamma; @@ -248,7 +243,7 @@ bool GoblinTranslatorVerifier::verify_proof(const plonk::proof& proof) // Execute Sumcheck Verifier auto sumcheck = SumcheckVerifier(circuit_size); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); auto [multivariate_challenge, claimed_evaluations, sumcheck_verified] = sumcheck.verify(relation_parameters, alpha, transcript); @@ -260,14 +255,15 @@ bool GoblinTranslatorVerifier::verify_proof(const plonk::proof& proof) // Execute ZeroMorph rounds. See https://hackmd.io/dlf9xEwhTQyE3hiGbq4FsA?view for a complete description ofthe // unrolled protocol. 
- auto pairing_points = ZeroMorph::verify(commitments.get_unshifted(), - commitments.get_to_be_shifted(), - claimed_evaluations.get_unshifted(), - claimed_evaluations.get_shifted(), - multivariate_challenge, - transcript, - commitments.get_concatenation_groups(), - claimed_evaluations.get_concatenated_constraints()); + auto pairing_points = + pcs::zeromorph::ZeroMorphVerifier_::verify(commitments.get_unshifted(), + commitments.get_to_be_shifted(), + claimed_evaluations.get_unshifted(), + claimed_evaluations.get_shifted(), + multivariate_challenge, + transcript, + commitments.get_concatenation_groups(), + claimed_evaluations.get_concatenated_constraints()); auto verified = pcs_verification_key->pairing_check(pairing_points[0], pairing_points[1]); diff --git a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.hpp b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.hpp index ff49cd546ce..0b16855a0b5 100644 --- a/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/translator_vm/goblin_translator_verifier.hpp @@ -20,7 +20,7 @@ class GoblinTranslatorVerifier { std::map commitments; std::map pcs_fr_elements; std::shared_ptr pcs_verification_key; - BaseTranscript transcript; + BaseTranscript transcript; RelationParameters relation_parameters; explicit GoblinTranslatorVerifier(std::shared_ptr verifier_key = nullptr); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_transcript.test.cpp index 1b9ae31459d..355bf5c6c65 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/goblin_ultra_transcript.test.cpp @@ -200,10 +200,10 @@ TEST_F(GoblinUltraTranscriptTests, ChallengeGenerationTest) constexpr uint32_t random_val{ 17 }; // arbitrary transcript.send_to_verifier("random val", 
random_val); // test more challenges - auto [a, b, c] = transcript.get_challenges("a", "b", "c"); + auto [a, b, c] = challenges_to_field_elements(transcript.get_challenges("a", "b", "c")); ASSERT_NE(a, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; + ASSERT_NE(b, 0) << "Challenge b is 0"; + ASSERT_NE(c, 0) << "Challenge c is 0"; } TEST_F(GoblinUltraTranscriptTests, StructureTest) diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp index ba5ed3909a7..24b614f4ea8 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.cpp @@ -64,7 +64,7 @@ template plonk::proof& MergeProver_::construct_proof() // Compute evaluations T_i(\kappa), T_{i-1}(\kappa), t_i^{shift}(\kappa), add to transcript. For each polynomial // we add a univariate opening claim {p(X), (\kappa, p(\kappa))} to the set of claims to be checked via batched KZG. - auto kappa = transcript.get_challenge("kappa"); + FF kappa = transcript.get_challenge("kappa"); // Add univariate opening claims for each polynomial. 
std::vector opening_claims; @@ -89,7 +89,7 @@ template plonk::proof& MergeProver_::construct_proof() opening_claims.emplace_back(OpeningClaim{ polynomial, { kappa, evaluation } }); } - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); // Constuct batched polynomial to opened via KZG auto batched_polynomial = Polynomial(N); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp index a6d31866b41..426caafc521 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_prover.hpp @@ -25,7 +25,7 @@ template class MergeProver_ { using OpeningPair = typename pcs::OpeningPair; public: - BaseTranscript transcript; + BaseTranscript transcript; std::shared_ptr op_queue; std::shared_ptr pcs_commitment_key; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.cpp index bfac4182d6d..3e76c0f3214 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.cpp @@ -19,7 +19,7 @@ MergeVerifier_::MergeVerifier_(std::unique_ptr ve */ template bool MergeVerifier_::verify_proof(const plonk::proof& proof) { - transcript = BaseTranscript{ proof.proof_data }; + transcript = BaseTranscript{ proof.proof_data }; // Receive commitments [t_i^{shift}], [T_{i-1}], and [T_i] std::array C_T_prev; @@ -57,7 +57,7 @@ template bool MergeVerifier_::verify_proof(const plonk identity_checked = identity_checked && (T_current_evals[idx] == T_prev_evals[idx] + t_shift_evals[idx]); } - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); // Constuct batched commitment and evaluation from constituents auto batched_commitment = opening_claims[0].commitment; diff --git 
a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.hpp index d1da2f786ab..a4005f8a4f7 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/merge_verifier.hpp @@ -26,7 +26,7 @@ template class MergeVerifier_ { using VerifierCommitmentKey = typename Flavor::VerifierCommitmentKey; public: - BaseTranscript transcript; + BaseTranscript transcript; std::shared_ptr op_queue; std::shared_ptr pcs_verification_key; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp index 063a1837730..d2f7bd8afb6 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/relation_correctness.test.cpp @@ -246,8 +246,8 @@ class RelationCorrectnessTests : public ::testing::Test { * @details Check that the constraints encoded by the relations are satisfied by the polynomials produced by the * Ultra Honk Composer for a real circuit. 
* - * TODO(Kesha): We'll have to update this function once we add zk, since the relation will be incorrect for he first few - * indices + * TODO(Kesha): We'll have to update this function once we add zk, since the relation will be incorrect for the first + * few indices * */ // TODO(luke): Add a gate that sets q_arith = 3 to check secondary arithmetic relation @@ -390,7 +390,13 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorPermutationRelationCorrectness) ProverPolynomials prover_polynomials; std::vector polynomial_container; auto polynomial_pointer_view = prover_polynomials.pointer_view(); - for (size_t i = 0; i < prover_polynomials.size(); i++) { + size_t z_perm_index = 0; + for (size_t i = 0; i < polynomial_pointer_view.size(); i++) { + // TODO(https://github.com/AztecProtocol/barretenberg/issues/743) wouldn't be needed if ProverPolynomials held + // memory + if (&prover_polynomials.z_perm == polynomial_pointer_view[i]) { + z_perm_index = i; + } Polynomial temporary_polynomial(circuit_size); polynomial_container.push_back(temporary_polynomial); *polynomial_pointer_view[i] = polynomial_container[i]; @@ -483,8 +489,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorPermutationRelationCorrectness) // Compute the grand product polynomial grand_product_library::compute_grand_product>( circuit_size, prover_polynomials, params); - prover_polynomials.z_perm_shift = - polynomial_container[90].shifted(); // TODO(https://github.com/AztecProtocol/barretenberg/issues/784) + prover_polynomials.z_perm_shift = polynomial_container[z_perm_index].shifted(); using Relations = typename Flavor::Relations; @@ -511,9 +516,15 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorGenPermSortRelationCorrectness) std::vector polynomial_container; auto polynomial_pointer_view = prover_polynomials.pointer_view(); + size_t ordered_range_constraints_index = 0; // Allocate polynomials - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < 
polynomial_pointer_view.size(); i++) { Polynomial temporary_polynomial(circuit_size); + // TODO(https://github.com/AztecProtocol/barretenberg/issues/743) wouldn't be needed if ProverPolynomials held + // memory + if (&prover_polynomials.ordered_range_constraints_0 == polynomial_pointer_view[i]) { + ordered_range_constraints_index = i; + } polynomial_container.push_back(temporary_polynomial); *polynomial_pointer_view[i] = polynomial_container[i]; } @@ -559,20 +570,17 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorGenPermSortRelationCorrectness) polynomial_pointers[i + 1]->begin()); }); - // TODO(https://github.com/AztecProtocol/barretenberg/issues/784) - enum ORDERED_RANGE_CONSTRAINTS : size_t { C0 = 85, C1, C2, C3, C4 }; - // Get shifted polynomials prover_polynomials.ordered_range_constraints_0_shift = - polynomial_container[ORDERED_RANGE_CONSTRAINTS::C0].shifted(); + polynomial_container[ordered_range_constraints_index].shifted(); prover_polynomials.ordered_range_constraints_1_shift = - polynomial_container[ORDERED_RANGE_CONSTRAINTS::C1].shifted(); + polynomial_container[ordered_range_constraints_index + 1].shifted(); prover_polynomials.ordered_range_constraints_2_shift = - polynomial_container[ORDERED_RANGE_CONSTRAINTS::C2].shifted(); + polynomial_container[ordered_range_constraints_index + 2].shifted(); prover_polynomials.ordered_range_constraints_3_shift = - polynomial_container[ORDERED_RANGE_CONSTRAINTS::C3].shifted(); + polynomial_container[ordered_range_constraints_index + 3].shifted(); prover_polynomials.ordered_range_constraints_4_shift = - polynomial_container[ORDERED_RANGE_CONSTRAINTS::C4].shifted(); + polynomial_container[ordered_range_constraints_index + 4].shifted(); using Relations = typename Flavor::Relations; @@ -611,7 +619,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorExtraRelationsCorrectness) auto polynomial_id_pointer_view = prover_polynomial_ids.pointer_view(); std::vector polynomial_container; std::vector polynomial_ids; - for 
(size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_id_pointer_view.size(); i++) { Polynomial temporary_polynomial(circuit_size); // Allocate polynomials polynomial_container.push_back(temporary_polynomial); @@ -627,7 +635,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorExtraRelationsCorrectness) } // Assign spans to non-shifted prover polynomials auto polynomial_pointer_view = prover_polynomials.pointer_view(); - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_pointer_view.size(); i++) { if (!shifted_id_set.contains(i)) { *polynomial_pointer_view[i] = polynomial_container[i]; } @@ -710,7 +718,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorDecompositionRelationCorrectnes std::vector polynomial_ids; auto polynomial_id_pointer_view = prover_polynomial_ids.pointer_view(); auto polynomial_pointer_view = prover_polynomials.pointer_view(); - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_id_pointer_view.size(); i++) { Polynomial temporary_polynomial(circuit_size); // Allocate polynomials polynomial_container.push_back(temporary_polynomial); @@ -725,7 +733,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorDecompositionRelationCorrectnes shifted_id_set.emplace(id); } // Assign spans to non-shifted prover polynomials - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_pointer_view.size(); i++) { if (!shifted_id_set.contains(i)) { *polynomial_pointer_view[i] = polynomial_container[i]; } @@ -1129,7 +1137,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorNonNativeRelationCorrectness) std::vector polynomial_ids; auto polynomial_pointer_view = prover_polynomials.pointer_view(); auto polynomial_id_pointer_view = prover_polynomial_ids.pointer_view(); - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_pointer_view.size(); i++) { Polynomial 
temporary_polynomial(circuit_size); // Allocate polynomials polynomial_container.push_back(temporary_polynomial); @@ -1144,7 +1152,7 @@ TEST_F(RelationCorrectnessTests, GoblinTranslatorNonNativeRelationCorrectness) shifted_id_set.emplace(id); } // Assign spans to non-shifted prover polynomials - for (size_t i = 0; i < prover_polynomials.size(); i++) { + for (size_t i = 0; i < polynomial_pointer_view.size(); i++) { if (!shifted_id_set.contains(i)) { *polynomial_pointer_view[i] = polynomial_container[i]; } diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp index 8a7f1269e3d..3ed21abdd39 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/sumcheck.test.cpp @@ -169,7 +169,7 @@ TEST_F(SumcheckTestsRealCircuit, Ultra) Flavor::Transcript verifier_transcript = Flavor::Transcript::verifier_init_empty(prover_transcript); auto sumcheck_verifier = SumcheckVerifier(circuit_size); - auto alpha = verifier_transcript.get_challenge("alpha"); + FF alpha = verifier_transcript.get_challenge("alpha"); auto verifier_output = sumcheck_verifier.verify(instance->relation_parameters, alpha, verifier_transcript); auto verified = verifier_output.verified.value(); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.cpp index ff8cf11b869..716c83d09a4 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.cpp @@ -6,20 +6,78 @@ namespace proof_system::honk { +/** + * Compute verification key consisting of selector precommitments. + * + * @return Pointer to the resulting verification key of the Instance. 
+ * */ +template +void UltraComposer_::compute_verification_key(std::shared_ptr> instance) +{ + if (instance->verification_key) { + return; + } + + auto& proving_key = instance->proving_key; + + auto verification_key = + std::make_shared(proving_key->circuit_size, proving_key->num_public_inputs); + + // Compute and store commitments to all precomputed polynomials + verification_key->q_m = commitment_key->commit(proving_key->q_m); + verification_key->q_l = commitment_key->commit(proving_key->q_l); + verification_key->q_r = commitment_key->commit(proving_key->q_r); + verification_key->q_o = commitment_key->commit(proving_key->q_o); + verification_key->q_c = commitment_key->commit(proving_key->q_c); + verification_key->sigma_1 = commitment_key->commit(proving_key->sigma_1); + verification_key->sigma_2 = commitment_key->commit(proving_key->sigma_2); + verification_key->sigma_3 = commitment_key->commit(proving_key->sigma_3); + verification_key->id_1 = commitment_key->commit(proving_key->id_1); + verification_key->id_2 = commitment_key->commit(proving_key->id_2); + verification_key->id_3 = commitment_key->commit(proving_key->id_3); + verification_key->lagrange_first = commitment_key->commit(proving_key->lagrange_first); + verification_key->lagrange_last = commitment_key->commit(proving_key->lagrange_last); + + verification_key->q_4 = commitment_key->commit(proving_key->q_4); + verification_key->q_arith = commitment_key->commit(proving_key->q_arith); + verification_key->q_sort = commitment_key->commit(proving_key->q_sort); + verification_key->q_elliptic = commitment_key->commit(proving_key->q_elliptic); + verification_key->q_aux = commitment_key->commit(proving_key->q_aux); + verification_key->q_lookup = commitment_key->commit(proving_key->q_lookup); + verification_key->sigma_4 = commitment_key->commit(proving_key->sigma_4); + verification_key->id_4 = commitment_key->commit(proving_key->id_4); + verification_key->table_1 = commitment_key->commit(proving_key->table_1); + 
verification_key->table_2 = commitment_key->commit(proving_key->table_2); + verification_key->table_3 = commitment_key->commit(proving_key->table_3); + verification_key->table_4 = commitment_key->commit(proving_key->table_4); + + // TODO(luke): Similar to the lagrange_first/last polynomials, we dont really need to commit to these polynomials + // due to their simple structure. + if constexpr (IsGoblinFlavor) { + verification_key->lagrange_ecc_op = commitment_key->commit(proving_key->lagrange_ecc_op); + verification_key->q_busread = commitment_key->commit(proving_key->q_busread); + verification_key->databus_id = commitment_key->commit(proving_key->databus_id); + } + + instance->verification_key = std::move(verification_key); +} + template std::shared_ptr> UltraComposer_::create_instance(CircuitBuilder& circuit) { circuit.add_gates_to_ensure_all_polys_are_non_zero(); circuit.finalize_circuit(); auto instance = std::make_shared(circuit); - instance->commitment_key = compute_commitment_key(instance->proving_key->circuit_size); + commitment_key = compute_commitment_key(instance->proving_key->circuit_size); + + compute_verification_key(instance); return instance; } template UltraProver_ UltraComposer_::create_prover(std::shared_ptr instance) { - UltraProver_ output_state(instance); + UltraProver_ output_state(instance, commitment_key); return output_state; } @@ -27,9 +85,9 @@ UltraProver_ UltraComposer_::create_prover(std::shared_ptr UltraVerifier_ UltraComposer_::create_verifier(std::shared_ptr instance) { - auto verification_key = instance->compute_verification_key(); - UltraVerifier_ output_state(verification_key); - auto pcs_verification_key = std::make_unique(verification_key->circuit_size, crs_factory_); + UltraVerifier_ output_state(instance->verification_key); + auto pcs_verification_key = + std::make_unique(instance->verification_key->circuit_size, crs_factory_); output_state.pcs_verification_key = std::move(pcs_verification_key); return output_state; diff --git 
a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.hpp index 3cd04b03ff1..33840e32bc7 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_composer.hpp @@ -29,8 +29,6 @@ template class UltraComposer_ { static constexpr size_t num_zero_rows = Flavor::has_zero_row ? 1 : 0; static constexpr std::string_view NAME_STRING = "UltraHonk"; static constexpr size_t NUM_WIRES = CircuitBuilder::NUM_WIRES; - std::shared_ptr proving_key; - std::shared_ptr verification_key; // The crs_factory holds the path to the srs and exposes methods to extract the srs elements std::shared_ptr> crs_factory_; @@ -43,11 +41,6 @@ template class UltraComposer_ { : crs_factory_(std::move(crs_factory)) {} - UltraComposer_(std::shared_ptr p_key, std::shared_ptr v_key) - : proving_key(std::move(p_key)) - , verification_key(std::move(v_key)) - {} - UltraComposer_(UltraComposer_&& other) noexcept = default; UltraComposer_(UltraComposer_ const& other) noexcept = default; UltraComposer_& operator=(UltraComposer_&& other) noexcept = default; @@ -104,13 +97,21 @@ template class UltraComposer_ { { std::vector> vks; for (const auto& inst : instances) { - vks.emplace_back(inst->compute_verification_key()); + vks.emplace_back(inst->verification_key); } VerifierInstances insts(vks); ProtoGalaxyVerifier_ output_state(insts); return output_state; }; + + private: + /** + * @brief Compute the verification key of an Instance, produced from a finalised circuit. 
+ * + * @param inst + */ + void compute_verification_key(std::shared_ptr); }; extern template class UltraComposer_; extern template class UltraComposer_; diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp index 0dcdb608e2b..6c316094ceb 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.cpp @@ -12,9 +12,9 @@ namespace proof_system::honk { * @tparam a type of UltraFlavor * */ template -UltraProver_::UltraProver_(std::shared_ptr inst) +UltraProver_::UltraProver_(std::shared_ptr inst, std::shared_ptr commitment_key) : instance(std::move(inst)) - , commitment_key(instance->commitment_key) + , commitment_key(commitment_key) { instance->initialize_prover_polynomials(); } @@ -40,31 +40,46 @@ template void UltraProver_::execute_preamble_round( } /** - * @brief Compute commitments to the first three wire polynomials (and ECC op wires if using Goblin). - * + * @brief Commit to the wire polynomials (part of the witness), with the exception of the fourth wire, which is + * only committed to after adding memory records. In the Goblin Flavor, we also commit to the ECC OP wires and the + * DataBus columns.
*/ template void UltraProver_::execute_wire_commitments_round() { + auto& witness_commitments = instance->witness_commitments; + auto& proving_key = instance->proving_key; + // Commit to the first three wire polynomials - // We only commit to the fourth wire polynomial after adding memory records - auto wire_polys = instance->proving_key->get_wires(); + // We only commit to the fourth wire polynomial after adding memory records + witness_commitments.w_l = commitment_key->commit(proving_key->w_l); + witness_commitments.w_r = commitment_key->commit(proving_key->w_r); + witness_commitments.w_o = commitment_key->commit(proving_key->w_o); + + auto wire_comms = witness_commitments.get_wires(); auto labels = commitment_labels.get_wires(); for (size_t idx = 0; idx < 3; ++idx) { - transcript.send_to_verifier(labels[idx], commitment_key->commit(wire_polys[idx])); + transcript.send_to_verifier(labels[idx], wire_comms[idx]); } if constexpr (IsGoblinFlavor) { // Commit to Goblin ECC op wires - auto op_wire_polys = instance->proving_key->get_ecc_op_wires(); + witness_commitments.ecc_op_wire_1 = commitment_key->commit(proving_key->ecc_op_wire_1); + witness_commitments.ecc_op_wire_2 = commitment_key->commit(proving_key->ecc_op_wire_2); + witness_commitments.ecc_op_wire_3 = commitment_key->commit(proving_key->ecc_op_wire_3); + witness_commitments.ecc_op_wire_4 = commitment_key->commit(proving_key->ecc_op_wire_4); + + auto op_wire_comms = instance->witness_commitments.get_ecc_op_wires(); auto labels = commitment_labels.get_ecc_op_wires(); for (size_t idx = 0; idx < Flavor::NUM_WIRES; ++idx) { - transcript.send_to_verifier(labels[idx], commitment_key->commit(op_wire_polys[idx])); + transcript.send_to_verifier(labels[idx], op_wire_comms[idx]); } + // Commit to DataBus columns - transcript.send_to_verifier(commitment_labels.calldata, - commitment_key->commit(instance->proving_key->calldata)); + witness_commitments.calldata = commitment_key->commit(proving_key->calldata); +
witness_commitments.calldata_read_counts = commitment_key->commit(proving_key->calldata_read_counts); + transcript.send_to_verifier(commitment_labels.calldata, instance->witness_commitments.calldata); transcript.send_to_verifier(commitment_labels.calldata_read_counts, - commitment_key->commit(instance->proving_key->calldata_read_counts)); + instance->witness_commitments.calldata_read_counts); } } @@ -74,16 +89,18 @@ template void UltraProver_::execute_wire_commitment */ template void UltraProver_::execute_sorted_list_accumulator_round() { - auto eta = transcript.get_challenge("eta"); + FF eta = transcript.get_challenge("eta"); instance->compute_sorted_accumulator_polynomials(eta); + auto& witness_commitments = instance->witness_commitments; // Commit to the sorted withness-table accumulator and the finalized (i.e. with memory records) fourth wire // polynomial - auto sorted_accum_commitment = commitment_key->commit(instance->proving_key->sorted_accum); - auto w_4_commitment = commitment_key->commit(instance->proving_key->w_4); - transcript.send_to_verifier(commitment_labels.sorted_accum, sorted_accum_commitment); - transcript.send_to_verifier(commitment_labels.w_4, w_4_commitment); + witness_commitments.sorted_accum = commitment_key->commit(instance->prover_polynomials.sorted_accum); + witness_commitments.w_4 = commitment_key->commit(instance->prover_polynomials.w_4); + + transcript.send_to_verifier(commitment_labels.sorted_accum, instance->witness_commitments.sorted_accum); + transcript.send_to_verifier(commitment_labels.w_4, instance->witness_commitments.w_4); } /** @@ -93,15 +110,15 @@ template void UltraProver_::execute_sorted_list_acc template void UltraProver_::execute_log_derivative_inverse_round() { // Compute and store challenges beta and gamma - auto [beta, gamma] = transcript.get_challenges("beta", "gamma"); + auto [beta, gamma] = challenges_to_field_elements(transcript.get_challenges("beta", "gamma")); relation_parameters.beta = beta; 
relation_parameters.gamma = gamma; if constexpr (IsGoblinFlavor) { instance->compute_logderivative_inverse(beta, gamma); - - auto lookup_inverses_commitment = commitment_key->commit(instance->proving_key->lookup_inverses); - transcript.send_to_verifier(commitment_labels.lookup_inverses, lookup_inverses_commitment); + instance->witness_commitments.lookup_inverses = + commitment_key->commit(instance->prover_polynomials.lookup_inverses); + transcript.send_to_verifier(commitment_labels.lookup_inverses, instance->witness_commitments.lookup_inverses); } } @@ -114,10 +131,11 @@ template void UltraProver_::execute_grand_product_c instance->compute_grand_product_polynomials(relation_parameters.beta, relation_parameters.gamma); - auto z_perm_commitment = commitment_key->commit(instance->proving_key->z_perm); - auto z_lookup_commitment = commitment_key->commit(instance->proving_key->z_lookup); - transcript.send_to_verifier(commitment_labels.z_perm, z_perm_commitment); - transcript.send_to_verifier(commitment_labels.z_lookup, z_lookup_commitment); + auto& witness_commitments = instance->witness_commitments; + witness_commitments.z_perm = commitment_key->commit(instance->prover_polynomials.z_perm); + witness_commitments.z_lookup = commitment_key->commit(instance->prover_polynomials.z_lookup); + transcript.send_to_verifier(commitment_labels.z_perm, instance->witness_commitments.z_perm); + transcript.send_to_verifier(commitment_labels.z_lookup, instance->witness_commitments.z_lookup); } /** diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp index fe50c8e0c80..9d660067229 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_prover.hpp @@ -23,7 +23,7 @@ template class UltraProver_ { using Transcript = typename Flavor::Transcript; public: - explicit UltraProver_(std::shared_ptr); + explicit 
UltraProver_(std::shared_ptr, std::shared_ptr); BBERG_PROFILE void execute_preamble_round(); BBERG_PROFILE void execute_wire_commitments_round(); BBERG_PROFILE void execute_sorted_list_accumulator_round(); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp index cd6cdf1b40d..1d4e16edd70 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_transcript.test.cpp @@ -186,10 +186,10 @@ TEST_F(UltraTranscriptTests, ChallengeGenerationTest) constexpr uint32_t random_val{ 17 }; // arbitrary transcript.send_to_verifier("random val", random_val); // test more challenges - auto [a, b, c] = transcript.get_challenges("a", "b", "c"); + auto [a, b, c] = challenges_to_field_elements(transcript.get_challenges("a", "b", "c")); ASSERT_NE(a, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; - ASSERT_NE(b, 0) << "Challenge a is 0"; + ASSERT_NE(b, 0) << "Challenge b is 0"; + ASSERT_NE(c, 0) << "Challenge c is 0"; } TEST_F(UltraTranscriptTests, StructureTest) diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp index 35e9539be19..1198bf981ec 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.cpp @@ -42,7 +42,7 @@ template bool UltraVerifier_::verify_proof(const plonk proof_system::RelationParameters relation_parameters; - transcript = BaseTranscript{ proof.proof_data }; + transcript = BaseTranscript{ proof.proof_data }; auto commitments = VerifierCommitments(key, transcript); auto commitment_labels = CommitmentLabels(); @@ -86,7 +86,7 @@ template bool UltraVerifier_::verify_proof(const plonk } // Get challenge for sorted list batching and wire four memory records - auto eta = 
transcript.get_challenge("eta"); + FF eta = transcript.get_challenge("eta"); relation_parameters.eta = eta; // Get commitments to sorted list accumulator and fourth wire @@ -94,7 +94,7 @@ template bool UltraVerifier_::verify_proof(const plonk commitments.w_4 = transcript.template receive_from_prover(commitment_labels.w_4); // Get permutation challenges - auto [beta, gamma] = transcript.get_challenges("beta", "gamma"); + auto [beta, gamma] = challenges_to_field_elements(transcript.get_challenges("beta", "gamma")); // If Goblin (i.e. using DataBus) receive commitments to log-deriv inverses polynomial if constexpr (IsGoblinFlavor) { @@ -117,7 +117,7 @@ template bool UltraVerifier_::verify_proof(const plonk // Execute Sumcheck Verifier auto sumcheck = SumcheckVerifier(circuit_size); - auto alpha = transcript.get_challenge("alpha"); + FF alpha = transcript.get_challenge("alpha"); auto [multivariate_challenge, claimed_evaluations, sumcheck_verified] = sumcheck.verify(relation_parameters, alpha, transcript); diff --git a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp index ecf6541d0c8..b449f76db4b 100644 --- a/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/ultra_honk/ultra_verifier.hpp @@ -23,7 +23,7 @@ template class UltraVerifier_ { std::shared_ptr key; std::map commitments; std::shared_ptr pcs_verification_key; - BaseTranscript transcript; + BaseTranscript transcript; }; extern template class UltraVerifier_; diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_composer.cpp b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_composer.cpp index b0d8a156711..6b6bb8f5550 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_composer.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_composer.cpp @@ -18,8 +18,13 @@ void AvmMiniComposer::compute_witness(CircuitConstructor& circuit) auto 
polynomials = circuit.compute_polynomials(); proving_key->avmMini_clk = polynomials.avmMini_clk; - proving_key->avmMini_positive = polynomials.avmMini_positive; proving_key->avmMini_first = polynomials.avmMini_first; + proving_key->memTrace_m_clk = polynomials.memTrace_m_clk; + proving_key->memTrace_m_sub_clk = polynomials.memTrace_m_sub_clk; + proving_key->memTrace_m_addr = polynomials.memTrace_m_addr; + proving_key->memTrace_m_val = polynomials.memTrace_m_val; + proving_key->memTrace_m_lastAccess = polynomials.memTrace_m_lastAccess; + proving_key->memTrace_m_rw = polynomials.memTrace_m_rw; proving_key->avmMini_subop = polynomials.avmMini_subop; proving_key->avmMini_ia = polynomials.avmMini_ia; proving_key->avmMini_ib = polynomials.avmMini_ib; @@ -34,12 +39,6 @@ void AvmMiniComposer::compute_witness(CircuitConstructor& circuit) proving_key->avmMini_mem_idx_b = polynomials.avmMini_mem_idx_b; proving_key->avmMini_mem_idx_c = polynomials.avmMini_mem_idx_c; proving_key->avmMini_last = polynomials.avmMini_last; - proving_key->avmMini_m_clk = polynomials.avmMini_m_clk; - proving_key->avmMini_m_sub_clk = polynomials.avmMini_m_sub_clk; - proving_key->avmMini_m_addr = polynomials.avmMini_m_addr; - proving_key->avmMini_m_val = polynomials.avmMini_m_val; - proving_key->avmMini_m_lastAccess = polynomials.avmMini_m_lastAccess; - proving_key->avmMini_m_rw = polynomials.avmMini_m_rw; computed_witness = true; } diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_prover.cpp b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_prover.cpp index 730aab4ba4f..30aa284c948 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_prover.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_prover.cpp @@ -32,8 +32,13 @@ AvmMiniProver::AvmMiniProver(std::shared_ptr input_key, // TODO: take every polynomial and assign it to the key!! 
prover_polynomials.avmMini_clk = key->avmMini_clk; - prover_polynomials.avmMini_positive = key->avmMini_positive; prover_polynomials.avmMini_first = key->avmMini_first; + prover_polynomials.memTrace_m_clk = key->memTrace_m_clk; + prover_polynomials.memTrace_m_sub_clk = key->memTrace_m_sub_clk; + prover_polynomials.memTrace_m_addr = key->memTrace_m_addr; + prover_polynomials.memTrace_m_val = key->memTrace_m_val; + prover_polynomials.memTrace_m_lastAccess = key->memTrace_m_lastAccess; + prover_polynomials.memTrace_m_rw = key->memTrace_m_rw; prover_polynomials.avmMini_subop = key->avmMini_subop; prover_polynomials.avmMini_ia = key->avmMini_ia; prover_polynomials.avmMini_ib = key->avmMini_ib; @@ -48,21 +53,15 @@ AvmMiniProver::AvmMiniProver(std::shared_ptr input_key, prover_polynomials.avmMini_mem_idx_b = key->avmMini_mem_idx_b; prover_polynomials.avmMini_mem_idx_c = key->avmMini_mem_idx_c; prover_polynomials.avmMini_last = key->avmMini_last; - prover_polynomials.avmMini_m_clk = key->avmMini_m_clk; - prover_polynomials.avmMini_m_sub_clk = key->avmMini_m_sub_clk; - prover_polynomials.avmMini_m_addr = key->avmMini_m_addr; - prover_polynomials.avmMini_m_val = key->avmMini_m_val; - prover_polynomials.avmMini_m_lastAccess = key->avmMini_m_lastAccess; - prover_polynomials.avmMini_m_rw = key->avmMini_m_rw; - prover_polynomials.avmMini_m_val = key->avmMini_m_val; - prover_polynomials.avmMini_m_val_shift = key->avmMini_m_val.shifted(); + prover_polynomials.memTrace_m_addr = key->memTrace_m_addr; + prover_polynomials.memTrace_m_addr_shift = key->memTrace_m_addr.shifted(); - prover_polynomials.avmMini_m_addr = key->avmMini_m_addr; - prover_polynomials.avmMini_m_addr_shift = key->avmMini_m_addr.shifted(); + prover_polynomials.memTrace_m_rw = key->memTrace_m_rw; + prover_polynomials.memTrace_m_rw_shift = key->memTrace_m_rw.shifted(); - prover_polynomials.avmMini_m_rw = key->avmMini_m_rw; - prover_polynomials.avmMini_m_rw_shift = key->avmMini_m_rw.shifted(); + 
prover_polynomials.memTrace_m_val = key->memTrace_m_val; + prover_polynomials.memTrace_m_val_shift = key->memTrace_m_val.shifted(); // prover_polynomials.lookup_inverses = key->lookup_inverses; // key->z_perm = Polynomial(key->circuit_size); diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.cpp b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.cpp index 575d55bd00c..042323b480d 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.cpp @@ -43,7 +43,7 @@ bool AvmMiniVerifier::verify_proof(const plonk::proof& proof) RelationParameters relation_parameters; - transcript = BaseTranscript{ proof.proof_data }; + transcript = BaseTranscript{ proof.proof_data }; auto commitments = VerifierCommitments(key, transcript); auto commitment_labels = CommitmentLabels(); @@ -55,6 +55,15 @@ bool AvmMiniVerifier::verify_proof(const plonk::proof& proof) } // Get commitments to VM wires + commitments.memTrace_m_clk = transcript.template receive_from_prover(commitment_labels.memTrace_m_clk); + commitments.memTrace_m_sub_clk = + transcript.template receive_from_prover(commitment_labels.memTrace_m_sub_clk); + commitments.memTrace_m_addr = + transcript.template receive_from_prover(commitment_labels.memTrace_m_addr); + commitments.memTrace_m_val = transcript.template receive_from_prover(commitment_labels.memTrace_m_val); + commitments.memTrace_m_lastAccess = + transcript.template receive_from_prover(commitment_labels.memTrace_m_lastAccess); + commitments.memTrace_m_rw = transcript.template receive_from_prover(commitment_labels.memTrace_m_rw); commitments.avmMini_subop = transcript.template receive_from_prover(commitment_labels.avmMini_subop); commitments.avmMini_ia = transcript.template receive_from_prover(commitment_labels.avmMini_ia); commitments.avmMini_ib = transcript.template receive_from_prover(commitment_labels.avmMini_ib); @@ -75,14 +84,6 @@ bool 
AvmMiniVerifier::verify_proof(const plonk::proof& proof) commitments.avmMini_mem_idx_c = transcript.template receive_from_prover(commitment_labels.avmMini_mem_idx_c); commitments.avmMini_last = transcript.template receive_from_prover(commitment_labels.avmMini_last); - commitments.avmMini_m_clk = transcript.template receive_from_prover(commitment_labels.avmMini_m_clk); - commitments.avmMini_m_sub_clk = - transcript.template receive_from_prover(commitment_labels.avmMini_m_sub_clk); - commitments.avmMini_m_addr = transcript.template receive_from_prover(commitment_labels.avmMini_m_addr); - commitments.avmMini_m_val = transcript.template receive_from_prover(commitment_labels.avmMini_m_val); - commitments.avmMini_m_lastAccess = - transcript.template receive_from_prover(commitment_labels.avmMini_m_lastAccess); - commitments.avmMini_m_rw = transcript.template receive_from_prover(commitment_labels.avmMini_m_rw); // Execute Sumcheck Verifier auto sumcheck = SumcheckVerifier(circuit_size); diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.hpp b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.hpp index 5c8ef05d160..da36ecb8a4d 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/AvmMini_verifier.hpp @@ -26,7 +26,7 @@ class AvmMiniVerifier { std::shared_ptr key; std::map commitments; std::shared_ptr pcs_verification_key; - BaseTranscript transcript; + BaseTranscript transcript; }; } // namespace proof_system::honk diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.cpp b/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.cpp index b413390e897..58cc5cfcbc1 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.cpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.cpp @@ -43,7 +43,7 @@ bool FibVerifier::verify_proof(const plonk::proof& proof) RelationParameters relation_parameters; - transcript 
= BaseTranscript{ proof.proof_data }; + transcript = BaseTranscript{ proof.proof_data }; auto commitments = VerifierCommitments(key, transcript); auto commitment_labels = CommitmentLabels(); diff --git a/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.hpp b/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.hpp index 85d98c492ee..4d95adbcff4 100644 --- a/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.hpp +++ b/barretenberg/cpp/src/barretenberg/vm/generated/Fib_verifier.hpp @@ -26,7 +26,7 @@ class FibVerifier { std::shared_ptr key; std::map commitments; std::shared_ptr pcs_verification_key; - BaseTranscript transcript; + BaseTranscript transcript; }; } // namespace proof_system::honk diff --git a/build-system/scripts/deploy b/build-system/scripts/deploy index 06410f5f031..ca43d5b9cfb 100755 --- a/build-system/scripts/deploy +++ b/build-system/scripts/deploy @@ -4,7 +4,7 @@ set -eu REPOSITORY=$1 SERVICES=${2:-$REPOSITORY} -TO_TAINT=$3 +TO_TAINT=${3:-} cd $(query_manifest projectDir $REPOSITORY) @@ -19,7 +19,7 @@ if check_rebuild cache-$CONTENT_HASH-$DEPLOY_TAG-deployed $REPOSITORY; then exit 0 fi -deploy_terraform $REPOSITORY ./terraform/$DEPLOY_ENV "$TO_TAINT" +deploy_terraform $REPOSITORY ./terraform/ "$TO_TAINT" # Restart services. for SERVICE in $SERVICES; do diff --git a/build-system/scripts/deploy_dockerhub b/build-system/scripts/deploy_dockerhub index 6561b471c57..4bc41c57c1c 100755 --- a/build-system/scripts/deploy_dockerhub +++ b/build-system/scripts/deploy_dockerhub @@ -14,11 +14,24 @@ function cmd_or_dryrun { echo "Repo: $REPOSITORY" +<<<<<<< HEAD # Set tag to the commit tag (a version number) if we have one, otherwise branch name normalised to a tag format. 
if [ -n "$COMMIT_TAG" ]; then TAG=$(extract_tag_version $REPOSITORY true) else TAG=$(echo "$BRANCH" | sed 's/[^a-zA-Z0-9_.-]/_/g') +======= +VERSION_TAG=$(extract_tag_version $REPOSITORY false) + +# if no version tag, check if we're on `master` branch +if [[ -z "$VERSION_TAG" ]]; then + if [[ "$BRANCH" != "master" ]]; then + echo "No version tag found. Exiting" >&2 + exit 1 + fi + # if we're on master, use the DEPLOY_TAG as the version tag + VERSION_TAG=$DEPLOY_TAG +>>>>>>> origin/master fi SRC_URI=$(calculate_image_uri $REPOSITORY) @@ -33,10 +46,26 @@ sudo apt install -y skopeo retry wget https://github.com/lework/skopeo-binary/releases/download/v1.13.3/skopeo-linux-amd64 chmod +x ./skopeo-linux-amd64 && sudo mv ./skopeo-linux-amd64 /usr/bin/skopeo +<<<<<<< HEAD cmd_or_dryrun skopeo copy --all docker://$SRC_URI docker://$DST_URI if [ -n "$COMMIT_TAG" ]; then # Publish as latest. LATEST_URI=$DOCKERHUB_ACCOUNT/$REPOSITORY:latest cmd_or_dryrun skopeo copy --all docker://$DST_URI docker://$LATEST_URI +======= + # Add latest manifest if we're making a release. + if [[ "$VERSION_TAG" != $DEPLOY_TAG ]]; then + echo "Adding image $IMAGE_DEPLOY_URI to manifest list $MANIFEST_DIST_URI" + docker_or_dryrun manifest create $MANIFEST_DIST_URI --amend $IMAGE_DEPLOY_URI + fi +done + +docker_or_dryrun manifest push --purge $MANIFEST_DEPLOY_URI + +# Retag version as latest if we're making a release. +if [[ "$VERSION_TAG" != "$DEPLOY_TAG" ]]; then + echo "Tagging $MANIFEST_DEPLOY_URI as $DIST_TAG..." + docker_or_dryrun manifest push --purge $MANIFEST_DIST_URI +>>>>>>> origin/master fi diff --git a/build-system/scripts/deploy_terraform b/build-system/scripts/deploy_terraform index 6d7f6ef81bb..ce2231853aa 100755 --- a/build-system/scripts/deploy_terraform +++ b/build-system/scripts/deploy_terraform @@ -4,7 +4,7 @@ set -eu REPOSITORY=$1 TF_DIR=$2 -TO_TAINT=${3} +TO_TAINT=${3:-} if [ ! -d "$TF_DIR" ]; then echo "No terraform directory found at $TF_DIR. Skipping." 
@@ -25,6 +25,10 @@ echo "Deploying terraform found at $PWD..." # Always want to export the DEPLOY_TAG variable to terraform. It's used to easily scope releases. export TF_VAR_DEPLOY_TAG=$DEPLOY_TAG export TF_VAR_COMMIT_HASH=$COMMIT_HASH +export TF_VAR_DOCKERHUB_ACCOUNT=$DOCKERHUB_ACCOUNT +export TF_VAR_FORK_MNEMONIC=$FORK_MNEMONIC +export TF_VAR_INFURA_API_KEY=$INFURA_API_KEY +export TF_VAR_API_KEY=$FORK_API_KEY # If given a repository name, use it to construct and set/override the backend key. # Otherwise use the key as specified in the terraform. diff --git a/build-system/scripts/ensure_terraform b/build-system/scripts/ensure_terraform index f2e33c5445d..d7444aa4e4c 100755 --- a/build-system/scripts/ensure_terraform +++ b/build-system/scripts/ensure_terraform @@ -6,7 +6,7 @@ set -eu [ ! -f /usr/local/bin/terraform ] || exit 0 cd $HOME -TERRAFORM_VERSION=0.13.3 +TERRAFORM_VERSION=1.5.2 curl -sSL https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip -o terraform.zip sudo apt install -y unzip unzip terraform.zip diff --git a/build-system/scripts/setup_env b/build-system/scripts/setup_env index 5127936d346..a63b2a13428 100755 --- a/build-system/scripts/setup_env +++ b/build-system/scripts/setup_env @@ -82,6 +82,9 @@ echo export DEPLOY_TAG=$DEPLOY_TAG >> $BASH_ENV echo export BRANCH=$BRANCH >> $BASH_ENV echo export PULL_REQUEST=$PULL_REQUEST >> $BASH_ENV echo export DRY_DEPLOY=${DRY_DEPLOY:-0} >> $BASH_ENV +echo export FORK_API_KEY=${FORK_API_KEY:-} >> $BASH_ENV +echo export CONTRACT_PUBLISHER_PRIVATE_KEY=${CONTRACT_PUBLISHER_PRIVATE_KEY:-} >> $BASH_ENV + # We want very strict failures on any failing command, undefined variable, or commands that pipe to other commands. echo set -euo pipefail >> $BASH_ENV # Enable logging if [ci debug] is in commit message. 
diff --git a/build-system/scripts/should_deploy b/build-system/scripts/should_deploy index 7c7143de3d0..11fdcf8abd2 100755 --- a/build-system/scripts/should_deploy +++ b/build-system/scripts/should_deploy @@ -1,4 +1,5 @@ #!/bin/bash +<<<<<<< HEAD # Returns true if we are expected to proceed with a deploy job. # Specifically if we have a commit tag, are master, or are being forced to deploy. # This script should be used at the start of all deployment steps to early out PR runs. @@ -6,4 +7,20 @@ # Dockerhub publishing will publish for master, or PR's run with force-deploy, but only tag latest on tagged commits. set -eu -[[ -n "$COMMIT_TAG" || "$BRANCH" == "master" || "$COMMIT_MESSAGE" == *"[ci force-deploy]"* ]] \ No newline at end of file +[[ -n "$COMMIT_TAG" || "$BRANCH" == "master" || "$COMMIT_MESSAGE" == *"[ci force-deploy]"* ]] +======= +# Returns success if we are expected to do a deployment. +# This is if we have a commit tag (release) or if we're on `master` branch (devnet deployment). + +set -eu + +# This is when we only want to deploy on master, not release with new COMMIT_TAG. +# Specifically for deploying devnet. 
+RELEASE=${1:-1} + +if [ -n "$COMMIT_TAG" ] && [ "$RELEASE" != "0" ] || [ "$BRANCH" = "master" ]; then + exit 0 +else + exit 1 +fi +>>>>>>> origin/master diff --git a/build_manifest.yml b/build_manifest.yml index 11b851106a6..588c3c70a93 100644 --- a/build_manifest.yml +++ b/build_manifest.yml @@ -212,3 +212,8 @@ docs: - ^.*.nr$ dependencies: - yarn-project + +yellow-paper: + buildDir: yellow-paper + rebuildPatterns: + - ^yellow-paper/ \ No newline at end of file diff --git a/circuits/cpp/src/aztec3/circuits/abis/combined_constant_data.hpp b/circuits/cpp/src/aztec3/circuits/abis/combined_constant_data.hpp index 21beafda197..21d2aef29b4 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/combined_constant_data.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/combined_constant_data.hpp @@ -2,7 +2,7 @@ #include "tx_context.hpp" -#include "aztec3/circuits/abis/historic_block_data.hpp" +#include "aztec3/circuits/abis/block_header.hpp" #include "aztec3/utils/types/circuit_types.hpp" #include "aztec3/utils/types/native_types.hpp" @@ -10,7 +10,7 @@ namespace aztec3::circuits::abis { -using aztec3::circuits::abis::HistoricBlockData; +using aztec3::circuits::abis::BlockHeader; using aztec3::utils::types::CircuitTypes; using aztec3::utils::types::NativeTypes; using std::is_same; @@ -19,11 +19,11 @@ template struct CombinedConstantData { using fr = typename NCT::fr; using boolean = typename NCT::boolean; - HistoricBlockData block_data{}; + BlockHeader block_header{}; TxContext tx_context{}; // for serialization: update up with new fields - MSGPACK_FIELDS(block_data, tx_context); + MSGPACK_FIELDS(block_header, tx_context); boolean operator==(CombinedConstantData const& other) const { return msgpack_derived_equals(*this, other); @@ -34,7 +34,7 @@ template struct CombinedConstantData { static_assert((std::is_same::value)); CombinedConstantData> constant_data = { - block_data.to_circuit_type(builder), + block_header.to_circuit_type(builder), tx_context.to_circuit_type(builder), }; 
@@ -48,7 +48,7 @@ template struct CombinedConstantData { auto to_native_type = [](T& e) { return e.template to_native_type(); }; CombinedConstantData constant_data = { - to_native_type(block_data), + to_native_type(block_header), to_native_type(tx_context), }; @@ -59,7 +59,7 @@ template struct CombinedConstantData { { static_assert(!(std::is_same::value)); - block_data.set_public(); + block_header.set_public(); tx_context.set_public(); } }; diff --git a/circuits/cpp/src/aztec3/circuits/abis/historic_block_data.hpp b/circuits/cpp/src/aztec3/circuits/abis/historic_block_data.hpp index 45161128c06..7d07d16fe0a 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/historic_block_data.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/historic_block_data.hpp @@ -17,7 +17,7 @@ using aztec3::utils::types::CircuitTypes; using aztec3::utils::types::NativeTypes; using std::is_same; -template struct HistoricBlockData { +template struct BlockHeader { using fr = typename NCT::fr; using boolean = typename NCT::boolean; @@ -43,12 +43,12 @@ template struct HistoricBlockData { public_data_tree_root, global_variables_hash); - boolean operator==(HistoricBlockData const& other) const + boolean operator==(BlockHeader const& other) const { return note_hash_tree_root == other.note_hash_tree_root && nullifier_tree_root == other.nullifier_tree_root && contract_tree_root == other.contract_tree_root && l1_to_l2_messages_tree_root == other.l1_to_l2_messages_tree_root && - blocks_tree_root == other.historic_block_root && + blocks_tree_root == other.blocks_tree_root && private_kernel_vk_tree_root == other.private_kernel_vk_tree_root && public_data_tree_root == other.public_data_tree_root && global_variables_hash == other.global_variables_hash; @@ -68,14 +68,14 @@ template struct HistoricBlockData { global_variables_hash.assert_is_zero(); } - template HistoricBlockData> to_circuit_type(Builder& builder) const + template BlockHeader> to_circuit_type(Builder& builder) const { 
static_assert((std::is_same::value)); // Capture the circuit builder: auto to_ct = [&](auto& e) { return aztec3::utils::types::to_ct(builder, e); }; - HistoricBlockData> data = { + BlockHeader> data = { to_ct(note_hash_tree_root), to_ct(nullifier_tree_root), to_ct(contract_tree_root), to_ct(l1_to_l2_messages_tree_root), to_ct(blocks_tree_root), to_ct(private_kernel_vk_tree_root), to_ct(public_data_tree_root), to_ct(global_variables_hash), @@ -84,12 +84,12 @@ template struct HistoricBlockData { return data; }; - template HistoricBlockData to_native_type() const + template BlockHeader to_native_type() const { static_assert(std::is_same, NCT>::value); auto to_nt = [&](auto& e) { return aztec3::utils::types::to_nt(e); }; - HistoricBlockData data = { + BlockHeader data = { to_nt(note_hash_tree_root), to_nt(nullifier_tree_root), to_nt(contract_tree_root), to_nt(l1_to_l2_messages_tree_root), to_nt(blocks_tree_root), to_nt(private_kernel_vk_tree_root), to_nt(public_data_tree_root), to_nt(global_variables_hash), diff --git a/circuits/cpp/src/aztec3/circuits/abis/packers.hpp b/circuits/cpp/src/aztec3/circuits/abis/packers.hpp index 894481245a6..2ee93840908 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/packers.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/packers.hpp @@ -59,7 +59,7 @@ struct ConstantsPacker { NOTE_HASH_SUBTREE_HEIGHT, NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH, NULLIFIER_SUBTREE_HEIGHT, - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, L1_TO_L2_MSG_SUBTREE_HEIGHT), NVP(L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, @@ -73,7 +73,7 @@ struct ConstantsPacker { MAX_NOTES_PER_PAGE, VIEW_NOTE_ORACLE_RETURN_LENGTH, CALL_CONTEXT_LENGTH, - HISTORIC_BLOCK_DATA_LENGTH, + BLOCK_HEADER_LENGTH, FUNCTION_DATA_LENGTH, CONTRACT_DEPLOYMENT_DATA_LENGTH, PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH, diff --git a/circuits/cpp/src/aztec3/circuits/abis/private_circuit_public_inputs.hpp 
b/circuits/cpp/src/aztec3/circuits/abis/private_circuit_public_inputs.hpp index 5950c4be70d..e06860ce3c2 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/private_circuit_public_inputs.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/private_circuit_public_inputs.hpp @@ -3,7 +3,7 @@ #include "call_context.hpp" #include "contract_deployment_data.hpp" -#include "aztec3/circuits/abis/historic_block_data.hpp" +#include "aztec3/circuits/abis/block_header.hpp" #include "aztec3/constants.hpp" #include "aztec3/utils/types/circuit_types.hpp" #include "aztec3/utils/types/convert.hpp" @@ -46,7 +46,7 @@ template class PrivateCircuitPublicInputs { fr encrypted_log_preimages_length = 0; fr unencrypted_log_preimages_length = 0; - HistoricBlockData historic_block_data{}; + BlockHeader block_header{}; ContractDeploymentData contract_deployment_data{}; @@ -69,7 +69,7 @@ template class PrivateCircuitPublicInputs { unencrypted_logs_hash, encrypted_log_preimages_length, unencrypted_log_preimages_length, - historic_block_data, + block_header, contract_deployment_data, chain_id, version); @@ -85,9 +85,8 @@ template class PrivateCircuitPublicInputs { unencrypted_logs_hash == other.unencrypted_logs_hash && encrypted_log_preimages_length == other.encrypted_log_preimages_length && unencrypted_log_preimages_length == other.unencrypted_log_preimages_length && - historic_block_data == other.historic_block_data && - contract_deployment_data == other.contract_deployment_data && chain_id == other.chain_id && - version == other.version; + block_header == other.block_header && contract_deployment_data == other.contract_deployment_data && + chain_id == other.chain_id && version == other.version; }; template @@ -122,7 +121,7 @@ template class PrivateCircuitPublicInputs { to_ct(encrypted_log_preimages_length), to_ct(unencrypted_log_preimages_length), - to_circuit_type(historic_block_data), + to_circuit_type(block_header), to_circuit_type(contract_deployment_data), @@ -162,7 +161,7 @@ template class 
PrivateCircuitPublicInputs { to_nt(encrypted_log_preimages_length), to_nt(unencrypted_log_preimages_length), - to_native_type(historic_block_data), + to_native_type(block_header), to_native_type(contract_deployment_data), @@ -201,7 +200,7 @@ template class PrivateCircuitPublicInputs { inputs.push_back(encrypted_log_preimages_length); inputs.push_back(unencrypted_log_preimages_length); - spread_arr_into_vec(historic_block_data.to_array(), inputs); + spread_arr_into_vec(block_header.to_array(), inputs); inputs.push_back(contract_deployment_data.hash()); @@ -252,7 +251,7 @@ template class OptionalPrivateCircuitPublicInputs { opt_fr encrypted_log_preimages_length; opt_fr unencrypted_log_preimages_length; - std::optional> historic_block_data; + std::optional> block_header; std::optional> contract_deployment_data; @@ -275,7 +274,7 @@ template class OptionalPrivateCircuitPublicInputs { unencrypted_logs_hash, encrypted_log_preimages_length, unencrypted_log_preimages_length, - historic_block_data, + block_header, contract_deployment_data, chain_id, version); @@ -305,7 +304,7 @@ template class OptionalPrivateCircuitPublicInputs { opt_fr const& encrypted_log_preimages_length, opt_fr const& unencrypted_log_preimages_length, - std::optional> const& historic_block_data, + std::optional> const& block_header, std::optional> const& contract_deployment_data, @@ -326,7 +325,7 @@ template class OptionalPrivateCircuitPublicInputs { , unencrypted_logs_hash(unencrypted_logs_hash) , encrypted_log_preimages_length(encrypted_log_preimages_length) , unencrypted_log_preimages_length(unencrypted_log_preimages_length) - , historic_block_data(historic_block_data) + , block_header(block_header) , contract_deployment_data(contract_deployment_data) , chain_id(chain_id) , version(version){}; @@ -359,7 +358,7 @@ template class OptionalPrivateCircuitPublicInputs { new_inputs.encrypted_log_preimages_length = std::nullopt; new_inputs.unencrypted_log_preimages_length = std::nullopt; - 
new_inputs.historic_block_data = std::nullopt; + new_inputs.block_header = std::nullopt; new_inputs.contract_deployment_data = std::nullopt; @@ -425,7 +424,7 @@ template class OptionalPrivateCircuitPublicInputs { make_unused_element_zero(builder, encrypted_log_preimages_length); make_unused_element_zero(builder, unencrypted_log_preimages_length); - make_unused_element_zero(builder, historic_block_data); + make_unused_element_zero(builder, block_header); make_unused_element_zero(builder, contract_deployment_data); @@ -465,7 +464,7 @@ template class OptionalPrivateCircuitPublicInputs { (*encrypted_log_preimages_length).set_public(); (*unencrypted_log_preimages_length).set_public(); - (*historic_block_data).set_public(); + (*block_header).set_public(); (*contract_deployment_data).set_public(); @@ -507,7 +506,7 @@ template class OptionalPrivateCircuitPublicInputs { to_ct(encrypted_log_preimages_length), to_ct(unencrypted_log_preimages_length), - to_circuit_type(historic_block_data), + to_circuit_type(block_header), to_circuit_type(contract_deployment_data), @@ -549,7 +548,7 @@ template class OptionalPrivateCircuitPublicInputs { to_nt(encrypted_log_preimages_length), to_nt(unencrypted_log_preimages_length), - to_native_type(historic_block_data), + to_native_type(block_header), to_native_type(contract_deployment_data), @@ -592,7 +591,7 @@ template class OptionalPrivateCircuitPublicInputs { inputs.push_back(*encrypted_log_preimages_length); inputs.push_back(*unencrypted_log_preimages_length); - spread_arr_opt_into_vec((*historic_block_data).to_array(), inputs); + spread_arr_opt_into_vec((*block_header).to_array(), inputs); inputs.push_back((*contract_deployment_data).hash()); @@ -630,7 +629,7 @@ template class OptionalPrivateCircuitPublicInputs { .encrypted_log_preimages_length = encrypted_log_preimages_length.value(), .unencrypted_log_preimages_length = unencrypted_log_preimages_length.value(), - .historic_block_data = historic_block_data.value(), + .block_header = 
block_header.value(), .contract_deployment_data = contract_deployment_data.value(), diff --git a/circuits/cpp/src/aztec3/circuits/abis/public_circuit_public_inputs.hpp b/circuits/cpp/src/aztec3/circuits/abis/public_circuit_public_inputs.hpp index 75d2b804020..6548b93a28d 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/public_circuit_public_inputs.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/public_circuit_public_inputs.hpp @@ -5,7 +5,7 @@ #include "contract_storage_update_request.hpp" #include "../../constants.hpp" -#include "aztec3/circuits/abis/historic_block_data.hpp" +#include "aztec3/circuits/abis/block_header.hpp" #include "aztec3/utils/types/circuit_types.hpp" #include "aztec3/utils/types/native_types.hpp" @@ -42,7 +42,7 @@ template struct PublicCircuitPublicInputs { // variable-length data. fr unencrypted_log_preimages_length = 0; - HistoricBlockData historic_block_data{}; + BlockHeader block_header{}; address prover_address{}; @@ -58,7 +58,7 @@ template struct PublicCircuitPublicInputs { new_l2_to_l1_msgs, unencrypted_logs_hash, unencrypted_log_preimages_length, - historic_block_data, + block_header, prover_address); boolean operator==(PublicCircuitPublicInputs const& other) const @@ -91,7 +91,7 @@ template struct PublicCircuitPublicInputs { .unencrypted_logs_hash = to_ct(unencrypted_logs_hash), .unencrypted_log_preimages_length = to_ct(unencrypted_log_preimages_length), - .historic_block_data = to_ct(historic_block_data), + .block_header = to_ct(block_header), .prover_address = to_ct(prover_address), }; @@ -121,7 +121,7 @@ template struct PublicCircuitPublicInputs { spread_arr_into_vec(unencrypted_logs_hash, inputs); inputs.push_back(unencrypted_log_preimages_length); - spread_arr_into_vec(historic_block_data.to_array(), inputs); + spread_arr_into_vec(block_header.to_array(), inputs); inputs.push_back(prover_address); if (inputs.size() != PUBLIC_CIRCUIT_PUBLIC_INPUTS_HASH_INPUT_LENGTH) { diff --git 
a/circuits/cpp/src/aztec3/circuits/abis/rollup/base/base_rollup_inputs.hpp b/circuits/cpp/src/aztec3/circuits/abis/rollup/base/base_rollup_inputs.hpp index 5b0d5e2ac94..380b1a2eec4 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/rollup/base/base_rollup_inputs.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/rollup/base/base_rollup_inputs.hpp @@ -21,7 +21,7 @@ template struct BaseRollupInputs { AppendOnlyTreeSnapshot start_nullifier_tree_snapshot{}; AppendOnlyTreeSnapshot start_contract_tree_snapshot{}; fr start_public_data_tree_root{}; - AppendOnlyTreeSnapshot start_historic_blocks_tree_snapshot{}; + AppendOnlyTreeSnapshot start_blocks_tree_snapshot{}; std::array, MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP> low_nullifier_leaf_preimages{}; std::array, MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP> @@ -37,8 +37,8 @@ template struct BaseRollupInputs { std::array, MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP> new_public_data_reads_sibling_paths{}; - std::array, KERNELS_PER_BASE_ROLLUP> - historic_blocks_tree_root_membership_witnesses{}; + std::array, KERNELS_PER_BASE_ROLLUP> + blocks_tree_root_membership_witnesses{}; ConstantRollupData constants{}; @@ -48,7 +48,7 @@ template struct BaseRollupInputs { start_nullifier_tree_snapshot, start_contract_tree_snapshot, start_public_data_tree_root, - start_historic_blocks_tree_snapshot, + start_blocks_tree_snapshot, low_nullifier_leaf_preimages, low_nullifier_membership_witness, new_commitments_subtree_sibling_path, @@ -56,7 +56,7 @@ template struct BaseRollupInputs { new_contracts_subtree_sibling_path, new_public_data_update_requests_sibling_paths, new_public_data_reads_sibling_paths, - historic_blocks_tree_root_membership_witnesses, + blocks_tree_root_membership_witnesses, constants); boolean operator==(BaseRollupInputs const& other) const diff --git a/circuits/cpp/src/aztec3/circuits/abis/rollup/constant_rollup_data.hpp b/circuits/cpp/src/aztec3/circuits/abis/rollup/constant_rollup_data.hpp index d94788674b4..a1cb37b6a66 100644 --- 
a/circuits/cpp/src/aztec3/circuits/abis/rollup/constant_rollup_data.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/rollup/constant_rollup_data.hpp @@ -11,7 +11,7 @@ template struct ConstantRollupData { using fr = typename NCT::fr; // The very latest roots as at the very beginning of the entire rollup: - AppendOnlyTreeSnapshot start_historic_blocks_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot start_blocks_tree_snapshot{}; // Some members of this struct tbd: fr private_kernel_vk_tree_root = 0; @@ -21,7 +21,7 @@ template struct ConstantRollupData { GlobalVariables global_variables{}; - MSGPACK_FIELDS(start_historic_blocks_tree_roots_snapshot, + MSGPACK_FIELDS(start_blocks_tree_snapshot, private_kernel_vk_tree_root, public_kernel_vk_tree_root, base_rollup_vk_hash, diff --git a/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_inputs.hpp b/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_inputs.hpp index 372390a8c05..c765c9d09ad 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_inputs.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_inputs.hpp @@ -25,16 +25,16 @@ template struct RootRollupInputs { AppendOnlyTreeSnapshot start_l1_to_l2_messages_tree_snapshot{}; // inputs required to add the block hash - AppendOnlyTreeSnapshot start_historic_blocks_tree_snapshot{}; - std::array new_historic_blocks_tree_sibling_path{}; + AppendOnlyTreeSnapshot start_blocks_tree_snapshot{}; + std::array new_blocks_tree_sibling_path{}; // For serialization, update with new fields MSGPACK_FIELDS(previous_rollup_data, new_l1_to_l2_messages, new_l1_to_l2_messages_tree_root_sibling_path, start_l1_to_l2_messages_tree_snapshot, - start_historic_blocks_tree_snapshot, - new_historic_blocks_tree_sibling_path); + start_blocks_tree_snapshot, + new_blocks_tree_sibling_path); bool operator==(RootRollupInputs const&) const = default; }; diff --git a/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_public_inputs.hpp 
b/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_public_inputs.hpp index 51879fd9566..74aff1ba5e8 100644 --- a/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_public_inputs.hpp +++ b/circuits/cpp/src/aztec3/circuits/abis/rollup/root/root_rollup_public_inputs.hpp @@ -31,20 +31,20 @@ template struct RootRollupPublicInputs { fr start_public_data_tree_root{}; fr end_public_data_tree_root{}; - AppendOnlyTreeSnapshot start_tree_of_historic_note_hash_tree_roots_snapshot{}; - AppendOnlyTreeSnapshot end_tree_of_historic_note_hash_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot start_tree_of_historical_note_hash_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot end_tree_of_historical_note_hash_tree_roots_snapshot{}; - AppendOnlyTreeSnapshot start_tree_of_historic_contract_tree_roots_snapshot{}; - AppendOnlyTreeSnapshot end_tree_of_historic_contract_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot start_tree_of_historical_contract_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot end_tree_of_historical_contract_tree_roots_snapshot{}; AppendOnlyTreeSnapshot start_l1_to_l2_messages_tree_snapshot{}; AppendOnlyTreeSnapshot end_l1_to_l2_messages_tree_snapshot{}; - AppendOnlyTreeSnapshot start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot{}; - AppendOnlyTreeSnapshot end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot{}; + AppendOnlyTreeSnapshot end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot{}; - AppendOnlyTreeSnapshot start_historic_blocks_tree_snapshot{}; - AppendOnlyTreeSnapshot end_historic_blocks_tree_snapshot{}; + AppendOnlyTreeSnapshot start_blocks_tree_snapshot{}; + AppendOnlyTreeSnapshot end_blocks_tree_snapshot{}; std::array calldata_hash{}; std::array l1_to_l2_messages_hash{}; @@ -60,16 +60,16 @@ template struct RootRollupPublicInputs { end_contract_tree_snapshot, start_public_data_tree_root, end_public_data_tree_root, - 
start_tree_of_historic_note_hash_tree_roots_snapshot, - end_tree_of_historic_note_hash_tree_roots_snapshot, - start_tree_of_historic_contract_tree_roots_snapshot, - end_tree_of_historic_contract_tree_roots_snapshot, + start_tree_of_historical_note_hash_tree_roots_snapshot, + end_tree_of_historical_note_hash_tree_roots_snapshot, + start_tree_of_historical_contract_tree_roots_snapshot, + end_tree_of_historical_contract_tree_roots_snapshot, start_l1_to_l2_messages_tree_snapshot, end_l1_to_l2_messages_tree_snapshot, - start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot, - end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot, - start_historic_blocks_tree_snapshot, - end_historic_blocks_tree_snapshot, + start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot, + end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot, + start_blocks_tree_snapshot, + end_blocks_tree_snapshot, calldata_hash, l1_to_l2_messages_hash); @@ -83,21 +83,21 @@ template struct RootRollupPublicInputs { write(buf, start_note_hash_tree_snapshot); write(buf, start_nullifier_tree_snapshot); write(buf, start_contract_tree_snapshot); - write(buf, start_tree_of_historic_note_hash_tree_roots_snapshot); - write(buf, start_tree_of_historic_contract_tree_roots_snapshot); + write(buf, start_tree_of_historical_note_hash_tree_roots_snapshot); + write(buf, start_tree_of_historical_contract_tree_roots_snapshot); write(buf, start_public_data_tree_root); write(buf, start_l1_to_l2_messages_tree_snapshot); - write(buf, start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot); - write(buf, start_historic_blocks_tree_snapshot); + write(buf, start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot); + write(buf, start_blocks_tree_snapshot); write(buf, end_note_hash_tree_snapshot); write(buf, end_nullifier_tree_snapshot); write(buf, end_contract_tree_snapshot); - write(buf, end_tree_of_historic_note_hash_tree_roots_snapshot); - write(buf, 
end_tree_of_historic_contract_tree_roots_snapshot); + write(buf, end_tree_of_historical_note_hash_tree_roots_snapshot); + write(buf, end_tree_of_historical_contract_tree_roots_snapshot); write(buf, end_public_data_tree_root); write(buf, end_l1_to_l2_messages_tree_snapshot); - write(buf, end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot); - write(buf, end_historic_blocks_tree_snapshot); + write(buf, end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot); + write(buf, end_blocks_tree_snapshot); // Stitching calldata hash together auto high_buffer = calldata_hash[0].to_buffer(); diff --git a/circuits/cpp/src/aztec3/circuits/apps/opcodes/opcodes.tpp b/circuits/cpp/src/aztec3/circuits/apps/opcodes/opcodes.tpp index 6708e266c7d..25c8c81a860 100644 --- a/circuits/cpp/src/aztec3/circuits/apps/opcodes/opcodes.tpp +++ b/circuits/cpp/src/aztec3/circuits/apps/opcodes/opcodes.tpp @@ -54,7 +54,7 @@ Note Opcodes::UTXO_SLOAD(UTXOStateVar* utxo_state_var, // TODO within this function: // - Merkle Membership Check using the contract_address, utxo_datum.{sibling_path, leaf_index, - // historic_note_hash_tree_root} + // historical_note_hash_tree_root} return new_note; }; @@ -95,7 +95,7 @@ std::vector Opcodes::UTXO_SLOAD(UTXOSetStateVar* u // TODO within this function: // - Merkle Membership Check using the contract_address, utxo_datum.{sibling_path, leaf_index, - // historic_note_hash_tree_root} + // historical_note_hash_tree_root} new_notes.push_back(new_note); } diff --git a/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/transfer.cpp b/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/transfer.cpp index 5b6d5a6afdb..08809a8e4e2 100644 --- a/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/transfer.cpp +++ b/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/transfer.cpp @@ -93,7 +93,7 @@ OptionalPrivateCircuitPublicInputs transfer(FunctionExecutionContext& exec_c { amount, to.to_field(), asset_id, memo, CT::fr(reveal_msg_sender_to_recipient), 
fee }); /// TODO: merkle membership check - // public_inputs.historic_note_hash_tree_root + // public_inputs.historical_note_hash_tree_root exec_ctx.finalize(); diff --git a/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/withdraw.cpp b/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/withdraw.cpp index 5effa070436..d61fcceed27 100644 --- a/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/withdraw.cpp +++ b/circuits/cpp/src/aztec3/circuits/apps/test_apps/escrow/withdraw.cpp @@ -86,7 +86,7 @@ OptionalPrivateCircuitPublicInputs withdraw(FunctionExecutionContext& exec_c exec_ctx.finalize(); /// TODO: merkle membership check - // public_inputs.historic_note_hash_tree_root + // public_inputs.historical_note_hash_tree_root // info("public inputs: ", public_inputs); diff --git a/circuits/cpp/src/aztec3/circuits/apps/utxo_datum.hpp b/circuits/cpp/src/aztec3/circuits/apps/utxo_datum.hpp index 360dbeb2ecb..c13bd18527b 100644 --- a/circuits/cpp/src/aztec3/circuits/apps/utxo_datum.hpp +++ b/circuits/cpp/src/aztec3/circuits/apps/utxo_datum.hpp @@ -26,7 +26,7 @@ template struct UTXOSLoadDatum { std::vector sibling_path; uint32 leaf_index; - fr historic_note_hash_tree_root = 0; + fr historical_note_hash_tree_root = 0; template auto to_circuit_type(Builder& builder) const { @@ -39,7 +39,7 @@ template struct UTXOSLoadDatum { UTXOSLoadDatum, decltype(preimage_ct)> datum = { to_ct(commitment), to_ct(contract_address), preimage_ct, - to_ct(sibling_path), to_ct(leaf_index), to_ct(historic_note_hash_tree_root), + to_ct(sibling_path), to_ct(leaf_index), to_ct(historical_note_hash_tree_root), }; return datum; diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/common.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/common.cpp index c5b993a340c..f0e23c9e69d 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/common.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/common.cpp @@ -53,8 +53,8 @@ void common_validate_call_stack(DummyBuilder& 
builder, PrivateCallData const } /** - * @brief Validate all read requests against the historic note hash tree root. - * Use their membership witnesses to do so. If the historic root is not yet + * @brief Validate all read requests against the historical note hash tree root. + * Use their membership witnesses to do so. If the historical root is not yet * initialized, initialize it using the first read request here (if present). * * @details More info here: @@ -62,7 +62,7 @@ void common_validate_call_stack(DummyBuilder& builder, PrivateCallData const * - https://discourse.aztec.network/t/spending-notes-which-havent-yet-been-inserted/180 * * @param builder - * @param historic_note_hash_tree_root This is a reference to the historic root which all + * @param historical_note_hash_tree_root This is a reference to the historical root which all * read requests are checked against here. * @param read_requests the commitments being read by this private call - 'transient note reads' here are * `inner_note_hashes` (not yet siloed, not unique), but 'pre-existing note reads' are `unique_siloed_note_hashes` @@ -70,7 +70,7 @@ void common_validate_call_stack(DummyBuilder& builder, PrivateCallData const * for a given request which is essentially a membership check */ void common_validate_read_requests(DummyBuilder& builder, - NT::fr const& historic_note_hash_tree_root, + NT::fr const& historical_note_hash_tree_root, std::array const& read_requests, std::array, MAX_READ_REQUESTS_PER_CALL> const& read_request_membership_witnesses) @@ -92,12 +92,12 @@ void common_validate_read_requests(DummyBuilder& builder, const auto& root_for_read_request = root_from_sibling_path(read_request, witness.leaf_index, witness.sibling_path); builder.do_assert( - root_for_read_request == historic_note_hash_tree_root, + root_for_read_request == historical_note_hash_tree_root, format("note hash tree root mismatch at read_request[", rr_idx, "]", "\n\texpected root: ", - historic_note_hash_tree_root, + 
historical_note_hash_tree_root, "\n\tbut got root*: ", root_for_read_request, "\n\tread_request**: ", @@ -429,7 +429,7 @@ void common_contract_logic(DummyBuilder& builder, private_call.contract_leaf_membership_witness.sibling_path); auto const& purported_contract_tree_root = - private_call.call_stack_item.public_inputs.historic_block_data.contract_tree_root; + private_call.call_stack_item.public_inputs.block_header.contract_tree_root; builder.do_assert( computed_contract_tree_root == purported_contract_tree_root, diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/common.hpp b/circuits/cpp/src/aztec3/circuits/kernel/private/common.hpp index ee0e5c350fb..2e1191a252f 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/common.hpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/common.hpp @@ -31,7 +31,7 @@ using DummyBuilder = aztec3::utils::DummyCircuitBuilder; void common_validate_call_stack(DummyBuilder& builder, PrivateCallData const& private_call); void common_validate_read_requests(DummyBuilder& builder, - NT::fr const& historic_note_hash_tree_root, + NT::fr const& historical_note_hash_tree_root, std::array const& read_requests, std::array, MAX_READ_REQUESTS_PER_CALL> const& read_request_membership_witnesses); diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.cpp index e5c70bbd174..1d8fc42f8bc 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.cpp @@ -1,8 +1,8 @@ #include "common.hpp" #include "init.hpp" +#include "aztec3/circuits/abis/block_header.hpp" #include "aztec3/circuits/abis/combined_constant_data.hpp" -#include "aztec3/circuits/abis/historic_block_data.hpp" #include "aztec3/circuits/abis/private_kernel/private_kernel_inputs_init.hpp" #include 
"aztec3/constants.hpp" #include "aztec3/utils/array.hpp" @@ -26,7 +26,7 @@ void initialize_end_values(PrivateKernelInputsInit const& private_inputs, // Define the constants data. auto const& private_call_public_inputs = private_inputs.private_call.call_stack_item.public_inputs; auto const constants = CombinedConstantData{ - .block_data = private_call_public_inputs.historic_block_data, + .block_header = private_call_public_inputs.block_header, .tx_context = private_inputs.tx_request.tx_context, }; @@ -167,7 +167,7 @@ KernelCircuitPublicInputs native_private_kernel_circuit_initial(DummyCircuit common_validate_read_requests( builder, - public_inputs.constants.block_data.note_hash_tree_root, + public_inputs.constants.block_header.note_hash_tree_root, private_inputs.private_call.call_stack_item.public_inputs.read_requests, // read requests from private call private_inputs.private_call.read_request_membership_witnesses); diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.test.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.test.cpp index f46023cae17..f502436d603 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.test.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_init.test.cpp @@ -433,7 +433,7 @@ TEST_F(native_private_kernel_init_tests, native_read_request_bad_request) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak read_request so it gives wrong root when paired with its sibling path read_requests[1] += 1; @@ -467,7 +467,7 @@ TEST_F(native_private_kernel_init_tests, native_read_request_bad_leaf_index) 
_transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak leaf index so it gives wrong root when paired with its request and sibling path read_request_membership_witnesses[1].leaf_index += 1; @@ -500,7 +500,7 @@ TEST_F(native_private_kernel_init_tests, native_read_request_bad_sibling_path) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak sibling path so it gives wrong root when paired with its request read_request_membership_witnesses[1].sibling_path[1] += 1; @@ -534,7 +534,7 @@ TEST_F(native_private_kernel_init_tests, native_read_request_root_mismatch) _transient_read_requests0, _transient_read_request_membership_witnesses0, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; auto [read_requests1, read_request_membership_witnesses1, _transient_read_requests1, @@ -613,7 +613,7 @@ TEST_F(native_private_kernel_init_tests, native_one_read_requests_works) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 1); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + 
private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; @@ -650,7 +650,7 @@ TEST_F(native_private_kernel_init_tests, native_two_read_requests_works) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; @@ -687,7 +687,7 @@ TEST_F(native_private_kernel_init_tests, native_max_read_requests_works) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; @@ -728,7 +728,7 @@ TEST_F(native_private_kernel_init_tests, native_one_transient_read_requests_work transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 1); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root 
= root; // Make the read request transient read_requests[0] = transient_read_requests[0]; @@ -766,7 +766,7 @@ TEST_F(native_private_kernel_init_tests, native_max_read_requests_one_transient_ transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; // Make the read request at position 1 transient @@ -804,7 +804,7 @@ TEST_F(native_private_kernel_init_tests, native_max_read_requests_all_transient_ transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = transient_read_requests; private_inputs.private_call.read_request_membership_witnesses = transient_read_request_membership_witnesses; diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.cpp index 371bcbf81dd..d98423e8322 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.cpp @@ -58,9 +58,9 @@ void pop_and_validate_this_private_call_hash( void validate_contract_tree_root(DummyCircuitBuilder& builder, PrivateKernelInputsInner const& private_inputs) { auto const& 
purported_contract_tree_root = - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.contract_tree_root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.contract_tree_root; auto const& previous_kernel_contract_tree_root = - private_inputs.previous_kernel.public_inputs.constants.block_data.contract_tree_root; + private_inputs.previous_kernel.public_inputs.constants.block_header.contract_tree_root; builder.do_assert( purported_contract_tree_root == previous_kernel_contract_tree_root, "purported_contract_tree_root doesn't match previous_kernel_contract_tree_root", @@ -114,7 +114,7 @@ KernelCircuitPublicInputs native_private_kernel_circuit_inner(DummyCircuitBu common_validate_read_requests( builder, - public_inputs.constants.block_data.note_hash_tree_root, + public_inputs.constants.block_header.note_hash_tree_root, private_inputs.private_call.call_stack_item.public_inputs.read_requests, // read requests from private call private_inputs.private_call.read_request_membership_witnesses); @@ -122,7 +122,7 @@ KernelCircuitPublicInputs native_private_kernel_circuit_inner(DummyCircuitBu // TODO(dbanks12): feels like update_end_values should happen later common_update_end_values(builder, private_inputs.private_call, public_inputs); - // ensure that historic/purported contract tree root matches the one in previous kernel + // ensure that historical/purported contract tree root matches the one in previous kernel validate_contract_tree_root(builder, private_inputs); const auto private_call_stack_item = private_inputs.private_call.call_stack_item; diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.test.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.test.cpp index 4c1d83856a0..d29c08eb590 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.test.cpp +++ 
b/circuits/cpp/src/aztec3/circuits/kernel/private/native_private_kernel_circuit_inner.test.cpp @@ -93,8 +93,8 @@ TEST_F(native_private_kernel_inner_tests, private_function_incorrect_contract_tr { auto private_inputs = do_private_call_get_kernel_inputs_inner(false, deposit, standard_test_args()); - // Set historic_tree_root to a random scalar. - private_inputs.previous_kernel.public_inputs.constants.block_data.contract_tree_root = NT::fr::random_element(); + // Set historical_tree_root to a random scalar. + private_inputs.previous_kernel.public_inputs.constants.block_header.contract_tree_root = NT::fr::random_element(); // Invoke the native private kernel circuit DummyBuilder builder = DummyBuilder("private_kernel_tests__private_function_incorrect_contract_tree_root_fails"); @@ -477,8 +477,8 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_request) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak read_request so it gives wrong root when paired with its sibling path read_requests[1] += 1; @@ -487,7 +487,7 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_request) private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -516,8 +516,8 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_leaf_index) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak leaf index so it gives wrong root when paired with its request and sibling path read_request_membership_witnesses[1].leaf_index += 1; @@ -525,7 +525,7 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_leaf_index) private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -554,8 +554,8 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_sibling_path) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // tweak sibling path so it gives wrong root when paired with its request read_request_membership_witnesses[1].sibling_path[1] += 1; @@ -563,7 +563,7 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_bad_sibling_path) private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -593,8 +593,8 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_root_mismatch) _transient_read_requests0, _transient_read_request_membership_witnesses0, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; auto [read_requests1, read_request_membership_witnesses1, _transient_read_requests1, @@ -615,7 +615,7 @@ TEST_F(native_private_kernel_inner_tests, native_read_request_root_mismatch) private_inputs.private_call.read_request_membership_witnesses = bad_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -679,8 +679,8 @@ TEST_F(native_private_kernel_inner_tests, native_one_read_requests_works) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 1); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; @@ -720,8 +720,8 @@ TEST_F(native_private_kernel_inner_tests, native_two_read_requests_works) _transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 2); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; @@ -761,13 +761,13 @@ TEST_F(native_private_kernel_inner_tests, native_max_read_requests_works) 
_transient_read_requests, _transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = read_requests; private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -803,7 +803,7 @@ TEST_F(native_private_kernel_inner_tests, native_one_transient_read_requests_wor transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, 1); - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // Make the read request transient read_requests[0] = transient_read_requests[0]; @@ -812,7 +812,7 @@ TEST_F(native_private_kernel_inner_tests, native_one_transient_read_requests_wor private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the 
previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -847,8 +847,8 @@ TEST_F(native_private_kernel_inner_tests, native_max_read_requests_one_transient transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; // Make the read request at position 1 transient read_requests[1] = transient_read_requests[1]; @@ -857,7 +857,7 @@ TEST_F(native_private_kernel_inner_tests, native_max_read_requests_one_transient private_inputs.private_call.read_request_membership_witnesses = read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); @@ -894,13 +894,13 @@ TEST_F(native_private_kernel_inner_tests, native_max_read_requests_all_transient transient_read_requests, transient_read_request_membership_witnesses, root] = get_random_reads(first_nullifier, contract_address, MAX_READ_REQUESTS_PER_CALL); - private_inputs.previous_kernel.public_inputs.constants.block_data.note_hash_tree_root = root; - private_inputs.private_call.call_stack_item.public_inputs.historic_block_data.note_hash_tree_root = root; + private_inputs.previous_kernel.public_inputs.constants.block_header.note_hash_tree_root = root; + private_inputs.private_call.call_stack_item.public_inputs.block_header.note_hash_tree_root = root; private_inputs.private_call.call_stack_item.public_inputs.read_requests = transient_read_requests; private_inputs.private_call.read_request_membership_witnesses = transient_read_request_membership_witnesses; // We need to update the previous_kernel's private_call_stack because the current_call_stack_item has changed - // i.e. we changed the public_inputs->read_requests and public_inputs->historic_note_hash_tree_root of the + // i.e. 
we changed the public_inputs->read_requests and public_inputs->historical_note_hash_tree_root of the // current_call_stack_item private_inputs.previous_kernel.public_inputs.end.private_call_stack[0] = private_inputs.private_call.call_stack_item.hash(); diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.cpp b/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.cpp index 33d0b4cb792..f068662a3e5 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.cpp @@ -3,6 +3,7 @@ #include "index.hpp" #include "init.hpp" +#include "aztec3/circuits/abis/block_header.hpp" #include "aztec3/circuits/abis/call_context.hpp" #include "aztec3/circuits/abis/call_stack_item.hpp" #include "aztec3/circuits/abis/combined_accumulated_data.hpp" @@ -10,7 +11,6 @@ #include "aztec3/circuits/abis/complete_address.hpp" #include "aztec3/circuits/abis/contract_deployment_data.hpp" #include "aztec3/circuits/abis/function_data.hpp" -#include "aztec3/circuits/abis/historic_block_data.hpp" #include "aztec3/circuits/abis/private_circuit_public_inputs.hpp" #include "aztec3/circuits/abis/private_kernel/private_call_data.hpp" #include "aztec3/circuits/abis/tx_context.hpp" @@ -28,13 +28,13 @@ namespace aztec3::circuits::kernel::private_kernel::testing_harness { +using aztec3::circuits::abis::BlockHeader; using aztec3::circuits::abis::CallContext; using aztec3::circuits::abis::CallStackItem; using aztec3::circuits::abis::CombinedAccumulatedData; using aztec3::circuits::abis::CombinedConstantData; using aztec3::circuits::abis::ContractDeploymentData; using aztec3::circuits::abis::FunctionData; -using aztec3::circuits::abis::HistoricBlockData; using aztec3::circuits::abis::PrivateCircuitPublicInputs; using aztec3::circuits::abis::PrivateTypes; using aztec3::circuits::abis::TxContext; @@ -52,7 +52,7 @@ using aztec3::utils::array_length; * @param first_nullifier used when computing 
nonce for unique_siloed_commitments (note hash tree leaves) * @param contract_address address to use when siloing read requests * @param num_read_requests if negative, use random num. Must be < MAX_READ_REQUESTS_PER_CALL - * @return std::tuple + * @return std::tuple */ std::tuple, std::array, MAX_READ_REQUESTS_PER_CALL>, @@ -285,7 +285,7 @@ std::pair, ContractDeploymentData> create_private_call_d OptionalPrivateCircuitPublicInputs const opt_private_circuit_public_inputs = func(ctx, args_vec); private_circuit_public_inputs = opt_private_circuit_public_inputs.remove_optionality(); // TODO(suyash): this should likely be handled as part of the DB/Oracle/Context infrastructure - private_circuit_public_inputs.historic_block_data.contract_tree_root = contract_tree_root; + private_circuit_public_inputs.block_header.contract_tree_root = contract_tree_root; private_circuit_public_inputs.encrypted_logs_hash = encrypted_logs_hash; private_circuit_public_inputs.unencrypted_logs_hash = unencrypted_logs_hash; @@ -305,7 +305,7 @@ std::pair, ContractDeploymentData> create_private_call_d .unencrypted_logs_hash = unencrypted_logs_hash, .encrypted_log_preimages_length = encrypted_log_preimages_length, .unencrypted_log_preimages_length = unencrypted_log_preimages_length, - .historic_block_data = HistoricBlockData{ .contract_tree_root = contract_tree_root }, + .block_header = BlockHeader{ .contract_tree_root = contract_tree_root }, .contract_deployment_data = contract_deployment_data, }; } @@ -489,10 +489,10 @@ PrivateKernelInputsInner do_private_call_get_kernel_inputs_inner( // Fill in some important fields in public inputs mock_previous_kernel.public_inputs.end.private_call_stack = initial_kernel_private_call_stack; mock_previous_kernel.public_inputs.constants = CombinedConstantData{ - .block_data = - HistoricBlockData{ - .note_hash_tree_root = private_circuit_public_inputs.historic_block_data.note_hash_tree_root, - .contract_tree_root = 
private_circuit_public_inputs.historic_block_data.contract_tree_root, + .block_header = + BlockHeader{ + .note_hash_tree_root = private_circuit_public_inputs.block_header.note_hash_tree_root, + .contract_tree_root = private_circuit_public_inputs.block_header.contract_tree_root, }, .tx_context = tx_context, }; diff --git a/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.hpp b/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.hpp index 0da5106b240..8ed96456998 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.hpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/private/testing_harness.hpp @@ -89,7 +89,7 @@ inline const auto& get_empty_contract_siblings() * read_request_memberships_witnesses, * transient_read_requests, * transient_read_request_memberships_witnesses, - * historic_note_hash_tree_root> + * historical_note_hash_tree_root> */ std::tuple, std::array, MAX_READ_REQUESTS_PER_CALL>, diff --git a/circuits/cpp/src/aztec3/circuits/kernel/public/.test.cpp b/circuits/cpp/src/aztec3/circuits/kernel/public/.test.cpp index e136a00d265..0e9c5394aa8 100644 --- a/circuits/cpp/src/aztec3/circuits/kernel/public/.test.cpp +++ b/circuits/cpp/src/aztec3/circuits/kernel/public/.test.cpp @@ -26,13 +26,13 @@ namespace aztec3::circuits::kernel::public_kernel { using DummyCircuitBuilder = aztec3::utils::DummyCircuitBuilder; using aztec3::circuits::abis::public_kernel::PublicKernelInputs; using NT = aztec3::utils::types::NativeTypes; +using aztec3::circuits::abis::BlockHeader; using aztec3::circuits::abis::CallContext; using aztec3::circuits::abis::CallStackItem; using aztec3::circuits::abis::CombinedAccumulatedData; using aztec3::circuits::abis::CombinedConstantData; using aztec3::circuits::abis::ContractStorageRead; using aztec3::circuits::abis::ContractStorageUpdateRequest; -using aztec3::circuits::abis::HistoricBlockData; using aztec3::circuits::abis::NewContractData; using aztec3::circuits::abis::OptionallyRevealedData; 
using aztec3::circuits::abis::PreviousKernelData; @@ -328,7 +328,7 @@ PublicKernelInputs get_kernel_inputs_with_previous_kernel(NT::boolean privat std::array const unencrypted_logs_hash = array_of_values(seed, NUM_FIELDS_PER_SHA256); fr const unencrypted_log_preimages_length = ++seed; - HistoricBlockData block_data = { + BlockHeader block_header = { .note_hash_tree_root = ++seed, .nullifier_tree_root = ++seed, .contract_tree_root = ++seed, @@ -352,7 +352,7 @@ PublicKernelInputs get_kernel_inputs_with_previous_kernel(NT::boolean privat .new_l2_to_l1_msgs = new_l2_to_l1_msgs, .unencrypted_logs_hash = unencrypted_logs_hash, .unencrypted_log_preimages_length = unencrypted_log_preimages_length, - .historic_block_data = block_data, + .block_header = block_header, }; const PublicCallStackItem call_stack_item{ @@ -369,18 +369,18 @@ PublicKernelInputs get_kernel_inputs_with_previous_kernel(NT::boolean privat }; // TODO(914) Should this be unused? - [[maybe_unused]] HistoricBlockData const historic_tree_roots = { + [[maybe_unused]] BlockHeader const historical_tree_roots = { .note_hash_tree_root = 1000, .contract_tree_root = 2000, .l1_to_l2_messages_tree_root = 3000, .private_kernel_vk_tree_root = 4000, }; - CombinedConstantData const end_constants = { .block_data = - HistoricBlockData{ .note_hash_tree_root = ++seed, - .nullifier_tree_root = ++seed, - .contract_tree_root = ++seed, - .private_kernel_vk_tree_root = ++seed }, + CombinedConstantData const end_constants = { .block_header = + BlockHeader{ .note_hash_tree_root = ++seed, + .nullifier_tree_root = ++seed, + .contract_tree_root = ++seed, + .private_kernel_vk_tree_root = ++seed }, .tx_context = TxContext{ .is_fee_payment_tx = false, .is_rebate_payment_tx = false, diff --git a/circuits/cpp/src/aztec3/circuits/rollup/base/.test.cpp b/circuits/cpp/src/aztec3/circuits/rollup/base/.test.cpp index a5109cabf3b..652e6b0ca52 100644 --- a/circuits/cpp/src/aztec3/circuits/rollup/base/.test.cpp +++ 
b/circuits/cpp/src/aztec3/circuits/rollup/base/.test.cpp @@ -628,26 +628,26 @@ TEST_F(base_rollup_tests, native_calldata_hash) // run_cbind(inputs, outputs); } -TEST_F(base_rollup_tests, native_compute_membership_historic_blocks_tree_negative) +TEST_F(base_rollup_tests, native_compute_membership_blocks_tree_negative) { // WRITE a negative test that will fail the inclusion proof // Test membership works for empty trees DummyCircuitBuilder builder = - DummyCircuitBuilder("base_rollup_tests__native_compute_membership_historic_private_data_negative"); + DummyCircuitBuilder("base_rollup_tests__native_compute_membership_historical_private_data_negative"); std::array, 2> const kernel_data = { get_empty_kernel(), get_empty_kernel() }; BaseRollupInputs inputs = base_rollup_inputs_from_kernels(kernel_data); MemoryStore blocks_store; - auto blocks_tree = MerkleTree(blocks_store, HISTORIC_BLOCKS_TREE_HEIGHT); + auto blocks_tree = MerkleTree(blocks_store, BLOCKS_TREE_HEIGHT); - // Create an INCORRECT sibling path for the note hash tree root in the historic tree roots. + // Create an INCORRECT sibling path for the note hash tree root in the historical tree roots. 
auto hash_path = blocks_tree.get_sibling_path(0); - std::array sibling_path{}; - for (size_t i = 0; i < HISTORIC_BLOCKS_TREE_HEIGHT; ++i) { + std::array sibling_path{}; + for (size_t i = 0; i < BLOCKS_TREE_HEIGHT; ++i) { sibling_path[i] = hash_path[i] + 1; } - inputs.historic_blocks_tree_root_membership_witnesses[0] = { + inputs.blocks_tree_root_membership_witnesses[0] = { .leaf_index = 0, .sibling_path = sibling_path, }; @@ -658,8 +658,7 @@ TEST_F(base_rollup_tests, native_compute_membership_historic_blocks_tree_negativ ASSERT_TRUE(builder.failed()); ASSERT_EQ(builder.get_first_failure().message, "Membership check failed: base_rollup_circuit: historical root is in rollup constants but not in " - "historic block tree roots at kernel input 0 to this " - "base rollup circuit"); + "blocks tree at kernel input 0 to this base rollup circuit"); } diff --git a/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp b/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp index a75226964e5..1b2fa67ea7d 100644 --- a/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp +++ b/circuits/cpp/src/aztec3/circuits/rollup/base/native_base_rollup_circuit.cpp @@ -132,42 +132,41 @@ NT::fr calculate_commitments_subtree(DummyBuilder& builder, BaseRollupInputs con * @param constantBaseRollupData * @param baseRollupInputs */ -void perform_historical_blocks_tree_membership_checks(DummyBuilder& builder, BaseRollupInputs const& baseRollupInputs) +void perform_blocks_tree_membership_checks(DummyBuilder& builder, BaseRollupInputs const& baseRollupInputs) { - // For each of the historic_note_hash_tree_membership_checks, we need to do an inclusion proof + // For each of the historical_note_hash_tree_membership_checks, we need to do an inclusion proof // against the historical root provided in the rollup constants - auto historic_root = baseRollupInputs.constants.start_historic_blocks_tree_roots_snapshot.root; + auto historical_root = 
baseRollupInputs.constants.start_blocks_tree_snapshot.root; for (size_t i = 0; i < 2; i++) { // Rebuild the block hash - auto historic_block = baseRollupInputs.kernel_data[i].public_inputs.constants.block_data; + auto historical_block = baseRollupInputs.kernel_data[i].public_inputs.constants.block_header; - auto note_hash_tree_root = historic_block.note_hash_tree_root; - auto nullifier_tree_root = historic_block.nullifier_tree_root; - auto contract_tree_root = historic_block.contract_tree_root; - auto l1_to_l2_messages_tree_root = historic_block.l1_to_l2_messages_tree_root; - auto public_data_tree_root = historic_block.public_data_tree_root; + auto note_hash_tree_root = historical_block.note_hash_tree_root; + auto nullifier_tree_root = historical_block.nullifier_tree_root; + auto contract_tree_root = historical_block.contract_tree_root; + auto l1_to_l2_messages_tree_root = historical_block.l1_to_l2_messages_tree_root; + auto public_data_tree_root = historical_block.public_data_tree_root; - auto previous_block_hash = compute_block_hash(historic_block.global_variables_hash, + auto previous_block_hash = compute_block_hash(historical_block.global_variables_hash, note_hash_tree_root, nullifier_tree_root, contract_tree_root, l1_to_l2_messages_tree_root, public_data_tree_root); - abis::MembershipWitness const historic_root_witness = - baseRollupInputs.historic_blocks_tree_root_membership_witnesses[i]; - - check_membership( - builder, - previous_block_hash, - historic_root_witness.leaf_index, - historic_root_witness.sibling_path, - historic_root, - format(BASE_CIRCUIT_ERROR_MESSAGE_BEGINNING, - "historical root is in rollup constants but not in historic block tree roots at kernel input ", - i, - " to this base rollup circuit")); + abis::MembershipWitness const historical_root_witness = + baseRollupInputs.blocks_tree_root_membership_witnesses[i]; + + check_membership(builder, + previous_block_hash, + historical_root_witness.leaf_index, + 
historical_root_witness.sibling_path, + historical_root, + format(BASE_CIRCUIT_ERROR_MESSAGE_BEGINNING, + "historical root is in rollup constants but not in blocks tree at kernel input ", + i, + " to this base rollup circuit")); } } @@ -524,8 +523,8 @@ BaseOrMergeRollupPublicInputs base_rollup_circuit(DummyBuilder& builder, BaseRol std::array const calldata_hash = components::compute_kernels_calldata_hash(baseRollupInputs.kernel_data); - // Perform membership checks that the notes provided exist within the historic trees data - perform_historical_blocks_tree_membership_checks(builder, baseRollupInputs); + // Perform membership checks that the notes provided exist within the historical trees data + perform_blocks_tree_membership_checks(builder, baseRollupInputs); AggregationObject const aggregation_object = aggregate_proofs(baseRollupInputs); diff --git a/circuits/cpp/src/aztec3/circuits/rollup/root/.test.cpp b/circuits/cpp/src/aztec3/circuits/rollup/root/.test.cpp index b9c05fe4bca..ed6f6ca0c25 100644 --- a/circuits/cpp/src/aztec3/circuits/rollup/root/.test.cpp +++ b/circuits/cpp/src/aztec3/circuits/rollup/root/.test.cpp @@ -177,7 +177,7 @@ TEST_F(root_rollup_tests, native_root_missing_nullifier_logic) auto nullifier_tree = get_initial_nullifier_tree_empty(); MemoryStore blocks_tree_store; - MerkleTree blocks_tree(blocks_tree_store, HISTORIC_BLOCKS_TREE_HEIGHT); + MerkleTree blocks_tree(blocks_tree_store, BLOCKS_TREE_HEIGHT); std::array kernels = { get_empty_kernel(), get_empty_kernel(), get_empty_kernel(), get_empty_kernel() @@ -292,8 +292,8 @@ TEST_F(root_rollup_tests, native_root_missing_nullifier_logic) rootRollupInputs.previous_rollup_data[1].base_or_merge_rollup_public_inputs.end_contract_tree_snapshot); ASSERT_EQ(outputs.end_l1_to_l2_messages_tree_snapshot, end_l1_to_l2_messages_tree_snapshot); - ASSERT_EQ(outputs.start_historic_blocks_tree_snapshot, start_blocks_tree_snapshot); - ASSERT_EQ(outputs.end_historic_blocks_tree_snapshot, 
end_blocks_tree_snapshot); + ASSERT_EQ(outputs.start_blocks_tree_snapshot, start_blocks_tree_snapshot); + ASSERT_EQ(outputs.end_blocks_tree_snapshot, end_blocks_tree_snapshot); // Compute the expected calldata hash for the root rollup (including the l2 -> l1 messages) auto left = components::compute_kernels_calldata_hash({ kernels[0], kernels[1] }); diff --git a/circuits/cpp/src/aztec3/circuits/rollup/root/native_root_rollup_circuit.cpp b/circuits/cpp/src/aztec3/circuits/rollup/root/native_root_rollup_circuit.cpp index f2c5fd304a4..3c801da601f 100644 --- a/circuits/cpp/src/aztec3/circuits/rollup/root/native_root_rollup_circuit.cpp +++ b/circuits/cpp/src/aztec3/circuits/rollup/root/native_root_rollup_circuit.cpp @@ -107,7 +107,7 @@ RootRollupPublicInputs root_rollup_circuit(DummyBuilder& builder, RootRollupInpu "l1 to l2 message tree not empty at location where subtree would be inserted")); // Build the block hash for this iteration from the tree roots and global variables - // Then insert the block into the historic blocks tree + // Then insert the block into the blocks tree auto block_hash = compute_block_hash_with_globals(left.constants.global_variables, right.end_note_hash_tree_snapshot.root, right.end_nullifier_tree_snapshot.root, @@ -115,16 +115,16 @@ RootRollupPublicInputs root_rollup_circuit(DummyBuilder& builder, RootRollupInpu new_l1_to_l2_messages_tree_snapshot.root, right.end_public_data_tree_root); - // Update the historic blocks tree - auto end_historic_blocks_tree_snapshot = components::insert_subtree_to_snapshot_tree( + // Update the blocks tree + auto end_blocks_tree_snapshot = components::insert_subtree_to_snapshot_tree( builder, - rootRollupInputs.start_historic_blocks_tree_snapshot, - rootRollupInputs.new_historic_blocks_tree_sibling_path, + rootRollupInputs.start_blocks_tree_snapshot, + rootRollupInputs.new_blocks_tree_sibling_path, fr::zero(), block_hash, 0, format(ROOT_CIRCUIT_ERROR_MESSAGE_BEGINNING, - "historic blocks tree roots not empty at 
location where subtree would be inserted")); + "blocks tree roots not empty at location where subtree would be inserted")); RootRollupPublicInputs public_inputs = { @@ -140,8 +140,8 @@ RootRollupPublicInputs root_rollup_circuit(DummyBuilder& builder, RootRollupInpu .end_public_data_tree_root = right.end_public_data_tree_root, .start_l1_to_l2_messages_tree_snapshot = rootRollupInputs.start_l1_to_l2_messages_tree_snapshot, .end_l1_to_l2_messages_tree_snapshot = new_l1_to_l2_messages_tree_snapshot, - .start_historic_blocks_tree_snapshot = rootRollupInputs.start_historic_blocks_tree_snapshot, - .end_historic_blocks_tree_snapshot = end_historic_blocks_tree_snapshot, + .start_blocks_tree_snapshot = rootRollupInputs.start_blocks_tree_snapshot, + .end_blocks_tree_snapshot = end_blocks_tree_snapshot, .calldata_hash = components::compute_calldata_hash(rootRollupInputs.previous_rollup_data), .l1_to_l2_messages_hash = compute_messages_hash(rootRollupInputs.new_l1_to_l2_messages) }; diff --git a/circuits/cpp/src/aztec3/circuits/rollup/test_utils/utils.cpp b/circuits/cpp/src/aztec3/circuits/rollup/test_utils/utils.cpp index a872a26497b..72fee73a733 100644 --- a/circuits/cpp/src/aztec3/circuits/rollup/test_utils/utils.cpp +++ b/circuits/cpp/src/aztec3/circuits/rollup/test_utils/utils.cpp @@ -86,8 +86,8 @@ BaseRollupInputs base_rollup_inputs_from_kernels(std::array kerne { // @todo Look at the starting points for all of these. // By supporting as inputs we can make very generic tests, where it is trivial to try new setups. 
- MemoryStore historic_blocks_tree_store; - MerkleTree historic_blocks_tree = MerkleTree(historic_blocks_tree_store, HISTORIC_BLOCKS_TREE_HEIGHT); + MemoryStore blocks_tree_store; + MerkleTree blocks_tree = MerkleTree(blocks_tree_store, BLOCKS_TREE_HEIGHT); BaseRollupInputs baseRollupInputs = { .kernel_data = kernel_data, @@ -154,31 +154,31 @@ BaseRollupInputs base_rollup_inputs_from_kernels(std::array kerne baseRollupInputs.start_public_data_tree_root = public_data_tree.root(); - // create the original historic blocks tree leaf + // create the original blocks tree leaf auto block_hash = compute_block_hash(prev_global_variables_hash, note_hash_tree.root(), nullifier_tree.root(), contract_tree.root(), l1_to_l2_msg_tree.root(), public_data_tree.root()); - historic_blocks_tree.update_element(0, block_hash); + blocks_tree.update_element(0, block_hash); - ConstantRollupData const constantRollupData = { .start_historic_blocks_tree_roots_snapshot = { - .root = historic_blocks_tree.root(), + ConstantRollupData const constantRollupData = { .start_blocks_tree_snapshot = { + .root = blocks_tree.root(), .next_available_leaf_index = 1, } }; baseRollupInputs.constants = constantRollupData; - // Set historic tree roots data in the public inputs. + // Set historical tree roots data in the public inputs. 
for (size_t i = 0; i < 2; i++) { - kernel_data[i].public_inputs.constants.block_data.note_hash_tree_root = note_hash_tree.root(); - kernel_data[i].public_inputs.constants.block_data.nullifier_tree_root = nullifier_tree.root(); - kernel_data[i].public_inputs.constants.block_data.nullifier_tree_root = nullifier_tree.root(); - kernel_data[i].public_inputs.constants.block_data.contract_tree_root = contract_tree.root(); - kernel_data[i].public_inputs.constants.block_data.l1_to_l2_messages_tree_root = l1_to_l2_msg_tree.root(); - kernel_data[i].public_inputs.constants.block_data.blocks_tree_root = historic_blocks_tree.root(); - kernel_data[i].public_inputs.constants.block_data.public_data_tree_root = public_data_tree.root(); - kernel_data[i].public_inputs.constants.block_data.global_variables_hash = prev_global_variables_hash; + kernel_data[i].public_inputs.constants.block_header.note_hash_tree_root = note_hash_tree.root(); + kernel_data[i].public_inputs.constants.block_header.nullifier_tree_root = nullifier_tree.root(); + kernel_data[i].public_inputs.constants.block_header.nullifier_tree_root = nullifier_tree.root(); + kernel_data[i].public_inputs.constants.block_header.contract_tree_root = contract_tree.root(); + kernel_data[i].public_inputs.constants.block_header.l1_to_l2_messages_tree_root = l1_to_l2_msg_tree.root(); + kernel_data[i].public_inputs.constants.block_header.blocks_tree_root = blocks_tree.root(); + kernel_data[i].public_inputs.constants.block_header.public_data_tree_root = public_data_tree.root(); + kernel_data[i].public_inputs.constants.block_header.global_variables_hash = prev_global_variables_hash; } // Then we collect all sibling paths for the reads in the left tx, and then apply the update requests while @@ -207,13 +207,13 @@ BaseRollupInputs base_rollup_inputs_from_kernels(std::array kerne } } - // Get historic_root sibling paths - baseRollupInputs.historic_blocks_tree_root_membership_witnesses[0] = { + // Get historical_root sibling paths + 
baseRollupInputs.blocks_tree_root_membership_witnesses[0] = { .leaf_index = 0, - .sibling_path = get_sibling_path(historic_blocks_tree, 0, 0), + .sibling_path = get_sibling_path(blocks_tree, 0, 0), }; - baseRollupInputs.historic_blocks_tree_root_membership_witnesses[1] = - baseRollupInputs.historic_blocks_tree_root_membership_witnesses[0]; + baseRollupInputs.blocks_tree_root_membership_witnesses[1] = + baseRollupInputs.blocks_tree_root_membership_witnesses[0]; baseRollupInputs.kernel_data = kernel_data; @@ -378,8 +378,8 @@ RootRollupInputs get_root_rollup_inputs(utils::DummyBuilder& builder, MemoryStore public_data_tree_store; MerkleTree public_data_tree(public_data_tree_store, PUBLIC_DATA_TREE_HEIGHT); - MemoryStore historic_blocks_tree_store; - MerkleTree historic_blocks_tree(historic_blocks_tree_store, HISTORIC_BLOCKS_TREE_HEIGHT); + MemoryStore blocks_tree_store; + MerkleTree blocks_tree(blocks_tree_store, BLOCKS_TREE_HEIGHT); // Start blocks tree auto block_hash = compute_block_hash_with_globals(globals, @@ -388,16 +388,16 @@ RootRollupInputs get_root_rollup_inputs(utils::DummyBuilder& builder, contract_tree.root(), l1_to_l2_msg_tree.root(), public_data_tree.root()); - historic_blocks_tree.update_element(0, block_hash); + blocks_tree.update_element(0, block_hash); // Blocks tree snapshots - AppendOnlyTreeSnapshot const start_historic_blocks_tree_snapshot = { - .root = historic_blocks_tree.root(), + AppendOnlyTreeSnapshot const start_blocks_tree_snapshot = { + .root = blocks_tree.root(), .next_available_leaf_index = 1, }; // Blocks tree - auto blocks_tree_sibling_path = get_sibling_path(historic_blocks_tree, 1, 0); + auto blocks_tree_sibling_path = get_sibling_path(blocks_tree, 1, 0); // l1 to l2 tree auto l1_to_l2_tree_sibling_path = @@ -414,8 +414,8 @@ RootRollupInputs get_root_rollup_inputs(utils::DummyBuilder& builder, .new_l1_to_l2_messages = l1_to_l2_messages, .new_l1_to_l2_messages_tree_root_sibling_path = l1_to_l2_tree_sibling_path, 
.start_l1_to_l2_messages_tree_snapshot = start_l1_to_l2_msg_tree_snapshot, - .start_historic_blocks_tree_snapshot = start_historic_blocks_tree_snapshot, - .new_historic_blocks_tree_sibling_path = blocks_tree_sibling_path, + .start_blocks_tree_snapshot = start_blocks_tree_snapshot, + .new_blocks_tree_sibling_path = blocks_tree_sibling_path, }; return rootRollupInputs; } diff --git a/circuits/cpp/src/aztec3/constants.hpp b/circuits/cpp/src/aztec3/constants.hpp index feb162d9a87..39062de3b55 100644 --- a/circuits/cpp/src/aztec3/constants.hpp +++ b/circuits/cpp/src/aztec3/constants.hpp @@ -104,7 +104,7 @@ constexpr size_t NOTE_HASH_TREE_HEIGHT = 32; constexpr size_t PUBLIC_DATA_TREE_HEIGHT = 254; constexpr size_t NULLIFIER_TREE_HEIGHT = 20; constexpr size_t L1_TO_L2_MSG_TREE_HEIGHT = 16; -constexpr size_t HISTORIC_BLOCKS_TREE_HEIGHT = 16; +constexpr size_t BLOCKS_TREE_HEIGHT = 16; constexpr size_t ROLLUP_VK_TREE_HEIGHT = 8; // TODO: update @@ -315,7 +315,7 @@ constexpr size_t VIEW_NOTE_ORACLE_RETURN_LENGTH = MAX_NOTES_PER_PAGE * (MAX_NOTE constexpr size_t CALL_CONTEXT_LENGTH = 7; // Must be updated if any data is added into the block hash calculation. 
-constexpr size_t HISTORIC_BLOCK_DATA_LENGTH = 7; +constexpr size_t BLOCK_HEADER_LENGTH = 7; constexpr size_t FUNCTION_DATA_LENGTH = 4; constexpr size_t CONTRACT_DEPLOYMENT_DATA_LENGTH = 6; @@ -327,16 +327,16 @@ constexpr size_t PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH = + RETURN_VALUES_LENGTH + MAX_READ_REQUESTS_PER_CALL + MAX_PENDING_READ_REQUESTS_PER_CALL + MAX_NEW_COMMITMENTS_PER_CALL + 2 * MAX_NEW_NULLIFIERS_PER_CALL + MAX_PRIVATE_CALL_STACK_LENGTH_PER_CALL + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + MAX_NEW_L2_TO_L1_MSGS_PER_CALL + NUM_FIELDS_PER_SHA256 + - NUM_FIELDS_PER_SHA256 + 2 // + 2 for logs preimage lengths - + HISTORIC_BLOCK_DATA_LENGTH + CONTRACT_DEPLOYMENT_DATA_LENGTH + 2; // + 2 for chain_id and version + NUM_FIELDS_PER_SHA256 + 2 // + 2 for logs preimage lengths + + BLOCK_HEADER_LENGTH + CONTRACT_DEPLOYMENT_DATA_LENGTH + 2; // + 2 for chain_id and version constexpr size_t PRIVATE_CIRCUIT_PUBLIC_INPUTS_HASH_INPUT_LENGTH = 1 + 1 // call_context_hash + args_hash + RETURN_VALUES_LENGTH + MAX_READ_REQUESTS_PER_CALL + MAX_PENDING_READ_REQUESTS_PER_CALL + MAX_NEW_COMMITMENTS_PER_CALL + 2 * MAX_NEW_NULLIFIERS_PER_CALL + MAX_PRIVATE_CALL_STACK_LENGTH_PER_CALL + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + MAX_NEW_L2_TO_L1_MSGS_PER_CALL + NUM_FIELDS_PER_SHA256 + - NUM_FIELDS_PER_SHA256 + 2 // + 2 for logs preimage lengths - + HISTORIC_BLOCK_DATA_LENGTH + 3; // + 3 for contract_deployment_data.hash(), chain_id, version + NUM_FIELDS_PER_SHA256 + 2 // + 2 for logs preimage lengths + + BLOCK_HEADER_LENGTH + 3; // + 3 for contract_deployment_data.hash(), chain_id, version constexpr size_t CONTRACT_STORAGE_UPDATE_REQUEST_LENGTH = 3; constexpr size_t CONTRACT_STORAGE_READ_LENGTH = 2; @@ -347,15 +347,15 @@ constexpr size_t PUBLIC_CIRCUIT_PUBLIC_INPUTS_LENGTH = MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL * CONTRACT_STORAGE_UPDATE_REQUEST_LENGTH + MAX_PUBLIC_DATA_READS_PER_CALL * CONTRACT_STORAGE_READ_LENGTH + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + MAX_NEW_COMMITMENTS_PER_CALL + 
MAX_NEW_NULLIFIERS_PER_CALL + MAX_NEW_L2_TO_L1_MSGS_PER_CALL + - NUM_FIELDS_PER_SHA256 + 1 + // + 1 for unencrypted logs preimage length - HISTORIC_BLOCK_DATA_LENGTH + 2; // + 2 for chain_id and version + NUM_FIELDS_PER_SHA256 + 1 + // + 1 for unencrypted logs preimage length + BLOCK_HEADER_LENGTH + 2; // + 2 for chain_id and version constexpr size_t PUBLIC_CIRCUIT_PUBLIC_INPUTS_HASH_INPUT_LENGTH = 2 + RETURN_VALUES_LENGTH + // + 1 for args_hash + 1 call_context.hash MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL + MAX_PUBLIC_DATA_READS_PER_CALL + MAX_PUBLIC_CALL_STACK_LENGTH_PER_CALL + MAX_NEW_COMMITMENTS_PER_CALL + MAX_NEW_NULLIFIERS_PER_CALL + MAX_NEW_L2_TO_L1_MSGS_PER_CALL + - NUM_FIELDS_PER_SHA256 + // unencrypted_logs_hash (being represented by NUM_FIELDS_PER_SHA256) - HISTORIC_BLOCK_DATA_LENGTH + 2; // unencrypted_log_preimages_length + prover_address + NUM_FIELDS_PER_SHA256 + // unencrypted_logs_hash (being represented by NUM_FIELDS_PER_SHA256) + BLOCK_HEADER_LENGTH + 2; // unencrypted_log_preimages_length + prover_address // Size of the return value of a private function call, diff --git a/circuits/cpp/src/aztec3/dbs/private_state_db.hpp b/circuits/cpp/src/aztec3/dbs/private_state_db.hpp index 5689dc163ca..60d4ecaedf8 100644 --- a/circuits/cpp/src/aztec3/dbs/private_state_db.hpp +++ b/circuits/cpp/src/aztec3/dbs/private_state_db.hpp @@ -17,7 +17,7 @@ // std::vector active_private_state_preimages; // }; -// // contract->storageSlots->[current->historic] +// // contract->storageSlots->[current->historical] // /** // * Hmmm... There are multiple active leaves for partitioned states. 
diff --git a/circuits/cpp/src/aztec3/oracle/fake_db.hpp b/circuits/cpp/src/aztec3/oracle/fake_db.hpp index 96ddfd62a21..a14ffa6f57e 100644 --- a/circuits/cpp/src/aztec3/oracle/fake_db.hpp +++ b/circuits/cpp/src/aztec3/oracle/fake_db.hpp @@ -61,7 +61,7 @@ class FakeDB { .sibling_path = sibling_path, .leaf_index = 2, - .historic_note_hash_tree_root = required_utxo_tree_root, + .historical_note_hash_tree_root = required_utxo_tree_root, }; }; @@ -109,7 +109,7 @@ class FakeDB { .sibling_path = sibling_path, .leaf_index = 2, - .historic_note_hash_tree_root = required_utxo_tree_root, + .historical_note_hash_tree_root = required_utxo_tree_root, }); } @@ -153,7 +153,7 @@ class FakeDB { .sibling_path = sibling_path, .leaf_index = 2, - .historic_note_hash_tree_root = required_utxo_tree_root, + .historical_note_hash_tree_root = required_utxo_tree_root, }; }; }; diff --git a/docs/docs/about_aztec/overview.mdx b/docs/docs/about_aztec/overview.mdx index dde9f2c1192..3fceee95c47 100644 --- a/docs/docs/about_aztec/overview.mdx +++ b/docs/docs/about_aztec/overview.mdx @@ -10,7 +10,7 @@ Aztec is an L2 that brings programmable privacy to Ethereum. A smart contract on Aztec is a collection of functions, written as ZK-SNARK circuits. These circuits can have different modes of execution: -1. Secret Functions -- can read and write private state, read historic public state, consume or send messages to / from Ethereum, and read Ethereum state. They can call other secret functions in the same contract, or other contracts, and can call public functions. +1. Secret Functions -- can read and write private state, read historical public state, consume or send messages to / from Ethereum, and read Ethereum state. They can call other secret functions in the same contract, or other contracts, and can call public functions. 2. Public Functions -- can read and write public state, write private state, consume or send messages to / from Ethereum and read Ethereum state. 
They can call other public functions on the same or other contracts. 3. Portal Contracts -- these are contracts on Ethereum that can receive messages from Aztec or send messages to Aztec from Ethereum contracts. diff --git a/docs/docs/about_aztec/roadmap/engineering_roadmap.md b/docs/docs/about_aztec/roadmap/engineering_roadmap.md index a9f71aa40fa..890d0f1e14a 100644 --- a/docs/docs/about_aztec/roadmap/engineering_roadmap.md +++ b/docs/docs/about_aztec/roadmap/engineering_roadmap.md @@ -119,7 +119,9 @@ CI takes up a significant amount of time. It gets its own section here, so we re ## Slow Updates tree? -We _need_ a way to read mutable public data from a private function. +We _need_ a way to read mutable public data from a private function. + +Note: we just published the [Slow Updates Tree](../../concepts/foundation/communication/public_private_calls/slow_updates_tree.md). ## Contract classes and instances? diff --git a/docs/docs/concepts/advanced/data_structures/trees.md b/docs/docs/concepts/advanced/data_structures/trees.md index 45fc60c51a8..79ee1149d67 100644 --- a/docs/docs/concepts/advanced/data_structures/trees.md +++ b/docs/docs/concepts/advanced/data_structures/trees.md @@ -160,10 +160,10 @@ The contract tree contains information about every function of every contract de > Note: Aztec supports the ability to keep the logic of private functions of a smart contract private. In such cases, no information about the logic of that private function will be broadcast; only a randomized merkle root of that contract's data. -## Trees of historic trees' roots +## Blocks Tree -- `treeOfHistoricNoteHashTreeRoots`: for membership checks against historic roots of the `noteHashTree` -- `treeOfHistoricContractTreeRoots`: for membership checks against historic roots of the `contractTree` +Leaves are hashes of blocks (of block headers). 
+Can be used to access any of the trees above at some older point in time by doing a membership check of the old root in the block header and of the block header hash in the blocks tree. ## Trees of valid Kernel/Rollup circuit Verification Keys diff --git a/docs/docs/concepts/foundation/accounts/keys.md b/docs/docs/concepts/foundation/accounts/keys.md index b3a49ffbe93..589667cbab3 100644 --- a/docs/docs/concepts/foundation/accounts/keys.md +++ b/docs/docs/concepts/foundation/accounts/keys.md @@ -28,7 +28,7 @@ Similar to using a private note, but using an immutable private note removes the ### Using the slow updates tree -A compromise between the two solutions above is to use the slow updates tree. This would not generate additional nullifiers and commitments for each transaction while allowing the user to rotate their key. However, this causes every transaction to now have a time-to-live determined by the frequency of the slow updates tree. +A compromise between the two solutions above is to use the [slow updates tree](../communication/public_private_calls/slow_updates_tree.md). This would not generate additional nullifiers and commitments for each transaction while allowing the user to rotate their key. However, this causes every transaction to now have a time-to-live determined by the frequency of the slow updates tree. ### Reusing the privacy master key diff --git a/docs/docs/concepts/foundation/accounts/main.md b/docs/docs/concepts/foundation/accounts/main.md index 23dcfa6c549..c96238632be 100644 --- a/docs/docs/concepts/foundation/accounts/main.md +++ b/docs/docs/concepts/foundation/accounts/main.md @@ -127,7 +127,7 @@ These two patterns combined allow an account contract to answer whether an actio Aztec requires users to define [encryption and nullifying keys](./keys.md) that are needed for receiving and spending private notes. Unlike transaction signing, encryption and nullifying is enshrined at the protocol. 
This means that there is a single scheme used for encryption and nullifying. These keys are derived from a master public key. This master public key, in turn, is used when deterministically deriving the account's address. -A side effect of committing to a master public key as part of the address is that _this key cannot be rotated_. While an account contract implementation could include methods for rotating the signing key, this is unfortunately not possible for encryption and nullifying keys (note that rotating nullifying keys also creates other challenges such as preventing double spends). We are exploring usage of the slow updates tree to enable rotating these keys. +A side effect of committing to a master public key as part of the address is that _this key cannot be rotated_. While an account contract implementation could include methods for rotating the signing key, this is unfortunately not possible for encryption and nullifying keys (note that rotating nullifying keys also creates other challenges such as preventing double spends). We are exploring usage of the [slow updates tree](../communication/public_private_calls/slow_updates_tree.md) to enable rotating these keys. NOTE: While we entertained the idea of abstracting note encryption, where account contracts would define an `encrypt` method that would use a user-defined scheme, there are two main reasons we decided against this. First is that this entailed that, in order to receive funds, a user had to first deploy their account contract, which is a major UX issue. Second, users could define malicious `encrypt` methods that failed in certain circumstances, breaking application flows that required them to receive a private note. 
While this issue already exists in Ethereum when transferring ETH (see the [king of the hill](https://coinsbench.com/27-king-ethernaut-da5021cd4aa6)), its impact is made worse in Aztec since any execution failure in a private function makes the entire transaction unprovable (ie it is not possible to catch errors in calls to other private functions), and furthermore because encryption is required for any private state (not just for transferring ETH). Nevertheless, both of these problems are solvable. Initialization can be worked around by embedding a commitment to the bytecode in the address and removing the need for actually deploying contracts before interacting with them, and the king of the hill issue can be mitigated by introducing a full private VM that allows catching reverts. As such, we may be able to abstract encryption in the future as well. diff --git a/docs/docs/concepts/foundation/communication/main.md b/docs/docs/concepts/foundation/communication/main.md index 3dcd7fb7f5b..1f85585dacc 100644 --- a/docs/docs/concepts/foundation/communication/main.md +++ b/docs/docs/concepts/foundation/communication/main.md @@ -4,6 +4,6 @@ title: Contract Communication This section will walk over communication types that behaves differently than normal function calls from. -Namely, if functions are in different domains, private vs. public, their execution behaves a little differently to what you might expect! See [Private <--> Public execution](./public_private_calls.md). +Namely, if functions are in different domains, private vs. public, their execution behaves a little differently to what you might expect! See [Private <--> Public execution](./public_private_calls/main.md). Likewise, executing a function on a different domain than its origin needs a bit extra thought. See [L1 <--> L2 communication](./cross_chain_calls.md). 
\ No newline at end of file diff --git a/docs/docs/concepts/foundation/communication/public_private_calls.md b/docs/docs/concepts/foundation/communication/public_private_calls/main.md similarity index 88% rename from docs/docs/concepts/foundation/communication/public_private_calls.md rename to docs/docs/concepts/foundation/communication/public_private_calls/main.md index 683126a14a3..34a98047f4f 100644 --- a/docs/docs/concepts/foundation/communication/public_private_calls.md +++ b/docs/docs/concepts/foundation/communication/public_private_calls/main.md @@ -4,7 +4,7 @@ title: Private <--> Public execution import Image from "@theme/IdealImage"; -import Disclaimer from "../../../misc/common/\_disclaimer.mdx"; +import Disclaimer from "../../../../misc/common/\_disclaimer.mdx"; @@ -34,7 +34,7 @@ To avoid this issue, we permit the use of historical data as long as the data ha In this model, instead of informing the builder of our intentions, we construct the proof $\pi$ and then provide them with the transaction results (new commitments and nullifiers, contract deployments and cross-chain messages) in addition to $\pi$. The builder will then be responsible for inserting these new commitments and nullifiers into the state. They will be aware of the intermediates and can discard transactions that try to produce existing nullifiers (double spend), as doing so would invalidate the rollup proof. -On the left-hand side of the diagram below, we see the fully public world where storage is shared, while on the right-hand side, we see the private world where all reads are historic. +On the left-hand side of the diagram below, we see the fully public world where storage is shared, while on the right-hand side, we see the private world where all reads are historical. @@ -82,15 +82,15 @@ From the above, we should have a decent idea about what private and public funct Many applications rely on some form of access control to function well. 
USDC have a blacklist, where only parties not on the list should be able to transfer. And other systems such as Aave have limits such that only the pool contract is able to mint debt tokens and transfers held funds. -Access control like this cannot easily be enforced in the private domain, as reading is also nullifying(to ensure data is up to date). However, as it is possible to read historic public state, one can combine private and public functions to get the desired effect. +Access control like this cannot easily be enforced in the private domain, as reading is also nullifying (to ensure data is up to date). However, as it is possible to read historical public state, one can combine private and public functions to get the desired effect. -Say the public state holds a `mapping(address user => bool blacklisted)` and a value with the block number of the last update `last_updated`. The private functions can then use this public blacklist IF it also performs a public function call that reverts if the block number of the historic state is older than the `last_updated`. This means that updating the blacklist would make pending transactions fail, but allow a public blacklist to be used. Similar would work for the Aave example, where it is just a public value with the allowed caller contracts. Example of how this would be written is seen below. Note that because the `onlyFresh` is done in public, the user might not know when he is generating his proof whether it will be valid or not. +Say the public state holds a `mapping(address user => bool blacklisted)` and a value with the block number of the last update `last_updated`. The private functions can then use this public blacklist IF it also performs a public function call that reverts if the block number of the historical state is older than the `last_updated`. This means that updating the blacklist would make pending transactions fail, but allow a public blacklist to be used. 
Similar would work for the Aave example, where it is just a public value with the allowed caller contracts. Example of how this would be written is seen below. Note that because the `onlyFresh` is done in public, the user might not know when he is generating his proof whether it will be valid or not. ```solidity function transfer( secret address to, secret uint256 amount, - secret HistoricState state + secret HistoricalState state ) secret returns(bool) { if (blacklisted[msg.sender] || blacklisted[to]) revert("Blacklisted"); onlyFresh(state.blockNumber); @@ -105,3 +105,5 @@ function onlyFresh(pub uint256 blockNumber) public { :::info This is not a perfect solution, as any functions using access control might end up doing a lot of public calls it could put a significant burden on sequencers and greatly increase the cost of the transaction for the user. We are investigating ways to improve. ::: + +Using a dual-tree structure with a pending and a current tree, it is possible to update public data from a private function. The update is fulfilled when the pending tree becomes the current after the end of a specified epoch. It is also possible to read historical public data directly from a private function. This works perfectly for public data that is not updated often, such as a blacklist. This structure is called a slow updates tree, and you can read about how it works [in the next section](./slow_updates_tree.md). diff --git a/docs/docs/concepts/foundation/communication/public_private_calls/slow_updates_tree.md b/docs/docs/concepts/foundation/communication/public_private_calls/slow_updates_tree.md new file mode 100644 index 00000000000..c6ceb070e26 --- /dev/null +++ b/docs/docs/concepts/foundation/communication/public_private_calls/slow_updates_tree.md @@ -0,0 +1,73 @@ +--- +title: Privately access Historical Public Data +--- + +In Aztec, private and public execution environments are completely separate and operate with distinct state management. 
It is not possible for private functions to reliably access the most recent public state - only sequencers can do that. You'll want to [read the previous section](./main.md) to understand this before reading this page. + +But, what about historical public data (or public data that changes infrequently)? Through a **slow updates tree**, private functions can access historical public state. Please note that we are still experimenting with this feature. + +On this page you will learn: + +1. Why a slow updates tree exists & use cases +2. How it works +3. How it can be used to access historical public data +4. Limitations + +## The need for a slow updates tree + +This structure was created specifically to privately & publicly access historical public data. It should be used to store public data that doesn't change often. + +- Access historical public data from a private function +- Access historical public data from a public function +- Update public data (that does not need to be updated often) from public and private functions + +This data structure is ideal for these use cases: + +- Address Registry: Enabling contracts to interact with other contracts more easily requires address storage accessible in both public and private executions. This can be particularly helpful in things such as proxy contracts. +- Access Control: Managing privileges in contracts, such as a token contract owner’s ability to mint new tokens, is streamlined when control information is shared between public and private executions. This might include things like blacklists and whitelists. + +## How it works + +We developed the Slow Updates Tree to help balance public and private execution in a blockchain context. Earlier systems typically used either fully public or entirely private state trees. + +The Slow Updates Tree is a dual-tree structure - a current tree and a pending tree. Any updates are added to the pending tree, which then becomes the current tree at the end of an epoch. 
The pending tree is replicated from the current tree, and the cycle begins again. + +```mermaid +graph TD; + Change{Epoch Over} -->|True| Current{Current} + Change -->|False| Pending{Pending} + Current --> Current1[Current Commitment 1] + Current --> CurrentM[Current Commitment 2] + CurrentM --> Value1[Current Value 1] + CurrentM --> Value2[Current Value 2] + CurrentM --> ValueN[Current Value n] + Pending --> PendingM[Pending Commitment 1] + PendingM --> PValue1[Pending Value 1] + PendingM --> PValue2[Pending Value 2] + PendingM --> PValueN[Pending Value n] +``` + +This way, we can ensure that the values are stable throughout the epoch, and that the membership proofs are not invalidated by changes in other contracts more than once every epoch. + +## Reads and Writes + +### Accessing Data + +*From public state:* Accessed directly from the state +*From private state:* Performs a membership proof for the values in the tree, ensuring that they are part of the commitment. + +### Updating Data + +Updates are made to the pending tree. Then at the end of each epoch, the updates in the pending tree are committed and it becomes the current tree. + +## Limitations + +### Delayed State Finality + +Updates in the Slow Updates Tree are only finalized at the end of an epoch. + +Developers are used to instant state updates, so the Slow Updates Tree might take some getting used to. But we believe this won't take long! + +## Dive into the code + +For a code walkthrough of how a token blacklist contract can use a slow updates tree, read [this](../../../../dev_docs/contracts/syntax/slow_updates_tree.md). \ No newline at end of file diff --git a/docs/docs/concepts/foundation/main.md b/docs/docs/concepts/foundation/main.md index 0791cd279d3..5d7e9579882 100644 --- a/docs/docs/concepts/foundation/main.md +++ b/docs/docs/concepts/foundation/main.md @@ -24,7 +24,7 @@ A user of the Aztec network will interact with the network through Aztec.js. 
Azt ### Private Execution Environment -The PXE provides a secure environment for the execution of sensitive operations, ensuring private information and decrypted data are not accessible to unauthorized applications. It hides the details of the [state model](./state_model/main.md) from end users, but the state model is important for Aztec developers to understand as it has implications for [private/public execution](./communication/public_private_calls.md) and [L1/L2 communication](./communication/cross_chain_calls.md). The PXE also includes the [ACIR Simulator](../advanced/acir_simulator.md) for private executions and the KeyStore for secure key management. +The PXE provides a secure environment for the execution of sensitive operations, ensuring private information and decrypted data are not accessible to unauthorized applications. It hides the details of the [state model](./state_model/main.md) from end users, but the state model is important for Aztec developers to understand as it has implications for [private/public execution](./communication/public_private_calls/main.md) and [L1/L2 communication](./communication/cross_chain_calls.md). The PXE also includes the [ACIR Simulator](../advanced/acir_simulator.md) for private executions and the KeyStore for secure key management. Procedurally, the PXE sends results of private function execution and requests for public function executions to the [sequencer](./nodes_clients/sequencer.md), which will update the state of the rollup. diff --git a/docs/docs/dev_docs/cli/main.md b/docs/docs/dev_docs/cli/main.md index 4b356a7b157..d811c2adb74 100644 --- a/docs/docs/dev_docs/cli/main.md +++ b/docs/docs/dev_docs/cli/main.md @@ -26,7 +26,7 @@ With the help of Aztec.js you will be able to: ## What's in the Sandbox? 
-The sandbox contains a local Ethereum instance running [Anvil](https://book.getfoundry.sh/anvil/), a local instance of the Aztec rollup, an aztec private execution client for handling user transactions and state, and, if using Docker, an [Otterscan](https://github.com/otterscan/otterscan) block explorer for the local Ethereum network. +The sandbox contains a local Ethereum instance running [Anvil](https://book.getfoundry.sh/anvil/), a local instance of the Aztec rollup and an aztec private execution client for handling user transactions and state. These provide a self contained environment which deploys Aztec on a local (empty) Ethereum network, creates 3 smart contract wallet accounts on the rollup, and allows transactions to be processed on the local Aztec sequencer. diff --git a/docs/docs/dev_docs/cli/sandbox-reference.md b/docs/docs/dev_docs/cli/sandbox-reference.md index 8c8b31723af..171ee587295 100644 --- a/docs/docs/dev_docs/cli/sandbox-reference.md +++ b/docs/docs/dev_docs/cli/sandbox-reference.md @@ -148,12 +148,6 @@ P2P_ANNOUNCE_HOSTNAME='' # The IPAddress/Hostname that other peers should use to P2P_ANNOUNCE_PORT='' # The port that other peers should use to connect to this node, this may be different to P2P_TCP_LISTEN_PORT if e.g. the node is behind a NAT. ``` -## Otterscan - -If you have set up the Sandbox with Docker, you will also have Otterscan. - -You can see Ethereum Layer 1 activity through the local Otterscan on `http://localhost:5100`. This is especially useful for dapps that use L1-L2 messaging through [portal contracts](../contracts/portals/main.md). - ## Cheat Codes To help with testing, the sandbox is shipped with a set of cheatcodes. 
diff --git a/docs/docs/dev_docs/contracts/resources/common_patterns/main.md b/docs/docs/dev_docs/contracts/resources/common_patterns/main.md index 82afee5c96d..a75e128fa6e 100644 --- a/docs/docs/dev_docs/contracts/resources/common_patterns/main.md +++ b/docs/docs/dev_docs/contracts/resources/common_patterns/main.md @@ -41,7 +41,7 @@ Note - you could also create a note and send it to the user. The problem is ther ### Reading public storage in private You can't read public storage in private domain. But nevertheless reading public storage is desirable. There are two ways: -1. For public storage that changes infrequently, use the slow updates tree! More details TBD +1. For public storage that changes infrequently, use the slow updates tree! Learn more about it [here](../../../../concepts/foundation/communication/public_private_calls/slow_updates_tree.md). 2. You pass the data as a parameter to your private method and later assert in public that the data is correct. E.g.: ```rust @@ -125,7 +125,7 @@ There are several patterns here: There are several other designs we are discussing through [in this discourse post](https://discourse.aztec.network/t/how-to-handle-private-escrows-between-two-parties/2440) but they need some changes in the protocol or in our demo contract. If you are interested in this discussion, please participate in the discourse post! ### Share Private Notes -If you have private state that needs to be handled by more than a single user (but no more than a handful), you can add the note commitment to the private data tree, and then encrypt the note once for each of the users that need to see it. And if any of those users should be able to consume the note, you can generate a random nullifier on creation and store it in the encrypted note, instead of relying on the user secret. 
+If you have private state that needs to be handled by more than a single user (but no more than a handful), you can add the note commitment to the note hash tree, and then encrypt the note once for each of the users that need to see it. And if any of those users should be able to consume the note, you can generate a random nullifier on creation and store it in the encrypted note, instead of relying on the user secret. ## Anti Patterns There are mistakes one can make to reduce their privacy set and therefore make it trivial to do analysis and link addresses. Some of them are: diff --git a/docs/docs/dev_docs/contracts/syntax/context.mdx b/docs/docs/dev_docs/contracts/syntax/context.mdx index f73bfbe72ad..1855f59222e 100644 --- a/docs/docs/dev_docs/contracts/syntax/context.mdx +++ b/docs/docs/dev_docs/contracts/syntax/context.mdx @@ -18,7 +18,7 @@ On this page, you'll learn - The details and functionalities of the private context in Aztec.nr - Difference between the private and public contexts and their unified APIs -- Components of the private context, such as inputs, historic block data, and contract deployment data +- Components of the private context, such as inputs, block header, and contract deployment data - Elements like return values, read requests, new commitments, and nullifiers in transaction processing - Differences between the private and public contexts, especially the unique features and variables in the public context @@ -44,7 +44,7 @@ The context inputs includes all of the information that is passed from the kerne #include_code private-context-inputs /yarn-project/aztec-nr/aztec/src/abi.nr rust -As shown in the snippet, the application context is made up of 4 main structures. The call context, the block data, the contract deployment data and the private global variables. +As shown in the snippet, the application context is made up of 4 main structures. The call context, the block header, the contract deployment data and the private global variables. 
First of all, the call context. @@ -71,11 +71,11 @@ The call context contains information about the current call being made: - is_static_call: This will be set if and only if the current call is a static call. In a static call, state changing altering operations are not allowed. - is_contract_deployment: This will be set if and only if the current call is the contract's constructor. -### Historic Block Data +### Block Header -Another structure that is contained within the context is the Historic Block Data object. This object is a special one as it contains all of the roots of Aztec's data trees. +Another structure that is contained within the context is the Block Header object. This object is a special one as it contains all of the roots of Aztec's data trees. -#include_code historic-block-data /yarn-project/aztec-nr/aztec/src/abi.nr rust +#include_code block-header /yarn-project/aztec-nr/aztec/src/abi.nr rust ### Contract Deployment Data @@ -137,7 +137,7 @@ The Public Context includes all of the information passed from the `Public VM` i ### Public Context Inputs -In the current version of the system, the public context is almost a clone of the private execution context. It contains the same call context data, access to the same historic tree roots, however it does NOT have access to contract deployment data, this is due to traditional contract deployments only currently being possible from private transactions. +In the current version of the system, the public context is almost a clone of the private execution context. It contains the same call context data, access to the same historical tree roots, however it does NOT have access to contract deployment data, this is due to traditional contract deployments only currently being possible from private transactions. 
#include_code public-context-inputs /yarn-project/aztec-nr/aztec/src/abi.nr rust diff --git a/docs/docs/dev_docs/contracts/syntax/functions.md b/docs/docs/dev_docs/contracts/syntax/functions.md index 996e369dff1..334aaa1401b 100644 --- a/docs/docs/dev_docs/contracts/syntax/functions.md +++ b/docs/docs/dev_docs/contracts/syntax/functions.md @@ -20,7 +20,7 @@ In Aztec there are multiple different types of visibility that can be applied to ### Data Visibility -Data visibility is used to describe whether the data (or state) used in a function is generally accessible (public) or on a need to know basis (private). Functions with public data visibility are executed by the sequencer, and functions with private data visibility are executed by the user. For more information on why this is the case, see [communication](../../../concepts/foundation/communication/public_private_calls.md). +Data visibility is used to describe whether the data (or state) used in a function is generally accessible (public) or on a need to know basis (private). Functions with public data visibility are executed by the sequencer, and functions with private data visibility are executed by the user. For more information on why this is the case, see [communication](../../../concepts/foundation/communication/public_private_calls/main.md). In the following sections, we are going to see how these two "types" co-exists and interact. @@ -213,7 +213,7 @@ Calling a public function from another public function is quite similar to what ### Private -> Public -As discussed above, private function execution and calls take place on the user's device, while public function execution and calls take place on a sequencer, in two different places at two different times, it is natural to question how we can achieve composability between the two. The solution is asynchronicity. Further reading can be found in the foundational concepts [here](../../../concepts/foundation/communication/public_private_calls.md). 
+As discussed above, private function execution and calls take place on the user's device, while public function execution and calls take place on a sequencer, in two different places at two different times, it is natural to question how we can achieve composability between the two. The solution is asynchronicity. Further reading can be found in the foundational concepts [here](../../../concepts/foundation/communication/public_private_calls/main.md). Private function execution takes place on the users device, where it keeps track of any public function calls that have been made. Whenever private execution completes, and a kernel proof is produced, the transaction sent to the network will include all of the public calls that were dispatched. When the sequencer receives the messages, it will take over and execute the public parts of the transaction. diff --git a/docs/docs/dev_docs/contracts/syntax/main.md b/docs/docs/dev_docs/contracts/syntax/main.md index 17a40bf9f42..599c19d379d 100644 --- a/docs/docs/dev_docs/contracts/syntax/main.md +++ b/docs/docs/dev_docs/contracts/syntax/main.md @@ -5,7 +5,7 @@ import { AztecPackagesVersion } from "@site/src/components/Version"; [Noir](https://noir-lang.org/) is a language which is agnostic to proof systems and use cases. Rather than baking Aztec-specific keywords and smart contract types directly into Noir (which would break this agnosticism), we have developed a framework -- written in Noir -- whose types and methods provide rich smart contract semantics. -On top of [Noir's stdlib](https://noir-lang.org/standard_library/array_methods), we provide [Aztec.nr](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/aztec-nr) for writing contracts on Aztec. +On top of [Noir's stdlib](https://noir-lang.org/standard_library/cryptographic_primitives/), we provide [Aztec.nr](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/aztec-nr) for writing contracts on Aztec. 
Aztec.nr contains abstractions which remove the need to understand the low-level Aztec protocol. Notably, it provides: diff --git a/docs/docs/dev_docs/contracts/syntax/slow_updates_tree.md b/docs/docs/dev_docs/contracts/syntax/slow_updates_tree.md new file mode 100644 index 00000000000..e108e0cff23 --- /dev/null +++ b/docs/docs/dev_docs/contracts/syntax/slow_updates_tree.md @@ -0,0 +1,268 @@ +--- +title: Slow Updates Tree +--- + +Slow Updates Tree is a data structure that allows for historical public data to be accessed in both private and public domains. Read the high level overview in the [concepts section](../../../concepts/foundation/communication/public_private_calls/slow_updates_tree.md). + +The slow updates tree works by having a current tree and a pending tree, and replacing the current tree with the pending tree after an epoch has passed. Public functions can read directly from the current tree, and private functions can perform a membership proof that values are part of a commitment to the current state of the tree. + +On this page you will learn: + +1. [The components involved in using the slow updates tree](slow_updates_tree.md#components-involved-in-implementing-a-slow-updates-tree) +2. [How you can integrate it into your own smart contract](slow_updates_tree.md#how-to-integrate-a-slow-updates-tree) +3. [An example of a token blacklisting contract that uses the slow updates tree](slow_updates_tree.md#exploring-an-example-integration-through-a-tokenblacklist-smart-contract) +4. [Interface Reference](slow_updates_tree.md#reference) + +# Components involved in implementing a slow updates tree + +There are generally 4 main components involved to make it easier to use a slow updates tree, with 3 already implemented by Aztec. This makes it easier to interact with a slow updates tree through a simple interface. These four components are: + +## Main smart contract + +This is the primary smart contract that will use the slow updates tree. 
In the example we use a [token with blacklisting features](slow_updates_tree.md#exploring-an-example-integration-through-a-tokenblacklist-smart-contract). + +## Interface + +This interface allows your contract to interact with the Slow Updates Tree contract. It provides methods for reading and updating values in the tree in both public and private contexts. You can find it [here](https://github.com/AztecProtocol/aztec-packages/blob/master/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/interfaces.nr). + +## SlowTree.nr contract + +This is a smart contract developed by Aztec that establishes and manages a slow updates tree structure. It allows developers to access and interact with the tree, such as reading and updating data. + +You can find it [here](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/src/contracts/slow_tree_contract). + +## SlowMap type + +This is a type in the Aztec.nr library that is utilized by the SlowTree contract. It defines the underlying data structure for the slow updates tree, and handles storing both the current and pending values for each data entry. + +You can find it [here](https://github.com/AztecProtocol/aztec-nr/blob/master/slow-updates-tree/src/slow_map.nr). + +The diagram below describes how these components work together. It does not contain all the functionality.
+ +```mermaid +graph TD + MSC[Main Smart Contract] --> INT[Interface] + STC --> SMT + + INT_RAP[read_at_pub] <--> STC_RAP[read_at_public] + INT_RA[read_at] <--> STC_RA[read_at] + INT_UAP[update_at_public] <--> STC_UAP[update_at_public] + INT_UA[update_at_private] <--> STC_UA[update_at_private] + + STC_RA <--> VMP[verify_membership_proof] + STC_UA <--> CR[compute_roots] + + subgraph INT[Interface] + INT_RAP + INT_UAP + INT_RA + INT_UA + end + + subgraph STC[SlowTree.nr] + STC_RAP + STC_UAP + STC_RA + STC_UA + end + + subgraph SMT[SlowMap Type] + Change{Epoch Over} -->|True| Current{Current} + Change -->|False| Pending{Pending} + Current --> Current1[Current Commitment 1] + Current --> CurrentM[Current Commitment M] + CurrentM --> Value1[Current Value 1] + CurrentM --> Value2[Current Value 2] + CurrentM --> ValueN[Current Value N] + Pending --> PendingM[Pending Commitment 1] + PendingM --> PValue1[Pending Value 1] + PendingM --> PValue2[Pending Value 2] + PendingM --> PValueN[Pending Value N] + end + + style INT fill:#fff,stroke:#333,stroke-width:1px + style STC fill:#fff,stroke:#333,stroke-width:1px + style SMT fill:#fff,stroke:#333,stroke-width:1px +``` + +# How to integrate a slow updates tree + +1. Copy the *SlowTree.nr* example and its dependencies, found [here](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/src/contracts/slow_tree_contract). Replace the constants with whatever you like and deploy it to your sandbox +2. Copy the *SlowMap interface* for easy interaction with your deployed SlowTree. Find it [here](https://github.com/AztecProtocol/aztec-packages/blob/master/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/interfaces.nr) +3. Import this interface into your contract + +#include_code interface yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +5. 
Create a storage init function for the same value in both public and private storage + +#include_code slow_updates_storage yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +6. Store the SlowTree address in private storage as a FieldNote + +#include_code constructor yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +7. Store the SlowTree address in public storage and initialize an instance of SlowMap using this address + +#include_code write_slow_update_public yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +8. Now you can read and update from private functions: + +#include_code get_and_update_private yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +9. Or from public functions: + +#include_code get_public yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +View the [reference](slow_updates_tree.md#reference) for more information. + +## Exploring an example integration through a **`TokenBlacklist`** Smart Contract + +The `TokenBlacklist` contract is a token contract that does not allow blacklisted accounts to perform mints or transfers. In this section we will go through how this is achieved through the slow updates tree. + +You can find the full code for the TokenBlacklist smart contract [here](https://github.com/AztecProtocol/aztec-packages/tree/master/yarn-project/noir-contracts/src/contracts/token_blacklist_contract). + +### Importing SlowMap + +The contract first imports the **`SlowMap`** interface: + +#include_code interface yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +This interface allows the contract to interact with its attached SlowTree. It abstracts these functions so they do not have to be implemented in the TokenBlacklist contract. 
+ +### Constructor and initialization + +The contract's constructor takes the address of the slow updates contract: + +#include_code constructor yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +This initialization sets up the connection between the **`TokenBlacklist`** contract and a previously deployed SlowTree, allowing it to use the interface to directly interact with the SlowTree. + +### Private transfer function utilizing the slow updates tree + +In the private transfer function, the contract uses the interface to check if a user is blacklisted: + +#include_code transfer_private yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +Here, the contract reads the roles of the sender and recipient from the SlowTree using the **`read_at`** function in the interface. It checks if either party is blacklisted, and if so, the transaction does not go ahead. + +# Reference + +## Struct `SlowMap` + +### Overview +The `SlowMap` struct is used to interact with a slow updates tree deployed via the SlowTree smart contract. + +### Fields + +| Name | Type | Description | +|---------|-----------|---------------------------------| +| address | `Field` | The address of the SlowTree contract | + +## Functions + +### at + +Returns an instance of `SlowMap` at the specified address. + +**Parameters** + +| Name | Type | Description | +|----------|----------------|----------------------------| +| `address`| `AztecAddress` | The address of the SlowTree | + +**Return** + +| Name | Type | Description | +|-------|-----------|------------------------------| +| - | `SlowMap` | The `SlowMap` instance | + +**Example** + +#include_code slowmap_at yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +### initialize + +Initializes the `SlowMap`. 
+ +**Parameters** + +| Name | Type | Description | +|-----------|-----------------|----------------------| +| `context` | `PublicContext` | The execution context | + +**Return** + +| Name | Type | Description | +|------|------|-------------| +| - | - | - | + +**Example** + +#include_code slowmap_initialize yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +### read_at_pub + +Reads a value at a specified index from a public function. + +**Parameters** + +| Name | Type | Description | +|-----------|-----------------|-----------------------| +| `context` | `PublicContext` | The execution context | +| `index` | `Field` | The index to read at | + +**Return** + +| Name | Type | Description | +|----------|--------|-----------------------| +| `result` | `Field`| The value at `index` | + +**Example** + +#include_code read_at_pub yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +### read_at + +Reads a value at a specified index from a private function. + +**Parameters** + +| Name | Type | Description | +|-----------|--------------------|------------------------| +| `context` | `PrivateContext` | The execution context | +| `index` | `Field` | The index to read at | + +**Return** + +| Name | Type | Description | +|----------|--------|-----------------------| +| `result` | `Field`| The value at `index` | + +**Example** + +#include_code slowmap_read_at yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +### update_at_private + +Updates a value at a specified index from a private function. Does not return anything. 
+ +**Parameters** + +| Name | Type | Description | +|-------------|--------------------|------------------------| +| `context` | `PrivateContext` | The execution context | +| `index` | `Field` | The index to update | +| `new_value` | `Field` | The new value | + +**Example** + +#include_code get_and_update_private yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + +## Updating from public + +This is not a method in the interface as it can be done using regular Aztec.nr public storage update syntax. + +**Example** + +#include_code write_slow_update_public yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr rust + diff --git a/docs/docs/dev_docs/contracts/syntax/storage/main.md b/docs/docs/dev_docs/contracts/syntax/storage/main.md index 0a33ce92a6a..b647cc6ff50 100644 --- a/docs/docs/dev_docs/contracts/syntax/storage/main.md +++ b/docs/docs/dev_docs/contracts/syntax/storage/main.md @@ -23,7 +23,7 @@ On this page, you’ll learn: Public state variables can be read by anyone, while private state variables can only be read by their owner (or people whom the owner has shared the decrypted data or note viewing key with). -Public state follows the Ethereum style account model, where each contract has its own key-value datastore. Private state follows a UTXO model, where note contents (pre-images) are only known by the sender and those able to decrypt them - see ([state model](../../../../concepts/foundation/state_model/main.md) and [private/public execution](../../../../concepts/foundation/communication/public_private_calls.md)) for more background. +Public state follows the Ethereum style account model, where each contract has its own key-value datastore. 
Private state follows a UTXO model, where note contents (pre-images) are only known by the sender and those able to decrypt them - see ([state model](../../../../concepts/foundation/state_model/main.md) and [private/public execution](../../../../concepts/foundation/communication/public_private_calls/main.md)) for more background. ## Storage struct @@ -172,7 +172,7 @@ We know its verbose, and are working on making it less so. #### Mapping example -Say we want to have a group of `minters` that are able to mint assets in our contract, and we want them in public storage, because [access control in private is quite cumbersome](../../../../concepts/foundation/communication/public_private_calls.md#a-note-on-l2-access-control). In the `Storage` struct we can add it as follows: +Say we want to have a group of `minters` that are able to mint assets in our contract, and we want them in public storage, because [access control in private is quite cumbersome](../../../../concepts/foundation/communication/public_private_calls/main.md#a-note-on-l2-access-control). In the `Storage` struct we can add it as follows: #include_code storage_minters /yarn-project/noir-contracts/src/contracts/token_contract/src/main.nr rust diff --git a/docs/docs/dev_docs/debugging/sandbox-errors.md b/docs/docs/dev_docs/debugging/sandbox-errors.md index f31752a8d69..c7c2879eef3 100644 --- a/docs/docs/dev_docs/debugging/sandbox-errors.md +++ b/docs/docs/dev_docs/debugging/sandbox-errors.md @@ -62,7 +62,7 @@ Confirms that the TxRequest (user's intent) matches the private call being execu #### 2018 - PRIVATE_KERNEL\_\_READ_REQUEST_NOTE_HASH_TREE_ROOT_MISMATCH -Given a read request and provided witness, we check that the merkle root obtained from the witness' sibling path and it's leaf is similar to the historic state root we want to read against. This is a sanity check to ensure we are reading from the right state. 
+Given a read request and provided witness, we check that the merkle root obtained from the witness' sibling path and its leaf is similar to the historical state root we want to read against. This is a sanity check to ensure we are reading from the right state. For a non transient read, we fetch the merkle root from the membership witnesses and the leaf index #### 2019 - PRIVATE_KERNEL\_\_TRANSIENT_READ_REQUEST_NO_MATCH @@ -86,7 +86,7 @@ Calling a private Aztec.nr function in a public kernel is not allowed. #### 3005 - PUBLIC_KERNEL\_\_NON_EMPTY_PRIVATE_CALL_STACK -Public functions are executed after all the private functions are (see [private-public execution](../../concepts/foundation/communication/public_private_calls.md)). As such, private call stack must be empty when executing in the public kernel. +Public functions are executed after all the private functions are (see [private-public execution](../../concepts/foundation/communication/public_private_calls/main.md)). As such, private call stack must be empty when executing in the public kernel. #### 3011 - PUBLIC_KERNEL\_\_CALCULATED_PRIVATE_CALL_HASH_AND_PROVIDED_PRIVATE_CALL_HASH_MISMATCH @@ -164,11 +164,11 @@ You can have a look at our current constants/limitations in [constants.nr](https #### 7008 - MEMBERSHIP_CHECK_FAILED -Users may create a proof against a historic state in Aztec. The rollup circuits performs a merkle membership check to ensure this state existed at some point. If the historic state doesn't exist, you get this error. Some examples when you may hit this error are: +Users may create a proof against a historical state in Aztec. The rollup circuits perform a merkle membership check to ensure this state existed at some point. If the historical state doesn't exist, you get this error.
Some examples when you may hit this error are: -- using invalid historic note hash tree state (aka historic commitments tree) -- using invalid historic contracts data tree state -- using invalid historic L1 to L2 message data tree state +- using invalid historical note hash tree state (aka historical commitments tree) +- using invalid historical contracts data tree state +- using invalid historical L1 to L2 message data tree state - inserting a subtree into the greater tree - we make a smaller merkle tree of all the new commitments/nullifiers etc that were created in a transaction or in a rollup and add it to the bigger state tree. Before inserting, we do a merkle membership check to ensure that the index to insert at is indeed an empty subtree (otherwise we would be overwriting state). This can happen when `next_available_leaf_index` in the state tree's snapshot is wrong (it is fetched by the sequencer from the archiver). The error message should reveal which tree is causing this issue - nullifier tree related errors - The nullifier tree uses an [Indexed Merkle Tree](../../concepts/advanced/data_structures/indexed_merkle_tree.md). It requires additional data from the archiver to know which is the nullifier in the tree that is just below the current nullifier before it can perform batch insertion. If the low nullifier is wrong, or the nullifier is in incorrect range, you may receive this error. @@ -191,7 +191,7 @@ Users may create a proof against a historic state in Aztec. The rollup circuits - "${treeName} tree next available leaf index mismatch" - validating a tree's root is not enough. It also checks that the `next_available_leaf_index` is as expected. This is the next index we can insert new values into. Note that for the public data tree, this test is skipped since as it is a sparse tree unlike the others. 
-- "Public call stack size exceeded" - In Aztec, the sequencer executes all enqueued public functions in a transaction (to prevent race conditions - see [private-public execution](../../concepts/foundation/communication/public_private_calls.md)). This error says there are too many public functions requested. +- "Public call stack size exceeded" - In Aztec, the sequencer executes all enqueued public functions in a transaction (to prevent race conditions - see [private-public execution](../../concepts/foundation/communication/public_private_calls/main.md)). This error says there are too many public functions requested. - "Array size exceeds target length" - happens if you add more items than allowed by the constants set due to our circuit limitations (eg sending too many L2 to L1 messages or creating a function that exceeds the call stack length or returns more values than what Aztec.nr functions allow) diff --git a/docs/docs/dev_docs/getting_started/aztecjs-getting-started.md b/docs/docs/dev_docs/getting_started/aztecjs-getting-started.md index e7be7d00720..185c8bdaf15 100644 --- a/docs/docs/dev_docs/getting_started/aztecjs-getting-started.md +++ b/docs/docs/dev_docs/getting_started/aztecjs-getting-started.md @@ -337,7 +337,7 @@ This function takes: 1. A quantity of tokens to be minted. 2. A secret hash. -This function is public and it inserts a new note into the private data tree and increases the total token supply by the amount minted. +This function is public and it inserts a new note into the note hash tree and increases the total token supply by the amount minted. To make the note spendable the note has to be redeemed. A user can do that by calling the `redeem_shield` function. 
diff --git a/docs/docs/dev_docs/getting_started/core-concepts.md b/docs/docs/dev_docs/getting_started/core-concepts.md index e22e1d3a04d..e5087c624c1 100644 --- a/docs/docs/dev_docs/getting_started/core-concepts.md +++ b/docs/docs/dev_docs/getting_started/core-concepts.md @@ -31,7 +31,7 @@ You can call a public function from a private function by using `context.call_pu #include_code call_public_function yarn-project/noir-contracts/src/contracts/card_game_contract/src/main.nr rust -You cannot call a private function from a public function, but you can use a slow updates tree to read historic public state and stage writes to public state from a private function. +You cannot call a private function from a public function, but you can use a slow updates tree to read historical public state and stage writes to public state from a private function. ### Data types diff --git a/docs/docs/dev_docs/limitations/main.md b/docs/docs/dev_docs/limitations/main.md index 08a911d29e2..c697898a6f7 100644 --- a/docs/docs/dev_docs/limitations/main.md +++ b/docs/docs/dev_docs/limitations/main.md @@ -130,7 +130,7 @@ A contract can't perform a delegatecall yet (if ever). Delegatecalls are quite a Ethereum has a notion of a 'full node' which keeps-up with the blockchain and stores the full chain state. Many users don't wish to run full nodes, so rely on 3rd-party 'full-node-as-a-service' infrastructure providers, who service blockchain queries from their users. -This pattern is likely to develop in Aztec as well, except there's a problem: privacy. If a privacy-seeking user makes a query to a 3rd-party 'full node', that user might leak data about who they are, or about their historic network activity, or about their future intentions. One solution to this problem is "always run a full node", but pragmatically, not everyone will. 
To protect less-advanced users' privacy, research is underway to explore how a privacy-seeking user may request and receive data from a 3rd-party node without revealing what that data is, nor who is making the request. +This pattern is likely to develop in Aztec as well, except there's a problem: privacy. If a privacy-seeking user makes a query to a 3rd-party 'full node', that user might leak data about who they are, or about their historical network activity, or about their future intentions. One solution to this problem is "always run a full node", but pragmatically, not everyone will. To protect less-advanced users' privacy, research is underway to explore how a privacy-seeking user may request and receive data from a 3rd-party node without revealing what that data is, nor who is making the request. ### No private data authentication diff --git a/docs/docs/dev_docs/privacy/main.md b/docs/docs/dev_docs/privacy/main.md index b1818b7f84b..7964a582888 100644 --- a/docs/docs/dev_docs/privacy/main.md +++ b/docs/docs/dev_docs/privacy/main.md @@ -97,7 +97,7 @@ It's not just the broadcasting of transactions to the network that can leak data Ethereum has a notion of a 'full node' which keeps-up with the blockchain and stores the full chain state. Many users don't wish to run full nodes, so rely on 3rd-party 'full-node-as-a-service' infrastructure providers, who service blockchain queries from their users. -This pattern is likely to develop in Aztec as well, except there's a problem: privacy. If a privacy-seeking user makes a query to a 3rd-party 'full node', that user might leak data about who they are; about their historic network activity; or about their future intentions. One solution to this problem is "always run a full node", but pragmatically, not everyone will. 
To protect less-advanced users' privacy, research is underway to explore how a privacy-seeking user may request and receive data from a 3rd-party node without revealing what that data is, nor who is making the request. +This pattern is likely to develop in Aztec as well, except there's a problem: privacy. If a privacy-seeking user makes a query to a 3rd-party 'full node', that user might leak data about who they are; about their historical network activity; or about their future intentions. One solution to this problem is "always run a full node", but pragmatically, not everyone will. To protect less-advanced users' privacy, research is underway to explore how a privacy-seeking user may request and receive data from a 3rd-party node without revealing what that data is, nor who is making the request. App developers should be aware of this avenue for private data leakage. **Whenever an app requests information from a node, the entity running that node is unlikely be your user!** @@ -105,9 +105,9 @@ App developers should be aware of this avenue for private data leakage. **Whenev ##### Querying for up-to-date note sibling paths -To read a private state is to read a note from the note hash tree. To read a note is to prove existence of that note in the note hash tree. And to prove existence is to re-compute the root of the note hash tree using the leaf value, the leaf index, and the sibling path of that leaf. This computed root is then exposed to the world, as a way of saying "This note exists", or more precisely "This note has existed at least since this historic snapshot time". +To read a private state is to read a note from the note hash tree. To read a note is to prove existence of that note in the note hash tree. And to prove existence is to re-compute the root of the note hash tree using the leaf value, the leaf index, and the sibling path of that leaf. 
This computed root is then exposed to the world, as a way of saying "This note exists", or more precisely "This note has existed at least since this historical snapshot time". -If an old historic snapshot is used, then that old historic root will be exposed, and this leaks some information about the nature of your transaction: it leaks that your note was created before the snapshot date. It shrinks the 'privacy set' of the transaction to a smaller window of time than the entire history of the network. +If an old historical snapshot is used, then that old historical root will be exposed, and this leaks some information about the nature of your transaction: it leaks that your note was created before the snapshot date. It shrinks the 'privacy set' of the transaction to a smaller window of time than the entire history of the network. So for maximal privacy, it's in a user's best interest to read from the very-latest snapshot of the data tree. diff --git a/docs/docs/dev_docs/tutorials/writing_private_voting_contract.md b/docs/docs/dev_docs/tutorials/writing_private_voting_contract.md index ecfa4fb2b2d..a541e3abf5d 100644 --- a/docs/docs/dev_docs/tutorials/writing_private_voting_contract.md +++ b/docs/docs/dev_docs/tutorials/writing_private_voting_contract.md @@ -105,7 +105,7 @@ This `init` function will be called every time we access `storage` in our functi The next step is to initialize the contract with a constructor. The constructor will take an address as a parameter and set the admin. -All constructors must be private, and because the admin is in public storage, we cannot directly update it from the constructor. You can find more information about this [here](../../concepts/foundation/communication/public_private_calls.md). +All constructors must be private, and because the admin is in public storage, we cannot directly update it from the constructor. 
You can find more information about this [here](../../concepts/foundation/communication/public_private_calls/main.md). Therefore our constructor must call a public function by using `context.call_public_function()`. Paste this under the `impl` storage block: diff --git a/docs/internal_notes/building_dapps.md b/docs/internal_notes/building_dapps.md index fc807def92b..b6d4f3a1be1 100644 --- a/docs/internal_notes/building_dapps.md +++ b/docs/internal_notes/building_dapps.md @@ -29,7 +29,7 @@ Explain how to write a dapp using [`aztec.js`](https://github.com/AztecProtocol/ - How to query state - How to query whether a tx has been 'mined' - How to subscribe to logs - - How to filter for historic data in the historic block tree? + - How to filter for historical data in the historical block tree? - How to query data from any of the trees (advanced) FOR INSTRUCTIONS FOR BUILDING A WALLET, WE SHOULD WRITE DOCS HERE diff --git a/docs/internal_notes/dev_docs/sandbox/components.md b/docs/internal_notes/dev_docs/sandbox/components.md index 509e851da71..5045e2c18fd 100644 --- a/docs/internal_notes/dev_docs/sandbox/components.md +++ b/docs/internal_notes/dev_docs/sandbox/components.md @@ -246,7 +246,7 @@ Responsibilities: - "Persists" the various merkle trees (configurable). - For this milestone 1.1, we'll need the following trees: - Contract Tree - - Contract Tree Roots Tree (the tree whose leaves are the roots of historic rollups' contract trees) + - Contract Tree Roots Tree (the tree whose leaves are the roots of historical rollups' contract trees) - Nullifier Tree (so that the contract address can never be re-registered in a future deployment) - Note: Suyash has implemented C++ for the 'new' kind of nullifier tree. - Provides methods for updating the trees with commit, rollback semantics. 
diff --git a/docs/sidebars.js b/docs/sidebars.js index 3a20fac23d1..4eb6dbdcac7 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -105,7 +105,17 @@ const sidebars = { id: "concepts/foundation/communication/main", }, items: [ - "concepts/foundation/communication/public_private_calls", + { + label: "Public <> Private Communication", + type: "category", + link: { + type: "doc", + id: "concepts/foundation/communication/public_private_calls/main", + }, + items: [ + "concepts/foundation/communication/public_private_calls/slow_updates_tree" + ] + }, "concepts/foundation/communication/cross_chain_calls", ], }, @@ -320,6 +330,7 @@ const sidebars = { }, "dev_docs/contracts/syntax/events", "dev_docs/contracts/syntax/functions", + "dev_docs/contracts/syntax/slow_updates_tree", "dev_docs/contracts/syntax/context", "dev_docs/contracts/syntax/globals", ], diff --git a/iac/mainnet-fork/scripts/wait_for_fork b/iac/mainnet-fork/scripts/wait_for_fork index 4d990e30f88..326582c25af 100755 --- a/iac/mainnet-fork/scripts/wait_for_fork +++ b/iac/mainnet-fork/scripts/wait_for_fork @@ -6,7 +6,7 @@ set -e # This script waits on a healthy status from the fork - a valid response to the chainid request # We retry every 20 seconds, and wait for a total of 5 minutes (15 times) -export ETHEREUM_HOST="https://aztec-mainnet-fork.aztec.network:8545/$FORK_API_KEY" +export ETHEREUM_HOST="https://aztec-mainnet-fork.aztec.network:8545/$API_KEY" curl -H "Content-Type: application/json" -X POST --data '{"method":"eth_chainId","params":[],"id":33,"jsonrpc":"2.0"}' \ --connect-timeout 30 \ diff --git a/iac/mainnet-fork/terraform/main.tf b/iac/mainnet-fork/terraform/main.tf index 8cec1f4a18b..08198ba5e0b 100644 --- a/iac/mainnet-fork/terraform/main.tf +++ b/iac/mainnet-fork/terraform/main.tf @@ -1,7 +1,6 @@ terraform { backend "s3" { bucket = "aztec-terraform" - key = "aztec-network/mainnet-fork" region = "eu-west-2" } required_providers { @@ -50,7 +49,7 @@ provider "aws" { } resource 
"aws_service_discovery_service" "aztec_mainnet_fork" { - name = "aztec-network-mainnet-fork" + name = "${var.DEPLOY_TAG}-mainnet-fork" health_check_custom_config { failure_threshold = 1 @@ -75,10 +74,10 @@ resource "aws_service_discovery_service" "aztec_mainnet_fork" { # EFS filesystem for mainnet fork resource "aws_efs_file_system" "aztec_mainnet_fork_data_store" { - creation_token = "aztec-network-mainnet-fork-data" + creation_token = "${var.DEPLOY_TAG}-mainnet-fork-data" tags = { - Name = "aztec-network-mainnet-fork-data" + Name = "${var.DEPLOY_TAG}-mainnet-fork-data" } lifecycle_policy { @@ -100,7 +99,7 @@ resource "aws_efs_mount_target" "aztec_fork_private_az2" { # Define deployment task and service resource "aws_ecs_task_definition" "aztec_mainnet_fork" { - family = "aztec-network-mainnet-fork" + family = "${var.DEPLOY_TAG}-mainnet-fork" requires_compatibilities = ["FARGATE"] network_mode = "awsvpc" cpu = "2048" @@ -117,13 +116,13 @@ resource "aws_ecs_task_definition" "aztec_mainnet_fork" { container_definitions = <serve/contract_addresses.json + +cat serve/contract_addresses.json + +echo "Contract addresses have been written to serve/contract_addresses.json" diff --git a/l1-contracts/src/core/libraries/ConstantsGen.sol b/l1-contracts/src/core/libraries/ConstantsGen.sol index 9cb97e57a6b..d63790f5e03 100644 --- a/l1-contracts/src/core/libraries/ConstantsGen.sol +++ b/l1-contracts/src/core/libraries/ConstantsGen.sol @@ -56,7 +56,7 @@ library Constants { uint256 internal constant NOTE_HASH_SUBTREE_HEIGHT = 7; uint256 internal constant NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH = 25; uint256 internal constant NULLIFIER_SUBTREE_HEIGHT = 7; - uint256 internal constant HISTORIC_BLOCKS_TREE_HEIGHT = 16; + uint256 internal constant BLOCKS_TREE_HEIGHT = 16; uint256 internal constant NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH = 13; uint256 internal constant L1_TO_L2_MSG_SUBTREE_HEIGHT = 4; uint256 internal constant L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH = 12; @@ -70,7 +70,7 @@ 
library Constants { uint256 internal constant MAX_NOTES_PER_PAGE = 10; uint256 internal constant VIEW_NOTE_ORACLE_RETURN_LENGTH = 212; uint256 internal constant CALL_CONTEXT_LENGTH = 7; - uint256 internal constant HISTORIC_BLOCK_DATA_LENGTH = 7; + uint256 internal constant BLOCK_HEADER_LENGTH = 7; uint256 internal constant FUNCTION_DATA_LENGTH = 4; uint256 internal constant CONTRACT_DEPLOYMENT_DATA_LENGTH = 6; uint256 internal constant PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH = 155; diff --git a/l1-contracts/src/core/libraries/Decoder.sol b/l1-contracts/src/core/libraries/Decoder.sol index ccc7d0cf677..110522190b1 100644 --- a/l1-contracts/src/core/libraries/Decoder.sol +++ b/l1-contracts/src/core/libraries/Decoder.sol @@ -35,8 +35,8 @@ import {Hash} from "./Hash.sol"; * | 0x00ec | 0x20 | startPublicDataTreeRoot * | 0x010c | 0x20 | startL1ToL2MessageTreeSnapshot.root * | 0x012c | 0x04 | startL1ToL2MessageTreeSnapshot.nextAvailableLeafIndex - * | 0x0130 | 0x20 | startHistoricBlocksTreeSnapshot.root - * | 0x0150 | 0x04 | startHistoricBlocksTreeSnapshot.nextAvailableLeafIndex + * | 0x0130 | 0x20 | startBlocksTreeSnapshot.root + * | 0x0150 | 0x04 | startBlocksTreeSnapshot.nextAvailableLeafIndex * | 0x0154 | 0x20 | endNoteHashTreeSnapshot.root * | 0x0174 | 0x04 | endNoteHashTreeSnapshot.nextAvailableLeafIndex * | 0x0178 | 0x20 | endNullifierTreeSnapshot.root @@ -46,8 +46,8 @@ import {Hash} from "./Hash.sol"; * | 0x01c0 | 0x20 | endPublicDataTreeRoot * | 0x01e0 | 0x20 | endL1ToL2MessageTreeSnapshot.root * | 0x0200 | 0x04 | endL1ToL2MessageTreeSnapshot.nextAvailableLeafIndex - * | 0x0204 | 0x20 | endHistoricBlocksTreeSnapshot.root - * | 0x0224 | 0x04 | endHistoricBlocksTreeSnapshot.nextAvailableLeafIndex + * | 0x0204 | 0x20 | endBlocksTreeSnapshot.root + * | 0x0224 | 0x04 | endBlocksTreeSnapshot.nextAvailableLeafIndex * | 0x0228 | 0x04 | len(newCommitments) (denoted a) * | 0x022c | a * 0x20 | newCommitments * | 0x022c + a * 0x20 | 0x04 | len(newNullifiers) (denoted b) diff 
--git a/l1-contracts/src/core/messagebridge/Registry.sol b/l1-contracts/src/core/messagebridge/Registry.sol index 15f6ce6b2c8..947597c8b02 100644 --- a/l1-contracts/src/core/messagebridge/Registry.sol +++ b/l1-contracts/src/core/messagebridge/Registry.sol @@ -15,7 +15,7 @@ import {Errors} from "../libraries/Errors.sol"; /** * @title Registry * @author Aztec Labs - * @notice Keeps track of addresses of rollup, inbox and outbox as well as historic addresses. + * @notice Keeps track of addresses of rollup, inbox and outbox as well as historical addresses. * Used as the source of truth for finding the "head" of the rollup chain. Very important information * for L1<->L2 communication. */ diff --git a/l1-contracts/terraform/main.tf b/l1-contracts/terraform/main.tf new file mode 100644 index 00000000000..89cb2c801e5 --- /dev/null +++ b/l1-contracts/terraform/main.tf @@ -0,0 +1,57 @@ +terraform { + backend "s3" { + bucket = "aztec-terraform" + region = "eu-west-2" + } + required_providers { + aws = { + source = "hashicorp/aws" + version = "3.74.2" + } + } +} + +variable "ROLLUP_CONTRACT_ADDRESS" { + type = string + default = "" +} + +output "rollup_contract_address" { + value = var.ROLLUP_CONTRACT_ADDRESS +} + +variable "REGISTRY_CONTRACT_ADDRESS" { + type = string + default = "" +} + +output "registry_contract_address" { + value = var.REGISTRY_CONTRACT_ADDRESS +} + +variable "INBOX_CONTRACT_ADDRESS" { + type = string + default = "" +} + +output "inbox_contract_address" { + value = var.INBOX_CONTRACT_ADDRESS +} + +variable "OUTBOX_CONTRACT_ADDRESS" { + type = string + default = "" +} + +output "outbox_contract_address" { + value = var.OUTBOX_CONTRACT_ADDRESS +} + +variable "CONTRACT_DEPLOYMENT_EMITTER_ADDRESS" { + type = string + default = "" +} + +output "contract_deployment_emitter_address" { + value = var.CONTRACT_DEPLOYMENT_EMITTER_ADDRESS +} diff --git a/yarn-project/acir-simulator/src/acvm/deserialize.ts b/yarn-project/acir-simulator/src/acvm/deserialize.ts index 
d82640a42e9..649ad620cf0 100644 --- a/yarn-project/acir-simulator/src/acvm/deserialize.ts +++ b/yarn-project/acir-simulator/src/acvm/deserialize.ts @@ -1,10 +1,10 @@ import { + BlockHeader, CallContext, ContractDeploymentData, ContractStorageRead, ContractStorageUpdateRequest, FunctionSelector, - HistoricBlockData, MAX_NEW_COMMITMENTS_PER_CALL, MAX_NEW_L2_TO_L1_MSGS_PER_CALL, MAX_NEW_NULLIFIERS_PER_CALL, @@ -163,7 +163,7 @@ export function extractPrivateCircuitPublicInputs( const encryptedLogPreimagesLength = witnessReader.readField(); const unencryptedLogPreimagesLength = witnessReader.readField(); - const historicBlockData = new HistoricBlockData( + const blockHeader = new BlockHeader( witnessReader.readField(), witnessReader.readField(), witnessReader.readField(), @@ -201,7 +201,7 @@ export function extractPrivateCircuitPublicInputs( unencryptedLogsHash, encryptedLogPreimagesLength, unencryptedLogPreimagesLength, - historicBlockData, + blockHeader, contractDeploymentData, chainId, version, @@ -255,7 +255,7 @@ export function extractPublicCircuitPublicInputs(partialWitness: ACVMWitness, ac const unencryptedLogsHash = witnessReader.readFieldArray(NUM_FIELDS_PER_SHA256); const unencryptedLogPreimagesLength = witnessReader.readField(); - const historicBlockData = new HistoricBlockData( + const blockHeader = new BlockHeader( witnessReader.readField(), witnessReader.readField(), witnessReader.readField(), @@ -282,7 +282,7 @@ export function extractPublicCircuitPublicInputs(partialWitness: ACVMWitness, ac newL2ToL1Msgs, unencryptedLogsHash, unencryptedLogPreimagesLength, - historicBlockData, + blockHeader, proverAddress, ); } diff --git a/yarn-project/acir-simulator/src/acvm/oracle/oracle.ts b/yarn-project/acir-simulator/src/acvm/oracle/oracle.ts index ebc8ca180f1..87a32f072c0 100644 --- a/yarn-project/acir-simulator/src/acvm/oracle/oracle.ts +++ b/yarn-project/acir-simulator/src/acvm/oracle/oracle.ts @@ -109,14 +109,14 @@ export class Oracle { return 
witness.toFieldArray().map(toACVMField); } - async getBlockData([blockNumber]: ACVMField[]): Promise { + async getBlockHeader([blockNumber]: ACVMField[]): Promise { const parsedBlockNumber = frToNumber(fromACVMField(blockNumber)); - const blockData = await this.typedOracle.getBlockData(parsedBlockNumber); - if (!blockData) { - throw new Error(`Block data not found for block ${parsedBlockNumber}.`); + const blockHeader = await this.typedOracle.getBlockHeader(parsedBlockNumber); + if (!blockHeader) { + throw new Error(`Block header not found for block ${parsedBlockNumber}.`); } - return blockData.toArray().map(toACVMField); + return blockHeader.toArray().map(toACVMField); } async getAuthWitness([messageHash]: ACVMField[]): Promise { diff --git a/yarn-project/acir-simulator/src/acvm/oracle/typed_oracle.ts b/yarn-project/acir-simulator/src/acvm/oracle/typed_oracle.ts index 9138009309c..7013689a8ca 100644 --- a/yarn-project/acir-simulator/src/acvm/oracle/typed_oracle.ts +++ b/yarn-project/acir-simulator/src/acvm/oracle/typed_oracle.ts @@ -1,4 +1,4 @@ -import { HistoricBlockData, PrivateCallStackItem, PublicCallRequest } from '@aztec/circuits.js'; +import { BlockHeader, PrivateCallStackItem, PublicCallRequest } from '@aztec/circuits.js'; import { FunctionSelector } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; @@ -102,7 +102,7 @@ export abstract class TypedOracle { throw new Error('Not available.'); } - getBlockData(_blockNumber: number): Promise { + getBlockHeader(_blockNumber: number): Promise { throw new Error('Not available.'); } diff --git a/yarn-project/acir-simulator/src/acvm/serialize.ts b/yarn-project/acir-simulator/src/acvm/serialize.ts index dff75ab6cb9..abf78f63c1b 100644 --- a/yarn-project/acir-simulator/src/acvm/serialize.ts +++ b/yarn-project/acir-simulator/src/acvm/serialize.ts @@ -1,9 +1,9 @@ import { + BlockHeader, CallContext, 
ContractDeploymentData, FunctionData, GlobalVariables, - HistoricBlockData, PrivateCallStackItem, PrivateCircuitPublicInputs, PublicCallRequest, @@ -101,19 +101,19 @@ export function toACVMContractDeploymentData(contractDeploymentData: ContractDep } /** - * Converts a historic block data into ACVM fields. - * @param historicBlockData - The historic block data object to convert. + * Converts a block header into ACVM fields. + * @param blockHeader - The block header object to convert. * @returns The ACVM fields. */ -export function toACVMHistoricBlockData(historicBlockData: HistoricBlockData): ACVMField[] { +export function toACVMBlockHeader(blockHeader: BlockHeader): ACVMField[] { return [ - toACVMField(historicBlockData.noteHashTreeRoot), - toACVMField(historicBlockData.nullifierTreeRoot), - toACVMField(historicBlockData.contractTreeRoot), - toACVMField(historicBlockData.l1ToL2MessagesTreeRoot), - toACVMField(historicBlockData.blocksTreeRoot), - toACVMField(historicBlockData.publicDataTreeRoot), - toACVMField(historicBlockData.globalVariablesHash), + toACVMField(blockHeader.noteHashTreeRoot), + toACVMField(blockHeader.nullifierTreeRoot), + toACVMField(blockHeader.contractTreeRoot), + toACVMField(blockHeader.l1ToL2MessagesTreeRoot), + toACVMField(blockHeader.blocksTreeRoot), + toACVMField(blockHeader.publicDataTreeRoot), + toACVMField(blockHeader.globalVariablesHash), ]; } @@ -156,7 +156,7 @@ export function toACVMPublicInputs(publicInputs: PrivateCircuitPublicInputs): AC toACVMField(publicInputs.encryptedLogPreimagesLength), toACVMField(publicInputs.unencryptedLogPreimagesLength), - ...toACVMHistoricBlockData(publicInputs.historicBlockData), + ...toACVMBlockHeader(publicInputs.blockHeader), ...toACVMContractDeploymentData(publicInputs.contractDeploymentData), diff --git a/yarn-project/acir-simulator/src/client/client_execution_context.ts b/yarn-project/acir-simulator/src/client/client_execution_context.ts index 5ccbbcc2b99..d113bed04b3 100644 --- 
a/yarn-project/acir-simulator/src/client/client_execution_context.ts +++ b/yarn-project/acir-simulator/src/client/client_execution_context.ts @@ -1,9 +1,9 @@ import { + BlockHeader, CallContext, ContractDeploymentData, FunctionData, FunctionSelector, - HistoricBlockData, PublicCallRequest, ReadRequestMembershipWitness, TxContext, @@ -18,9 +18,9 @@ import { AuthWitness, FunctionL2Logs, L1NotePayload, Note, UnencryptedL2Log } fr import { NoteData, + toACVMBlockHeader, toACVMCallContext, toACVMContractDeploymentData, - toACVMHistoricBlockData, toACVMWitness, } from '../acvm/index.js'; import { SideEffectCounter } from '../common/index.js'; @@ -64,8 +64,8 @@ export class ClientExecutionContext extends ViewDataOracle { private readonly argsHash: Fr, private readonly txContext: TxContext, private readonly callContext: CallContext, - /** Data required to reconstruct the block hash, it contains historic roots. */ - protected readonly historicBlockData: HistoricBlockData, + /** Data required to reconstruct the block hash, it contains historical roots. 
*/ + protected readonly blockHeader: BlockHeader, /** List of transient auth witnesses to be used during this simulation */ protected readonly authWitnesses: AuthWitness[], private readonly packedArgsCache: PackedArgsCache, @@ -75,7 +75,7 @@ export class ClientExecutionContext extends ViewDataOracle { private readonly curve: Grumpkin, protected log = createDebugLogger('aztec:simulator:client_execution_context'), ) { - super(contractAddress, historicBlockData, authWitnesses, db, undefined, log); + super(contractAddress, blockHeader, authWitnesses, db, undefined, log); } // We still need this function until we can get user-defined ordering of structs for fn arguments @@ -98,7 +98,7 @@ export class ClientExecutionContext extends ViewDataOracle { const fields = [ ...toACVMCallContext(this.callContext), - ...toACVMHistoricBlockData(this.historicBlockData), + ...toACVMBlockHeader(this.blockHeader), ...toACVMContractDeploymentData(contractDeploymentData), this.txContext.chainId, @@ -326,7 +326,7 @@ export class ClientExecutionContext extends ViewDataOracle { argsHash, derivedTxContext, derivedCallContext, - this.historicBlockData, + this.blockHeader, this.authWitnesses, this.packedArgsCache, this.noteCache, diff --git a/yarn-project/acir-simulator/src/client/db_oracle.ts b/yarn-project/acir-simulator/src/client/db_oracle.ts index ad2cb1a19c4..6ed7a6a8e6a 100644 --- a/yarn-project/acir-simulator/src/client/db_oracle.ts +++ b/yarn-project/acir-simulator/src/client/db_oracle.ts @@ -1,4 +1,4 @@ -import { CompleteAddress, GrumpkinPrivateKey, HistoricBlockData, PublicKey } from '@aztec/circuits.js'; +import { BlockHeader, CompleteAddress, GrumpkinPrivateKey, PublicKey } from '@aztec/circuits.js'; import { FunctionArtifact, FunctionDebugMetadata, FunctionSelector } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; @@ -109,12 +109,12 @@ export interface DBOracle extends 
CommitmentsDB { getNullifierIndex(nullifier: Fr): Promise; /** - * Retrieve the databases view of the Historic Block Data object. - * This structure is fed into the circuits simulator and is used to prove against certain historic roots. + * Retrieve the databases view of the Block Header object. + * This structure is fed into the circuits simulator and is used to prove against certain historical roots. * - * @returns A Promise that resolves to a HistoricBlockData object. + * @returns A Promise that resolves to a BlockHeader object. */ - getHistoricBlockData(): Promise; + getBlockHeader(): Promise; /** * Fetch the index of the leaf in the respective tree diff --git a/yarn-project/acir-simulator/src/client/private_execution.test.ts b/yarn-project/acir-simulator/src/client/private_execution.test.ts index b96d69dc338..6b0a7816a51 100644 --- a/yarn-project/acir-simulator/src/client/private_execution.test.ts +++ b/yarn-project/acir-simulator/src/client/private_execution.test.ts @@ -1,11 +1,11 @@ import { + BlockHeader, CallContext, CompleteAddress, ContractDeploymentData, EMPTY_NULLIFIED_COMMITMENT, FieldsOf, FunctionData, - HistoricBlockData, L1_TO_L2_MSG_TREE_HEIGHT, MAX_NEW_COMMITMENTS_PER_CALL, NOTE_HASH_TREE_HEIGHT, @@ -58,7 +58,7 @@ describe('Private Execution test suite', () => { let oracle: MockProxy; let acirSimulator: AcirSimulator; - let blockData = HistoricBlockData.empty(); + let blockHeader = BlockHeader.empty(); let logger: DebugLogger; const defaultContractAddress = AztecAddress.random(); @@ -132,10 +132,10 @@ describe('Private Execution test suite', () => { // Update root. const newRoot = trees[name].getRoot(false); - const prevRoots = blockData.toBuffer(); + const prevRoots = blockHeader.toBuffer(); const rootIndex = name === 'noteHash' ? 
0 : 32 * 3; const newRoots = Buffer.concat([prevRoots.subarray(0, rootIndex), newRoot, prevRoots.subarray(rootIndex + 32)]); - blockData = HistoricBlockData.fromBuffer(newRoots); + blockHeader = BlockHeader.fromBuffer(newRoots); return trees[name]; }; @@ -163,7 +163,7 @@ describe('Private Execution test suite', () => { } throw new Error(`Unknown address ${pubKey}`); }); - oracle.getHistoricBlockData.mockResolvedValue(blockData); + oracle.getBlockHeader.mockResolvedValue(blockHeader); acirSimulator = new AcirSimulator(oracle); }); diff --git a/yarn-project/acir-simulator/src/client/simulator.ts b/yarn-project/acir-simulator/src/client/simulator.ts index cf7222cbe20..520626add64 100644 --- a/yarn-project/acir-simulator/src/client/simulator.ts +++ b/yarn-project/acir-simulator/src/client/simulator.ts @@ -79,7 +79,7 @@ export class AcirSimulator { const curve = new Grumpkin(); - const historicBlockData = await this.db.getHistoricBlockData(); + const blockHeader = await this.db.getBlockHeader(); const callContext = new CallContext( msgSender, contractAddress, @@ -94,7 +94,7 @@ export class AcirSimulator { request.argsHash, request.txContext, callContext, - historicBlockData, + blockHeader, request.authWitnesses, PackedArgsCache.create(request.packedArguments), new ExecutionNoteCache(), @@ -133,8 +133,8 @@ export class AcirSimulator { throw new Error(`Cannot run ${entryPointArtifact.functionType} function as constrained`); } - const historicBlockData = await this.db.getHistoricBlockData(); - const context = new ViewDataOracle(contractAddress, historicBlockData, [], this.db, aztecNode); + const blockHeader = await this.db.getBlockHeader(); + const context = new ViewDataOracle(contractAddress, blockHeader, [], this.db, aztecNode); try { return await executeUnconstrainedFunction( diff --git a/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts b/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts index 7e7eb3716d4..3db5f2e9f3d 100644 
--- a/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts +++ b/yarn-project/acir-simulator/src/client/unconstrained_execution.test.ts @@ -1,4 +1,4 @@ -import { CompleteAddress, FunctionData, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, CompleteAddress, FunctionData } from '@aztec/circuits.js'; import { FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr, GrumpkinScalar } from '@aztec/foundation/fields'; @@ -46,7 +46,7 @@ describe('Unconstrained Execution test suite', () => { const notes: Note[] = [...Array(5).fill(buildNote(1n, owner)), ...Array(2).fill(buildNote(2n, owner))]; - oracle.getHistoricBlockData.mockResolvedValue(HistoricBlockData.empty()); + oracle.getBlockHeader.mockResolvedValue(BlockHeader.empty()); oracle.getNotes.mockResolvedValue( notes.map((note, index) => ({ contractAddress, diff --git a/yarn-project/acir-simulator/src/client/view_data_oracle.ts b/yarn-project/acir-simulator/src/client/view_data_oracle.ts index fe124f853d2..f744e54dd6c 100644 --- a/yarn-project/acir-simulator/src/client/view_data_oracle.ts +++ b/yarn-project/acir-simulator/src/client/view_data_oracle.ts @@ -1,4 +1,4 @@ -import { HistoricBlockData, PublicKey } from '@aztec/circuits.js'; +import { BlockHeader, PublicKey } from '@aztec/circuits.js'; import { computeGlobalsHash, siloNullifier } from '@aztec/circuits.js/abis'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr } from '@aztec/foundation/fields'; @@ -16,8 +16,8 @@ import { pickNotes } from './pick_notes.js'; export class ViewDataOracle extends TypedOracle { constructor( protected readonly contractAddress: AztecAddress, - /** Data required to reconstruct the block hash, it contains historic roots. */ - protected readonly historicBlockData: HistoricBlockData, + /** Data required to reconstruct the block hash, it contains historical roots. 
*/ + protected readonly blockHeader: BlockHeader, /** List of transient auth witnesses to be used during this simulation */ protected readonly authWitnesses: AuthWitness[], protected readonly db: DBOracle, @@ -92,21 +92,21 @@ export class ViewDataOracle extends TypedOracle { } /** - * Fetches historic block data for a given block. - * @param blockNumber - The block number at which to get the historic block data. - * @returns Historic block data extracted from a block with block number `blockNumber`. + * Fetches a block header of a given block. + * @param blockNumber - The number of a block of which to get the block header. + * @returns Block extracted from a block with block number `blockNumber`. */ - public async getBlockData(blockNumber: number): Promise { + public async getBlockHeader(blockNumber: number): Promise { const block = await this.db.getBlock(blockNumber); if (!block) { return undefined; } - return new HistoricBlockData( + return new BlockHeader( block.endNoteHashTreeSnapshot.root, block.endNullifierTreeSnapshot.root, block.endContractTreeSnapshot.root, block.endL1ToL2MessagesTreeSnapshot.root, - block.endHistoricBlocksTreeSnapshot.root, + block.endBlocksTreeSnapshot.root, new Fr(0), // TODO(#3441) privateKernelVkTreeRoot is not present in L2Block and it's not yet populated in noir block.endPublicDataTreeRoot, computeGlobalsHash(block.globalVariables), @@ -199,7 +199,7 @@ export class ViewDataOracle extends TypedOracle { */ public async getL1ToL2Message(msgKey: Fr) { const message = await this.db.getL1ToL2Message(msgKey); - return { ...message, root: this.historicBlockData.l1ToL2MessagesTreeRoot }; + return { ...message, root: this.blockHeader.l1ToL2MessagesTreeRoot }; } /** diff --git a/yarn-project/acir-simulator/src/public/executor.ts b/yarn-project/acir-simulator/src/public/executor.ts index fdcb691898d..77d9b1e9baa 100644 --- a/yarn-project/acir-simulator/src/public/executor.ts +++ b/yarn-project/acir-simulator/src/public/executor.ts @@ -1,4 +1,4 
@@ -import { GlobalVariables, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, GlobalVariables } from '@aztec/circuits.js'; import { createDebugLogger } from '@aztec/foundation/log'; import { Oracle, acvm, extractCallStack, extractPublicCircuitPublicInputs } from '../acvm/index.js'; @@ -81,7 +81,7 @@ export class PublicExecutor { private readonly stateDb: PublicStateDB, private readonly contractsDb: PublicContractsDB, private readonly commitmentsDb: CommitmentsDB, - private readonly blockData: HistoricBlockData, + private readonly blockHeader: BlockHeader, ) {} /** @@ -105,7 +105,7 @@ export class PublicExecutor { const context = new PublicExecutionContext( execution, - this.blockData, + this.blockHeader, globalVariables, packedArgs, sideEffectCounter, diff --git a/yarn-project/acir-simulator/src/public/index.test.ts b/yarn-project/acir-simulator/src/public/index.test.ts index 4d985f476af..1ab7da481be 100644 --- a/yarn-project/acir-simulator/src/public/index.test.ts +++ b/yarn-project/acir-simulator/src/public/index.test.ts @@ -1,10 +1,4 @@ -import { - CallContext, - FunctionData, - GlobalVariables, - HistoricBlockData, - L1_TO_L2_MSG_TREE_HEIGHT, -} from '@aztec/circuits.js'; +import { BlockHeader, CallContext, FunctionData, GlobalVariables, L1_TO_L2_MSG_TREE_HEIGHT } from '@aztec/circuits.js'; import { FunctionArtifact, FunctionSelector, encodeArguments } from '@aztec/foundation/abi'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { pedersenHash } from '@aztec/foundation/crypto'; @@ -34,15 +28,15 @@ describe('ACIR public execution simulator', () => { let publicContracts: MockProxy; let commitmentsDb: MockProxy; let executor: PublicExecutor; - let blockData: HistoricBlockData; + let blockHeader: BlockHeader; beforeEach(() => { publicState = mock(); publicContracts = mock(); commitmentsDb = mock(); - blockData = HistoricBlockData.empty(); - executor = new PublicExecutor(publicState, publicContracts, commitmentsDb, 
blockData); + blockHeader = BlockHeader.empty(); + executor = new PublicExecutor(publicState, publicContracts, commitmentsDb, blockHeader); }, 10000); describe('Token contract', () => { diff --git a/yarn-project/acir-simulator/src/public/public_execution_context.ts b/yarn-project/acir-simulator/src/public/public_execution_context.ts index 6db7acfd3fd..de7ee848009 100644 --- a/yarn-project/acir-simulator/src/public/public_execution_context.ts +++ b/yarn-project/acir-simulator/src/public/public_execution_context.ts @@ -1,4 +1,4 @@ -import { CallContext, FunctionData, FunctionSelector, GlobalVariables, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, CallContext, FunctionData, FunctionSelector, GlobalVariables } from '@aztec/circuits.js'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; @@ -7,9 +7,9 @@ import { FunctionL2Logs, UnencryptedL2Log } from '@aztec/types'; import { TypedOracle, + toACVMBlockHeader, toACVMCallContext, toACVMGlobalVariables, - toACVMHistoricBlockData, toACVMWitness, } from '../acvm/index.js'; import { PackedArgsCache, SideEffectCounter } from '../common/index.js'; @@ -31,7 +31,7 @@ export class PublicExecutionContext extends TypedOracle { * Data for this execution. */ public readonly execution: PublicExecution, - private readonly historicBlockData: HistoricBlockData, + private readonly blockHeader: BlockHeader, private readonly globalVariables: GlobalVariables, private readonly packedArgsCache: PackedArgsCache, private readonly sideEffectCounter: SideEffectCounter, @@ -48,7 +48,7 @@ export class PublicExecutionContext extends TypedOracle { * Generates the initial witness for a public function. * @param args - The arguments to the function. * @param callContext - The call context of the function. - * @param historicBlockData - Historic Trees roots and data required to reconstruct block hash. 
+ * @param blockHeader - Contains data required to reconstruct a block hash (historical roots etc.). * @param globalVariables - The global variables. * @param witnessStartIndex - The index where to start inserting the parameters. * @returns The initial witness. @@ -57,7 +57,7 @@ export class PublicExecutionContext extends TypedOracle { const { callContext, args } = this.execution; const fields = [ ...toACVMCallContext(callContext), - ...toACVMHistoricBlockData(this.historicBlockData), + ...toACVMBlockHeader(this.blockHeader), ...toACVMGlobalVariables(this.globalVariables), ...args, @@ -104,7 +104,7 @@ export class PublicExecutionContext extends TypedOracle { public async getL1ToL2Message(msgKey: Fr) { // l1 to l2 messages in public contexts TODO: https://github.com/AztecProtocol/aztec-packages/issues/616 const message = await this.commitmentsDb.getL1ToL2Message(msgKey); - return { ...message, root: this.historicBlockData.l1ToL2MessagesTreeRoot }; + return { ...message, root: this.blockHeader.l1ToL2MessagesTreeRoot }; } /** @@ -210,7 +210,7 @@ export class PublicExecutionContext extends TypedOracle { const context = new PublicExecutionContext( nestedExecution, - this.historicBlockData, + this.blockHeader, this.globalVariables, this.packedArgsCache, this.sideEffectCounter, diff --git a/yarn-project/aztec-faucet/terraform/main.tf b/yarn-project/aztec-faucet/terraform/main.tf index 8a1f901fd09..bfbb82a982b 100644 --- a/yarn-project/aztec-faucet/terraform/main.tf +++ b/yarn-project/aztec-faucet/terraform/main.tf @@ -41,7 +41,7 @@ resource "aws_cloudwatch_log_group" "aztec-faucet" { } resource "aws_service_discovery_service" "aztec-faucet" { - name = "${var.DEPLOY_TAG}-aztec-faucet" + name = "${var.DEPLOY_TAG}-faucet" health_check_custom_config { failure_threshold = 1 @@ -72,7 +72,7 @@ resource "aws_service_discovery_service" "aztec-faucet" { # Define task definition and service. 
resource "aws_ecs_task_definition" "aztec-faucet" { - family = "${var.DEPLOY_TAG}-aztec-faucet" + family = "${var.DEPLOY_TAG}-faucet" requires_compatibilities = ["FARGATE"] network_mode = "awsvpc" cpu = "2048" @@ -83,8 +83,8 @@ resource "aws_ecs_task_definition" "aztec-faucet" { container_definitions = < { - const committedDb = await this.#getWorldState(); + public async findLeafIndex( + blockNumber: number | 'latest', + treeId: MerkleTreeId, + leafValue: Fr, + ): Promise { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.findLeafIndex(treeId, leafValue.toBuffer()); } /** * Returns a sibling path for the given index in the contract tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. */ - public async getContractSiblingPath(leafIndex: bigint): Promise> { - const committedDb = await this.#getWorldState(); + public async getContractSiblingPath( + blockNumber: number | 'latest', + leafIndex: bigint, + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.CONTRACT_TREE, leafIndex); } /** * Returns a sibling path for the given index in the nullifier tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. */ - public async getNullifierTreeSiblingPath(leafIndex: bigint): Promise> { - const committedDb = await this.#getWorldState(); + public async getNullifierTreeSiblingPath( + blockNumber: number | 'latest', + leafIndex: bigint, + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.NULLIFIER_TREE, leafIndex); } /** * Returns a sibling path for the given index in the data tree. 
+ * @param blockNumber - The block number at which to get the data. * @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. */ - public async getNoteHashSiblingPath(leafIndex: bigint): Promise> { - const committedDb = await this.#getWorldState(); + public async getNoteHashSiblingPath( + blockNumber: number | 'latest', + leafIndex: bigint, + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.NOTE_HASH_TREE, leafIndex); } @@ -348,40 +366,50 @@ export class AztecNodeService implements AztecNode { */ public async getL1ToL2MessageAndIndex(messageKey: Fr): Promise { // todo: #697 - make this one lookup. - const index = (await this.findLeafIndex(MerkleTreeId.L1_TO_L2_MESSAGES_TREE, messageKey))!; + const index = (await this.findLeafIndex('latest', MerkleTreeId.L1_TO_L2_MESSAGES_TREE, messageKey))!; const message = await this.l1ToL2MessageSource.getConfirmedL1ToL2Message(messageKey); return Promise.resolve(new L1ToL2MessageAndIndex(index, message)); } /** * Returns a sibling path for a leaf in the committed l1 to l2 data tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. * @returns The sibling path. */ - public async getL1ToL2MessageSiblingPath(leafIndex: bigint): Promise> { - const committedDb = await this.#getWorldState(); + public async getL1ToL2MessageSiblingPath( + blockNumber: number | 'latest', + leafIndex: bigint, + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.L1_TO_L2_MESSAGES_TREE, leafIndex); } /** - * Returns a sibling path for a leaf in the committed historic blocks tree. + * Returns a sibling path for a leaf in the committed blocks tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. 
* @returns The sibling path. */ - public async getHistoricBlocksTreeSiblingPath( + public async getBlocksTreeSiblingPath( + blockNumber: number | 'latest', leafIndex: bigint, - ): Promise> { - const committedDb = await this.#getWorldState(); + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.BLOCKS_TREE, leafIndex); } /** * Returns a sibling path for a leaf in the committed public data tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. * @returns The sibling path. */ - public async getPublicDataTreeSiblingPath(leafIndex: bigint): Promise> { - const committedDb = await this.#getWorldState(); + public async getPublicDataTreeSiblingPath( + blockNumber: number | 'latest', + leafIndex: bigint, + ): Promise> { + const committedDb = await this.#getWorldState(blockNumber); return committedDb.getSiblingPath(MerkleTreeId.PUBLIC_DATA_TREE, leafIndex); } @@ -392,17 +420,17 @@ export class AztecNodeService implements AztecNode { * @returns The nullifier membership witness (if found). 
*/ public async getNullifierMembershipWitness( - blockNumber: number, + blockNumber: number | 'latest', nullifier: Fr, ): Promise { - const committedDb = await this.#getWorldState(); - const index = await committedDb.findLeafIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBuffer()); + const db = await this.#getWorldState(blockNumber); + const index = await db.findLeafIndex(MerkleTreeId.NULLIFIER_TREE, nullifier.toBuffer()); if (!index) { return undefined; } - const leafDataPromise = committedDb.getLeafData(MerkleTreeId.NULLIFIER_TREE, Number(index)); - const siblingPathPromise = committedDb.getSiblingPath( + const leafDataPromise = db.getLeafData(MerkleTreeId.NULLIFIER_TREE, Number(index)); + const siblingPathPromise = db.getSiblingPath( MerkleTreeId.NULLIFIER_TREE, BigInt(index), ); @@ -431,10 +459,10 @@ export class AztecNodeService implements AztecNode { * TODO: This is a confusing behavior and we should eventually address that. */ public async getLowNullifierMembershipWitness( - blockNumber: number, + blockNumber: number | 'latest', nullifier: Fr, ): Promise { - const committedDb = await this.#getWorldState(); + const committedDb = await this.#getWorldState(blockNumber); const { index, alreadyPresent } = await committedDb.getPreviousValueIndex( MerkleTreeId.NULLIFIER_TREE, nullifier.toBigInt(), @@ -464,7 +492,7 @@ export class AztecNodeService implements AztecNode { * @returns Storage value at the given contract slot (or undefined if not found). */ public async getPublicStorageAt(contract: AztecAddress, slot: Fr): Promise { - const committedDb = await this.#getWorldState(); + const committedDb = await this.#getWorldState('latest'); const leafIndex = computePublicDataTreeIndex(contract, slot); const value = await committedDb.getLeafValue(MerkleTreeId.PUBLIC_DATA_TREE, leafIndex.value); return value ? Fr.fromBuffer(value) : undefined; @@ -475,7 +503,7 @@ export class AztecNodeService implements AztecNode { * @returns The current committed roots for the data trees. 
*/ public async getTreeRoots(): Promise> { - const committedDb = await this.#getWorldState(); + const committedDb = await this.#getWorldState('latest'); const getTreeRoot = async (id: MerkleTreeId) => Fr.fromBuffer((await committedDb.getTreeInfo(id)).root); const [noteHashTree, nullifierTree, contractTree, l1ToL2MessagesTree, blocksTree, publicDataTree] = @@ -499,14 +527,14 @@ export class AztecNodeService implements AztecNode { } /** - * Returns the currently committed historic block data. - * @returns The current committed block data. + * Returns the currently committed block header. + * @returns The current committed block header. */ - public async getHistoricBlockData(): Promise { - const committedDb = await this.#getWorldState(); + public async getBlockHeader(): Promise { + const committedDb = await this.#getWorldState('latest'); const [roots, globalsHash] = await Promise.all([this.getTreeRoots(), committedDb.getLatestGlobalVariablesHash()]); - return new HistoricBlockData( + return new BlockHeader( roots[MerkleTreeId.NOTE_HASH_TREE], roots[MerkleTreeId.NULLIFIER_TREE], roots[MerkleTreeId.CONTRACT_TREE], @@ -557,24 +585,40 @@ export class AztecNodeService implements AztecNode { /** * Returns an instance of MerkleTreeOperations having first ensured the world state is fully synched + * @param blockNumber - The block number at which to get the data. 
* @returns An instance of a committed MerkleTreeOperations */ - async #getWorldState() { + async #getWorldState(blockNumber: number | 'latest') { + if (typeof blockNumber === 'number' && blockNumber < INITIAL_L2_BLOCK_NUM) { + throw new Error('Invalid block number to get world state for: ' + blockNumber); + } + + let blockSyncedTo: number = 0; try { // Attempt to sync the world state if necessary - await this.#syncWorldState(); + blockSyncedTo = await this.#syncWorldState(); } catch (err) { this.log.error(`Error getting world state: ${err}`); } - return this.worldStateSynchronizer.getCommitted(); + + // using a snapshot could be less efficient than using the committed db + if (blockNumber === 'latest' || blockNumber === blockSyncedTo) { + this.log(`Using committed db for block ${blockNumber}, world state synced upto ${blockSyncedTo}`); + return this.worldStateSynchronizer.getCommitted(); + } else if (blockNumber < blockSyncedTo) { + this.log(`Using snapshot for block ${blockNumber}, world state synced upto ${blockSyncedTo}`); + return this.worldStateSynchronizer.getSnapshot(blockNumber); + } else { + throw new Error(`Block ${blockNumber} not yet synced`); + } } /** * Ensure we fully sync the world state * @returns A promise that fulfils once the world state is synced */ - async #syncWorldState() { + async #syncWorldState(): Promise { const blockSourceHeight = await this.blockSource.getBlockNumber(); - await this.worldStateSynchronizer.syncImmediate(blockSourceHeight); + return this.worldStateSynchronizer.syncImmediate(blockSourceHeight); } } diff --git a/yarn-project/aztec-node/terraform/main.tf b/yarn-project/aztec-node/terraform/main.tf index 52b5c903e7c..5d1b767e364 100644 --- a/yarn-project/aztec-node/terraform/main.tf +++ b/yarn-project/aztec-node/terraform/main.tf @@ -1,6 +1,6 @@ # Terraform to setup a prototype network of Aztec Nodes in AWS # It sets up 2 full nodes with different ports/keys etc. 
-# Some duplication across the 2 defined services, could possibly +# Some duplication across the 2 defined services, could possibly # be refactored to use modules as and when we build out infrastructure for real @@ -49,315 +49,36 @@ data "terraform_remote_state" "aztec-network_iac" { } } - -resource "aws_cloudwatch_log_group" "aztec-node-log-group-1" { - name = "/fargate/service/${var.DEPLOY_TAG}/aztec-node-1" - retention_in_days = 14 -} - -resource "aws_service_discovery_service" "aztec-node-1" { - name = "${var.DEPLOY_TAG}-aztec-node-1" - - health_check_custom_config { - failure_threshold = 1 - } - - dns_config { - namespace_id = data.terraform_remote_state.setup_iac.outputs.local_service_discovery_id - - dns_records { - ttl = 60 - type = "A" - } - - dns_records { - ttl = 60 - type = "SRV" - } - - routing_policy = "MULTIVALUE" - } - - # Terraform just fails if this resource changes and you have registered instances. - provisioner "local-exec" { - when = destroy - command = "${path.module}/../servicediscovery-drain.sh ${self.id}" - } -} - -# Define task definition and service. -resource "aws_ecs_task_definition" "aztec-node-1" { - family = "${var.DEPLOY_TAG}-aztec-node-1" - requires_compatibilities = ["FARGATE"] - network_mode = "awsvpc" - cpu = "2048" - memory = "4096" - execution_role_arn = data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn - task_role_arn = data.terraform_remote_state.aztec2_iac.outputs.cloudwatch_logging_ecs_role_arn - - container_definitions = <" && exit 1 + +serviceId="--service-id=$1" + +echo "Draining servicediscovery instances from $1 ..." +ids="$(aws servicediscovery list-instances $serviceId --query 'Instances[].Id' --output text | tr '\t' ' ')" + +found= +for id in $ids; do + if [ -n "$id" ]; then + echo "Deregistering $1 / $id ..." + aws servicediscovery deregister-instance $serviceId --instance-id "$id" + found=1 + fi +done + +# Yes, I'm being lazy here... 
+[ -n "$found" ] && sleep 5 || true \ No newline at end of file diff --git a/yarn-project/aztec-node/terraform/variables.tf b/yarn-project/aztec-node/terraform/variables.tf index d80ae23529d..30e0f59f597 100644 --- a/yarn-project/aztec-node/terraform/variables.tf +++ b/yarn-project/aztec-node/terraform/variables.tf @@ -6,22 +6,6 @@ variable "API_KEY" { type = string } -variable "CONTRACT_DEPLOYMENT_EMITTER_ADDRESS" { - type = string -} - -variable "ROLLUP_CONTRACT_ADDRESS" { - type = string -} - -variable "INBOX_CONTRACT_ADDRESS" { - type = string -} - -variable "REGISTRY_CONTRACT_ADDRESS" { - type = string -} - variable "SEQ_1_PUBLISHER_PRIVATE_KEY" { type = string } @@ -31,15 +15,13 @@ variable "SEQ_2_PUBLISHER_PRIVATE_KEY" { } variable "CHAIN_ID" { - type = string + type = string + default = 31337 } -variable "BOOTNODE_1_LISTEN_PORT" { - type = string -} - -variable "BOOTNODE_2_LISTEN_PORT" { - type = string +variable "BOOTNODE_LISTEN_PORT" { + type = number + default = 40500 } variable "BOOTNODE_1_PEER_ID" { @@ -50,38 +32,41 @@ variable "BOOTNODE_2_PEER_ID" { type = string } -variable "NODE_1_TCP_PORT" { - type = string -} - -variable "NODE_2_TCP_PORT" { - type = string +variable "NODE_TCP_PORT" { + type = number + default = 40400 } variable "NODE_1_PRIVATE_KEY" { - type = string + type = string + default = "" } variable "NODE_2_PRIVATE_KEY" { - type = string + type = string + default = "" } -variable "ECR_URL" { +variable "DOCKERHUB_ACCOUNT" { type = string } variable "SEQ_MAX_TX_PER_BLOCK" { - type = string + type = string + default = 64 } variable "SEQ_MIN_TX_PER_BLOCK" { - type = string + type = string + default = 1 } variable "P2P_MIN_PEERS" { - type = string + type = string + default = 50 } variable "P2P_MAX_PEERS" { - type = string + type = string + default = 100 } diff --git a/yarn-project/aztec-nr/aztec/src/abi.nr b/yarn-project/aztec-nr/aztec/src/abi.nr index c84ad8b6d8b..88923767db7 100644 --- a/yarn-project/aztec-nr/aztec/src/abi.nr +++ 
b/yarn-project/aztec-nr/aztec/src/abi.nr @@ -11,7 +11,7 @@ use crate::constants_gen::{ MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_CALL, MAX_PUBLIC_DATA_READS_PER_CALL, GENERATOR_INDEX__FUNCTION_ARGS, - HISTORIC_BLOCK_DATA_LENGTH, + BLOCK_HEADER_LENGTH, CONTRACT_DEPLOYMENT_DATA_LENGTH, CALL_CONTEXT_LENGTH, PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH, @@ -94,7 +94,7 @@ impl ContractDeploymentData { // docs:start:private-context-inputs struct PrivateContextInputs { call_context : CallContext, - block_data: HistoricBlockData, + block_header: BlockHeader, contract_deployment_data: ContractDeploymentData, private_global_variables: PrivateGlobalVariables, } @@ -104,7 +104,7 @@ struct PrivateContextInputs { // docs:start:public-context-inputs struct PublicContextInputs { call_context: CallContext, - block_data: HistoricBlockData, + block_header: BlockHeader, public_global_variables: PublicGlobalVariables, } @@ -141,8 +141,8 @@ impl CallContext { } } -// docs:start:historic-block-data -struct HistoricBlockData { +// docs:start:block-header +struct BlockHeader { note_hash_tree_root : Field, nullifier_tree_root : Field, contract_tree_root : Field, @@ -151,11 +151,11 @@ struct HistoricBlockData { public_data_tree_root: Field, global_variables_hash: Field, } -// docs:end:historic-block-data +// docs:end:block-header -impl HistoricBlockData { +impl BlockHeader { // NOTE: this order must match the order in `private_circuit_public_inputs.hpp` - pub fn serialize(self) -> [Field; HISTORIC_BLOCK_DATA_LENGTH] { + pub fn serialize(self) -> [Field; BLOCK_HEADER_LENGTH] { [ self.note_hash_tree_root, self.nullifier_tree_root, @@ -167,8 +167,8 @@ impl HistoricBlockData { ] } - pub fn deserialize(deserialized: [Field; HISTORIC_BLOCK_DATA_LENGTH]) -> Self { - HistoricBlockData { + pub fn deserialize(deserialized: [Field; BLOCK_HEADER_LENGTH]) -> Self { + BlockHeader { note_hash_tree_root: deserialized[0], nullifier_tree_root: deserialized[1], contract_tree_root: deserialized[2], @@ -184,7 +184,7 @@ impl 
HistoricBlockData { } pub fn block_hash(self) -> Field { - // TODO(#3442): Unify the ordering in `HistoricBlockData::serialize` function and the ordering + // TODO(#3442): Unify the ordering in `BlockHeader::serialize` function and the ordering // in the block hash preimage --> This requires changes in the circuits. let inputs = [ self.global_variables_hash, @@ -234,7 +234,7 @@ struct PrivateCircuitPublicInputs { unencrypted_logs_hash: [Field; NUM_FIELDS_PER_SHA256], encrypted_log_preimages_length: Field, unencrypted_log_preimages_length: Field, - block_data: HistoricBlockData, + block_header: BlockHeader, contract_deployment_data: ContractDeploymentData, chain_id: Field, version: Field, @@ -258,7 +258,7 @@ impl PrivateCircuitPublicInputs { fields.push_array(self.unencrypted_logs_hash); fields.push(self.encrypted_log_preimages_length); fields.push(self.unencrypted_log_preimages_length); - fields.push_array(self.block_data.serialize()); + fields.push_array(self.block_header.serialize()); fields.push(self.contract_deployment_data.hash()); fields.push(self.chain_id); fields.push(self.version); @@ -282,7 +282,7 @@ impl PrivateCircuitPublicInputs { fields.push_array(self.unencrypted_logs_hash); fields.push(self.encrypted_log_preimages_length); fields.push(self.unencrypted_log_preimages_length); - fields.push_array(self.block_data.serialize()); + fields.push_array(self.block_header.serialize()); fields.push_array(self.contract_deployment_data.serialize()); fields.push(self.chain_id); fields.push(self.version); @@ -341,7 +341,7 @@ struct PublicCircuitPublicInputs { new_l2_to_l1_msgs: [Field; crate::abi::MAX_NEW_L2_TO_L1_MSGS_PER_CALL], unencrypted_logs_hash: [Field; NUM_FIELDS_PER_SHA256], unencrypted_log_preimages_length: Field, - block_data: HistoricBlockData, + block_header: BlockHeader, prover_address: Field, } @@ -365,7 +365,7 @@ impl PublicCircuitPublicInputs { inputs.push_array(self.unencrypted_logs_hash); inputs.push(self.unencrypted_log_preimages_length); - 
inputs.push_array(self.block_data.serialize()); + inputs.push_array(self.block_header.serialize()); inputs.push(self.prover_address); pedersen_hash(inputs.storage, GENERATOR_INDEX__PUBLIC_CIRCUIT_PUBLIC_INPUTS) @@ -388,7 +388,7 @@ impl PublicCircuitPublicInputs { fields.push_array(self.new_l2_to_l1_msgs); fields.push_array(self.unencrypted_logs_hash); fields.push(self.unencrypted_log_preimages_length); - fields.push_array(self.block_data.serialize()); + fields.push_array(self.block_header.serialize()); fields.push(self.prover_address); fields.storage } diff --git a/yarn-project/aztec-nr/aztec/src/constants_gen.nr b/yarn-project/aztec-nr/aztec/src/constants_gen.nr index 368810a0a42..6ac5589e0f8 100644 --- a/yarn-project/aztec-nr/aztec/src/constants_gen.nr +++ b/yarn-project/aztec-nr/aztec/src/constants_gen.nr @@ -73,7 +73,7 @@ global CONTRACT_SUBTREE_SIBLING_PATH_LENGTH: Field = 15; global NOTE_HASH_SUBTREE_HEIGHT: Field = 7; global NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH: Field = 25; global NULLIFIER_SUBTREE_HEIGHT: Field = 7; -global HISTORIC_BLOCKS_TREE_HEIGHT: Field = 16; +global BLOCKS_TREE_HEIGHT: Field = 16; global NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH: Field = 13; global L1_TO_L2_MSG_SUBTREE_HEIGHT: Field = 4; global L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH: Field = 12; @@ -98,7 +98,7 @@ global MAX_NOTES_PER_PAGE: Field = 10; // VIEW_NOTE_ORACLE_RETURN_LENGTH = MAX_NOTES_PER_PAGE * (MAX_NOTE_FIELDS_LENGTH + 1) + 2; global VIEW_NOTE_ORACLE_RETURN_LENGTH: Field = 212; global CALL_CONTEXT_LENGTH: Field = 7; -global HISTORIC_BLOCK_DATA_LENGTH: Field = 7; +global BLOCK_HEADER_LENGTH: Field = 7; global FUNCTION_DATA_LENGTH: Field = 4; global CONTRACT_DEPLOYMENT_DATA_LENGTH: Field = 6; // Change this ONLY if you have changed the PrivateCircuitPublicInputs structure. 
diff --git a/yarn-project/aztec-nr/aztec/src/context.nr b/yarn-project/aztec-nr/aztec/src/context.nr index 3efe8bda4ac..720ce9981fb 100644 --- a/yarn-project/aztec-nr/aztec/src/context.nr +++ b/yarn-project/aztec-nr/aztec/src/context.nr @@ -19,7 +19,7 @@ use crate::abi::{ hash_args, CallContext, ContractDeploymentData, - HistoricBlockData, + BlockHeader, FunctionData, PrivateCircuitPublicInputs, PublicCircuitPublicInputs, @@ -46,7 +46,7 @@ use crate::oracle::{ public_call::call_public_function_internal, enqueue_public_function_call::enqueue_public_function_call_internal, context::get_portal_address, - get_block_data::get_block_data, + get_block_header::get_block_header, }; use dep::std::option::Option; @@ -71,7 +71,7 @@ struct PrivateContext { new_l2_to_l1_msgs : BoundedVec, // docs:end:private-context - block_data: HistoricBlockData, + block_header: BlockHeader, // TODO(https://github.com/AztecProtocol/aztec-packages/issues/1165) // encrypted_logs_preimages: Vec, @@ -93,7 +93,7 @@ impl PrivateContext { new_nullifiers: BoundedVec::new(0), nullified_commitments: BoundedVec::new(0), - block_data: inputs.block_data, + block_header: inputs.block_header, private_call_stack: BoundedVec::new(0), public_call_stack: BoundedVec::new(0), @@ -129,8 +129,8 @@ impl PrivateContext { self.inputs.call_context.function_selector } - pub fn get_block_data(self, block_number: Field) -> HistoricBlockData { - get_block_data(block_number, self) + pub fn get_block_header(self, block_number: Field) -> BlockHeader { + get_block_header(block_number, self) } pub fn finish(self) -> abi::PrivateCircuitPublicInputs { @@ -156,7 +156,7 @@ impl PrivateContext { unencrypted_logs_hash: unencrypted_logs_hash, encrypted_log_preimages_length: encrypted_log_preimages_length, unencrypted_log_preimages_length: unencrypted_log_preimages_length, - block_data: self.block_data, + block_header: self.block_header, contract_deployment_data: self.inputs.contract_deployment_data, chain_id: 
self.inputs.private_global_variables.chain_id, version: self.inputs.private_global_variables.version, @@ -207,7 +207,7 @@ impl PrivateContext { ) // docs:end:context_consume_l1_to_l2_message { - let nullifier = process_l1_to_l2_message(self.block_data.l1_to_l2_messages_tree_root, self.this_address(), self.this_portal_address(), self.chain_id(), self.version(), msg_key, content, secret); + let nullifier = process_l1_to_l2_message(self.block_header.l1_to_l2_messages_tree_root, self.this_address(), self.this_portal_address(), self.chain_id(), self.version(), msg_key, content, secret); // Push nullifier (and the "commitment" corresponding to this can be "empty") self.push_new_nullifier(nullifier, EMPTY_NULLIFIED_COMMITMENT) @@ -289,7 +289,7 @@ impl PrivateContext { unencrypted_logs_hash: arr_copy_slice(fields, [0; NUM_FIELDS_PER_SHA256], 141), encrypted_log_preimages_length: fields[143], unencrypted_log_preimages_length: fields[144], - block_data: HistoricBlockData { + block_header: BlockHeader { // Must match order in `private_circuit_public_inputs.hpp` note_hash_tree_root : fields[145], nullifier_tree_root : fields[146], @@ -391,7 +391,7 @@ impl PrivateContext { new_l2_to_l1_msgs:[0; MAX_NEW_L2_TO_L1_MSGS_PER_CALL], unencrypted_logs_hash:[0; NUM_FIELDS_PER_SHA256], unencrypted_log_preimages_length: 0, - block_data: HistoricBlockData::empty(), + block_header: BlockHeader::empty(), prover_address: 0, }, is_execution_request: true, @@ -439,7 +439,7 @@ struct PublicContext { unencrypted_logs_hash: BoundedVec, unencrypted_logs_preimages_length: Field, - block_data: HistoricBlockData, + block_header: BlockHeader, prover_address: Field, } @@ -466,7 +466,7 @@ impl PublicContext { unencrypted_logs_hash: BoundedVec::new(0), unencrypted_logs_preimages_length: 0, - block_data: inputs.block_data, + block_header: inputs.block_header, prover_address: 0, // TODO(https://github.com/AztecProtocol/aztec-packages/issues/1165) @@ -526,7 +526,7 @@ impl PublicContext { new_l2_to_l1_msgs: 
self.new_l2_to_l1_msgs.storage, unencrypted_logs_hash: unencrypted_logs_hash, unencrypted_log_preimages_length: unencrypted_log_preimages_length, - block_data: self.inputs.block_data, + block_header: self.inputs.block_header, prover_address: self.prover_address, }; pub_circuit_pub_inputs @@ -548,7 +548,7 @@ impl PublicContext { // Note this returns self to get around an issue where mutable structs do not maintain mutations unless reassigned pub fn consume_l1_to_l2_message(&mut self, msg_key: Field, content: Field, secret: Field) { let this = (*self).this_address(); - let nullifier = process_l1_to_l2_message(self.block_data.l1_to_l2_messages_tree_root, this, self.this_portal_address(), self.chain_id(), self.version(), msg_key, content, secret); + let nullifier = process_l1_to_l2_message(self.block_header.l1_to_l2_messages_tree_root, this, self.this_portal_address(), self.chain_id(), self.version(), msg_key, content, secret); // Push nullifier (and the "commitment" corresponding to this can be "empty") self.push_new_nullifier(nullifier, EMPTY_NULLIFIED_COMMITMENT) diff --git a/yarn-project/aztec-nr/aztec/src/oracle.nr b/yarn-project/aztec-nr/aztec/src/oracle.nr index 4bed8383bfa..6a7aac259db 100644 --- a/yarn-project/aztec-nr/aztec/src/oracle.nr +++ b/yarn-project/aztec-nr/aztec/src/oracle.nr @@ -14,7 +14,7 @@ mod get_secret_key; mod get_sibling_path; mod rand; mod enqueue_public_function_call; -mod get_block_data; +mod get_block_header; mod public_call; mod notes; mod storage; diff --git a/yarn-project/aztec-nr/aztec/src/oracle/get_block_data.nr b/yarn-project/aztec-nr/aztec/src/oracle/get_block_data.nr deleted file mode 100644 index 2b645a62dd9..00000000000 --- a/yarn-project/aztec-nr/aztec/src/oracle/get_block_data.nr +++ /dev/null @@ -1,39 +0,0 @@ -use dep::std::merkle::compute_merkle_root; -use crate::{ - abi::HistoricBlockData, - constants_gen::{ - HISTORIC_BLOCK_DATA_LENGTH, - HISTORIC_BLOCKS_TREE_HEIGHT, - }, - context::PrivateContext, - 
oracle::get_membership_witness::{ - get_membership_witness, - MembershipWitness, - }, -}; - -#[oracle(getBlockData)] -fn get_block_data_oracle(_block_number: Field) -> [Field; HISTORIC_BLOCK_DATA_LENGTH] {} - -unconstrained pub fn get_block_data_internal(block_number: Field) -> HistoricBlockData { - let block_data = get_block_data_oracle(block_number); - HistoricBlockData::deserialize(block_data) -} - -pub fn get_block_data(block_number: Field, context: PrivateContext) -> HistoricBlockData { - // 1) Get historic block data from oracle at the given block - let block_data = get_block_data_internal(block_number); - - // 2) Compute the block hash from the block data - let block_hash = block_data.block_hash(); - - // 3) Get the membership wintess of the block in the blocks tree - let blocks_tree_id = 5; // TODO(#3443) - let witness: MembershipWitness = get_membership_witness(block_number, blocks_tree_id, block_hash); - - // 4) Check that the block is in the blocks tree (i.e. the witness is valid) - assert(context.block_data.blocks_tree_root == compute_merkle_root(block_hash, witness.index, witness.path), "Proving membership of a block in blocks tree failed"); - - // 5) Return the block data - block_data -} diff --git a/yarn-project/aztec-nr/aztec/src/oracle/get_block_header.nr b/yarn-project/aztec-nr/aztec/src/oracle/get_block_header.nr new file mode 100644 index 00000000000..5f453221952 --- /dev/null +++ b/yarn-project/aztec-nr/aztec/src/oracle/get_block_header.nr @@ -0,0 +1,39 @@ +use dep::std::merkle::compute_merkle_root; +use crate::{ + abi::BlockHeader, + constants_gen::{ + BLOCK_HEADER_LENGTH, + BLOCKS_TREE_HEIGHT, + }, + context::PrivateContext, + oracle::get_membership_witness::{ + get_membership_witness, + MembershipWitness, + }, +}; + +#[oracle(getBlockHeader)] +fn get_block_header_oracle(_block_number: Field) -> [Field; BLOCK_HEADER_LENGTH] {} + +unconstrained pub fn get_block_header_internal(block_number: Field) -> BlockHeader { + let block_header = 
get_block_header_oracle(block_number); + BlockHeader::deserialize(block_header) +} + +pub fn get_block_header(block_number: Field, context: PrivateContext) -> BlockHeader { + // 1) Get block header of a given block from oracle + let block_header = get_block_header_internal(block_number); + + // 2) Compute the block hash from the block header + let block_hash = block_header.block_hash(); + + // 3) Get the membership wintess of the block in the blocks tree + let blocks_tree_id = 5; // TODO(#3443) + let witness: MembershipWitness = get_membership_witness(block_number, blocks_tree_id, block_hash); + + // 4) Check that the block is in the blocks tree (i.e. the witness is valid) + assert(context.block_header.blocks_tree_root == compute_merkle_root(block_hash, witness.index, witness.path), "Proving membership of a block in blocks tree failed"); + + // 5) Return the block header + block_header +} diff --git a/yarn-project/aztec-nr/slow-updates-tree/src/slow_map.nr b/yarn-project/aztec-nr/slow-updates-tree/src/slow_map.nr index f36d9f79593..d2dc5c67830 100644 --- a/yarn-project/aztec-nr/slow-updates-tree/src/slow_map.nr +++ b/yarn-project/aztec-nr/slow-updates-tree/src/slow_map.nr @@ -43,6 +43,7 @@ struct SlowUpdateInner { sibling_path: [Field; N], } +// docs:start:slow_update_proof // The slow update proof. Containing two merkle paths // One for the before and one for the after trees. 
// M = 2 * N + 4 @@ -52,6 +53,7 @@ struct SlowUpdateProof { before: SlowUpdateInner, after: SlowUpdateInner, } +// docs:end:slow_update_proof pub fn deserialize_slow_update_proof(serialized: [Field; M]) -> SlowUpdateProof { SlowUpdateProof::deserialize(serialized) @@ -142,11 +144,14 @@ impl SlowMap { } } + // docs:start:read_leaf_at pub fn read_leaf_at(self: Self, key: Field) -> Leaf { let derived_storage_slot = pedersen_hash([self.storage_slot, key]); storage_read(derived_storage_slot, deserialize_leaf) } + // docs:end:read_leaf_at + // docs:start:read_at // Reads the "CURRENT" value of the leaf pub fn read_at(self: Self, key: Field) -> Field { let time = self.context.public.unwrap().timestamp() as u120; @@ -157,6 +162,7 @@ impl SlowMap { leaf.after } } + // docs:end:read_at // Will update values in the "AFTER" tree // - updates the leaf and root to follow current values, moving from after to before if diff --git a/yarn-project/aztec-sandbox/docker-compose.yml b/yarn-project/aztec-sandbox/docker-compose.yml index 3245f4380e6..2741f74c069 100644 --- a/yarn-project/aztec-sandbox/docker-compose.yml +++ b/yarn-project/aztec-sandbox/docker-compose.yml @@ -32,14 +32,3 @@ services: ARCHIVER_VIEM_POLLING_INTERVAL_MS: 500 volumes: - ./log:/usr/src/yarn-project/aztec-sandbox/log:rw - - otterscan: - image: otterscan/otterscan:develop - # platform: linux/amd64 - ports: - - '5100:80' - container_name: otterscan - environment: - # otterscan env var is hardcoded to support erigon client - # but it also works for anvil - - ERIGON_URL=http://127.0.0.1:${SANDBOX_ANVIL_PORT:-8545} diff --git a/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json b/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json index 0c4d56e844c..491da2af12c 100644 --- a/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/ecdsa_account_contract.json @@ -365,10 +365,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { 
"kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": "note_hash_tree_root", @@ -632,10 +632,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { "kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": "note_hash_tree_root", diff --git a/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json b/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json index 8163729ec65..f05dd9664f7 100644 --- a/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/schnorr_account_contract.json @@ -353,10 +353,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { "kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": "note_hash_tree_root", @@ -620,10 +620,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { "kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": "note_hash_tree_root", diff --git a/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json b/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json index cc776fe5d4e..fb155f98ec1 100644 --- a/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json +++ b/yarn-project/aztec.js/src/artifacts/schnorr_single_key_account_contract.json @@ -288,10 +288,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { "kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": "note_hash_tree_root", @@ -555,10 +555,10 @@ } }, { - "name": "block_data", + "name": "block_header", "type": { "kind": "struct", - "path": "aztec::abi::HistoricBlockData", + "path": "aztec::abi::BlockHeader", "fields": [ { "name": 
"note_hash_tree_root", diff --git a/yarn-project/circuits.js/src/abis/abis.ts b/yarn-project/circuits.js/src/abis/abis.ts index dd249bbb5d6..ecee67a7528 100644 --- a/yarn-project/circuits.js/src/abis/abis.ts +++ b/yarn-project/circuits.js/src/abis/abis.ts @@ -531,13 +531,13 @@ function computePrivateInputsHash(input: PrivateCircuitPublicInputs) { ...input.unencryptedLogsHash.map(fr => fr.toBuffer()), input.encryptedLogPreimagesLength.toBuffer(), input.unencryptedLogPreimagesLength.toBuffer(), - input.historicBlockData.noteHashTreeRoot.toBuffer(), - input.historicBlockData.nullifierTreeRoot.toBuffer(), - input.historicBlockData.contractTreeRoot.toBuffer(), - input.historicBlockData.l1ToL2MessagesTreeRoot.toBuffer(), - input.historicBlockData.blocksTreeRoot.toBuffer(), - input.historicBlockData.publicDataTreeRoot.toBuffer(), - input.historicBlockData.globalVariablesHash.toBuffer(), + input.blockHeader.noteHashTreeRoot.toBuffer(), + input.blockHeader.nullifierTreeRoot.toBuffer(), + input.blockHeader.contractTreeRoot.toBuffer(), + input.blockHeader.l1ToL2MessagesTreeRoot.toBuffer(), + input.blockHeader.blocksTreeRoot.toBuffer(), + input.blockHeader.publicDataTreeRoot.toBuffer(), + input.blockHeader.globalVariablesHash.toBuffer(), computeContractDeploymentDataHash(input.contractDeploymentData).toBuffer(), input.chainId.toBuffer(), input.version.toBuffer(), @@ -599,13 +599,13 @@ function computePublicInputsHash(input: PublicCircuitPublicInputs) { ...input.newL2ToL1Msgs.map(fr => fr.toBuffer()), ...input.unencryptedLogsHash.map(fr => fr.toBuffer()), input.unencryptedLogPreimagesLength.toBuffer(), - input.historicBlockData.noteHashTreeRoot.toBuffer(), - input.historicBlockData.nullifierTreeRoot.toBuffer(), - input.historicBlockData.contractTreeRoot.toBuffer(), - input.historicBlockData.l1ToL2MessagesTreeRoot.toBuffer(), - input.historicBlockData.blocksTreeRoot.toBuffer(), - input.historicBlockData.publicDataTreeRoot.toBuffer(), - 
input.historicBlockData.globalVariablesHash.toBuffer(), + input.blockHeader.noteHashTreeRoot.toBuffer(), + input.blockHeader.nullifierTreeRoot.toBuffer(), + input.blockHeader.contractTreeRoot.toBuffer(), + input.blockHeader.l1ToL2MessagesTreeRoot.toBuffer(), + input.blockHeader.blocksTreeRoot.toBuffer(), + input.blockHeader.publicDataTreeRoot.toBuffer(), + input.blockHeader.globalVariablesHash.toBuffer(), input.proverAddress.toBuffer(), ]; if (toHash.length != PUBLIC_CIRCUIT_PUBLIC_INPUTS_HASH_INPUT_LENGTH) { diff --git a/yarn-project/circuits.js/src/constants.gen.ts b/yarn-project/circuits.js/src/constants.gen.ts index 3cbbab21121..e5fc7359815 100644 --- a/yarn-project/circuits.js/src/constants.gen.ts +++ b/yarn-project/circuits.js/src/constants.gen.ts @@ -42,7 +42,7 @@ export const CONTRACT_SUBTREE_SIBLING_PATH_LENGTH = 15; export const NOTE_HASH_SUBTREE_HEIGHT = 7; export const NOTE_HASH_SUBTREE_SIBLING_PATH_LENGTH = 25; export const NULLIFIER_SUBTREE_HEIGHT = 7; -export const HISTORIC_BLOCKS_TREE_HEIGHT = 16; +export const BLOCKS_TREE_HEIGHT = 16; export const NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH = 13; export const L1_TO_L2_MSG_SUBTREE_HEIGHT = 4; export const L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH = 12; @@ -56,7 +56,7 @@ export const GET_NOTE_ORACLE_RETURN_LENGTH = 23; export const MAX_NOTES_PER_PAGE = 10; export const VIEW_NOTE_ORACLE_RETURN_LENGTH = 212; export const CALL_CONTEXT_LENGTH = 7; -export const HISTORIC_BLOCK_DATA_LENGTH = 7; +export const BLOCK_HEADER_LENGTH = 7; export const FUNCTION_DATA_LENGTH = 4; export const CONTRACT_DEPLOYMENT_DATA_LENGTH = 6; export const PRIVATE_CIRCUIT_PUBLIC_INPUTS_LENGTH = 155; diff --git a/yarn-project/circuits.js/src/structs/index.ts b/yarn-project/circuits.js/src/structs/index.ts index c8ed1585aea..7ce2c3d0ced 100644 --- a/yarn-project/circuits.js/src/structs/index.ts +++ b/yarn-project/circuits.js/src/structs/index.ts @@ -11,7 +11,7 @@ export * from './kernel/private_kernel.js'; export * from 
'./kernel/public_kernel.js'; export * from './kernel/combined_accumulated_data.js'; export * from './kernel/combined_constant_data.js'; -export * from './kernel/historic_block_data.js'; +export * from './kernel/block_header.js'; export * from './kernel/previous_kernel_data.js'; export * from './kernel/public_inputs.js'; export * from './kernel/public_inputs_final.js'; diff --git a/yarn-project/circuits.js/src/structs/kernel/block_header.test.ts b/yarn-project/circuits.js/src/structs/kernel/block_header.test.ts new file mode 100644 index 00000000000..3bb78282c2a --- /dev/null +++ b/yarn-project/circuits.js/src/structs/kernel/block_header.test.ts @@ -0,0 +1,17 @@ +import { BlockHeader } from './block_header.js'; + +describe('BlockHeader', () => { + it('serializes to buffer and back', () => { + const blockHeader = BlockHeader.random(); + const serialized = blockHeader.toBuffer(); + const deserialized = BlockHeader.fromBuffer(serialized); + expect(deserialized).toEqual(blockHeader); + }); + + it('serializes to string and back', () => { + const blockHeader = BlockHeader.random(); + const serialized = blockHeader.toString(); + const deserialized = BlockHeader.fromString(serialized); + expect(deserialized).toEqual(blockHeader); + }); +}); diff --git a/yarn-project/circuits.js/src/structs/kernel/historic_block_data.ts b/yarn-project/circuits.js/src/structs/kernel/block_header.ts similarity index 80% rename from yarn-project/circuits.js/src/structs/kernel/historic_block_data.ts rename to yarn-project/circuits.js/src/structs/kernel/block_header.ts index eda21d334f1..1c83f50351c 100644 --- a/yarn-project/circuits.js/src/structs/kernel/historic_block_data.ts +++ b/yarn-project/circuits.js/src/structs/kernel/block_header.ts @@ -5,14 +5,14 @@ import { FieldsOf } from '../../utils/jsUtils.js'; import { serializeToBuffer } from '../../utils/serialize.js'; /** - * The string encoding used for serializing HistoricBlockData objects. 
+ * The string encoding used for serializing BlockHeader objects. */ const STRING_ENCODING: BufferEncoding = 'hex'; /** * Information about the tree roots used for both public and private kernels. */ -export class HistoricBlockData { +export class BlockHeader { constructor( /** * Root of the note hash tree at the time of when this information was assembled. @@ -31,7 +31,7 @@ export class HistoricBlockData { */ public l1ToL2MessagesTreeRoot: Fr, /** - * Root of the historic blocks tree at the time of when this information was assembled. + * Root of the blocks tree at the time of when this information was assembled. */ public blocksTreeRoot: Fr, /** @@ -48,12 +48,12 @@ export class HistoricBlockData { public globalVariablesHash: Fr, ) {} - static from(fields: FieldsOf) { - return new HistoricBlockData(...HistoricBlockData.getFields(fields)); + static from(fields: FieldsOf) { + return new BlockHeader(...BlockHeader.getFields(fields)); } static random() { - return new HistoricBlockData( + return new BlockHeader( Fr.random(), Fr.random(), Fr.random(), @@ -65,7 +65,7 @@ export class HistoricBlockData { ); } - static getFields(fields: FieldsOf) { + static getFields(fields: FieldsOf) { return [ fields.noteHashTreeRoot, fields.nullifierTreeRoot, @@ -79,7 +79,7 @@ export class HistoricBlockData { } toBuffer() { - return serializeToBuffer(...HistoricBlockData.getFields(this)); + return serializeToBuffer(...BlockHeader.getFields(this)); } toString() { @@ -88,7 +88,7 @@ export class HistoricBlockData { } /** - * Return the historic block data as an array of items in the order they are serialized in noir. + * Return the block header as an array of items in the order they are serialized in noir. 
* @returns Array of items in the order they are stored in the contract */ toArray(): Fr[] { @@ -106,7 +106,7 @@ static fromBuffer(buffer: Buffer | BufferReader) { const reader = BufferReader.asReader(buffer); - return new HistoricBlockData( + return new BlockHeader( Fr.fromBuffer(reader), Fr.fromBuffer(reader), Fr.fromBuffer(reader), @@ -119,7 +119,7 @@ } static fromString(str: string) { - return HistoricBlockData.fromBuffer(Buffer.from(str, STRING_ENCODING)); + return BlockHeader.fromBuffer(Buffer.from(str, STRING_ENCODING)); } isEmpty() { @@ -136,6 +136,6 @@ } static empty() { - return new HistoricBlockData(Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO); + return new BlockHeader(Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO, Fr.ZERO); } } diff --git a/yarn-project/circuits.js/src/structs/kernel/combined_accumulated_data.ts b/yarn-project/circuits.js/src/structs/kernel/combined_accumulated_data.ts index 4f84c10ff55..f77a3d910c4 100644 --- a/yarn-project/circuits.js/src/structs/kernel/combined_accumulated_data.ts +++ b/yarn-project/circuits.js/src/structs/kernel/combined_accumulated_data.ts @@ -491,6 +491,7 @@ export class FinalAccumulatedData { public nullifiedCommitments: Tuple, /** * Current private call stack. + * TODO(#3417): Given this field must be empty, should we just remove it?
*/ public privateCallStack: Tuple, /** diff --git a/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts b/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts index 34f10df6761..2410d624191 100644 --- a/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts +++ b/yarn-project/circuits.js/src/structs/kernel/combined_constant_data.ts @@ -2,7 +2,7 @@ import { BufferReader } from '@aztec/foundation/serialize'; import { serializeToBuffer } from '../../utils/serialize.js'; import { TxContext } from '../tx_context.js'; -import { HistoricBlockData } from './historic_block_data.js'; +import { BlockHeader } from './block_header.js'; /** * Data that is constant/not modified by neither of the kernels. @@ -12,7 +12,7 @@ export class CombinedConstantData { /** * Roots of the trees relevant for both kernel circuits. */ - public blockData: HistoricBlockData, + public blockHeader: BlockHeader, /** * Context of the transaction. */ @@ -20,7 +20,7 @@ export class CombinedConstantData { ) {} toBuffer() { - return serializeToBuffer(this.blockData, this.txContext); + return serializeToBuffer(this.blockHeader, this.txContext); } /** @@ -30,10 +30,10 @@ export class CombinedConstantData { */ static fromBuffer(buffer: Buffer | BufferReader): CombinedConstantData { const reader = BufferReader.asReader(buffer); - return new CombinedConstantData(reader.readObject(HistoricBlockData), reader.readObject(TxContext)); + return new CombinedConstantData(reader.readObject(BlockHeader), reader.readObject(TxContext)); } static empty() { - return new CombinedConstantData(HistoricBlockData.empty(), TxContext.empty()); + return new CombinedConstantData(BlockHeader.empty(), TxContext.empty()); } } diff --git a/yarn-project/circuits.js/src/structs/kernel/historic_block_data.test.ts b/yarn-project/circuits.js/src/structs/kernel/historic_block_data.test.ts deleted file mode 100644 index 9179c979ca4..00000000000 --- 
a/yarn-project/circuits.js/src/structs/kernel/historic_block_data.test.ts +++ /dev/null @@ -1,17 +0,0 @@ -import { HistoricBlockData } from './historic_block_data.js'; - -describe('HistoricBlockData', () => { - it('serializes to buffer and back', () => { - const historicBlockData = HistoricBlockData.random(); - const serialized = historicBlockData.toBuffer(); - const deserialized = HistoricBlockData.fromBuffer(serialized); - expect(deserialized).toEqual(historicBlockData); - }); - - it('serializes to string and back', () => { - const historicBlockData = HistoricBlockData.random(); - const serialized = historicBlockData.toString(); - const deserialized = HistoricBlockData.fromString(serialized); - expect(deserialized).toEqual(historicBlockData); - }); -}); diff --git a/yarn-project/circuits.js/src/structs/private_circuit_public_inputs.ts b/yarn-project/circuits.js/src/structs/private_circuit_public_inputs.ts index 11dd3c26d39..1b3fe3222fc 100644 --- a/yarn-project/circuits.js/src/structs/private_circuit_public_inputs.ts +++ b/yarn-project/circuits.js/src/structs/private_circuit_public_inputs.ts @@ -16,7 +16,7 @@ import { import { FieldsOf, makeTuple } from '../utils/jsUtils.js'; import { serializeToBuffer } from '../utils/serialize.js'; import { CallContext } from './call_context.js'; -import { HistoricBlockData } from './index.js'; +import { BlockHeader } from './index.js'; import { ContractDeploymentData } from './tx_context.js'; /** @@ -90,9 +90,9 @@ export class PrivateCircuitPublicInputs { */ public unencryptedLogPreimagesLength: Fr, /** - * Historic roots of the data trees, used to calculate the block hash the user is proving against. + * Historical roots of the data trees, used to calculate the block hash the user is proving against. */ - public historicBlockData: HistoricBlockData, + public blockHeader: BlockHeader, /** * Deployment data of contracts being deployed in this kernel iteration. 
*/ @@ -136,7 +136,7 @@ export class PrivateCircuitPublicInputs { makeTuple(NUM_FIELDS_PER_SHA256, Fr.zero), Fr.ZERO, Fr.ZERO, - HistoricBlockData.empty(), + BlockHeader.empty(), ContractDeploymentData.empty(), Fr.ZERO, Fr.ZERO, @@ -161,7 +161,7 @@ export class PrivateCircuitPublicInputs { isFrArrayEmpty(this.unencryptedLogsHash) && this.encryptedLogPreimagesLength.isZero() && this.unencryptedLogPreimagesLength.isZero() && - this.historicBlockData.isEmpty() && + this.blockHeader.isEmpty() && this.contractDeploymentData.isEmpty() && this.chainId.isZero() && this.version.isZero() @@ -190,7 +190,7 @@ export class PrivateCircuitPublicInputs { fields.unencryptedLogsHash, fields.encryptedLogPreimagesLength, fields.unencryptedLogPreimagesLength, - fields.historicBlockData, + fields.blockHeader, fields.contractDeploymentData, fields.chainId, fields.version, diff --git a/yarn-project/circuits.js/src/structs/public_call_request.ts b/yarn-project/circuits.js/src/structs/public_call_request.ts index b8cb2cacd50..70556f079eb 100644 --- a/yarn-project/circuits.js/src/structs/public_call_request.ts +++ b/yarn-project/circuits.js/src/structs/public_call_request.ts @@ -27,10 +27,12 @@ export class PublicCallRequest { public contractAddress: AztecAddress, /** * Data identifying the function being called. + * TODO(#3417): Remove this since the only useful data is the function selector, which is already part of the call context. */ public functionData: FunctionData, /** * Context of the public call. + * TODO(#3417): Check if all fields of CallContext are actually needed. 
*/ public callContext: CallContext, /** diff --git a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts index aafd9b454bb..bb160941456 100644 --- a/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts +++ b/yarn-project/circuits.js/src/structs/public_circuit_public_inputs.ts @@ -15,7 +15,7 @@ import { import { FieldsOf, makeTuple } from '../utils/jsUtils.js'; import { serializeToBuffer } from '../utils/serialize.js'; import { CallContext } from './call_context.js'; -import { HistoricBlockData } from './index.js'; +import { BlockHeader } from './index.js'; /** * Contract storage read operation on a specific contract. @@ -200,7 +200,7 @@ export class PublicCircuitPublicInputs { /** * Root of the commitment trees when the call started. */ - public historicBlockData: HistoricBlockData, + public blockHeader: BlockHeader, /** * Address of the prover. */ @@ -233,7 +233,7 @@ export class PublicCircuitPublicInputs { makeTuple(MAX_NEW_L2_TO_L1_MSGS_PER_CALL, Fr.zero), makeTuple(2, Fr.zero), Fr.ZERO, - HistoricBlockData.empty(), + BlockHeader.empty(), AztecAddress.ZERO, ); } @@ -252,7 +252,7 @@ export class PublicCircuitPublicInputs { isFrArrayEmpty(this.newL2ToL1Msgs) && isFrArrayEmpty(this.unencryptedLogsHash) && this.unencryptedLogPreimagesLength.isZero() && - this.historicBlockData.isEmpty() && + this.blockHeader.isEmpty() && this.proverAddress.isZero() ); } @@ -275,7 +275,7 @@ export class PublicCircuitPublicInputs { fields.newL2ToL1Msgs, fields.unencryptedLogsHash, fields.unencryptedLogPreimagesLength, - fields.historicBlockData, + fields.blockHeader, fields.proverAddress, ] as const; } diff --git a/yarn-project/circuits.js/src/structs/rollup/base_rollup.ts b/yarn-project/circuits.js/src/structs/rollup/base_rollup.ts index 13e8edf46e1..1d6babb0ebe 100644 --- a/yarn-project/circuits.js/src/structs/rollup/base_rollup.ts +++ 
b/yarn-project/circuits.js/src/structs/rollup/base_rollup.ts @@ -2,8 +2,8 @@ import { Fr } from '@aztec/foundation/fields'; import { BufferReader, Tuple } from '@aztec/foundation/serialize'; import { + BLOCKS_TREE_HEIGHT, CONTRACT_SUBTREE_SIBLING_PATH_LENGTH, - HISTORIC_BLOCKS_TREE_HEIGHT, KERNELS_PER_BASE_ROLLUP, MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP, @@ -56,9 +56,9 @@ export class NullifierLeafPreimage { export class ConstantRollupData { constructor( /** - * Snapshot of the historic blocks roots tree at the start of the rollup. + * Snapshot of the blocks tree at the start of the rollup. */ - public startHistoricBlocksTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public startBlocksTreeSnapshot: AppendOnlyTreeSnapshot, /** * Root of the private kernel verification key tree. @@ -100,7 +100,7 @@ export class ConstantRollupData { static getFields(fields: FieldsOf) { return [ - fields.startHistoricBlocksTreeRootsSnapshot, + fields.startBlocksTreeSnapshot, fields.privateKernelVkTreeRoot, fields.publicKernelVkTreeRoot, fields.baseRollupVkHash, @@ -140,10 +140,18 @@ export class BaseRollupInputs { */ public startPublicDataTreeRoot: Fr, /** - * Snapshot of the historic blocks tree at the start of the base rollup circuit. + * Snapshot of the blocks tree at the start of the base rollup circuit. */ - public startHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + public startBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + /** + * The nullifiers to be inserted in the tree, sorted high to low. + */ + public sortedNewNullifiers: Tuple, + /** + * The indexes of the sorted nullifiers to the original ones. + */ + public sortednewNullifiersIndexes: Tuple, /** * The nullifiers which need to be updated to perform the batch insertion of the new nullifiers. * See `StandardIndexedTree.batchInsert` function for more details. 
@@ -186,10 +194,10 @@ export class BaseRollupInputs { typeof MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP >, /** - * Membership witnesses of historic blocks referred by each of the 2 kernels. + * Membership witnesses of blocks referred by each of the 2 kernels. */ - public historicBlocksTreeRootMembershipWitnesses: Tuple< - MembershipWitness, + public blocksTreeRootMembershipWitnesses: Tuple< + MembershipWitness, typeof KERNELS_PER_BASE_ROLLUP >, /** @@ -209,7 +217,9 @@ export class BaseRollupInputs { fields.startNullifierTreeSnapshot, fields.startContractTreeSnapshot, fields.startPublicDataTreeRoot, - fields.startHistoricBlocksTreeSnapshot, + fields.startBlocksTreeSnapshot, + fields.sortedNewNullifiers, + fields.sortednewNullifiersIndexes, fields.lowNullifierLeafPreimages, fields.lowNullifierMembershipWitness, fields.newCommitmentsSubtreeSiblingPath, @@ -217,7 +227,7 @@ export class BaseRollupInputs { fields.newContractsSubtreeSiblingPath, fields.newPublicDataUpdateRequestsSiblingPaths, fields.newPublicDataReadsSiblingPaths, - fields.historicBlocksTreeRootMembershipWitnesses, + fields.blocksTreeRootMembershipWitnesses, fields.constants, ] as const; } diff --git a/yarn-project/circuits.js/src/structs/rollup/root_rollup.ts b/yarn-project/circuits.js/src/structs/rollup/root_rollup.ts index 5688f5e5f5c..087accf1526 100644 --- a/yarn-project/circuits.js/src/structs/rollup/root_rollup.ts +++ b/yarn-project/circuits.js/src/structs/rollup/root_rollup.ts @@ -2,7 +2,7 @@ import { Fr } from '@aztec/foundation/fields'; import { BufferReader, Tuple } from '@aztec/foundation/serialize'; import { - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP, } from '../../constants.gen.js'; @@ -37,13 +37,13 @@ export class RootRollupInputs { */ public startL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic block roots tree at the start of the rollup. 
+ * Snapshot of the historical block roots tree at the start of the rollup. */ - public startHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + public startBlocksTreeSnapshot: AppendOnlyTreeSnapshot, /** - * Sibling path of the new historic block roots tree root. + * Sibling path of the new block tree root. */ - public newHistoricBlocksTreeSiblingPath: Tuple, + public newBlocksTreeSiblingPath: Tuple, ) {} toBuffer() { @@ -60,8 +60,8 @@ export class RootRollupInputs { fields.newL1ToL2Messages, fields.newL1ToL2MessagesTreeRootSiblingPath, fields.startL1ToL2MessagesTreeSnapshot, - fields.startHistoricBlocksTreeSnapshot, - fields.newHistoricBlocksTreeSiblingPath, + fields.startBlocksTreeSnapshot, + fields.newBlocksTreeSiblingPath, ] as const; } } @@ -120,22 +120,22 @@ export class RootRollupPublicInputs { public endPublicDataTreeRoot: Fr, /** - * Snapshot of the historic note hash tree roots tree at the start of the rollup. + * Snapshot of the historical note hash tree roots tree at the start of the rollup. */ - public startTreeOfHistoricNoteHashTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public startTreeOfHistoricalNoteHashTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic note hash tree roots tree at the end of the rollup. + * Snapshot of the historical note hash tree roots tree at the end of the rollup. */ - public endTreeOfHistoricNoteHashTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public endTreeOfHistoricalNoteHashTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic contract tree roots tree at the start of the rollup. + * Snapshot of the historical contract tree roots tree at the start of the rollup. */ - public startTreeOfHistoricContractTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public startTreeOfHistoricalContractTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic contract tree roots tree at the end of the rollup. 
+ * Snapshot of the historical contract tree roots tree at the end of the rollup. */ - public endTreeOfHistoricContractTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public endTreeOfHistoricalContractTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** * Snapshot of the L1 to L2 message tree at the start of the rollup. @@ -147,22 +147,22 @@ export class RootRollupPublicInputs { public endL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic L1 to L2 message tree roots tree at the start of the rollup. + * Snapshot of the historical L1 to L2 message tree roots tree at the start of the rollup. */ - public startTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public startTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic L1 to L2 message tree roots tree at the end of the rollup. + * Snapshot of the historical L1 to L2 message tree roots tree at the end of the rollup. */ - public endTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot: AppendOnlyTreeSnapshot, + public endTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic blocks tree roots tree at the start of the rollup. + * Snapshot of the blocks tree roots tree at the start of the rollup. */ - public startHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + public startBlocksTreeSnapshot: AppendOnlyTreeSnapshot, /** - * Snapshot of the historic blocks tree roots tree at the end of the rollup. + * Snapshot of the blocks tree roots tree at the end of the rollup. */ - public endHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + public endBlocksTreeSnapshot: AppendOnlyTreeSnapshot, /** * Hash of the calldata. 
@@ -186,16 +186,16 @@ export class RootRollupPublicInputs { fields.endContractTreeSnapshot, fields.startPublicDataTreeRoot, fields.endPublicDataTreeRoot, - fields.startTreeOfHistoricNoteHashTreeRootsSnapshot, - fields.endTreeOfHistoricNoteHashTreeRootsSnapshot, - fields.startTreeOfHistoricContractTreeRootsSnapshot, - fields.endTreeOfHistoricContractTreeRootsSnapshot, + fields.startTreeOfHistoricalNoteHashTreeRootsSnapshot, + fields.endTreeOfHistoricalNoteHashTreeRootsSnapshot, + fields.startTreeOfHistoricalContractTreeRootsSnapshot, + fields.endTreeOfHistoricalContractTreeRootsSnapshot, fields.startL1ToL2MessagesTreeSnapshot, fields.endL1ToL2MessagesTreeSnapshot, - fields.startTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot, - fields.endTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot, - fields.startHistoricBlocksTreeSnapshot, - fields.endHistoricBlocksTreeSnapshot, + fields.startTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot, + fields.endTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot, + fields.startBlocksTreeSnapshot, + fields.endBlocksTreeSnapshot, fields.calldataHash, fields.l1ToL2MessagesHash, ] as const; diff --git a/yarn-project/circuits.js/src/structs/tx_context.ts b/yarn-project/circuits.js/src/structs/tx_context.ts index 644c8812690..adab6ff8031 100644 --- a/yarn-project/circuits.js/src/structs/tx_context.ts +++ b/yarn-project/circuits.js/src/structs/tx_context.ts @@ -85,6 +85,7 @@ export class TxContext { constructor( /** * Whether this is a fee paying tx. If not other tx in a bundle will pay the fee. + * TODO(#3417): Remove fee and rebate payment fields. 
*/ public isFeePaymentTx: boolean, /** diff --git a/yarn-project/circuits.js/src/tests/factories.ts b/yarn-project/circuits.js/src/tests/factories.ts index 3df6e72dccd..2c08c857243 100644 --- a/yarn-project/circuits.js/src/tests/factories.ts +++ b/yarn-project/circuits.js/src/tests/factories.ts @@ -7,8 +7,10 @@ import { ARGS_LENGTH, AggregationObject, AppendOnlyTreeSnapshot, + BLOCKS_TREE_HEIGHT, BaseOrMergeRollupPublicInputs, BaseRollupInputs, + BlockHeader, CONTRACT_SUBTREE_SIBLING_PATH_LENGTH, CONTRACT_TREE_HEIGHT, CallContext, @@ -28,8 +30,6 @@ import { FunctionData, FunctionSelector, G1AffineElement, - HISTORIC_BLOCKS_TREE_HEIGHT, - HistoricBlockData, KERNELS_PER_BASE_ROLLUP, KernelCircuitPublicInputs, L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, @@ -114,13 +114,13 @@ export function makeTxContext(seed: number): TxContext { } /** - * Creates an arbitrary combined historic tree roots object from the given seed. + * Creates an arbitrary combined historical tree roots object from the given seed. * Note: "Combined" indicates that it's the combined output of both private and public circuit flows. - * @param seed - The seed to use for generating the combined historic tree roots. - * @returns A combined historic tree roots object. + * @param seed - The seed to use for generating the combined historical tree roots. + * @returns A combined historical tree roots object. */ -export function makeHistoricBlockData(seed: number): HistoricBlockData { - return new HistoricBlockData( +export function makeBlockHeader(seed: number): BlockHeader { + return new BlockHeader( fr(seed), fr(seed + 1), fr(seed + 2), @@ -138,7 +138,7 @@ export function makeHistoricBlockData(seed: number): HistoricBlockData { * @returns A constant data object. 
*/ export function makeConstantData(seed = 1): CombinedConstantData { - return new CombinedConstantData(makeHistoricBlockData(seed), makeTxContext(seed + 4)); + return new CombinedConstantData(makeBlockHeader(seed), makeTxContext(seed + 4)); } /** @@ -340,7 +340,7 @@ export function makePublicCircuitPublicInputs( tupleGenerator(MAX_NEW_L2_TO_L1_MSGS_PER_CALL, fr, seed + 0x900), tupleGenerator(2, fr, seed + 0x901), fr(seed + 0x902), - makeHistoricBlockData(seed + 0xa00), + makeBlockHeader(seed + 0xa00), makeAztecAddress(seed + 0xb01), ); } @@ -679,7 +679,7 @@ export function makePrivateCircuitPublicInputs(seed = 0): PrivateCircuitPublicIn unencryptedLogsHash: makeTuple(NUM_FIELDS_PER_SHA256, fr, seed + 0xa00), encryptedLogPreimagesLength: fr(seed + 0xb00), unencryptedLogPreimagesLength: fr(seed + 0xc00), - historicBlockData: makeHistoricBlockData(seed + 0xd00), + blockHeader: makeBlockHeader(seed + 0xd00), contractDeploymentData: makeContractDeploymentData(seed + 0xe00), chainId: fr(seed + 0x1400), version: fr(seed + 0x1500), @@ -726,7 +726,7 @@ export function makeConstantBaseRollupData( globalVariables: GlobalVariables | undefined = undefined, ): ConstantRollupData { return ConstantRollupData.from({ - startHistoricBlocksTreeRootsSnapshot: makeAppendOnlyTreeSnapshot(seed + 0x300), + startBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot(seed + 0x300), privateKernelVkTreeRoot: fr(seed + 0x401), publicKernelVkTreeRoot: fr(seed + 0x402), baseRollupVkHash: fr(seed + 0x403), @@ -840,7 +840,7 @@ export function makeRootRollupInputs(seed = 0, globalVariables?: GlobalVariables makeTuple(L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, fr, 0x2100), makeAppendOnlyTreeSnapshot(seed + 0x2200), makeAppendOnlyTreeSnapshot(seed + 0x2200), - makeTuple(HISTORIC_BLOCKS_TREE_HEIGHT, fr, 0x2400), + makeTuple(BLOCKS_TREE_HEIGHT, fr, 0x2400), ); } @@ -866,16 +866,16 @@ export function makeRootRollupPublicInputs( endContractTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), 
startPublicDataTreeRoot: fr((seed += 0x100)), endPublicDataTreeRoot: fr((seed += 0x100)), - startTreeOfHistoricNoteHashTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - endTreeOfHistoricNoteHashTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - startTreeOfHistoricContractTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - endTreeOfHistoricContractTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + startTreeOfHistoricalNoteHashTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + endTreeOfHistoricalNoteHashTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + startTreeOfHistoricalContractTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + endTreeOfHistoricalContractTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), startL1ToL2MessagesTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), endL1ToL2MessagesTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - startTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - endTreeOfHistoricL1ToL2MessagesTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - startHistoricBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), - endHistoricBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + startTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + endTreeOfHistoricalL1ToL2MessagesTreeRootsSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + startBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), + endBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot((seed += 0x100)), calldataHash: [new Fr(1n), new Fr(2n)], l1ToL2MessagesHash: [new Fr(3n), new Fr(4n)], }); @@ -902,7 +902,7 @@ export function makeBaseRollupInputs(seed = 0): BaseRollupInputs { const startNullifierTreeSnapshot = makeAppendOnlyTreeSnapshot(seed + 0x200); const startContractTreeSnapshot = 
makeAppendOnlyTreeSnapshot(seed + 0x300); const startPublicDataTreeRoot = fr(seed + 0x400); - const startHistoricBlocksTreeSnapshot = makeAppendOnlyTreeSnapshot(seed + 0x500); + const startBlocksTreeSnapshot = makeAppendOnlyTreeSnapshot(seed + 0x500); const lowNullifierLeafPreimages = makeTuple( MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, @@ -920,20 +920,23 @@ export function makeBaseRollupInputs(seed = 0): BaseRollupInputs { const newNullifiersSubtreeSiblingPath = makeTuple(NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, fr, seed + 0x4000); const newContractsSubtreeSiblingPath = makeTuple(CONTRACT_SUBTREE_SIBLING_PATH_LENGTH, fr, seed + 0x5000); + const sortedNewNullifiers = makeTuple(MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, fr, seed + 0x6000); + const sortednewNullifiersIndexes = makeTuple(MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, i => i, seed + 0x7000); + const newPublicDataUpdateRequestsSiblingPaths = makeTuple( MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_BASE_ROLLUP, x => makeTuple(PUBLIC_DATA_TREE_HEIGHT, fr, x), - seed + 0x6000, + seed + 0x8000, ); const newPublicDataReadsSiblingPaths = makeTuple( MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP, x => makeTuple(PUBLIC_DATA_TREE_HEIGHT, fr, x), - seed + 0x6000, + seed + 0x8000, ); - const historicBlocksTreeRootMembershipWitnesses = makeTuple(KERNELS_PER_BASE_ROLLUP, x => - makeMembershipWitness(HISTORIC_BLOCKS_TREE_HEIGHT, seed + x * 0x1000 + 0x7000), + const blocksTreeRootMembershipWitnesses = makeTuple(KERNELS_PER_BASE_ROLLUP, x => + makeMembershipWitness(BLOCKS_TREE_HEIGHT, seed + x * 0x1000 + 0x9000), ); const constants = makeConstantBaseRollupData(0x100); @@ -945,14 +948,16 @@ export function makeBaseRollupInputs(seed = 0): BaseRollupInputs { startNullifierTreeSnapshot, startContractTreeSnapshot, startPublicDataTreeRoot, - startHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot, + sortedNewNullifiers, + sortednewNullifiersIndexes, lowNullifierLeafPreimages, newCommitmentsSubtreeSiblingPath, newNullifiersSubtreeSiblingPath, 
newContractsSubtreeSiblingPath, newPublicDataUpdateRequestsSiblingPaths, newPublicDataReadsSiblingPaths, - historicBlocksTreeRootMembershipWitnesses, + blocksTreeRootMembershipWitnesses, constants, }); } diff --git a/yarn-project/deploy_npm.sh b/yarn-project/deploy_npm.sh index 029eb3a135b..9a91592b3ed 100755 --- a/yarn-project/deploy_npm.sh +++ b/yarn-project/deploy_npm.sh @@ -2,8 +2,14 @@ [ -n "${BUILD_SYSTEM_DEBUG:-}" ] && set -x # conditionally trace set -eu +<<<<<<< HEAD if [ -z "$COMMIT_TAG" ]; then echo "No commit tag, not deploying to npm." +======= +# Check we're on a release flow. +if [ -z "$COMMIT_TAG" ] && [ ! "$DRY_DEPLOY" -eq 1 ]; then + echo "Not on a release flow, skipping deploy." +>>>>>>> origin/master exit 0 fi diff --git a/yarn-project/end-to-end/src/integration_l1_publisher.test.ts b/yarn-project/end-to-end/src/integration_l1_publisher.test.ts index 533bf66783a..6d7805dd4d2 100644 --- a/yarn-project/end-to-end/src/integration_l1_publisher.test.ts +++ b/yarn-project/end-to-end/src/integration_l1_publisher.test.ts @@ -28,10 +28,10 @@ import { L1Publisher, RealRollupCircuitSimulator, SoloBlockBuilder, - getHistoricBlockData, + getBlockHeader, getL1Publisher, getVerificationKeys, - makeEmptyProcessedTx as makeEmptyProcessedTxFromHistoricTreeRoots, + makeEmptyProcessedTx as makeEmptyProcessedTxFromHistoricalTreeRoots, makeProcessedTx, } from '@aztec/sequencer-client'; import { MerkleTreeOperations, MerkleTrees } from '@aztec/world-state'; @@ -152,12 +152,8 @@ describe('L1Publisher integration', () => { }, 100_000); const makeEmptyProcessedTx = async () => { - const historicTreeRoots = await getHistoricBlockData(builderDb, prevGlobals); - const tx = await makeEmptyProcessedTxFromHistoricTreeRoots( - historicTreeRoots, - new Fr(chainId), - new Fr(config.version), - ); + const blockHeader = await getBlockHeader(builderDb, prevGlobals); + const tx = await makeEmptyProcessedTxFromHistoricalTreeRoots(blockHeader, new Fr(chainId), new Fr(config.version)); 
return tx; }; @@ -166,7 +162,7 @@ describe('L1Publisher integration', () => { const kernelOutput = KernelCircuitPublicInputs.empty(); kernelOutput.constants.txContext.chainId = fr(chainId); kernelOutput.constants.txContext.version = fr(config.version); - kernelOutput.constants.blockData = await getHistoricBlockData(builderDb, prevGlobals); + kernelOutput.constants.blockHeader = await getBlockHeader(builderDb, prevGlobals); kernelOutput.end.publicDataUpdateRequests = makeTuple( MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, i => new PublicDataUpdateRequest(fr(i), fr(0), fr(i + 10)), diff --git a/yarn-project/ethereum/src/testnet.ts b/yarn-project/ethereum/src/testnet.ts index b260356df75..4c3694f9361 100644 --- a/yarn-project/ethereum/src/testnet.ts +++ b/yarn-project/ethereum/src/testnet.ts @@ -2,6 +2,8 @@ import { Chain } from 'viem'; import { EthereumChain } from './ethereum_chain.js'; +const { DEPLOY_TAG = 'aztec-dev' } = process.env; + export const createTestnetChain = (apiKey: string) => { const chain: Chain = { id: 677868, @@ -14,10 +16,10 @@ export const createTestnetChain = (apiKey: string) => { }, rpcUrls: { default: { - http: [`https://aztec-connect-testnet-eth-host.aztec.network:8545/${apiKey}`], + http: [`https://${DEPLOY_TAG}-mainnet-fork.aztec.network:8545/${apiKey}`], }, public: { - http: [`https://aztec-connect-testnet-eth-host.aztec.network:8545/${apiKey}`], + http: [`https://${DEPLOY_TAG}-mainnet-fork.aztec.network:8545/${apiKey}`], }, }, }; diff --git a/yarn-project/foundation/src/abi/function_selector.ts b/yarn-project/foundation/src/abi/function_selector.ts index f2776319f15..b898519d53e 100644 --- a/yarn-project/foundation/src/abi/function_selector.ts +++ b/yarn-project/foundation/src/abi/function_selector.ts @@ -88,6 +88,10 @@ export class FunctionSelector { * @returns Function selector. 
*/ static fromSignature(signature: string): FunctionSelector { + // throw if signature contains whitespace + if (/\s/.test(signature)) { + throw new Error('Function Signature cannot contain whitespace'); + } return FunctionSelector.fromBuffer(keccak(Buffer.from(signature)).subarray(0, FunctionSelector.SIZE)); } diff --git a/yarn-project/merkle-tree/src/index.ts b/yarn-project/merkle-tree/src/index.ts index de19e295bfd..5181cecfc15 100644 --- a/yarn-project/merkle-tree/src/index.ts +++ b/yarn-project/merkle-tree/src/index.ts @@ -4,8 +4,12 @@ export * from './interfaces/merkle_tree.js'; export * from './interfaces/update_only_tree.js'; export * from './pedersen.js'; export * from './sparse_tree/sparse_tree.js'; -export * from './standard_indexed_tree/standard_indexed_tree.js'; +export { LowLeafWitnessData, StandardIndexedTree } from './standard_indexed_tree/standard_indexed_tree.js'; export * from './standard_tree/standard_tree.js'; export { INITIAL_LEAF } from './tree_base.js'; export { newTree } from './new_tree.js'; export { loadTree } from './load_tree.js'; +export * from './snapshots/snapshot_builder.js'; +export * from './snapshots/full_snapshot.js'; +export * from './snapshots/append_only_snapshot.js'; +export * from './snapshots/indexed_tree_snapshot.js'; diff --git a/yarn-project/merkle-tree/src/interfaces/append_only_tree.ts b/yarn-project/merkle-tree/src/interfaces/append_only_tree.ts index f4ecaabe157..77dd7ae9e5d 100644 --- a/yarn-project/merkle-tree/src/interfaces/append_only_tree.ts +++ b/yarn-project/merkle-tree/src/interfaces/append_only_tree.ts @@ -1,9 +1,10 @@ +import { TreeSnapshotBuilder } from '../snapshots/snapshot_builder.js'; import { MerkleTree } from './merkle_tree.js'; /** * A Merkle tree that supports only appending leaves and not updating existing leaves. */ -export interface AppendOnlyTree extends MerkleTree { +export interface AppendOnlyTree extends MerkleTree, TreeSnapshotBuilder { /** * Appends a set of leaf values to the tree. 
* @param leaves - The set of leaves to be appended. diff --git a/yarn-project/merkle-tree/src/interfaces/indexed_tree.ts b/yarn-project/merkle-tree/src/interfaces/indexed_tree.ts index 1b8bade092e..46c13f49bd9 100644 --- a/yarn-project/merkle-tree/src/interfaces/indexed_tree.ts +++ b/yarn-project/merkle-tree/src/interfaces/indexed_tree.ts @@ -3,6 +3,28 @@ import { LeafData, SiblingPath } from '@aztec/types'; import { LowLeafWitnessData } from '../index.js'; import { AppendOnlyTree } from './append_only_tree.js'; +/** + * The result of a batch insertion in an indexed merkle tree. + */ +export interface BatchInsertionResult { + /** + * Data for the leaves to be updated when inserting the new ones. + */ + lowLeavesWitnessData?: LowLeafWitnessData[]; + /** + * Sibling path "pointing to" where the new subtree should be inserted into the tree. + */ + newSubtreeSiblingPath: SiblingPath; + /** + * The new leaves being inserted in high to low order. This order corresponds with the order of the low leaves witness. + */ + sortedNewLeaves: Buffer[]; + /** + * The indexes of the sorted new leaves to the original ones. + */ + sortedNewLeavesIndexes: number[]; +} + /** * Indexed merkle tree. 
*/ @@ -45,8 +67,5 @@ export interface IndexedTree extends AppendOnlyTree { leaves: Buffer[], subtreeHeight: SubtreeHeight, includeUncommitted: boolean, - ): Promise< - | [LowLeafWitnessData[], SiblingPath] - | [undefined, SiblingPath] - >; + ): Promise>; } diff --git a/yarn-project/merkle-tree/src/interfaces/update_only_tree.ts b/yarn-project/merkle-tree/src/interfaces/update_only_tree.ts index 59a82d0b118..6bd5c024d0c 100644 --- a/yarn-project/merkle-tree/src/interfaces/update_only_tree.ts +++ b/yarn-project/merkle-tree/src/interfaces/update_only_tree.ts @@ -1,11 +1,12 @@ import { LeafData } from '@aztec/types'; +import { TreeSnapshotBuilder } from '../snapshots/snapshot_builder.js'; import { MerkleTree } from './merkle_tree.js'; /** * A Merkle tree that supports updates at arbitrary indices but not appending. */ -export interface UpdateOnlyTree extends MerkleTree { +export interface UpdateOnlyTree extends MerkleTree, TreeSnapshotBuilder { /** * Updates a leaf at a given index in the tree. * @param leaf - The leaf value to be updated. 
diff --git a/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.test.ts b/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.test.ts new file mode 100644 index 00000000000..b66eb2af22b --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.test.ts @@ -0,0 +1,28 @@ +import levelup, { LevelUp } from 'levelup'; + +import { Pedersen, StandardTree, newTree } from '../index.js'; +import { createMemDown } from '../test/utils/create_mem_down.js'; +import { AppendOnlySnapshotBuilder } from './append_only_snapshot.js'; +import { describeSnapshotBuilderTestSuite } from './snapshot_builder_test_suite.js'; + +describe('AppendOnlySnapshot', () => { + let tree: StandardTree; + let snapshotBuilder: AppendOnlySnapshotBuilder; + let db: LevelUp; + + beforeEach(async () => { + db = levelup(createMemDown()); + const hasher = new Pedersen(); + tree = await newTree(StandardTree, db, hasher, 'test', 4); + snapshotBuilder = new AppendOnlySnapshotBuilder(db, tree, hasher); + }); + + describeSnapshotBuilderTestSuite( + () => tree, + () => snapshotBuilder, + async tree => { + const newLeaves = Array.from({ length: 2 }).map(() => Buffer.from(Math.random().toString())); + await tree.appendLeaves(newLeaves); + }, + ); +}); diff --git a/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.ts b/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.ts new file mode 100644 index 00000000000..b530e981b27 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/append_only_snapshot.ts @@ -0,0 +1,232 @@ +import { Hasher, SiblingPath } from '@aztec/types'; + +import { LevelUp } from 'levelup'; + +import { AppendOnlyTree } from '../interfaces/append_only_tree.js'; +import { TreeBase } from '../tree_base.js'; +import { TreeSnapshot, TreeSnapshotBuilder } from './snapshot_builder.js'; + +// stores the last block that modified this node +const nodeModifiedAtBlockKey = (treeName: string, level: number, index: bigint) => + 
`snapshot:node:${treeName}:${level}:${index}:block`; + +// stores the value of the node at the above block +const historicalNodeKey = (treeName: string, level: number, index: bigint) => + `snapshot:node:${treeName}:${level}:${index}:value`; + +// metadata for a snapshot +const snapshotRootKey = (treeName: string, block: number) => `snapshot:root:${treeName}:${block}`; +const snapshotNumLeavesKey = (treeName: string, block: number) => `snapshot:numLeaves:${treeName}:${block}`; + +/** + * A more space-efficient way of storing snapshots of AppendOnlyTrees that trades space need for slower + * sibling path reads. + * + * Complexity: + * + * N - count of non-zero nodes in tree + * M - count of snapshots + * H - tree height + * + * Space complexity: O(N + M) (N nodes - stores the last snapshot for each node and M - ints, for each snapshot stores up to which leaf its written to) + * Sibling path access: + * Best case: O(H) database reads + O(1) hashes + * Worst case: O(H) database reads + O(H) hashes + */ +export class AppendOnlySnapshotBuilder implements TreeSnapshotBuilder { + constructor(private db: LevelUp, private tree: TreeBase & AppendOnlyTree, private hasher: Hasher) {} + async getSnapshot(block: number): Promise { + const meta = await this.#getSnapshotMeta(block); + + if (typeof meta === 'undefined') { + throw new Error(`Snapshot for tree ${this.tree.getName()} at block ${block} does not exist`); + } + + return new AppendOnlySnapshot(this.db, block, meta.numLeaves, meta.root, this.tree, this.hasher); + } + + async snapshot(block: number): Promise { + const meta = await this.#getSnapshotMeta(block); + if (typeof meta !== 'undefined') { + // no-op, we already have a snapshot + return new AppendOnlySnapshot(this.db, block, meta.numLeaves, meta.root, this.tree, this.hasher); + } + + const batch = this.db.batch(); + const root = this.tree.getRoot(false); + const depth = this.tree.getDepth(); + const treeName = this.tree.getName(); + const queue: [Buffer, number, 
bigint][] = [[root, 0, 0n]]; + + // walk the tree in BF and store latest nodes + while (queue.length > 0) { + const [node, level, index] = queue.shift()!; + + const historicalValue = await this.db.get(historicalNodeKey(treeName, level, index)).catch(() => undefined); + if (!historicalValue || !node.equals(historicalValue)) { + // we've never seen this node before or it's different than before + // update the historical tree and tag it with the block that modified it + batch.put(nodeModifiedAtBlockKey(treeName, level, index), String(block)); + batch.put(historicalNodeKey(treeName, level, index), node); + } else { + // if this node hasn't changed, that means, nothing below it has changed either + continue; + } + + if (level + 1 > depth) { + // short circuit if we've reached the leaf level + // otherwise getNode might throw if we ask for the children of a leaf + continue; + } + + // these could be undefined because zero hashes aren't stored in the tree + const [lhs, rhs] = await Promise.all([ + this.tree.getNode(level + 1, 2n * index), + this.tree.getNode(level + 1, 2n * index + 1n), + ]); + + if (lhs) { + queue.push([lhs, level + 1, 2n * index]); + } + + if (rhs) { + queue.push([rhs, level + 1, 2n * index + 1n]); + } + } + + const numLeaves = this.tree.getNumLeaves(false); + batch.put(snapshotNumLeavesKey(treeName, block), String(numLeaves)); + batch.put(snapshotRootKey(treeName, block), root); + await batch.write(); + + return new AppendOnlySnapshot(this.db, block, numLeaves, root, this.tree, this.hasher); + } + + async #getSnapshotMeta(block: number): Promise< + | { + /** The root of the tree snapshot */ + root: Buffer; + /** The number of leaves in the tree snapshot */ + numLeaves: bigint; + } + | undefined + > { + try { + const treeName = this.tree.getName(); + const root = await this.db.get(snapshotRootKey(treeName, block)); + const numLeaves = BigInt(await this.db.get(snapshotNumLeavesKey(treeName, block))); + return { root, numLeaves }; + } catch (err) { + 
+ return undefined; + } + } +} + +/** + * A read-only view of an append-only tree as it stood at a given block, serving sibling paths and leaf values from the stored historical node records. + */ +class AppendOnlySnapshot implements TreeSnapshot { + constructor( + private db: LevelUp, + private block: number, + private leafCount: bigint, + private historicalRoot: Buffer, + private tree: TreeBase & AppendOnlyTree, + private hasher: Hasher, + ) {} + + public async getSiblingPath(index: bigint): Promise> { + const path: Buffer[] = []; + const depth = this.tree.getDepth(); + let level = depth; + + while (level > 0) { + const isRight = index & 0x01n; + const siblingIndex = isRight ? index - 1n : index + 1n; + + const sibling = await this.#getHistoricalNodeValue(level, siblingIndex); + path.push(sibling); + + level -= 1; + index >>= 1n; + } + + return new SiblingPath(depth as N, path); + } + + getDepth(): number { + return this.tree.getDepth(); + } + + getNumLeaves(): bigint { + return this.leafCount; + } + + getRoot(): Buffer { + // we could recompute it, but it's way cheaper to just store the root + return this.historicalRoot; + } + + async getLeafValue(index: bigint): Promise { + const leafLevel = this.getDepth(); + const blockNumber = await this.#getBlockNumberThatModifiedNode(leafLevel, index); + + // leaf hasn't been set yet + if (typeof blockNumber === 'undefined') { + return undefined; + } + + // leaf was set some time in the past + if (blockNumber <= this.block) { + return this.db.get(historicalNodeKey(this.tree.getName(), leafLevel, index)); + } + + // leaf has been set but in a block in the future + return undefined; + } + + async #getHistoricalNodeValue(level: number, index: bigint): Promise { + const blockNumber = await this.#getBlockNumberThatModifiedNode(level, index); + + // node has never been set + if (typeof blockNumber === 'undefined') { + return this.tree.getZeroHash(level); + } + + // node was set some time in the past + if (blockNumber <= this.block) { + return this.db.get(historicalNodeKey(this.tree.getName(), level, index)); + } + + // the node has been modified since this snapshot was taken 
+ // because we're working with an AppendOnly tree, historical leaves never change + // so what we do instead is rebuild this Merkle path up using zero hashes as needed + // worst case this will do O(H) hashes + // + // we first check if this subtree was touched by the block + // compare how many leaves this block added to the leaf interval of this subtree + // if they don't intersect then the whole subtree was a hash of zero + // if they do then we need to rebuild the merkle tree + const depth = this.tree.getDepth(); + const leafStart = index * 2n ** BigInt(depth - level); + if (leafStart >= this.leafCount) { + return this.tree.getZeroHash(level); + } + + const [lhs, rhs] = await Promise.all([ + this.#getHistoricalNodeValue(level + 1, 2n * index), + this.#getHistoricalNodeValue(level + 1, 2n * index + 1n), + ]); + + return this.hasher.hash(lhs, rhs); + } + + async #getBlockNumberThatModifiedNode(level: number, index: bigint): Promise { + try { + const value: Buffer | string = await this.db.get(nodeModifiedAtBlockKey(this.tree.getName(), level, index)); + return parseInt(value.toString(), 10); + } catch (err) { + return undefined; + } + } +} diff --git a/yarn-project/merkle-tree/src/snapshots/base_full_snapshot.ts b/yarn-project/merkle-tree/src/snapshots/base_full_snapshot.ts new file mode 100644 index 00000000000..d77204beafa --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/base_full_snapshot.ts @@ -0,0 +1,221 @@ +import { SiblingPath } from '@aztec/types'; + +import { LevelUp, LevelUpChain } from 'levelup'; + +import { TreeBase } from '../tree_base.js'; +import { TreeSnapshot, TreeSnapshotBuilder } from './snapshot_builder.js'; + +// key for a node's children +const snapshotChildKey = (node: Buffer, child: 0 | 1) => + Buffer.concat([Buffer.from('snapshot:node:'), node, Buffer.from(':' + child)]); + +// metadata for a snapshot +const snapshotRootKey = (treeName: string, block: number) => `snapshot:root:${treeName}:${block}`; +const snapshotNumLeavesKey = 
(treeName: string, block: number) => `snapshot:numLeaves:${treeName}:${block}`; + +/** + * Builds a full snapshot of a tree. This implementation works for any Merkle tree and stores + * it in a database in a similar way to how a tree is stored in memory, using pointers. + * + * Sharing the same database between versions and trees is recommended as the trees would share + * structure. + * + * Implement the protected method `handleLeaf` to store any additional data you need for each leaf. + * + * Complexity: + * N - count of non-zero nodes in tree + * M - count of snapshots + * H - tree height + * Worst case space complexity: O(N * M) + * Sibling path access: O(H) database reads + */ +export abstract class BaseFullTreeSnapshotBuilder + implements TreeSnapshotBuilder +{ + constructor(protected db: LevelUp, protected tree: T) {} + + async snapshot(block: number): Promise { + const snapshotMetadata = await this.#getSnapshotMeta(block); + + if (snapshotMetadata) { + return this.openSnapshot(snapshotMetadata.root, snapshotMetadata.numLeaves); + } + + const batch = this.db.batch(); + const root = this.tree.getRoot(false); + const numLeaves = this.tree.getNumLeaves(false); + const depth = this.tree.getDepth(); + const queue: [Buffer, number, bigint][] = [[root, 0, 0n]]; + + // walk the tree breadth-first and store each of its nodes in the database + // for each node we save two keys + // :0 -> + // :1 -> + while (queue.length > 0) { + const [node, level, i] = queue.shift()!; + // check if the database already has a child for this tree + // if it does, then we know we've seen the whole subtree below it before + // and we don't have to traverse it anymore + // we use the left child here, but it could be anything that shows we've stored the node before + const exists: Buffer | undefined = await this.db.get(snapshotChildKey(node, 0)).catch(() => undefined); + if (exists) { + continue; + } + + if (level + 1 > depth) { + // short circuit if we've reached the leaf level + // 
otherwise getNode might throw if we ask for the children of a leaf + this.handleLeaf(i, node, batch); + continue; + } + + const [lhs, rhs] = await Promise.all([ + this.tree.getNode(level + 1, 2n * i), + this.tree.getNode(level + 1, 2n * i + 1n), + ]); + + // we want the zero hash at the children's level, not the node's level + const zeroHash = this.tree.getZeroHash(level + 1); + + batch.put(snapshotChildKey(node, 0), lhs ?? zeroHash); + batch.put(snapshotChildKey(node, 1), rhs ?? zeroHash); + + // enqueue the children only if they're not zero hashes + if (lhs) { + queue.push([lhs, level + 1, 2n * i]); + } + + if (rhs) { + queue.push([rhs, level + 1, 2n * i + 1n]); + } + } + + batch.put(snapshotRootKey(this.tree.getName(), block), root); + batch.put(snapshotNumLeavesKey(this.tree.getName(), block), String(numLeaves)); + await batch.write(); + + return this.openSnapshot(root, numLeaves); + } + + protected handleLeaf(_index: bigint, _node: Buffer, _batch: LevelUpChain) { + return; + } + + async getSnapshot(version: number): Promise { + const snapshotMetadata = await this.#getSnapshotMeta(version); + + if (!snapshotMetadata) { + throw new Error(`Version ${version} does not exist for tree ${this.tree.getName()}`); + } + + return this.openSnapshot(snapshotMetadata.root, snapshotMetadata.numLeaves); + } + + protected abstract openSnapshot(root: Buffer, numLeaves: bigint): S; + + async #getSnapshotMeta(block: number): Promise< + | { + /** The root of the tree snapshot */ + root: Buffer; + /** The number of leaves in the tree snapshot */ + numLeaves: bigint; + } + | undefined + > { + try { + const treeName = this.tree.getName(); + const root = await this.db.get(snapshotRootKey(treeName, block)); + const numLeaves = BigInt(await this.db.get(snapshotNumLeavesKey(treeName, block))); + return { root, numLeaves }; + } catch (err) { + return undefined; + } + } +} + +/** + * A source of sibling paths from a snapshot tree + */ +export class BaseFullTreeSnapshot implements 
TreeSnapshot { + constructor( + protected db: LevelUp, + protected historicRoot: Buffer, + protected numLeaves: bigint, + protected tree: TreeBase, + ) {} + + async getSiblingPath(index: bigint): Promise> { + const siblings: Buffer[] = []; + + for await (const [_node, sibling] of this.pathFromRootToLeaf(index)) { + siblings.push(sibling); + } + + // we got the siblings we were looking for, but they are in root-leaf order + // reverse them here so we have leaf-root (what SiblingPath expects) + siblings.reverse(); + + return new SiblingPath(this.tree.getDepth() as N, siblings); + } + + async getLeafValue(index: bigint): Promise { + let leafNode: Buffer | undefined = undefined; + for await (const [node, _sibling] of this.pathFromRootToLeaf(index)) { + leafNode = node; + } + + return leafNode; + } + + getDepth(): number { + return this.tree.getDepth(); + } + + getRoot(): Buffer { + return this.historicRoot; + } + + getNumLeaves(): bigint { + return this.numLeaves; + } + + protected async *pathFromRootToLeaf(leafIndex: bigint) { + const root = this.historicRoot; + const pathFromRoot = this.#getPathFromRoot(leafIndex); + + let node: Buffer = root; + for (let i = 0; i < pathFromRoot.length; i++) { + // get both children. We'll need both anyway (one to keep track of, the other to walk down to) + const children: [Buffer, Buffer] = await Promise.all([ + this.db.get(snapshotChildKey(node, 0)), + this.db.get(snapshotChildKey(node, 1)), + ]).catch(() => [this.tree.getZeroHash(i + 1), this.tree.getZeroHash(i + 1)]); + const next = children[pathFromRoot[i]]; + const sibling = children[(pathFromRoot[i] + 1) % 2]; + + yield [next, sibling]; + + node = next; + } + } + + /** + * Calculates the path from the root to the target leaf. Returns an array of 0s and 1s, + * each 0 represents walking down a left child and each 1 walking down to the child on the right. 
+ * + * @param leafIndex - The target leaf + * @returns An array of 0s and 1s + */ + #getPathFromRoot(leafIndex: bigint): ReadonlyArray<0 | 1> { + const path: Array<0 | 1> = []; + let level = this.tree.getDepth(); + while (level > 0) { + path.push(leafIndex & 0x01n ? 1 : 0); + leafIndex >>= 1n; + level--; + } + + path.reverse(); + return path; + } +} diff --git a/yarn-project/merkle-tree/src/snapshots/full_snapshot.test.ts b/yarn-project/merkle-tree/src/snapshots/full_snapshot.test.ts new file mode 100644 index 00000000000..3f2cc2af791 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/full_snapshot.test.ts @@ -0,0 +1,27 @@ +import levelup, { LevelUp } from 'levelup'; + +import { Pedersen, StandardTree, newTree } from '../index.js'; +import { createMemDown } from '../test/utils/create_mem_down.js'; +import { FullTreeSnapshotBuilder } from './full_snapshot.js'; +import { describeSnapshotBuilderTestSuite } from './snapshot_builder_test_suite.js'; + +describe('FullSnapshotBuilder', () => { + let tree: StandardTree; + let snapshotBuilder: FullTreeSnapshotBuilder; + let db: LevelUp; + + beforeEach(async () => { + db = levelup(createMemDown()); + tree = await newTree(StandardTree, db, new Pedersen(), 'test', 4); + snapshotBuilder = new FullTreeSnapshotBuilder(db, tree); + }); + + describeSnapshotBuilderTestSuite( + () => tree, + () => snapshotBuilder, + async () => { + const newLeaves = Array.from({ length: 2 }).map(() => Buffer.from(Math.random().toString())); + await tree.appendLeaves(newLeaves); + }, + ); +}); diff --git a/yarn-project/merkle-tree/src/snapshots/full_snapshot.ts b/yarn-project/merkle-tree/src/snapshots/full_snapshot.ts new file mode 100644 index 00000000000..c78d0ebb188 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/full_snapshot.ts @@ -0,0 +1,26 @@ +import { TreeBase } from '../tree_base.js'; +import { BaseFullTreeSnapshot, BaseFullTreeSnapshotBuilder } from './base_full_snapshot.js'; +import { TreeSnapshot, TreeSnapshotBuilder } 
from './snapshot_builder.js'; + +/** + * Builds a full snapshot of a tree. This implementation works for any Merkle tree and stores + * it in a database in a similar way to how a tree is stored in memory, using pointers. + * + * Sharing the same database between versions and trees is recommended as the trees would share + * structure. + * + * Complexity: + * N - count of non-zero nodes in tree + * M - count of snapshots + * H - tree height + * Worst case space complexity: O(N * M) + * Sibling path access: O(H) database reads + */ +export class FullTreeSnapshotBuilder + extends BaseFullTreeSnapshotBuilder + implements TreeSnapshotBuilder +{ + protected openSnapshot(root: Buffer, numLeaves: bigint): TreeSnapshot { + return new BaseFullTreeSnapshot(this.db, root, numLeaves, this.tree); + } +} diff --git a/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.test.ts b/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.test.ts new file mode 100644 index 00000000000..631531fcfc2 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.test.ts @@ -0,0 +1,96 @@ +import levelup, { LevelUp } from 'levelup'; + +import { Pedersen, newTree } from '../index.js'; +import { StandardIndexedTreeWithAppend } from '../standard_indexed_tree/test/standard_indexed_tree_with_append.js'; +import { createMemDown } from '../test/utils/create_mem_down.js'; +import { IndexedTreeSnapshotBuilder } from './indexed_tree_snapshot.js'; +import { describeSnapshotBuilderTestSuite } from './snapshot_builder_test_suite.js'; + +describe('IndexedTreeSnapshotBuilder', () => { + let db: LevelUp; + let tree: StandardIndexedTreeWithAppend; + let snapshotBuilder: IndexedTreeSnapshotBuilder; + + beforeEach(async () => { + db = levelup(createMemDown()); + tree = await newTree(StandardIndexedTreeWithAppend, db, new Pedersen(), 'test', 4); + snapshotBuilder = new IndexedTreeSnapshotBuilder(db, tree); + }); + + describeSnapshotBuilderTestSuite( + () => tree, + () => 
snapshotBuilder, + async () => { + const newLeaves = Array.from({ length: 2 }).map(() => Buffer.from(Math.random().toString())); + await tree.appendLeaves(newLeaves); + }, + ); + + describe('getSnapshot', () => { + it('returns historical leaf data', async () => { + await tree.appendLeaves([Buffer.from('a'), Buffer.from('b'), Buffer.from('c')]); + await tree.commit(); + const expectedLeavesAtBlock1 = await Promise.all([ + tree.getLatestLeafDataCopy(0, false), + tree.getLatestLeafDataCopy(1, false), + tree.getLatestLeafDataCopy(2, false), + // id'expect these to be undefined, but leaf 3 isn't? + // must be some indexed-tree quirk I don't quite understand yet + tree.getLatestLeafDataCopy(3, false), + tree.getLatestLeafDataCopy(4, false), + tree.getLatestLeafDataCopy(5, false), + ]); + + await snapshotBuilder.snapshot(1); + + await tree.appendLeaves([Buffer.from('d'), Buffer.from('e'), Buffer.from('f')]); + await tree.commit(); + const expectedLeavesAtBlock2 = await Promise.all([ + tree.getLatestLeafDataCopy(0, false), + tree.getLatestLeafDataCopy(1, false), + tree.getLatestLeafDataCopy(2, false), + tree.getLatestLeafDataCopy(3, false), + tree.getLatestLeafDataCopy(4, false), + tree.getLatestLeafDataCopy(5, false), + ]); + + await snapshotBuilder.snapshot(2); + + const snapshot1 = await snapshotBuilder.getSnapshot(1); + const actualLeavesAtBlock1 = await Promise.all([ + snapshot1.getLatestLeafDataCopy(0n), + snapshot1.getLatestLeafDataCopy(1n), + snapshot1.getLatestLeafDataCopy(2n), + snapshot1.getLatestLeafDataCopy(3n), + snapshot1.getLatestLeafDataCopy(4n), + snapshot1.getLatestLeafDataCopy(5n), + ]); + expect(actualLeavesAtBlock1).toEqual(expectedLeavesAtBlock1); + + const snapshot2 = await snapshotBuilder.getSnapshot(2); + const actualLeavesAtBlock2 = await Promise.all([ + snapshot2.getLatestLeafDataCopy(0n), + snapshot2.getLatestLeafDataCopy(1n), + snapshot2.getLatestLeafDataCopy(2n), + snapshot2.getLatestLeafDataCopy(3n), + snapshot2.getLatestLeafDataCopy(4n), + 
snapshot2.getLatestLeafDataCopy(5n), + ]); + expect(actualLeavesAtBlock2).toEqual(expectedLeavesAtBlock2); + }); + }); + + describe('findIndexOfPreviousValue', () => { + it('returns the index of the leaf with the closest value to the given value', async () => { + await tree.appendLeaves([Buffer.from('a'), Buffer.from('f'), Buffer.from('d')]); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + const historicalPrevValue = tree.findIndexOfPreviousValue(2n, false); + + await tree.appendLeaves([Buffer.from('c'), Buffer.from('b'), Buffer.from('e')]); + await tree.commit(); + + await expect(snapshot.findIndexOfPreviousValue(2n)).resolves.toEqual(historicalPrevValue); + }); + }); +}); diff --git a/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.ts b/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.ts new file mode 100644 index 00000000000..6725bd394e5 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/indexed_tree_snapshot.ts @@ -0,0 +1,92 @@ +import { toBufferBE } from '@aztec/foundation/bigint-buffer'; +import { LeafData } from '@aztec/types'; + +import { LevelUp, LevelUpChain } from 'levelup'; + +import { IndexedTree } from '../interfaces/indexed_tree.js'; +import { decodeTreeValue, encodeTreeValue } from '../standard_indexed_tree/standard_indexed_tree.js'; +import { TreeBase } from '../tree_base.js'; +import { BaseFullTreeSnapshot, BaseFullTreeSnapshotBuilder } from './base_full_snapshot.js'; +import { IndexedTreeSnapshot, TreeSnapshotBuilder } from './snapshot_builder.js'; + +const snapshotLeafValue = (node: Buffer, index: bigint) => + Buffer.concat([Buffer.from('snapshot:leaf:'), node, Buffer.from(':' + index)]); + +/** a */ +export class IndexedTreeSnapshotBuilder + extends BaseFullTreeSnapshotBuilder + implements TreeSnapshotBuilder +{ + constructor(db: LevelUp, tree: IndexedTree & TreeBase) { + super(db, tree); + } + + protected openSnapshot(root: Buffer, numLeaves: bigint): IndexedTreeSnapshot { + 
return new IndexedTreeSnapshotImpl(this.db, root, numLeaves, this.tree); + } + + protected handleLeaf(index: bigint, node: Buffer, batch: LevelUpChain) { + const leafData = this.tree.getLatestLeafDataCopy(Number(index), false); + if (leafData) { + batch.put(snapshotLeafValue(node, index), encodeTreeValue(leafData)); + } + } +} + +/** A snapshot of an indexed tree at a particular point in time */ +class IndexedTreeSnapshotImpl extends BaseFullTreeSnapshot implements IndexedTreeSnapshot { + async getLeafValue(index: bigint): Promise { + const leafData = await this.getLatestLeafDataCopy(index); + return leafData ? toBufferBE(leafData.value, 32) : undefined; + } + + async getLatestLeafDataCopy(index: bigint): Promise { + const leafNode = await super.getLeafValue(index); + const leafValue = await this.db.get(snapshotLeafValue(leafNode!, index)).catch(() => undefined); + if (leafValue) { + return decodeTreeValue(leafValue); + } else { + return undefined; + } + } + + async findIndexOfPreviousValue(newValue: bigint): Promise<{ + /** + * The index of the found leaf. + */ + index: number; + /** + * A flag indicating if the corresponding leaf's value is equal to `newValue`. 
+ */ + alreadyPresent: boolean; + }> { + const numLeaves = this.getNumLeaves(); + const diff: bigint[] = []; + + for (let i = 0; i < numLeaves; i++) { + // this is very inefficient + const storedLeaf = await this.getLatestLeafDataCopy(BigInt(i))!; + + // The stored leaf can be undefined if it addresses an empty leaf + // If the leaf is empty we do the same as if the leaf was larger + if (storedLeaf === undefined) { + diff.push(newValue); + } else if (storedLeaf.value > newValue) { + diff.push(newValue); + } else if (storedLeaf.value === newValue) { + return { index: i, alreadyPresent: true }; + } else { + diff.push(newValue - storedLeaf.value); + } + } + + let minIndex = 0; + for (let i = 1; i < diff.length; i++) { + if (diff[i] < diff[minIndex]) { + minIndex = i; + } + } + + return { index: minIndex, alreadyPresent: false }; + } +} diff --git a/yarn-project/merkle-tree/src/snapshots/snapshot_builder.ts b/yarn-project/merkle-tree/src/snapshots/snapshot_builder.ts new file mode 100644 index 00000000000..a6722306301 --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/snapshot_builder.ts @@ -0,0 +1,75 @@ +import { LeafData, SiblingPath } from '@aztec/types'; + +/** + * An interface for a tree that can record snapshots of its contents. + */ +export interface TreeSnapshotBuilder { + /** + * Creates a snapshot of the tree at the given version. + * @param block - The version to snapshot the tree at. + */ + snapshot(block: number): Promise; + + /** + * Returns a snapshot of the tree at the given version. + * @param block - The version of the snapshot to return. + */ + getSnapshot(block: number): Promise; +} + +/** + * A tree snapshot + */ +export interface TreeSnapshot { + /** + * Returns the current root of the tree. + */ + getRoot(): Buffer; + + /** + * Returns the number of leaves in the tree. + */ + getDepth(): number; + + /** + * Returns the number of leaves in the tree. 
+ */ + getNumLeaves(): bigint; + + /** + * Returns the value of a leaf at the specified index. + * @param index - The index of the leaf value to be returned. + */ + getLeafValue(index: bigint): Promise; + + /** + * Returns the sibling path for a requested leaf index. + * @param index - The index of the leaf for which a sibling path is required. + */ + getSiblingPath(index: bigint): Promise>; +} + +/** A snapshot of an indexed tree */ +export interface IndexedTreeSnapshot extends TreeSnapshot { + /** + * Gets the historical data for a leaf + * @param index - The index of the leaf to get the data for + */ + getLatestLeafDataCopy(index: bigint): Promise; + + /** + * Finds the index of the largest leaf whose value is less than or equal to the provided value. + * @param newValue - The new value to be inserted into the tree. + * @returns The found leaf index and a flag indicating if the corresponding leaf's value is equal to `newValue`. + */ + findIndexOfPreviousValue(newValue: bigint): Promise<{ + /** + * The index of the found leaf. + */ + index: number; + /** + * A flag indicating if the corresponding leaf's value is equal to `newValue`. 
+ */ + alreadyPresent: boolean; + }>; +} diff --git a/yarn-project/merkle-tree/src/snapshots/snapshot_builder_test_suite.ts b/yarn-project/merkle-tree/src/snapshots/snapshot_builder_test_suite.ts new file mode 100644 index 00000000000..3b66c36164c --- /dev/null +++ b/yarn-project/merkle-tree/src/snapshots/snapshot_builder_test_suite.ts @@ -0,0 +1,197 @@ +import { TreeBase } from '../tree_base.js'; +import { TreeSnapshotBuilder } from './snapshot_builder.js'; + +/** Creates a test suit for snapshots */ +export function describeSnapshotBuilderTestSuite( + getTree: () => T, + getSnapshotBuilder: () => S, + modifyTree: (tree: T) => Promise, +) { + describe('SnapshotBuilder', () => { + let tree: T; + let snapshotBuilder: S; + let leaves: bigint[]; + + beforeEach(() => { + tree = getTree(); + snapshotBuilder = getSnapshotBuilder(); + + leaves = Array.from({ length: 4 }).map(() => BigInt(Math.floor(Math.random() * 2 ** tree.getDepth()))); + }); + + describe('snapshot', () => { + it('takes snapshots', async () => { + await modifyTree(tree); + await tree.commit(); + await expect(snapshotBuilder.snapshot(1)).resolves.toBeDefined(); + }); + + it('is idempotent', async () => { + await modifyTree(tree); + await tree.commit(); + + const block = 1; + const snapshot = await snapshotBuilder.snapshot(block); + await expect(snapshotBuilder.snapshot(block)).resolves.toEqual(snapshot); + }); + + it('returns the same path if tree has not diverged', async () => { + await modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + + const historicPaths = await Promise.all(leaves.map(leaf => snapshot.getSiblingPath(leaf))); + const expectedPaths = await Promise.all(leaves.map(leaf => tree.getSiblingPath(leaf, false))); + + for (const [index, path] of historicPaths.entries()) { + expect(path).toEqual(expectedPaths[index]); + } + }); + + it('returns historic paths if tree has diverged and no new snapshots have been taken', async () => { + await 
modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + + const expectedPaths = await Promise.all(leaves.map(leaf => tree.getSiblingPath(leaf, false))); + + await modifyTree(tree); + await tree.commit(); + + const historicPaths = await Promise.all(leaves.map(leaf => snapshot.getSiblingPath(leaf))); + + for (const [index, path] of historicPaths.entries()) { + expect(path).toEqual(expectedPaths[index]); + } + }); + + it('retains old snapshots even if new one are created', async () => { + await modifyTree(tree); + await tree.commit(); + + const expectedPaths = await Promise.all(leaves.map(leaf => tree.getSiblingPath(leaf, false))); + + const snapshot = await snapshotBuilder.snapshot(1); + + await modifyTree(tree); + await tree.commit(); + + await snapshotBuilder.snapshot(2); + + // check that snapshot 2 has not influenced snapshot(1) at all + const historicPaths = await Promise.all(leaves.map(leaf => snapshot.getSiblingPath(leaf))); + + for (const [index, path] of historicPaths.entries()) { + expect(path).toEqual(expectedPaths[index]); + } + }); + + it('retains old snapshots even if new one are created and the tree diverges', async () => { + await modifyTree(tree); + await tree.commit(); + + const expectedPaths = await Promise.all(leaves.map(leaf => tree.getSiblingPath(leaf, false))); + + const snapshot = await snapshotBuilder.snapshot(1); + + await modifyTree(tree); + await tree.commit(); + + await snapshotBuilder.snapshot(2); + + await modifyTree(tree); + await tree.commit(); + + // check that snapshot 2 has not influenced snapshot(1) at all + // and that the diverging tree does not influence the old snapshot + const historicPaths = await Promise.all(leaves.map(leaf => snapshot.getSiblingPath(leaf))); + + for (const [index, path] of historicPaths.entries()) { + expect(path).toEqual(expectedPaths[index]); + } + }); + }); + + describe('getSnapshot', () => { + it('returns old snapshots', async () => { + await modifyTree(tree); + 
await tree.commit(); + const expectedPaths = await Promise.all(leaves.map(leaf => tree.getSiblingPath(leaf, false))); + await snapshotBuilder.snapshot(1); + + for (let i = 2; i < 5; i++) { + await modifyTree(tree); + await tree.commit(); + await snapshotBuilder.snapshot(i); + } + + const firstSnapshot = await snapshotBuilder.getSnapshot(1); + const historicPaths = await Promise.all(leaves.map(leaf => firstSnapshot.getSiblingPath(leaf))); + + for (const [index, path] of historicPaths.entries()) { + expect(path).toEqual(expectedPaths[index]); + } + }); + + it('throws if an unknown snapshot is requested', async () => { + await modifyTree(tree); + await tree.commit(); + await snapshotBuilder.snapshot(1); + + await expect(snapshotBuilder.getSnapshot(2)).rejects.toThrow(); + }); + }); + + describe('getRoot', () => { + it('returns the historical root of the tree when the snapshot was taken', async () => { + await modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + const historicalRoot = tree.getRoot(false); + + await modifyTree(tree); + await tree.commit(); + + expect(snapshot.getRoot()).toEqual(historicalRoot); + expect(snapshot.getRoot()).not.toEqual(tree.getRoot(false)); + }); + }); + + describe('getDepth', () => { + it('returns the same depth as the tree', async () => { + await modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + expect(snapshot.getDepth()).toEqual(tree.getDepth()); + }); + }); + + describe('getNumLeaves', () => { + it('returns the historical leaves count when the snapshot was taken', async () => { + await modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + const historicalNumLeaves = tree.getNumLeaves(false); + + await modifyTree(tree); + await tree.commit(); + + expect(snapshot.getNumLeaves()).toEqual(historicalNumLeaves); + }); + }); + + describe('getLeafValue', () => { + it('returns the historical leaf value when 
the snapshot was taken', async () => { + await modifyTree(tree); + await tree.commit(); + const snapshot = await snapshotBuilder.snapshot(1); + const historicalLeafValue = await tree.getLeafValue(0n, false); + + await modifyTree(tree); + await tree.commit(); + + await expect(snapshot.getLeafValue(0n)).resolves.toEqual(historicalLeafValue); + }); + }); + }); +} diff --git a/yarn-project/merkle-tree/src/sparse_tree/sparse_tree.ts b/yarn-project/merkle-tree/src/sparse_tree/sparse_tree.ts index 463c6431fa9..92cdc4152fc 100644 --- a/yarn-project/merkle-tree/src/sparse_tree/sparse_tree.ts +++ b/yarn-project/merkle-tree/src/sparse_tree/sparse_tree.ts @@ -1,10 +1,14 @@ import { UpdateOnlyTree } from '../interfaces/update_only_tree.js'; +import { FullTreeSnapshotBuilder } from '../snapshots/full_snapshot.js'; +import { TreeSnapshot } from '../snapshots/snapshot_builder.js'; import { INITIAL_LEAF, TreeBase } from '../tree_base.js'; /** * A Merkle tree implementation that uses a LevelDB database to store the tree. */ export class SparseTree extends TreeBase implements UpdateOnlyTree { + #snapshotBuilder = new FullTreeSnapshotBuilder(this.db, this); + /** * Updates a leaf in the tree. * @param leaf - New contents of the leaf. @@ -29,4 +33,12 @@ export class SparseTree extends TreeBase implements UpdateOnlyTree { this.cachedSize = (this.cachedSize ?? 
this.size) + 1n; } } + + public snapshot(block: number): Promise { + return this.#snapshotBuilder.snapshot(block); + } + + public getSnapshot(block: number): Promise { + return this.#snapshotBuilder.getSnapshot(block); + } } diff --git a/yarn-project/merkle-tree/src/standard_indexed_tree/standard_indexed_tree.ts b/yarn-project/merkle-tree/src/standard_indexed_tree/standard_indexed_tree.ts index c32ce3221e1..ebbf3a3d0ee 100644 --- a/yarn-project/merkle-tree/src/standard_indexed_tree/standard_indexed_tree.ts +++ b/yarn-project/merkle-tree/src/standard_indexed_tree/standard_indexed_tree.ts @@ -1,8 +1,11 @@ import { toBigIntBE, toBufferBE } from '@aztec/foundation/bigint-buffer'; +import { Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; import { LeafData, SiblingPath } from '@aztec/types'; -import { IndexedTree } from '../interfaces/indexed_tree.js'; +import { BatchInsertionResult, IndexedTree } from '../interfaces/indexed_tree.js'; +import { IndexedTreeSnapshotBuilder } from '../snapshots/indexed_tree_snapshot.js'; +import { IndexedTreeSnapshot } from '../snapshots/snapshot_builder.js'; import { TreeBase } from '../tree_base.js'; const log = createDebugLogger('aztec:standard-indexed-tree'); @@ -53,15 +56,14 @@ function getEmptyLowLeafWitness(treeHeight: N): LowLeafWitness }; } -// eslint-disable-next-line @typescript-eslint/no-unused-vars -const encodeTreeValue = (leafData: LeafData) => { +export const encodeTreeValue = (leafData: LeafData) => { const valueAsBuffer = toBufferBE(leafData.value, 32); const indexAsBuffer = toBufferBE(leafData.nextIndex, 32); const nextValueAsBuffer = toBufferBE(leafData.nextValue, 32); return Buffer.concat([valueAsBuffer, indexAsBuffer, nextValueAsBuffer]); }; -const decodeTreeValue = (buf: Buffer) => { +export const decodeTreeValue = (buf: Buffer) => { const value = toBigIntBE(buf.subarray(0, 32)); const nextIndex = toBigIntBE(buf.subarray(32, 64)); const nextValue = 
toBigIntBE(buf.subarray(64, 96)); @@ -76,6 +78,8 @@ const decodeTreeValue = (buf: Buffer) => { * Indexed merkle tree. */ export class StandardIndexedTree extends TreeBase implements IndexedTree { + #snapshotBuilder = new IndexedTreeSnapshotBuilder(this.db, this); + protected leaves: LeafData[] = []; protected cachedLeaves: { [key: number]: LeafData } = {}; @@ -317,8 +321,6 @@ export class StandardIndexedTree extends TreeBase implements IndexedTree { * * This offers massive circuit performance savings over doing incremental insertions. * - * A description of the algorithm can be found here: https://colab.research.google.com/drive/1A0gizduSi4FIiIJZ8OylwIpO9-OTqV-R - * * WARNING: This function has side effects, it will insert values into the tree. * * Assumptions: @@ -338,81 +340,78 @@ export class StandardIndexedTree extends TreeBase implements IndexedTree { * roots. * * This become tricky when two items that are being batch inserted need to update the same low nullifier, or need to use - * a value that is part of the same batch insertion as their low nullifier. In this case a zero low nullifier path is given - * to the circuit, and it must determine from the set of batch inserted values if the insertion is valid. + * a value that is part of the same batch insertion as their low nullifier. What we do to avoid this case is to + * update the existing leaves in the tree with the nullifiers in high to low order, ensuring that this case never occurs. + * The circuit has to sort the nullifiers (or take a hint of the sorted nullifiers and prove that it's a valid permutation). + * Then we just batch insert the new nullifiers in the original order. * * The following example will illustrate attempting to insert 2,3,20,19 into a tree already containing 0,5,10,15 * * The example will explore two cases. 
In each case the values low nullifier will exist within the batch insertion, * One where the low nullifier comes before the item in the set (2,3), and one where it comes after (20,19). * + * First, we sort the nullifiers high to low, that's 20,19,3,2 + * * The original tree: Pending insertion subtree * - * index 0 2 3 4 - - - - + * index 0 1 2 3 - - - - * ------------------------------------- ---------------------------- * val 0 5 10 15 - - - - * nextIdx 1 2 3 0 - - - - * nextVal 5 10 15 0 - - - - * * - * Inserting 2: (happy path) - * 1. Find the low nullifier (0) - provide inclusion proof + * Inserting 20: + * 1. Find the low nullifier (3) - provide inclusion proof * 2. Update its pointers - * 3. Insert 2 into the pending subtree + * 3. Insert 20 into the pending subtree * - * index 0 2 3 4 5 - - - + * index 0 1 2 3 - - 6 - * ------------------------------------- ---------------------------- - * val 0 5 10 15 2 - - - - * nextIdx 5 2 3 0 2 - - - - * nextVal 2 10 15 0 5 - - - + * val 0 5 10 15 - - 20 - + * nextIdx 1 2 3 6 - - 0 - + * nextVal 5 10 15 20 - - 0 - * - * Inserting 3: The low nullifier exists within the insertion current subtree - * 1. When looking for the low nullifier for 3, we will receive 0 again as we have not inserted 2 into the main tree - * This is problematic, as we cannot use either 0 or 2 as our inclusion proof. - * Why cant we? - * - Index 0 has a val 0 and nextVal of 2. This is NOT enough to prove non inclusion of 2. - * - Our existing tree is in a state where we cannot prove non inclusion of 3. - * We do not provide a non inclusion proof to out circuit, but prompt it to look within the insertion subtree. - * 2. Update pending insertion subtree - * 3. Insert 3 into pending subtree + * Inserting 19: + * 1. Find the low nullifier (3) - provide inclusion proof + * 2. Update its pointers + * 3. 
Insert 19 into the pending subtree * - * (no inclusion proof provided) - * index 0 2 3 4 5 6 - - + * index 0 1 2 3 - - 6 7 * ------------------------------------- ---------------------------- - * val 0 5 10 15 2 3 - - - * nextIdx 5 2 3 0 6 2 - - - * nextVal 2 10 15 0 3 5 - - + * val 0 5 10 15 - - 20 19 + * nextIdx 1 2 3 7 - - 0 6 + * nextVal 5 10 15 19 - - 0 20 * - * Inserting 20: (happy path) - * 1. Find the low nullifier (15) - provide inclusion proof + * Inserting 3: + * 1. Find the low nullifier (0) - provide inclusion proof * 2. Update its pointers - * 3. Insert 20 into the pending subtree + * 3. Insert 3 into the pending subtree * - * index 0 2 3 4 5 6 7 - + * index 0 1 2 3 - 5 6 7 * ------------------------------------- ---------------------------- - * val 0 5 10 15 2 3 20 - - * nextIdx 5 2 3 7 6 2 0 - - * nextVal 2 10 15 20 3 5 0 - + * val 0 5 10 15 - 3 20 19 + * nextIdx 5 2 3 7 - 1 0 6 + * nextVal 3 10 15 19 - 5 0 20 * - * Inserting 19: - * 1. In this case we can find a low nullifier, but we are updating a low nullifier that has already been updated - * We can provide an inclusion proof of this intermediate tree state. + * Inserting 2: + * 1. Find the low nullifier (0) - provide inclusion proof * 2. Update its pointers - * 3. Insert 19 into the pending subtree + * 3. 
Insert 2 into the pending subtree * - * index 0 2 3 4 5 6 7 8 + * index 0 1 2 3 4 5 6 7 * ------------------------------------- ---------------------------- - * val 0 5 10 15 2 3 20 19 - * nextIdx 5 2 3 8 6 2 0 7 - * nextVal 2 10 15 19 3 5 0 20 + * val 0 5 10 15 2 3 20 19 + * nextIdx 4 2 3 7 5 1 0 6 + * nextVal 2 10 15 19 3 5 0 20 * * Perform subtree insertion * - * index 0 2 3 4 5 6 7 8 + * index 0 1 2 3 4 5 6 7 * --------------------------------------------------------------------- - * val 0 5 10 15 2 3 20 19 - * nextIdx 5 2 3 8 6 2 0 7 - * nextVal 2 10 15 19 3 5 0 20 + * val 0 5 10 15 2 3 20 19 + * nextIdx 4 2 3 7 5 1 0 6 + * nextVal 2 10 15 19 3 5 0 20 * * TODO: this implementation will change once the zero value is changed from h(0,0,0). Changes incoming over the next sprint * @param leaves - Values to insert into the tree. @@ -426,107 +425,67 @@ export class StandardIndexedTree extends TreeBase implements IndexedTree { >( leaves: Buffer[], subtreeHeight: SubtreeHeight, - ): Promise< - | [LowLeafWitnessData[], SiblingPath] - | [undefined, SiblingPath] - > { - // Keep track of touched low leaves - const touched = new Map(); - + ): Promise> { const emptyLowLeafWitness = getEmptyLowLeafWitness(this.getDepth() as TreeHeight); // Accumulators - const lowLeavesWitnesses: LowLeafWitnessData[] = []; - const pendingInsertionSubtree: LeafData[] = []; + const lowLeavesWitnesses: LowLeafWitnessData[] = leaves.map(() => emptyLowLeafWitness); + const pendingInsertionSubtree: LeafData[] = leaves.map(() => zeroLeaf); // Start info const startInsertionIndex = this.getNumLeaves(true); + const leavesToInsert = leaves.map(leaf => toBigIntBE(leaf)); + const sortedDescendingLeafTuples = leavesToInsert + .map((leaf, index) => ({ leaf, index })) + .sort((a, b) => Number(b.leaf - a.leaf)); + const sortedDescendingLeaves = sortedDescendingLeafTuples.map(leafTuple => leafTuple.leaf); + // Get insertion path for each leaf - for (let i = 0; i < leaves.length; i++) { - const newValue = 
toBigIntBE(leaves[i]); + for (let i = 0; i < leavesToInsert.length; i++) { + const newValue = sortedDescendingLeaves[i]; + const originalIndex = leavesToInsert.indexOf(newValue); - // Keep space and just insert zero values if (newValue === 0n) { - pendingInsertionSubtree.push(zeroLeaf); - lowLeavesWitnesses.push(emptyLowLeafWitness); continue; } const indexOfPrevious = this.findIndexOfPreviousValue(newValue, true); - // If a touched node has a value that is less than the current value - const prevNodes = touched.get(indexOfPrevious.index); - if (prevNodes && prevNodes.some(v => v < newValue)) { - // check the pending low nullifiers for a low nullifier that works - // This is the case where the next value is less than the pending - for (let j = 0; j < pendingInsertionSubtree.length; j++) { - if (pendingInsertionSubtree[j].value === 0n) { - continue; - } - - if ( - pendingInsertionSubtree[j].value < newValue && - (pendingInsertionSubtree[j].nextValue > newValue || pendingInsertionSubtree[j].nextValue === 0n) - ) { - // add the new value to the pending low nullifiers - const currentLowLeaf: LeafData = { - value: newValue, - nextValue: pendingInsertionSubtree[j].nextValue, - nextIndex: pendingInsertionSubtree[j].nextIndex, - }; - - pendingInsertionSubtree.push(currentLowLeaf); - - // Update the pending low leaf to point at the new value - pendingInsertionSubtree[j].nextValue = newValue; - pendingInsertionSubtree[j].nextIndex = startInsertionIndex + BigInt(i); - - break; - } - } - - // Any node updated in this space will need to calculate its low nullifier from a previously inserted value - lowLeavesWitnesses.push(emptyLowLeafWitness); - } else { - // Update the touched mapping - if (prevNodes) { - prevNodes.push(newValue); - touched.set(indexOfPrevious.index, prevNodes); - } else { - touched.set(indexOfPrevious.index, [newValue]); - } - - // get the low leaf - const lowLeaf = this.getLatestLeafDataCopy(indexOfPrevious.index, true); - if (lowLeaf === undefined) { - 
return [undefined, await this.getSubtreeSiblingPath(subtreeHeight, true)]; - } - const siblingPath = await this.getSiblingPath(BigInt(indexOfPrevious.index), true); - - const witness: LowLeafWitnessData = { - leafData: { ...lowLeaf }, - index: BigInt(indexOfPrevious.index), - siblingPath, + // get the low leaf + const lowLeaf = this.getLatestLeafDataCopy(indexOfPrevious.index, true); + if (lowLeaf === undefined) { + return { + lowLeavesWitnessData: undefined, + sortedNewLeaves: sortedDescendingLeafTuples.map(leafTuple => new Fr(leafTuple.leaf).toBuffer()), + sortedNewLeavesIndexes: sortedDescendingLeafTuples.map(leafTuple => leafTuple.index), + newSubtreeSiblingPath: await this.getSubtreeSiblingPath(subtreeHeight, true), }; + } + const siblingPath = await this.getSiblingPath(BigInt(indexOfPrevious.index), true); - // Update the running paths - lowLeavesWitnesses.push(witness); + const witness: LowLeafWitnessData = { + leafData: { ...lowLeaf }, + index: BigInt(indexOfPrevious.index), + siblingPath, + }; - const currentLowLeaf: LeafData = { - value: newValue, - nextValue: lowLeaf.nextValue, - nextIndex: lowLeaf.nextIndex, - }; + // Update the running paths + lowLeavesWitnesses[i] = witness; + + const currentPendingLeaf: LeafData = { + value: newValue, + nextValue: lowLeaf.nextValue, + nextIndex: lowLeaf.nextIndex, + }; - pendingInsertionSubtree.push(currentLowLeaf); + pendingInsertionSubtree[originalIndex] = currentPendingLeaf; - lowLeaf.nextValue = newValue; - lowLeaf.nextIndex = startInsertionIndex + BigInt(i); + lowLeaf.nextValue = newValue; + lowLeaf.nextIndex = startInsertionIndex + BigInt(originalIndex); - const lowLeafIndex = indexOfPrevious.index; - this.cachedLeaves[lowLeafIndex] = lowLeaf; - await this.updateLeaf(lowLeaf, BigInt(lowLeafIndex)); - } + const lowLeafIndex = indexOfPrevious.index; + this.cachedLeaves[lowLeafIndex] = lowLeaf; + await this.updateLeaf(lowLeaf, BigInt(lowLeafIndex)); } const newSubtreeSiblingPath = await this.getSubtreeSiblingPath( 
@@ -538,7 +497,13 @@ export class StandardIndexedTree extends TreeBase implements IndexedTree { // Note: In this case we set `hash0Leaf` param to false because batch insertion algorithm use forced null leaf // inclusion. See {@link encodeLeaf} for a more through param explanation. await this.encodeAndAppendLeaves(pendingInsertionSubtree, false); - return [lowLeavesWitnesses, newSubtreeSiblingPath]; + + return { + lowLeavesWitnessData: lowLeavesWitnesses, + sortedNewLeaves: sortedDescendingLeafTuples.map(leafTuple => Buffer.from(new Fr(leafTuple.leaf).toBuffer())), + sortedNewLeavesIndexes: sortedDescendingLeafTuples.map(leafTuple => leafTuple.index), + newSubtreeSiblingPath, + }; } async getSubtreeSiblingPath( @@ -552,6 +517,14 @@ export class StandardIndexedTree extends TreeBase implements IndexedTree { return fullSiblingPath.getSubtreeSiblingPath(subtreeHeight); } + snapshot(blockNumber: number): Promise { + return this.#snapshotBuilder.snapshot(blockNumber); + } + + getSnapshot(block: number): Promise { + return this.#snapshotBuilder.getSnapshot(block); + } + /** * Encodes leaves and appends them to a tree. * @param leaves - Leaves to encode. diff --git a/yarn-project/merkle-tree/src/standard_tree/standard_tree.ts b/yarn-project/merkle-tree/src/standard_tree/standard_tree.ts index 2c479168def..0b92572a4b8 100644 --- a/yarn-project/merkle-tree/src/standard_tree/standard_tree.ts +++ b/yarn-project/merkle-tree/src/standard_tree/standard_tree.ts @@ -1,3 +1,4 @@ +import { AppendOnlySnapshotBuilder, TreeSnapshot } from '../index.js'; import { AppendOnlyTree } from '../interfaces/append_only_tree.js'; import { TreeBase } from '../tree_base.js'; @@ -5,6 +6,8 @@ import { TreeBase } from '../tree_base.js'; * A Merkle tree implementation that uses a LevelDB database to store the tree. 
*/ export class StandardTree extends TreeBase implements AppendOnlyTree { + #snapshotBuilder = new AppendOnlySnapshotBuilder(this.db, this, this.hasher); + /** * Appends the given leaves to the tree. * @param leaves - The leaves to append. @@ -13,4 +16,12 @@ export class StandardTree extends TreeBase implements AppendOnlyTree { public async appendLeaves(leaves: Buffer[]): Promise { await super.appendLeaves(leaves); } + + public snapshot(block: number): Promise { + return this.#snapshotBuilder.snapshot(block); + } + + public getSnapshot(block: number): Promise { + return this.#snapshotBuilder.getSnapshot(block); + } } diff --git a/yarn-project/merkle-tree/src/tree_base.ts b/yarn-project/merkle-tree/src/tree_base.ts index 6b715280380..c57a0499171 100644 --- a/yarn-project/merkle-tree/src/tree_base.ts +++ b/yarn-project/merkle-tree/src/tree_base.ts @@ -150,6 +150,26 @@ export abstract class TreeBase implements MerkleTree { return this.getLatestValueAtIndex(this.depth, index, includeUncommitted); } + public getNode(level: number, index: bigint): Promise { + if (level < 0 || level > this.depth) { + throw Error('Invalid level: ' + level); + } + + if (index < 0 || index >= 2n ** BigInt(level)) { + throw Error('Invalid index: ' + index); + } + + return this.dbGet(indexToKeyHash(this.name, level, index)); + } + + public getZeroHash(level: number): Buffer { + if (level <= 0 || level > this.depth) { + throw new Error('Invalid level'); + } + + return this.zeroHashes[level - 1]; + } + /** * Clears the cache. 
*/ diff --git a/yarn-project/noir-contracts/src/contracts/inclusion_proofs_contract/src/main.nr b/yarn-project/noir-contracts/src/contracts/inclusion_proofs_contract/src/main.nr index 7a8871b7218..462f95091be 100644 --- a/yarn-project/noir-contracts/src/contracts/inclusion_proofs_contract/src/main.nr +++ b/yarn-project/noir-contracts/src/contracts/inclusion_proofs_contract/src/main.nr @@ -22,13 +22,11 @@ contract InclusionProofs { }, constants_gen::{ NOTE_HASH_TREE_HEIGHT, - NULLIFIER_TREE_HEIGHT, - HISTORIC_BLOCKS_TREE_HEIGHT, PUBLIC_DATA_TREE_HEIGHT, GENERATOR_INDEX__PUBLIC_LEAF_INDEX, }, oracle::{ - get_block_data::get_block_data, + get_block_header::get_block_header, get_membership_witness::{ get_membership_witness, MembershipWitness, @@ -103,13 +101,13 @@ contract InclusionProofs { block_number: Field, // The block at which we'll prove that the note exists spare_commitment: Field, // This is only used when the note is not found --> used to test the failure case ) { - // TODO: assert that block number is less than the block number of context.block_data - // --> This will either require a new oracle method that returns block_data.global_variables_hash preimage + // TODO: assert that block number is less than the block number of context.block_header + // --> This will either require a new oracle method that returns block_header.global_variables_hash preimage // or modifying the private context so that we somehow expose it. - // 1) Get historic block data from oracle and ensure that the block hash is included in the current blocks tree + // 1) Get block header from oracle and ensure that the block hash is included in the current blocks tree // root. - let block_data = context.get_block_data(block_number); + let block_header = context.get_block_header(block_number); // 2) Get the note from PXE. 
let private_values = storage.private_values.at(owner.address); @@ -132,7 +130,7 @@ contract InclusionProofs { // 5) Prove that the commitment is in the note hash tree assert( - block_data.note_hash_tree_root == compute_merkle_root(note_commitment, witness.index, witness.path), + block_header.note_hash_tree_root == compute_merkle_root(note_commitment, witness.index, witness.path), "Proving note inclusion failed" ); @@ -146,13 +144,13 @@ contract InclusionProofs { block_number: Field, // The block at which we'll prove that the nullifier does not exists spare_nullifier: Field, // This is only used when the note is not found --> used to test the failure case ) { - // TODO: assert that block number is less than the block number of context.block_data - // --> This will either require a new oracle method that returns block_data.global_variables_hash preimage + // TODO: assert that block number is less than the block number of context.block_header + // --> This will either require a new oracle method that returns block_header.global_variables_hash preimage // or modifying the private context so that we somehow expose it. - // 1) Get historic block data from oracle and ensure that the block hash is included in the current blocks tree + // 1) Get block header from oracle and ensure that the block hash is included in the current blocks tree // root. 
- let block_data = context.get_block_data(block_number); + let block_header = context.get_block_header(block_number); // 2) Get the note from PXE let private_values = storage.private_values.at(owner.address); @@ -176,7 +174,7 @@ contract InclusionProofs { // 5.a) Compute the low nullifier leaf and prove that it is in the nullifier tree let low_nullifier_leaf = witness.leaf_data.hash(); assert( - block_data.nullifier_tree_root == compute_merkle_root(low_nullifier_leaf, witness.index, witness.path), + block_header.nullifier_tree_root == compute_merkle_root(low_nullifier_leaf, witness.index, witness.path), "Proving nullifier non-inclusion failed: Could not prove low nullifier inclusion" ); @@ -217,13 +215,13 @@ contract InclusionProofs { nullifier: Field, block_number: Field, // The block at which we'll prove that the nullifier not exists in the tree ) { - // TODO: assert that block number is less than the block number of context.block_data - // --> This will either require a new oracle method that returns block_data.global_variables_hash preimage + // TODO: assert that block number is less than the block number of context.block_header + // --> This will either require a new oracle method that returns block_header.global_variables_hash preimage // or modifying the private context so that we somehow expose it. - // 1) Get historic block data from oracle and ensure that the block hash is included in the current blocks tree + // 1) Get block header from oracle and ensure that the block hash is included in the current blocks tree // root. 
- let block_data = context.get_block_data(block_number); + let block_header = context.get_block_header(block_number); // 2) Get the membership witness of the nullifier let witness = get_nullifier_membership_witness(block_number, nullifier); @@ -236,7 +234,7 @@ contract InclusionProofs { // 5) Prove that the nullifier is in the nullifier tree assert( - block_data.nullifier_tree_root == compute_merkle_root(nullifier_leaf, witness.index, witness.path), + block_header.nullifier_tree_root == compute_merkle_root(nullifier_leaf, witness.index, witness.path), "Proving nullifier inclusion failed" ); @@ -249,13 +247,13 @@ contract InclusionProofs { public_value: Field, block_number: Field, // The block at which we'll prove that the public value exists ) { - // TODO: assert that block number is less than the block number of context.block_data - // --> This will either require a new oracle method that returns block_data.global_variables_hash preimage + // TODO: assert that block number is less than the block number of context.block_header + // --> This will either require a new oracle method that returns block_header.global_variables_hash preimage // or modifying the private context so that we somehow expose it. - // 1) Get historic block data from oracle and ensure that the block hash is included in the current blocks tree + // 1) Get block header from oracle and ensure that the block hash is included in the current blocks tree // root. - let block_data = context.get_block_data(block_number); + let block_header = context.get_block_header(block_number); // 2) Compute the public value leaf index. 
// We have to compute the leaf index here because unlike in the case of note commitments, public values are @@ -273,7 +271,7 @@ contract InclusionProofs { // 4) Prove that the public value provided on input is in the public data tree assert( - block_data.public_data_tree_root == compute_merkle_root(public_value, public_value_leaf_index, path), + block_header.public_data_tree_root == compute_merkle_root(public_value, public_value_leaf_index, path), "Proving public value inclusion failed" ); @@ -288,4 +286,4 @@ contract InclusionProofs { let note_header = NoteHeader::new(contract_address, nonce, storage_slot); note_utils::compute_note_hash_and_nullifier(ValueNoteMethods, note_header, serialized_note) } -} +} \ No newline at end of file diff --git a/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/main.nr b/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/main.nr index 0af6f5565a2..f745e9acac0 100644 --- a/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/main.nr +++ b/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/main.nr @@ -31,6 +31,7 @@ contract SlowTree { use crate::capsule::pop_capsule; use crate::types::{MembershipProof, deserialize_membership_proof}; + // docs:start:constants_and_storage global TREE_HEIGHT: Field = 254; global MEMBERSHIP_SIZE: Field = 256; // TREE_HEIGHT + 2 global UPDATE_SIZE: Field = 512; // TREE_HEIGHT * 2 + 4 @@ -40,6 +41,7 @@ contract SlowTree { struct Storage { trees: Map>, } + // docs:end:constants_and_storage impl Storage { fn init(context: Context) -> pub Self { @@ -60,22 +62,23 @@ contract SlowTree { #[aztec(private)] fn constructor() {} - + // docs:start:initialize #[aztec(public)] fn initialize() { storage.trees.at(context.msg_sender()).initialize(EMPTY_ROOT); } - + // docs:end:initialize + // docs:start:read_at_pub #[aztec(public)] fn read_at_pub(key: Field) -> Field { storage.trees.at(context.msg_sender()).read_at(key) } - + // docs:end:read_at_pub #[aztec(public)] 
fn read_leaf_at_pub(key: Field) -> Leaf { storage.trees.at(context.msg_sender()).read_leaf_at(key) } - + // docs:start:read_at_private #[aztec(private)] fn read_at(index: Field) -> Field { let fields = pop_capsule(); @@ -90,18 +93,22 @@ contract SlowTree { p.value } - + // docs:end:read_at_private + // docs:start:assert_current_root #[aztec(public)] internal fn _assert_current_root(caller: Field, expected: Field) { let root = storage.trees.at(caller).current_root(); assert(root == expected, "Root does not match expected"); } + // docs:end:assert_current_root + // docs:start:update_at_pub #[aztec(public)] fn update_at_public(p: SlowUpdateProof) { storage.trees.at(context.msg_sender()).update_at(p); } - + // docs:end:update_at_pub + // docs:start:update_at_private #[aztec(private)] fn update_at_private(index: Field, new_value: Field) { let fields = pop_capsule(); @@ -126,7 +133,8 @@ contract SlowTree { new_after_root ]); } - + // docs:end:update_at_private + // docs:start:_update #[aztec(public)] internal fn _update(caller: Field, index: Field, new_value: Field, before: Field, after: Field, new_root: Field) { let current_root = storage.trees.at(caller).current_root(); @@ -137,7 +145,7 @@ contract SlowTree { storage.trees.at(caller).update_unsafe_at(index, new_value, new_root); } - + // docs:end:_update unconstrained fn un_read_leaf_at(address: Field, key: Field) -> Leaf { storage.trees.at(address).read_leaf_at(key) } diff --git a/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/types.nr b/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/types.nr index 0f4f5eb4119..9245cb9aa13 100644 --- a/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/types.nr +++ b/yarn-project/noir-contracts/src/contracts/slow_tree_contract/src/types.nr @@ -1,3 +1,4 @@ +// docs:start:membership_proof // A single inclusion proof. 
// M = N + 2 struct MembershipProof { @@ -5,6 +6,7 @@ struct MembershipProof { value: Field, sibling_path: [Field; N], } +// docs:end:membership_proof fn deserialize_membership_proof(serialized: [Field; M]) -> MembershipProof { let mut sibling_path = [0; N]; diff --git a/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr b/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr index 95e9a33d65f..73b14c9cf43 100644 --- a/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr +++ b/yarn-project/noir-contracts/src/contracts/token_blacklist_contract/src/main.nr @@ -52,7 +52,9 @@ contract TokenBlacklist { safe_u120_serialization::{SafeU120SerializationMethods, SAFE_U120_SERIALIZED_LEN}, roles::UserFlags, }; + // docs:start:interface use crate::interfaces::SlowMap; + // docs:end:interface struct Storage { admin: PublicState, @@ -91,21 +93,24 @@ contract TokenBlacklist { }, ), // Below is an abomination to have same value in private and public (immutable in solidity). 
+ // docs:start:slow_updates_storage slow_update: ImmutableSingleton::new(context, 7, FieldNoteMethods), public_slow_update: PublicState::new( context, 8, AztecAddressSerializationMethods, ), + // docs:end:slow_updates_storage + } } } - + // docs:start:constructor #[aztec(private)] fn constructor(admin: AztecAddress, slow_updates_contract: AztecAddress) { let mut slow_note = FieldNote::new(slow_updates_contract.address); storage.slow_update.initialize(&mut slow_note, Option::none(), false); - + // docs:end:constructor let selector = compute_selector("_initialize((Field),(Field))"); context.call_public_function(context.this_address(), selector, @@ -121,8 +126,10 @@ contract TokenBlacklist { #[aztec(private)] fn init_slow_tree(user: AztecAddress) { let roles = UserFlags { is_admin: true, is_minter: false, is_blacklisted: false }.get_value() as Field; + // docs:start:get_and_update_private let slow = SlowMap::at(AztecAddress::new(storage.slow_update.get_note().value)); slow.update_at_private(&mut context, user.address, roles); + // docs:end:get_and_update_private context.call_public_function(context.this_address(), compute_selector("_init_slow_tree((Field))"), [context.msg_sender()]); @@ -138,13 +145,20 @@ contract TokenBlacklist { internal fn _initialize(new_admin: AztecAddress, slow_updates_contract: AztecAddress) { assert(new_admin.address != 0, "invalid admin"); storage.admin.write(new_admin); + // docs:start:write_slow_update_public storage.public_slow_update.write(slow_updates_contract); + // docs:end:write_slow_update_public + // docs:start:slowmap_initialize SlowMap::at(slow_updates_contract).initialize(context); + // docs:end:slowmap_initialize + } #[aztec(private)] fn update_roles(user: AztecAddress, roles: Field) { + // docs:start:slowmap_at let slow = SlowMap::at(AztecAddress::new(storage.slow_update.get_note().value)); + // docs:end:slowmap_at let caller_roles = UserFlags::new(slow.read_at(&mut context, context.msg_sender()) as u120); 
assert(caller_roles.is_admin, "caller is not admin"); @@ -153,8 +167,12 @@ contract TokenBlacklist { #[aztec(public)] fn mint_public(to: AztecAddress, amount: Field) { + // docs:start:get_public let slow = SlowMap::at(storage.public_slow_update.read()); + // docs:end:get_public + // docs:start:read_at_pub let to_roles = UserFlags::new(slow.read_at_pub(context, to.address) as u120); + // docs:end:read_at_pub assert(!to_roles.is_blacklisted, "Blacklisted: Recipient"); let caller_roles = UserFlags::new(slow.read_at_pub(context, context.msg_sender()) as u120); @@ -250,7 +268,9 @@ contract TokenBlacklist { #[aztec(private)] fn redeem_shield(to: AztecAddress, amount: Field, secret: Field) { let slow = SlowMap::at(AztecAddress::new(storage.slow_update.get_note().value)); + // docs:start:slowmap_read_at let to_roles = UserFlags::new(slow.read_at(&mut context, to.address) as u120); + // docs:end:slowmap_read_at assert(!to_roles.is_blacklisted, "Blacklisted: Recipient"); let pending_shields = storage.pending_shields; @@ -287,6 +307,7 @@ contract TokenBlacklist { context.call_public_function(context.this_address(), selector, [to.address, amount]); } + // docs:start:transfer_private #[aztec(private)] fn transfer(from: AztecAddress, to: AztecAddress, amount: Field, nonce: Field) { let slow = SlowMap::at(AztecAddress::new(storage.slow_update.get_note().value)); @@ -294,6 +315,7 @@ contract TokenBlacklist { assert(!from_roles.is_blacklisted, "Blacklisted: Sender"); let to_roles = UserFlags::new(slow.read_at(&mut context, to.address) as u120); assert(!to_roles.is_blacklisted, "Blacklisted: Recipient"); + // docs:end:transfer_private if (from.address != context.msg_sender()) { assert_current_call_valid_authwit(&mut context, from); @@ -322,6 +344,7 @@ contract TokenBlacklist { let selector = compute_selector("_reduce_total_supply(Field)"); context.call_public_function(context.this_address(), selector, [amount]); + } /// Internal /// diff --git 
a/yarn-project/noir-protocol-circuits/src/__snapshots__/index.test.ts.snap b/yarn-project/noir-protocol-circuits/src/__snapshots__/index.test.ts.snap index 0918bd55a07..af2dbd89ecb 100644 --- a/yarn-project/noir-protocol-circuits/src/__snapshots__/index.test.ts.snap +++ b/yarn-project/noir-protocol-circuits/src/__snapshots__/index.test.ts.snap @@ -103,7 +103,7 @@ exports[`Noir compatibility tests (interop_testing.nr) TxRequest Hash matches No exports[`Private kernel Executes private kernel init circuit for a contract deployment 1`] = ` KernelCircuitPublicInputs { "constants": CombinedConstantData { - "blockData": HistoricBlockData { + "blockHeader": BlockHeader { "blocksTreeRoot": Fr { "asBigInt": 10561895175368852737061915973188839857007468377789560793687187642867659280638n, "asBuffer": { @@ -25719,7 +25719,7 @@ KernelCircuitPublicInputs { exports[`Private kernel Executes private kernel inner for a nested call 1`] = ` KernelCircuitPublicInputs { "constants": CombinedConstantData { - "blockData": HistoricBlockData { + "blockHeader": BlockHeader { "blocksTreeRoot": Fr { "asBigInt": 5141115076863619919216387293080007096006645021873634395499188999297490933851n, "asBuffer": { @@ -51335,7 +51335,7 @@ KernelCircuitPublicInputs { exports[`Private kernel Executes private kernel ordering after a deployment 1`] = ` KernelCircuitPublicInputsFinal { "constants": CombinedConstantData { - "blockData": HistoricBlockData { + "blockHeader": BlockHeader { "blocksTreeRoot": Fr { "asBigInt": 10561895175368852737061915973188839857007468377789560793687187642867659280638n, "asBuffer": { diff --git a/yarn-project/noir-protocol-circuits/src/__snapshots__/noir_test_gen.test.ts.snap b/yarn-project/noir-protocol-circuits/src/__snapshots__/noir_test_gen.test.ts.snap index 63015d107db..86437f04b43 100644 --- a/yarn-project/noir-protocol-circuits/src/__snapshots__/noir_test_gen.test.ts.snap +++ b/yarn-project/noir-protocol-circuits/src/__snapshots__/noir_test_gen.test.ts.snap @@ -1,6 +1,6 @@ // 
Jest Snapshot v1, https://goo.gl/fbAQLP -exports[`Data generation for noir tests Computes a private data tree 1`] = ` +exports[`Data generation for noir tests Computes a note hash tree 1`] = ` { "root": "0x0d2f152f19e366e9e690e3e551c1aadc0eab0bb27f6d011a9622d8f31bfa6e22", "siblingPaths": [ diff --git a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/common.nr b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/common.nr index 9e9422608f9..8653d993183 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/common.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/common.nr @@ -63,15 +63,15 @@ pub fn validate_arrays(app_public_inputs : PrivateCircuitPublicInputs) { // encrypted_logs_hash and unencrypted_logs_hash have their own integrity checks. } -// Validate all read requests against the historic private data root. -// Use their membership witnesses to do so. If the historic root is not yet +// Validate all read requests against the historical private data root. +// Use their membership witnesses to do so. If the historical root is not yet // initialized, initialize it using the first read request here (if present). 
// // More info here: // - https://discourse.aztec.network/t/to-read-or-not-to-read/178 // - https://discourse.aztec.network/t/spending-notes-which-havent-yet-been-inserted/180 pub fn validate_read_requests( - historic_note_hash_tree_root: Field, + historical_note_hash_tree_root: Field, read_requests: [Field; MAX_READ_REQUESTS_PER_CALL], read_request_membership_witnesses: [ReadRequestMembershipWitness; MAX_READ_REQUESTS_PER_CALL], ) { @@ -81,7 +81,7 @@ pub fn validate_read_requests( let read_request = read_requests[rr_idx]; let witness = read_request_membership_witnesses[rr_idx]; - // A pending commitment is the one that is not yet added to private data tree + // A pending commitment is the one that is not yet added to note hash tree // A "transient read" is when we try to "read" a pending commitment within a transaction // between function calls, as opposed to reading the outputs of a previous transaction // which is a "pending read". @@ -92,7 +92,7 @@ pub fn validate_read_requests( if (read_request != 0) & (witness.is_transient == false) { let root_for_read_request = read_request_root_from_siblings(read_request, witness.leaf_index, witness.sibling_path); - assert(root_for_read_request == historic_note_hash_tree_root, "private data tree root mismatch"); + assert(root_for_read_request == historical_note_hash_tree_root, "note hash tree root mismatch"); // TODO(https://github.com/AztecProtocol/aztec-packages/issues/1354): do we need to enforce // that a non-transient read_request was derived from the proper/current contract address? 
@@ -347,7 +347,7 @@ pub fn contract_logic(private_call : PrivateCallData, public_inputs : &mut Kerne private_call.contract_leaf_membership_witness.leaf_index, private_call.contract_leaf_membership_witness.sibling_path); - let purported_contract_tree_root = private_call.call_stack_item.public_inputs.historical_block_data.contract_tree_root(); + let purported_contract_tree_root = private_call.call_stack_item.public_inputs.block_header.contract_tree_root(); assert_eq(computed_contract_tree_root, purported_contract_tree_root, "computed_contract_tree_root does not match purported_contract_tree_root"); } } diff --git a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_init.nr b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_init.nr index 7266c691924..88a34b4049c 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_init.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_init.nr @@ -21,7 +21,7 @@ struct PrivateKernelInputsInit { impl PrivateKernelInputsInit { fn initialize_end_values(self, public_inputs: &mut KernelCircuitPublicInputsBuilder) { public_inputs.constants = CombinedConstantData { - block_data: self.private_call.call_stack_item.public_inputs.historical_block_data, + block_header: self.private_call.call_stack_item.public_inputs.block_header, tx_context: self.tx_request.tx_context, }; } @@ -91,7 +91,7 @@ impl PrivateKernelInputsInit { self.validate_this_private_call_against_tx_request(); common::validate_read_requests( - public_inputs.constants.block_data.note_hash_tree_root(), + public_inputs.constants.block_header.note_hash_tree_root(), self.private_call.call_stack_item.public_inputs.read_requests, self.private_call.read_request_membership_witnesses ); @@ -451,7 +451,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash 
tree root mismatch")] fn native_read_request_bad_request() { let mut builder = PrivateKernelInitInputsBuilder::new(); @@ -464,7 +464,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_bad_leaf_index() { let mut builder = PrivateKernelInitInputsBuilder::new(); @@ -478,7 +478,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_bad_sibling_path() { let mut builder = PrivateKernelInitInputsBuilder::new(); diff --git a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_inner.nr b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_inner.nr index 23231fdd4a0..cdee6d1b941 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_inner.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/private-kernel-lib/src/private_kernel_inner.nr @@ -22,8 +22,8 @@ impl PrivateKernelInputsInner { } fn validate_contract_tree_root(self) { - let purported_contract_tree_root = self.private_call.call_stack_item.public_inputs.historical_block_data.contract_tree_root(); - let previous_kernel_contract_tree_root = self.previous_kernel.public_inputs.constants.block_data.contract_tree_root(); + let purported_contract_tree_root = self.private_call.call_stack_item.public_inputs.block_header.contract_tree_root(); + let previous_kernel_contract_tree_root = self.previous_kernel.public_inputs.constants.block_header.contract_tree_root(); assert(purported_contract_tree_root == previous_kernel_contract_tree_root, "purported_contract_tree_root does not match previous_kernel_contract_tree_root"); } @@ -52,14 +52,14 @@ impl PrivateKernelInputsInner { self.pop_and_validate_this_private_call_hash(&mut public_inputs); common::validate_read_requests( 
- public_inputs.constants.block_data.note_hash_tree_root(), + public_inputs.constants.block_header.note_hash_tree_root(), self.private_call.call_stack_item.public_inputs.read_requests, // read requests from private call self.private_call.read_request_membership_witnesses); //TODO(David): feels like update_end_values should happen later common::update_end_values(self.private_call, &mut public_inputs); - // ensure that historic/purported contract tree root matches the one in previous kernel + // ensure that historical/purported contract tree root matches the one in previous kernel self.validate_contract_tree_root(); let this_call_stack_item = self.private_call.call_stack_item; @@ -171,9 +171,9 @@ mod tests { fn private_function_incorrect_contract_tree_root_fails() { let mut builder = PrivateKernelInnerInputsBuilder::new(); - // Set historic_tree_root to a wrong value (the correct value + 1). - let contract_tree_root = builder.previous_kernel.block_data.block.contract_tree_root; - builder.previous_kernel.block_data.block.contract_tree_root = contract_tree_root + 1; + // Set historical_tree_root to a wrong value (the correct value + 1). 
+ let contract_tree_root = builder.previous_kernel.block_header.block.contract_tree_root; + builder.previous_kernel.block_header.block.contract_tree_root = contract_tree_root + 1; builder.failed(); } @@ -560,7 +560,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_bad_request() { let mut builder = PrivateKernelInnerInputsBuilder::new(); @@ -573,7 +573,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_bad_leaf_index() { let mut builder = PrivateKernelInnerInputsBuilder::new(); @@ -587,7 +587,7 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_bad_sibling_path() { let mut builder = PrivateKernelInnerInputsBuilder::new(); @@ -601,15 +601,15 @@ mod tests { builder.failed(); } - #[test(should_fail_with="private data tree root mismatch")] + #[test(should_fail_with="note hash tree root mismatch")] fn native_read_request_root_mismatch() { let mut builder = PrivateKernelInnerInputsBuilder::new(); builder.private_call.append_read_requests(1); // Set the root to be a different root so the above read request is not under this root. 
- let old_root = builder.previous_kernel.block_data.block.note_hash_tree_root; - builder.previous_kernel.block_data.block.note_hash_tree_root = old_root + 1; + let old_root = builder.previous_kernel.block_header.block.note_hash_tree_root; + builder.previous_kernel.block_header.block.note_hash_tree_root = old_root + 1; builder.failed(); } diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-base/src/main.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-base/src/main.nr index edf487213f5..7405a961633 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-base/src/main.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-base/src/main.nr @@ -1,5 +1,6 @@ use dep::rollup_lib::base::{BaseRollupInputs,BaseOrMergeRollupPublicInputs}; -fn main(inputs : BaseRollupInputs) -> pub BaseOrMergeRollupPublicInputs { +//TODO add a circuit variant +unconstrained fn main(inputs : BaseRollupInputs) -> pub BaseOrMergeRollupPublicInputs { inputs.base_rollup_circuit() } \ No newline at end of file diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/constant_rollup_data.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/constant_rollup_data.nr index 675c6d6dcb3..410826c29c3 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/constant_rollup_data.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/constant_rollup_data.nr @@ -3,7 +3,7 @@ use crate::abis::append_only_tree_snapshot::AppendOnlyTreeSnapshot; struct ConstantRollupData { // The very latest roots as at the very beginning of the entire rollup: - start_historic_blocks_tree_roots_snapshot : AppendOnlyTreeSnapshot, + start_blocks_tree_snapshot : AppendOnlyTreeSnapshot, // TODO(Sean): Some members of this struct tbd private_kernel_vk_tree_root : Field, @@ -16,7 +16,7 @@ struct ConstantRollupData { impl ConstantRollupData { pub fn eq(self, other : ConstantRollupData) -> bool { - 
self.start_historic_blocks_tree_roots_snapshot.eq(other.start_historic_blocks_tree_roots_snapshot) & + self.start_blocks_tree_snapshot.eq(other.start_blocks_tree_snapshot) & self.global_variables.eq(other.global_variables) & (self.private_kernel_vk_tree_root == other.private_kernel_vk_tree_root) & (self.public_kernel_vk_tree_root == other.public_kernel_vk_tree_root) & diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/nullifier_leaf_preimage.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/nullifier_leaf_preimage.nr index 5fce155fef6..b55f943f25c 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/nullifier_leaf_preimage.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/abis/nullifier_leaf_preimage.nr @@ -6,7 +6,7 @@ struct NullifierLeafPreimage { impl NullifierLeafPreimage { pub fn default() -> Self { - NullifierLeafPreimage { + Self { leaf_value : 0, next_value : 0, next_index : 0, diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr index 55da22511da..a453771ce15 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/base/base_rollup_inputs.nr @@ -29,10 +29,11 @@ use dep::aztec::constants_gen::{ MAX_NEW_L2_TO_L1_MSGS_PER_TX, NUM_UNENCRYPTED_LOGS_HASHES_PER_TX, NULLIFIER_SUBTREE_HEIGHT, + NULLIFIER_TREE_HEIGHT, }; use dep::types::abis::previous_kernel_data::PreviousKernelData; -use dep::types::abis::membership_witness::NullifierMembershipWitness; -use dep::types::abis::membership_witness::HistoricBlocksTreeRootMembershipWitness; +use dep::types::abis::membership_witness::{NullifierMembershipWitness, MembershipWitness}; +use dep::types::abis::membership_witness::BlocksTreeRootMembershipWitness; struct 
BaseRollupInputs { kernel_data: [PreviousKernelData; KERNELS_PER_BASE_ROLLUP], @@ -40,8 +41,10 @@ struct BaseRollupInputs { start_nullifier_tree_snapshot: AppendOnlyTreeSnapshot, start_contract_tree_snapshot: AppendOnlyTreeSnapshot, start_public_data_tree_root: Field, - start_historic_blocks_tree_snapshot: AppendOnlyTreeSnapshot, + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot, + sorted_new_nullifiers: [Field; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], + sorted_new_nullifiers_indexes: [u32; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], low_nullifier_leaf_preimages: [NullifierLeafPreimage; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], low_nullifier_membership_witness: [NullifierMembershipWitness; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], @@ -53,7 +56,7 @@ struct BaseRollupInputs { new_public_data_update_requests_sibling_paths: [[Field; PUBLIC_DATA_TREE_HEIGHT]; MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_BASE_ROLLUP], new_public_data_reads_sibling_paths: [[Field; PUBLIC_DATA_TREE_HEIGHT]; MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP], - historic_blocks_tree_root_membership_witnesses: [HistoricBlocksTreeRootMembershipWitness; KERNELS_PER_BASE_ROLLUP], + blocks_tree_root_membership_witnesses: [BlocksTreeRootMembershipWitness; KERNELS_PER_BASE_ROLLUP], constants: ConstantRollupData, } @@ -109,8 +112,8 @@ impl BaseRollupInputs { // Calculate the overall calldata hash let calldata_hash = BaseRollupInputs::components_compute_kernel_calldata_hash(self.kernel_data); - // Perform membership checks that the notes provided exist within the historic trees data - self.perform_historical_blocks_tree_membership_checks(); + // Perform membership checks that the notes provided exist within the historical trees data + self.perform_blocks_tree_membership_checks(); let aggregation_object = self.aggregate_proofs(); @@ -180,171 +183,64 @@ impl BaseRollupInputs { calculate_subtree(commitment_tree_leaves) } - unconstrained fn find_leaf_index(self, leaves: [NullifierLeafPreimage; MAX_NEW_NULLIFIERS_PER_TX * 2], nullifier: Field, 
nullifier_index: u64) -> u64 { - let mut matched = false; - let mut index = 0; - for k in 0..nullifier_index { - if !matched { - if (!leaves[k].is_empty()) { - if (full_field_less_than(leaves[k].leaf_value, nullifier) & - (full_field_greater_than(leaves[k].next_value, nullifier) | - (leaves[k].next_value == 0))) { - matched = true; - index = k; - } - } - } - - - } - // if not matched, our subtree will misformed - we must reject - assert(matched, "Nullifier subtree is malformed"); - index - } + fn check_nullifier_tree_non_membership_and_insert_to_tree(self) -> AppendOnlyTreeSnapshot { + let mut new_nullifiers = [0; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP]; - // TODO this should be done in circuit. Ideally using the sorting strategy. - unconstrained fn check_nullifier_tree_non_membership_and_insert_to_tree(self) -> AppendOnlyTreeSnapshot { - // The below monologue is by Madiaa. fwiw, the plan was not simple. - // - // LADIES AND GENTLEMEN The P L A N ( is simple ) - // 1. Get the previous nullifier set setup - // 2. Check for the first added nullifier that it doesnt exist - // 3. Update the nullifier set - // 4. Calculate a new root with the sibling path - // 5. Use that for the next nullifier check. - // 6. Iterate for all of em - // 7. le bosh (profit) - - // BOYS AND GIRLS THE P L A N ( once the first plan is complete ) - // GENERATE OUR NEW NULLIFIER SUBTREE - // 1. We need to point the new nullifiers to point to the index that the previous nullifier replaced - // 2. 
If we receive the 0 nullifier leaf (where all values are 0, we skip insertion and leave a sparse subtree) - - // New nullifier subtree - let mut nullifier_insertion_subtree = [NullifierLeafPreimage::default(); MAX_NEW_NULLIFIERS_PER_TX * 2]; - - // This will update on each iteration - let mut current_nullifier_tree_root = self.start_nullifier_tree_snapshot.root; - - // This will increase with every insertion - let start_insertion_index = self.start_nullifier_tree_snapshot.next_available_leaf_index; - let mut new_index = start_insertion_index; - - // For each kernel circuit - for i in 0..KERNELS_PER_BASE_ROLLUP { - let new_nullifiers = self.kernel_data[i].public_inputs.end.new_nullifiers; - // For each of our nullifiers - for j in 0..MAX_NEW_NULLIFIERS_PER_TX { - // Witness containing index and path - let nullifier_index = i * MAX_NEW_NULLIFIERS_PER_TX + j; - - let witness = self.low_nullifier_membership_witness[nullifier_index]; - // Preimage of the lo-index required for a non-membership proof - let low_nullifier_preimage = self.low_nullifier_leaf_preimages[nullifier_index]; - // Newly created nullifier - let nullifier = new_nullifiers[j]; - - // TODO(maddiaa): reason about this more strongly, can this cause issues? 
- if (nullifier != 0) { - // Create the nullifier leaf of the new nullifier to be inserted - let mut new_nullifier_leaf = NullifierLeafPreimage { - leaf_value : nullifier, - next_value : low_nullifier_preimage.next_value, - next_index : low_nullifier_preimage.next_index, - }; - - // Assuming populated premier subtree - if (low_nullifier_preimage.is_empty()) { - // check previous nullifier leaves - let index = self.find_leaf_index(nullifier_insertion_subtree, nullifier, nullifier_index as u64); - let same_batch_nullifier = nullifier_insertion_subtree[index]; - assert(!same_batch_nullifier.is_empty(), "Same batch batch nullifier is empty"); - assert(full_field_less_than(same_batch_nullifier.leaf_value, nullifier), "Invalid hint"); - assert(full_field_greater_than(same_batch_nullifier.next_value, nullifier) | (same_batch_nullifier.next_value == 0), "Invalid hint"); - - new_nullifier_leaf.next_index = nullifier_insertion_subtree[index].next_index; - new_nullifier_leaf.next_value = nullifier_insertion_subtree[index].next_value; - - // Update child - nullifier_insertion_subtree[index].next_index = new_index; - nullifier_insertion_subtree[index].next_value = nullifier; - } else { - let is_less_than_nullifier = full_field_less_than(low_nullifier_preimage.leaf_value, nullifier); - let is_next_greater_than = full_field_greater_than(low_nullifier_preimage.next_value, nullifier); - - assert(is_less_than_nullifier, "invalid nullifier range"); - assert( - is_next_greater_than | - ((low_nullifier_preimage.next_value == 0) & (low_nullifier_preimage.next_index == 0)), - "invalid nullifier range" - ); - - // Recreate the original low nullifier from the preimage - let original_low_nullifier = NullifierLeafPreimage{ - leaf_value : low_nullifier_preimage.leaf_value, - next_value : low_nullifier_preimage.next_value, - next_index : low_nullifier_preimage.next_index, - }; - - // perform membership check for the low nullifier against the original root - components::assert_check_membership( 
- original_low_nullifier.hash(), - witness.leaf_index, - witness.sibling_path, - current_nullifier_tree_root, - ); - - // Calculate the new value of the low_nullifier_leaf - let updated_low_nullifier = NullifierLeafPreimage{ - leaf_value : low_nullifier_preimage.leaf_value, - next_value : nullifier, - next_index : new_index - }; - - // We need another set of witness values for this - current_nullifier_tree_root = components::root_from_sibling_path( - updated_low_nullifier.hash(), witness.leaf_index, witness.sibling_path); - } - nullifier_insertion_subtree[nullifier_index] = new_nullifier_leaf; - } - - // increment insertion index - new_index = new_index + 1; + for i in 0..2 { + for j in 0..MAX_NEW_NULLIFIERS_PER_TX { + new_nullifiers[i * MAX_NEW_NULLIFIERS_PER_TX + j] = self.kernel_data[i].public_inputs.end.new_nullifiers[j]; } - } + }; - // Check that the new subtree is to be inserted at the next location, and is empty currently - let empty_nullifier_subtree_root = calculate_empty_tree_root(NULLIFIER_SUBTREE_HEIGHT); - let leafIndexNullifierSubtreeDepth = self.start_nullifier_tree_snapshot.next_available_leaf_index >> (NULLIFIER_SUBTREE_HEIGHT as u32); - components::assert_check_membership( - empty_nullifier_subtree_root, - leafIndexNullifierSubtreeDepth as Field, + crate::indexed_tree::batch_insert( + self.start_nullifier_tree_snapshot, + new_nullifiers, + self.sorted_new_nullifiers, + self.sorted_new_nullifiers_indexes, self.new_nullifiers_subtree_sibling_path, - current_nullifier_tree_root, - ); - - // Create new nullifier subtree to insert into the whole nullifier tree - let nullifier_sibling_path = self.new_nullifiers_subtree_sibling_path; - let nullifier_subtree_root = self.create_nullifier_subtree(nullifier_insertion_subtree); - - // Calculate the new root - // We are inserting a subtree rather than a full tree here - let subtree_index = start_insertion_index >> (NULLIFIER_SUBTREE_HEIGHT as u32); - let new_root = 
components::root_from_sibling_path(nullifier_subtree_root, subtree_index as Field, nullifier_sibling_path); - - // Return the new state of the nullifier tree - AppendOnlyTreeSnapshot { - next_available_leaf_index: new_index, - root: new_root, - } + self.low_nullifier_leaf_preimages, + self.low_nullifier_membership_witness.map(|witness: NullifierMembershipWitness| { + MembershipWitness { + leaf_index: witness.leaf_index, + sibling_path: witness.sibling_path, + } + }), + |a: Field, b: Field| {a == b}, // Nullifier equals + |nullifier: Field| {nullifier == 0}, // Nullifier is zero + |leaf: NullifierLeafPreimage| {leaf.hash()}, // Hash leaf + |low_leaf: NullifierLeafPreimage, nullifier: Field| { // Is valid low leaf + let is_less_than_nullifier = full_field_less_than(low_leaf.leaf_value, nullifier); + let is_next_greater_than = full_field_less_than(nullifier, low_leaf.next_value); + + (!low_leaf.is_empty()) & is_less_than_nullifier & ( + is_next_greater_than | + ((low_leaf.next_index == 0) & (low_leaf.next_value == 0)) + ) + }, + |low_leaf: NullifierLeafPreimage, nullifier: Field, nullifier_index: u32| { // Update low leaf + NullifierLeafPreimage{ + leaf_value : low_leaf.leaf_value, + next_value : nullifier, + next_index : nullifier_index, + } + }, + |nullifier: Field, low_leaf: NullifierLeafPreimage| { // Build insertion leaf + NullifierLeafPreimage { + leaf_value : nullifier, + next_value : low_leaf.next_value, + next_index : low_leaf.next_index, + } + }, + [0; NULLIFIER_SUBTREE_HEIGHT], + [0; NULLIFIER_TREE_HEIGHT], + ) } fn create_nullifier_subtree(self, leaves: [NullifierLeafPreimage; N]) -> Field { calculate_subtree(leaves.map(|leaf:NullifierLeafPreimage| leaf.hash())) } - // TODO this should be changed to append only and done in-circuit - unconstrained fn validate_and_process_public_state(self) -> Field { + fn validate_and_process_public_state(self) -> Field { // TODO(#2521) - data read validation should happen against the current state of the tx and not the 
start state. // Blocks all interesting usecases that read and write to the same public state in the same tx. // https://aztecprotocol.slack.com/archives/C02M7VC7TN0/p1695809629015719?thread_ts=1695653252.007339&cid=C02M7VC7TN0 @@ -477,24 +373,26 @@ impl BaseRollupInputs { U256::from_bytes32(sha_digest).to_u128_limbs() } - // Check all of the provided commitments against the historical tree roots - fn perform_historical_blocks_tree_membership_checks(self) { - // For each of the historic_note_hash_tree_membership_checks, we need to do an inclusion proof - // against the historical root provided in the rollup constants - let historic_root = self.constants.start_historic_blocks_tree_roots_snapshot.root; + // Check that the block header used by each kernel is a member of the blocks tree --> since the block header + // contains roots of all the trees this is sufficient to verify that the tree roots used by kernels are correct + fn perform_blocks_tree_membership_checks(self) { + // For each of the block header (their block hashes), we need to do an inclusion proof + // against the blocks tree root from the beginning of a rollup provided in the rollup constants + let blocks_treee_root = self.constants.start_blocks_tree_snapshot.root; for i in 0..KERNELS_PER_BASE_ROLLUP { // Rebuild the block hash - let historical_block_data = self.kernel_data[i].public_inputs.constants.block_data; - let previous_block_hash = historical_block_data.block.hash(); + let block_header = self.kernel_data[i].public_inputs.constants.block_header; + let previous_block_hash = block_header.block.hash(); - let historic_root_witness = self.historic_blocks_tree_root_membership_witnesses[i]; + let previous_block_hash_witness = self.blocks_tree_root_membership_witnesses[i]; + // Now check that the previous block hash is in the blocks tree from the beginning of the rollup components::assert_check_membership( previous_block_hash, - historic_root_witness.leaf_index, - historic_root_witness.sibling_path, - 
historic_root + previous_block_hash_witness.leaf_index, + previous_block_hash_witness.sibling_path, + blocks_treee_root ); } } @@ -629,7 +527,8 @@ mod tests { CALL_DATA_HASH_LOG_FIELDS, NOTE_HASH_SUBTREE_WIDTH, NUM_CONTRACT_LEAVES, - BaseRollupInputs + BaseRollupInputs, + full_field_less_than, }, merkle_tree::{calculate_subtree, calculate_empty_tree_root}, abis::append_only_tree_snapshot::AppendOnlyTreeSnapshot, @@ -643,7 +542,7 @@ mod tests { CONTRACT_SUBTREE_SIBLING_PATH_LENGTH, CONTRACT_TREE_HEIGHT, CONTRACT_SUBTREE_HEIGHT, - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, KERNELS_PER_BASE_ROLLUP, MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP, @@ -657,11 +556,12 @@ mod tests { PUBLIC_DATA_TREE_HEIGHT, }; use dep::types::{ - abis::membership_witness::HistoricBlocksTreeRootMembershipWitness, + abis::membership_witness::BlocksTreeRootMembershipWitness, abis::membership_witness::NullifierMembershipWitness, abis::new_contract_data::NewContractData, abis::public_data_read::PublicDataRead, abis::public_data_update_request::PublicDataUpdateRequest, + abis::previous_kernel_data::PreviousKernelData, tests::previous_kernel_data_builder::PreviousKernelDataBuilder, address::{Address, EthAddress}, utils::bounded_vec::BoundedVec, @@ -670,10 +570,18 @@ mod tests { use dep::std::option::Option; struct NullifierInsertion { - existing_index: Option, + existing_index: u64, + value: Field, + } + + + struct SortedNullifierTuple { value: Field, + original_index: u32, } + global MAX_NEW_NULLIFIERS_PER_TEST = 4; + struct BaseRollupInputsBuilder { kernel_data: [PreviousKernelDataBuilder; KERNELS_PER_BASE_ROLLUP], pre_existing_notes: [Field; NOTE_HASH_SUBTREE_WIDTH], @@ -683,7 +591,7 @@ mod tests { pre_existing_blocks: [Field; KERNELS_PER_BASE_ROLLUP], public_data_reads: BoundedVec, public_data_writes: BoundedVec<(u64, Field), 2>, - new_nullifiers: BoundedVec, + new_nullifiers: BoundedVec, constants: ConstantRollupData, } @@ -704,7 +612,7 @@ mod tests { }); 
inputs.pre_existing_blocks = inputs.kernel_data.map(|builder: PreviousKernelDataBuilder|{ - builder.block_data.block.hash() + builder.block_header.block.hash() }); inputs @@ -718,6 +626,77 @@ mod tests { sibling_path } + fn update_nullifier_tree_with_new_leaves( + mut self, + nullifier_tree: &mut NonEmptyMerkleTree, + kernel_data: &mut [PreviousKernelData; KERNELS_PER_BASE_ROLLUP], + start_nullifier_tree_snapshot: AppendOnlyTreeSnapshot + ) -> ( + [NullifierLeafPreimage; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], + [NullifierMembershipWitness; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], + [Field; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], + [u32; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP], + ) { + let mut low_nullifier_leaf_preimages: [NullifierLeafPreimage; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP] = dep::std::unsafe::zeroed(); + let mut low_nullifier_membership_witness: [NullifierMembershipWitness; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP] = dep::std::unsafe::zeroed(); + + let mut sorted_new_nullifier_tuples = [SortedNullifierTuple { + value: 0, + original_index: 0, + }; MAX_NEW_NULLIFIERS_PER_TEST]; + + + for i in 0..MAX_NEW_NULLIFIERS_PER_TEST { + sorted_new_nullifier_tuples[i] = SortedNullifierTuple { + value: self.new_nullifiers.get_unchecked(i).value, + original_index: i as u32, + }; + } + sorted_new_nullifier_tuples = sorted_new_nullifier_tuples.sort_via(|a: SortedNullifierTuple, b: SortedNullifierTuple| {full_field_less_than(b.value, a.value)}); + + let mut sorted_new_nullifiers = [0; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP]; + let mut sorted_new_nullifiers_indexes = [0; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP]; + + for i in 0..MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP { + if (i as u32) < (MAX_NEW_NULLIFIERS_PER_TEST as u32) { + sorted_new_nullifiers[i] = sorted_new_nullifier_tuples[i].value; + sorted_new_nullifiers_indexes[i] = sorted_new_nullifier_tuples[i].original_index; + } else { + sorted_new_nullifiers[i] = 0; + sorted_new_nullifiers_indexes[i] = i as u32; + } + } + + let mut pre_existing_nullifiers = 
self.pre_existing_nullifiers; + + for i in 0..MAX_NEW_NULLIFIERS_PER_TEST { + if (i as u64) < (self.new_nullifiers.len() as u64) { + let sorted_tuple = sorted_new_nullifier_tuples[i]; + let new_nullifier = sorted_tuple.value; + let original_index = sorted_tuple.original_index; + + let low_index = self.new_nullifiers.get_unchecked(original_index as Field).existing_index; + + kernel_data[0].public_inputs.end.new_nullifiers[original_index] = new_nullifier; + + let mut low_preimage = pre_existing_nullifiers[low_index]; + low_nullifier_leaf_preimages[i] = low_preimage; + low_nullifier_membership_witness[i] = NullifierMembershipWitness { + leaf_index: low_index as Field, + sibling_path: nullifier_tree.get_sibling_path(low_index as Field) + }; + + low_preimage.next_value = new_nullifier; + low_preimage.next_index = start_nullifier_tree_snapshot.next_available_leaf_index + original_index; + pre_existing_nullifiers[low_index] = low_preimage; + + nullifier_tree.update_leaf(low_index, low_preimage.hash()); + } + } + + (low_nullifier_leaf_preimages, low_nullifier_membership_witness, sorted_new_nullifiers, sorted_new_nullifiers_indexes) + } + fn build_inputs(mut self) -> BaseRollupInputs { let mut kernel_data = self.kernel_data.map(|builder: PreviousKernelDataBuilder|{ builder.finish() @@ -736,12 +715,12 @@ mod tests { [0; NULLIFIER_TREE_HEIGHT - NULLIFIER_SUBTREE_HEIGHT], [0; NULLIFIER_SUBTREE_HEIGHT] ); - + let start_nullifier_tree_snapshot = AppendOnlyTreeSnapshot { root: start_nullifier_tree.get_root(), next_available_leaf_index: start_nullifier_tree.get_next_available_index() as u32, }; - + let start_contract_tree = NonEmptyMerkleTree::new(self.pre_existing_contracts, [0; CONTRACT_TREE_HEIGHT], [0; CONTRACT_TREE_HEIGHT - 1], [0; 1]); let start_contract_tree_snapshot = AppendOnlyTreeSnapshot { root: start_contract_tree.get_root(), @@ -752,16 +731,16 @@ mod tests { let mut start_public_data_tree = NonEmptyMerkleTree::new(self.pre_existing_public_data, [0; 
PUBLIC_DATA_TREE_HEIGHT], [0; PUBLIC_DATA_TREE_HEIGHT - 5], [0; 5]); let start_public_data_tree_root = start_public_data_tree.get_root(); - let start_historic_blocks_tree = NonEmptyMerkleTree::new(self.pre_existing_blocks, [0; HISTORIC_BLOCKS_TREE_HEIGHT], [0; HISTORIC_BLOCKS_TREE_HEIGHT - 1], [0; 1]); - let start_historic_blocks_tree_snapshot = AppendOnlyTreeSnapshot { - root: start_historic_blocks_tree.get_root(), - next_available_leaf_index: start_historic_blocks_tree.get_next_available_index() as u32, + let start_blocks_tree = NonEmptyMerkleTree::new(self.pre_existing_blocks, [0; BLOCKS_TREE_HEIGHT], [0; BLOCKS_TREE_HEIGHT - 1], [0; 1]); + let start_blocks_tree_snapshot = AppendOnlyTreeSnapshot { + root: start_blocks_tree.get_root(), + next_available_leaf_index: start_blocks_tree.get_next_available_index() as u32, }; - self.constants.start_historic_blocks_tree_roots_snapshot = start_historic_blocks_tree_snapshot; + self.constants.start_blocks_tree_snapshot = start_blocks_tree_snapshot; let mut new_public_data_reads_sibling_paths: [[Field; PUBLIC_DATA_TREE_HEIGHT]; MAX_PUBLIC_DATA_READS_PER_BASE_ROLLUP] = dep::std::unsafe::zeroed(); - + for i in 0..self.public_data_reads.max_len() { if (i as u64) < (self.public_data_reads.len() as u64) { let index = self.public_data_reads.get_unchecked(i); @@ -792,29 +771,13 @@ mod tests { } } - let mut low_nullifier_leaf_preimages: [NullifierLeafPreimage; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP] = dep::std::unsafe::zeroed(); - let mut low_nullifier_membership_witness: [NullifierMembershipWitness; MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP] = dep::std::unsafe::zeroed(); - - for i in 0..self.new_nullifiers.max_len() { - if (i as u64) < (self.new_nullifiers.len() as u64) { - let new_nullifier = self.new_nullifiers.get_unchecked(i); - kernel_data[0].public_inputs.end.new_nullifiers[i] = new_nullifier.value; - - if (new_nullifier.existing_index.is_some()) { - let low_index = new_nullifier.existing_index.unwrap_unchecked(); - let mut low_preimage 
= self.pre_existing_nullifiers[low_index]; - low_nullifier_leaf_preimages[i] = low_preimage; - low_nullifier_membership_witness[i] = NullifierMembershipWitness { - leaf_index: low_index as Field, - sibling_path: start_nullifier_tree.get_sibling_path(low_index as Field) - }; - - low_preimage.next_value = new_nullifier.value; - low_preimage.next_index = start_nullifier_tree_snapshot.next_available_leaf_index + (i as u32); - start_nullifier_tree.update_leaf(low_index, low_preimage.hash()); - } - } - } + let ( + low_nullifier_leaf_preimages, + low_nullifier_membership_witness, + sorted_new_nullifiers, + sorted_new_nullifiers_indexes + ) = self.update_nullifier_tree_with_new_leaves(&mut start_nullifier_tree, &mut kernel_data, start_nullifier_tree_snapshot); + let new_nullifiers_subtree_sibling_path = BaseRollupInputsBuilder::extract_subtree_sibling_path(start_nullifier_tree.get_sibling_path(self.pre_existing_nullifiers.len()), [0; NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH]); BaseRollupInputs { @@ -823,7 +786,10 @@ mod tests { start_nullifier_tree_snapshot, start_contract_tree_snapshot, start_public_data_tree_root, - start_historic_blocks_tree_snapshot, + start_blocks_tree_snapshot, + + sorted_new_nullifiers, + sorted_new_nullifiers_indexes, low_nullifier_leaf_preimages, low_nullifier_membership_witness, @@ -833,18 +799,18 @@ mod tests { new_contracts_subtree_sibling_path, new_public_data_update_requests_sibling_paths, new_public_data_reads_sibling_paths, - - historic_blocks_tree_root_membership_witnesses: [ - HistoricBlocksTreeRootMembershipWitness { + + blocks_tree_root_membership_witnesses: [ + BlocksTreeRootMembershipWitness { leaf_index: 0, - sibling_path: start_historic_blocks_tree.get_sibling_path(0) + sibling_path: start_blocks_tree.get_sibling_path(0) }, - HistoricBlocksTreeRootMembershipWitness { + BlocksTreeRootMembershipWitness { leaf_index: 1, - sibling_path: start_historic_blocks_tree.get_sibling_path(1) + sibling_path: start_blocks_tree.get_sibling_path(1) }, 
], - + constants: self.constants, } } @@ -863,7 +829,7 @@ mod tests { } #[test] - fn no_new_contract_leaves() { + unconstrained fn no_new_contract_leaves() { let outputs = BaseRollupInputsBuilder::new().execute(); let expected_start_contract_tree_snapshot = AppendOnlyTreeSnapshot { root: test_compute_empty_root([0; CONTRACT_TREE_HEIGHT]), next_available_leaf_index: 2 }; let expected_end_contract_tree_snapshot = AppendOnlyTreeSnapshot { root: test_compute_empty_root([0; CONTRACT_TREE_HEIGHT]), next_available_leaf_index: 4 }; @@ -872,7 +838,7 @@ mod tests { } #[test] - fn contract_leaf_inserted() { + unconstrained fn contract_leaf_inserted() { let new_contract = NewContractData { contract_address: Address::from_field(1), portal_contract_address: EthAddress::from_field(2), @@ -903,7 +869,7 @@ mod tests { } #[test] - fn contract_leaf_inserted_in_non_empty_snapshot_tree() { + unconstrained fn contract_leaf_inserted_in_non_empty_snapshot_tree() { let new_contract = NewContractData { contract_address: Address::from_field(1), portal_contract_address: EthAddress::from_field(2), @@ -936,7 +902,7 @@ mod tests { } #[test] - fn new_commitments_tree() { + unconstrained fn new_commitments_tree() { let mut builder = BaseRollupInputsBuilder::new(); let new_commitments = [27, 28, 29, 30, 31, 32]; @@ -972,7 +938,7 @@ mod tests { } #[test] - fn new_nullifier_tree_empty() { + unconstrained fn new_nullifier_tree_empty() { /** * DESCRIPTION */ @@ -999,7 +965,7 @@ mod tests { } #[test] - fn nullifier_insertion_test() { + unconstrained fn nullifier_insertion_test() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_nullifiers[0] = NullifierLeafPreimage { @@ -1014,7 +980,7 @@ mod tests { }; builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::some(0), + existing_index: 0, value: 1, }); @@ -1047,7 +1013,7 @@ mod tests { } #[test] - fn new_nullifier_tree_all_larger() { + unconstrained fn new_nullifier_tree_all_larger() { let mut builder = 
BaseRollupInputsBuilder::new(); builder.pre_existing_nullifiers[0] = NullifierLeafPreimage { @@ -1062,16 +1028,18 @@ mod tests { }; builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::some(1), + existing_index: 1, value: 8, }); for i in 1..builder.new_nullifiers.max_len() { builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::none(), + existing_index: 1, value: (8 + i) as Field, }); } + let output = builder.execute(); + let mut tree_nullifiers = [NullifierLeafPreimage::default(); MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP * 2]; tree_nullifiers[0] = builder.pre_existing_nullifiers[0]; @@ -1103,18 +1071,14 @@ mod tests { [0; NULLIFIER_SUBTREE_HEIGHT + 1] ); - let output = builder.execute(); - assert(output.end_nullifier_tree_snapshot.eq(AppendOnlyTreeSnapshot { root: end_nullifier_tree.get_root(), next_available_leaf_index: 2 * MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP as u32, })); } - // TODO(Alvaro) some nullifier tree tests. We are updating the nullifier tree insertion algorithm. 
- - #[test(should_fail_with = "membership check failed")] - fn new_nullifier_tree_double_spend() { + #[test(should_fail_with = "Invalid low leaf")] + unconstrained fn new_nullifier_tree_double_spend() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_nullifiers[0] = NullifierLeafPreimage { @@ -1129,20 +1093,19 @@ mod tests { }; builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::some(1), + existing_index: 1, value: 8, }); builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::some(1), + existing_index: 1, value: 8, }); builder.fails(); } - - #[test(should_fail_with = "Nullifier subtree is malformed")] - fn new_nullifier_tree_double_spend_same_batch() { + #[test(should_fail_with = "Invalid low leaf")] + unconstrained fn new_nullifier_tree_double_spend_same_batch() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_nullifiers[0] = NullifierLeafPreimage { @@ -1157,39 +1120,40 @@ mod tests { }; builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::some(1), + existing_index: 1, value: 8, }); builder.new_nullifiers.push(NullifierInsertion { - existing_index: Option::none(), + existing_index: 1, value: 8, }); builder.fails(); } - #[test] - fn empty_block_calldata_hash() { + unconstrained fn empty_block_calldata_hash() { let outputs = BaseRollupInputsBuilder::new().execute(); let hash_input_flattened = [0; CALL_DATA_HASH_FULL_FIELDS * 32 + CALL_DATA_HASH_LOG_FIELDS * 16]; let sha_digest = dep::std::hash::sha256(hash_input_flattened); let expected_calldata_hash = U256::from_bytes32(sha_digest).to_u128_limbs(); - assert_eq(outputs.calldata_hash, expected_calldata_hash); + for i in 0..NUM_FIELDS_PER_SHA256 { + assert_eq(outputs.calldata_hash[i], expected_calldata_hash[i]); + } } #[test(should_fail_with = "membership check failed")] - fn compute_membership_historic_blocks_tree_negative() { + unconstrained fn compute_membership_blocks_tree_negative() { let mut inputs = 
BaseRollupInputsBuilder::new().build_inputs(); - inputs.historic_blocks_tree_root_membership_witnesses[0].sibling_path[0] = 27; + inputs.blocks_tree_root_membership_witnesses[0].sibling_path[0] = 27; let _output = inputs.base_rollup_circuit(); } #[test] - fn constants_dont_change() { + unconstrained fn constants_dont_change() { let inputs = BaseRollupInputsBuilder::new().build_inputs(); let outputs = inputs.base_rollup_circuit(); @@ -1197,28 +1161,28 @@ mod tests { } #[test(should_fail_with = "kernel chain_id does not match the rollup chain_id")] - fn constants_dont_match_kernels_chain_id() { + unconstrained fn constants_dont_match_kernels_chain_id() { let mut builder = BaseRollupInputsBuilder::new(); builder.constants.global_variables.chain_id = 3; builder.fails(); } #[test(should_fail_with = "kernel version does not match the rollup version")] - fn constants_dont_match_kernels_version() { + unconstrained fn constants_dont_match_kernels_version() { let mut builder = BaseRollupInputsBuilder::new(); builder.constants.global_variables.version = 3; builder.fails(); } #[test] - fn subtree_height_is_0() { + unconstrained fn subtree_height_is_0() { let outputs = BaseRollupInputsBuilder::new().execute(); assert_eq(outputs.rollup_subtree_height, 0); } #[test] - fn single_public_state_read() { + unconstrained fn single_public_state_read() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_public_data[0] = 27; @@ -1228,7 +1192,7 @@ mod tests { } #[test] - fn single_public_state_write() { + unconstrained fn single_public_state_write() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_public_data[0] = 27; @@ -1247,7 +1211,7 @@ mod tests { } #[test] - fn multiple_public_state_read_writes() { + unconstrained fn multiple_public_state_read_writes() { let mut builder = BaseRollupInputsBuilder::new(); builder.pre_existing_public_data[0] = 27; @@ -1271,4 +1235,4 @@ mod tests { assert_eq(outputs.end_public_data_tree_root, 
expected_public_data_tree.get_root()); } -} +} \ No newline at end of file diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/indexed_tree.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/indexed_tree.nr new file mode 100644 index 00000000000..1f871270b2d --- /dev/null +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/indexed_tree.nr @@ -0,0 +1,121 @@ +use crate::abis::append_only_tree_snapshot::AppendOnlyTreeSnapshot; +use crate::merkle_tree::{calculate_subtree, calculate_empty_tree_root}; + +use dep::types::abis::membership_witness::MembershipWitness; + +fn check_permutation(original_array: [T; N], sorted_array: [T; N], indexes: [u32; N], is_equal: fn (T, T) -> bool) { + let mut seen_value = [false; N]; + for i in 0..N { + let index = indexes[i]; + let sorted_value = sorted_array[i]; + let original_value = original_array[index]; + assert(is_equal(sorted_value, original_value), "Invalid index"); + assert(!seen_value[index], "Duplicated index"); + seen_value[index] = true; + } +} + +#[test] +fn check_permutation_basic_test(){ + let original_array = [1, 2, 3]; + let sorted_array = [3, 1, 2]; + let indexes = [2, 0, 1]; + let is_equal = |a: Field, b: Field| a == b; + check_permutation(original_array, sorted_array, indexes, is_equal); +} + +#[test(should_fail_with = "Duplicated index")] +fn check_permutation_duplicated_index(){ + let original_array = [0, 1, 0]; + let sorted_array = [1, 0, 0]; + let indexes = [1, 0, 0]; + let is_equal = |a: Field, b: Field| a == b; + check_permutation(original_array, sorted_array, indexes, is_equal); +} + +#[test(should_fail_with = "Invalid index")] +fn check_permutation_invalid_index(){ + let original_array = [0, 1, 2]; + let sorted_array = [1, 0, 0]; + let indexes = [1, 0, 2]; + let is_equal = |a: Field, b: Field| a == b; + check_permutation(original_array, sorted_array, indexes, is_equal); +} + +pub fn batch_insert( + start_snapshot: AppendOnlyTreeSnapshot, + 
values_to_insert: [Value; SubtreeWidth], + sorted_values: [Value; SubtreeWidth], + sorted_values_indexes: [u32; SubtreeWidth], + new_subtree_sibling_path: [Field; SiblingPathLength], + low_leaf_preimages: [Leaf; SubtreeWidth], + low_leaf_membership_witnesses: [MembershipWitness; SubtreeWidth], + is_equal: fn (Value, Value) -> bool, + is_empty_value: fn (Value) -> bool, + hash_leaf: fn (Leaf) -> Field, + is_valid_low_leaf: fn(Leaf, Value) -> bool, + update_low_leaf: fn(Leaf, Value, u32) -> Leaf, + build_insertion_leaf: fn (Value, Leaf) -> Leaf, + _subtree_height: [Field; SubtreeHeight], + _tree_height: [Field; TreeHeight], +) -> AppendOnlyTreeSnapshot { + // A permutation to the values is provided to make the insertion use only one insertion strategy + check_permutation(values_to_insert, sorted_values, sorted_values_indexes, is_equal); + + // Now, update the existing leaves with the new leaves + let mut current_tree_root = start_snapshot.root; + let mut insertion_subtree: [Leaf; SubtreeWidth] = dep::std::unsafe::zeroed(); + let start_insertion_index = start_snapshot.next_available_leaf_index; + + for i in 0..sorted_values.len() { + let value = sorted_values[i]; + if !is_empty_value(value) { + let low_leaf_preimage = low_leaf_preimages[i]; + let witness = low_leaf_membership_witnesses[i]; + + assert(is_valid_low_leaf(low_leaf_preimage, value), "Invalid low leaf"); + + // perform membership check for the low leaf against the original root + crate::components::assert_check_membership( + hash_leaf(low_leaf_preimage), + witness.leaf_index, + witness.sibling_path, + current_tree_root, + ); + + let value_index = sorted_values_indexes[i]; + + // Calculate the new value of the low_leaf + let updated_low_leaf= update_low_leaf(low_leaf_preimage, value, start_insertion_index+value_index); + + current_tree_root = crate::components::root_from_sibling_path( + hash_leaf(updated_low_leaf), witness.leaf_index, witness.sibling_path); + + insertion_subtree[value_index] = 
build_insertion_leaf(value, low_leaf_preimage); + } + } + + let empty_subtree_root = calculate_empty_tree_root(SubtreeHeight); + let leaf_index_subtree_depth = start_insertion_index >> (SubtreeHeight as u32); + + crate::components::assert_check_membership( + empty_subtree_root, + leaf_index_subtree_depth as Field, + new_subtree_sibling_path, + current_tree_root, + ); + + // Create new subtree to insert into the whole indexed tree + let subtree_root = calculate_subtree(insertion_subtree.map(hash_leaf)); + + // Calculate the new root + // We are inserting a subtree rather than a full tree here + let subtree_index = start_insertion_index >> (SubtreeHeight as u32); + let new_root = crate::components::root_from_sibling_path(subtree_root, subtree_index as Field, new_subtree_sibling_path); + + AppendOnlyTreeSnapshot { + root: new_root, + next_available_leaf_index: start_insertion_index + (values_to_insert.len() as u32), + } +} + diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/lib.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/lib.nr index 3a225975d81..1d0df93cc9d 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/lib.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/lib.nr @@ -15,4 +15,6 @@ mod hash; mod merkle_tree; -mod tests; \ No newline at end of file +mod tests; + +mod indexed_tree; diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root.nr index c64ee5fcbdd..d6052cee5e2 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root.nr @@ -37,7 +37,7 @@ impl RootRollupInputs { ); // Build the block hash for this iteration from the tree roots and global variables - // Then insert the block into the historic blocks tree + // Then insert the block into the blocks tree let block_hash = 
compute_block_hash_with_globals(left.constants.global_variables, right.end_note_hash_tree_snapshot.root, right.end_nullifier_tree_snapshot.root, @@ -45,10 +45,10 @@ impl RootRollupInputs { new_l1_to_l2_messages_tree_snapshot.root, right.end_public_data_tree_root); - // Update the historic blocks tree - let end_historic_blocks_tree_snapshot = components::insert_subtree_to_snapshot_tree( - self.start_historic_blocks_tree_snapshot, - self.new_historic_blocks_tree_sibling_path, + // Update the blocks tree + let end_blocks_tree_snapshot = components::insert_subtree_to_snapshot_tree( + self.start_blocks_tree_snapshot, + self.new_blocks_tree_sibling_path, 0, block_hash, 0 @@ -72,19 +72,19 @@ impl RootRollupInputs { end_public_data_tree_root : right.end_public_data_tree_root, start_l1_to_l2_messages_tree_snapshot : self.start_l1_to_l2_messages_tree_snapshot, end_l1_to_l2_messages_tree_snapshot : new_l1_to_l2_messages_tree_snapshot, - start_historic_blocks_tree_snapshot : self.start_historic_blocks_tree_snapshot, - end_historic_blocks_tree_snapshot : end_historic_blocks_tree_snapshot, + start_blocks_tree_snapshot : self.start_blocks_tree_snapshot, + end_blocks_tree_snapshot : end_blocks_tree_snapshot, calldata_hash : components::compute_calldata_hash(self.previous_rollup_data), l1_to_l2_messages_hash : compute_messages_hash(self.new_l1_to_l2_messages), // The cpp code was just not initializing these, so they would be zeroed out // TODO(Lasse/Jean): add explanation for this. 
- end_tree_of_historic_contract_tree_roots_snapshot : zeroed_out_snapshot, - end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot : zeroed_out_snapshot, - end_tree_of_historic_note_hash_tree_roots_snapshot : zeroed_out_snapshot, - start_tree_of_historic_contract_tree_roots_snapshot : zeroed_out_snapshot, - start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot : zeroed_out_snapshot, - start_tree_of_historic_note_hash_tree_roots_snapshot : zeroed_out_snapshot, + end_tree_of_historical_contract_tree_roots_snapshot : zeroed_out_snapshot, + end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot : zeroed_out_snapshot, + end_tree_of_historical_note_hash_tree_roots_snapshot : zeroed_out_snapshot, + start_tree_of_historical_contract_tree_roots_snapshot : zeroed_out_snapshot, + start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot : zeroed_out_snapshot, + start_tree_of_historical_note_hash_tree_roots_snapshot : zeroed_out_snapshot, } } } diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_inputs.nr index 23f1f6db7e9..5c2fa4075ff 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_inputs.nr @@ -6,7 +6,7 @@ use crate::abis::constant_rollup_data::ConstantRollupData; use dep::aztec::constants_gen::{ NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP, L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, - HISTORIC_BLOCKS_TREE_HEIGHT + BLOCKS_TREE_HEIGHT }; struct RootRollupInputs { @@ -20,6 +20,6 @@ struct RootRollupInputs { start_l1_to_l2_messages_tree_snapshot : AppendOnlyTreeSnapshot, // inputs required to add the block hash - start_historic_blocks_tree_snapshot : AppendOnlyTreeSnapshot, - new_historic_blocks_tree_sibling_path : [Field; HISTORIC_BLOCKS_TREE_HEIGHT], + start_blocks_tree_snapshot : 
AppendOnlyTreeSnapshot, + new_blocks_tree_sibling_path : [Field; BLOCKS_TREE_HEIGHT], } \ No newline at end of file diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_public_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_public_inputs.nr index 2a1ca5daa5e..9b15ca2955f 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_public_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/root/root_rollup_public_inputs.nr @@ -26,20 +26,20 @@ struct RootRollupPublicInputs { start_public_data_tree_root : Field, end_public_data_tree_root : Field, - start_tree_of_historic_note_hash_tree_roots_snapshot : AppendOnlyTreeSnapshot, - end_tree_of_historic_note_hash_tree_roots_snapshot : AppendOnlyTreeSnapshot, + start_tree_of_historical_note_hash_tree_roots_snapshot : AppendOnlyTreeSnapshot, + end_tree_of_historical_note_hash_tree_roots_snapshot : AppendOnlyTreeSnapshot, - start_tree_of_historic_contract_tree_roots_snapshot : AppendOnlyTreeSnapshot, - end_tree_of_historic_contract_tree_roots_snapshot : AppendOnlyTreeSnapshot, + start_tree_of_historical_contract_tree_roots_snapshot : AppendOnlyTreeSnapshot, + end_tree_of_historical_contract_tree_roots_snapshot : AppendOnlyTreeSnapshot, start_l1_to_l2_messages_tree_snapshot : AppendOnlyTreeSnapshot, end_l1_to_l2_messages_tree_snapshot : AppendOnlyTreeSnapshot, - start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot : AppendOnlyTreeSnapshot, - end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot : AppendOnlyTreeSnapshot, + start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot : AppendOnlyTreeSnapshot, + end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot : AppendOnlyTreeSnapshot, - start_historic_blocks_tree_snapshot : AppendOnlyTreeSnapshot, - end_historic_blocks_tree_snapshot : AppendOnlyTreeSnapshot, + start_blocks_tree_snapshot : AppendOnlyTreeSnapshot, + 
end_blocks_tree_snapshot : AppendOnlyTreeSnapshot, calldata_hash : [Field; NUM_FIELDS_PER_SHA256], l1_to_l2_messages_hash : [Field; NUM_FIELDS_PER_SHA256], diff --git a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/tests/root_rollup_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/tests/root_rollup_inputs.nr index 81e026c53d5..7da05d2df6d 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/tests/root_rollup_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/rollup-lib/src/tests/root_rollup_inputs.nr @@ -7,7 +7,7 @@ use dep::aztec::constants_gen::{ L1_TO_L2_MSG_TREE_HEIGHT, L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, L1_TO_L2_MSG_SUBTREE_HEIGHT, - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, }; use crate::abis::append_only_tree_snapshot::AppendOnlyTreeSnapshot; use crate::tests::previous_rollup_data::default_previous_rollup_data; @@ -25,10 +25,10 @@ pub fn compute_l1_l2_empty_snapshot() -> (AppendOnlyTreeSnapshot, [Field; L1_TO_ (AppendOnlyTreeSnapshot{ root: zero_hashes[zero_hashes.len() - 1], next_available_leaf_index: 0 }, new_l1_to_l2_messages_tree_root_sibling_path) } -pub fn compute_historic_blocks_tree_snapshot() -> (AppendOnlyTreeSnapshot, [Field; HISTORIC_BLOCKS_TREE_HEIGHT]) { - let zero_hashes = compute_zero_hashes([0; HISTORIC_BLOCKS_TREE_HEIGHT]); - let mut sibling_path = [0; HISTORIC_BLOCKS_TREE_HEIGHT]; - for i in 1..HISTORIC_BLOCKS_TREE_HEIGHT { +pub fn compute_blocks_tree_snapshot() -> (AppendOnlyTreeSnapshot, [Field; BLOCKS_TREE_HEIGHT]) { + let zero_hashes = compute_zero_hashes([0; BLOCKS_TREE_HEIGHT]); + let mut sibling_path = [0; BLOCKS_TREE_HEIGHT]; + for i in 1..BLOCKS_TREE_HEIGHT { sibling_path[i] = zero_hashes[i-1]; } (AppendOnlyTreeSnapshot { root: zero_hashes[zero_hashes.len() - 1], next_available_leaf_index: 0 }, sibling_path) @@ -42,10 +42,10 @@ pub fn default_root_rollup_inputs() -> RootRollupInputs { inputs.new_l1_to_l2_messages_tree_root_sibling_path = 
l1_l2_empty_sibling_path; inputs.start_l1_to_l2_messages_tree_snapshot = l1_l2_empty_snapshot; - let (historic_blocks_snapshot, historic_blocks_sibling_path) = compute_historic_blocks_tree_snapshot(); + let (blocks_snapshot, blocks_sibling_path) = compute_blocks_tree_snapshot(); - inputs.start_historic_blocks_tree_snapshot = historic_blocks_snapshot; - inputs.new_historic_blocks_tree_sibling_path = historic_blocks_sibling_path; + inputs.start_blocks_tree_snapshot = blocks_snapshot; + inputs.new_blocks_tree_sibling_path = blocks_sibling_path; inputs.previous_rollup_data = default_previous_rollup_data(); diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis.nr index 8fd4ee28109..dd2b52402df 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis.nr @@ -7,7 +7,7 @@ mod membership_witness; mod new_contract_data; mod contract_leaf_preimage; -mod historical_block_data; +mod block_header; mod combined_constant_data; mod public_data_read; mod public_data_update_request; diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/historical_block_data.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/block_header.nr similarity index 95% rename from yarn-project/noir-protocol-circuits/src/crates/types/src/abis/historical_block_data.nr rename to yarn-project/noir-protocol-circuits/src/crates/types/src/abis/block_header.nr index 488d4cf6ba0..9616da0aa8c 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/historical_block_data.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/block_header.nr @@ -1,6 +1,6 @@ use crate::block::Block; -struct HistoricalBlockData { +struct BlockHeader { blocks_tree_root : Field, block : Block, // Private data @@ -8,7 +8,7 @@ struct HistoricalBlockData { private_kernel_vk_tree_root : Field, } -impl 
HistoricalBlockData { +impl BlockHeader { fn assert_is_zero(self) { self.block.assert_is_zero(); assert(self.private_kernel_vk_tree_root == 0); diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/combined_constant_data.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/combined_constant_data.nr index 947c6c88214..d1706c9624e 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/combined_constant_data.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/combined_constant_data.nr @@ -1,7 +1,7 @@ use crate::transaction::context::TxContext; -use crate::abis::historical_block_data::HistoricalBlockData; +use crate::abis::block_header::BlockHeader; struct CombinedConstantData { - block_data: HistoricalBlockData, + block_header: BlockHeader, tx_context: TxContext, } diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/membership_witness.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/membership_witness.nr index 2af3ca4ff21..6b4c29dcb71 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/membership_witness.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/membership_witness.nr @@ -5,7 +5,7 @@ use dep::aztec::constants_gen::{ NULLIFIER_TREE_HEIGHT, NOTE_HASH_TREE_HEIGHT, ROLLUP_VK_TREE_HEIGHT, - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, }; struct MembershipWitness { @@ -37,9 +37,9 @@ struct NullifierMembershipWitness{ sibling_path: [Field; NULLIFIER_TREE_HEIGHT] } -struct HistoricBlocksTreeRootMembershipWitness{ +struct BlocksTreeRootMembershipWitness{ leaf_index: Field, - sibling_path: [Field; HISTORIC_BLOCKS_TREE_HEIGHT] + sibling_path: [Field; BLOCKS_TREE_HEIGHT] } struct ReadRequestMembershipWitness { diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/private_circuit_public_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/private_circuit_public_inputs.nr 
index 12bb26c95ad..b13e029a926 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/private_circuit_public_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/private_circuit_public_inputs.nr @@ -1,7 +1,7 @@ use crate::{ abis::{ call_context::CallContext, - historical_block_data::HistoricalBlockData, + block_header::BlockHeader, }, contrakt::deployment_data::ContractDeploymentData, hash::NUM_FIELDS_PER_SHA256, @@ -46,7 +46,7 @@ struct PrivateCircuitPublicInputs { encrypted_log_preimages_length: Field, unencrypted_log_preimages_length: Field, - historical_block_data: HistoricalBlockData, + block_header: BlockHeader, contract_deployment_data: ContractDeploymentData, @@ -73,7 +73,7 @@ impl PrivateCircuitPublicInputs { fields.push_array(self.unencrypted_logs_hash); fields.push(self.encrypted_log_preimages_length); fields.push(self.unencrypted_log_preimages_length); - fields.push_array(self.historical_block_data.to_array()); + fields.push_array(self.block_header.to_array()); fields.push(self.contract_deployment_data.hash()); fields.push(self.chain_id); fields.push(self.version); diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_circuit_public_inputs.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_circuit_public_inputs.nr index 98370a90c2c..abcaf60e7af 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_circuit_public_inputs.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/abis/public_circuit_public_inputs.nr @@ -11,7 +11,7 @@ use dep::aztec::constants_gen::{ use crate::{ abis::{ call_context::CallContext, - historical_block_data::HistoricalBlockData, + block_header::BlockHeader, }, address::Address, contrakt::{ @@ -42,7 +42,7 @@ struct PublicCircuitPublicInputs{ // variable-length data. 
unencrypted_log_preimages_length: Field, - historical_block_data: HistoricalBlockData, + block_header: BlockHeader, prover_address: Address, } @@ -66,7 +66,7 @@ impl PublicCircuitPublicInputs{ inputs.push_array(self.new_l2_to_l1_msgs); inputs.push_array(self.unencrypted_logs_hash); inputs.push(self.unencrypted_log_preimages_length); - inputs.push_array(self.historical_block_data.to_array()); + inputs.push_array(self.block_header.to_array()); inputs.push(self.prover_address.to_field()); assert_eq(inputs.len(), constants_gen::PUBLIC_CIRCUIT_PUBLIC_INPUTS_HASH_INPUT_LENGTH, "Incorrect number of input fields when hashing PublicCircuitPublicInputs"); diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/fixtures.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/fixtures.nr index 00c81df572c..fca591a7fa6 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/fixtures.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/fixtures.nr @@ -5,7 +5,7 @@ mod note_hash_tree; mod read_requests; use crate::address::Address; -use crate::abis::historical_block_data::HistoricalBlockData; +use crate::abis::block_header::BlockHeader; use crate::block::Block; use crate::point::Point; use crate::tests::fixtures; @@ -14,7 +14,7 @@ global MSG_SENDER = Address { inner: 27 }; global DEPLOYER_PUBLIC_KEY = Point { x: 123456789, y: 123456789 }; -global HISTORICAL_BLOCK_DATA = HistoricalBlockData { +global BLOCK_HEADER = BlockHeader { blocks_tree_root: 0, block: Block { note_hash_tree_root: fixtures::note_hash_tree::ROOT, diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/previous_kernel_data_builder.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/previous_kernel_data_builder.nr index b5c02900860..d41008a7ad4 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/previous_kernel_data_builder.nr +++ 
b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/previous_kernel_data_builder.nr @@ -4,7 +4,7 @@ use crate::{ call_request::{CallerContext, CallRequest}, combined_constant_data::CombinedConstantData, combined_accumulated_data::CombinedAccumulatedDataBuilder, - historical_block_data::HistoricalBlockData, + block_header::BlockHeader, kernel_circuit_public_inputs::KernelCircuitPublicInputs, previous_kernel_data::PreviousKernelData, public_data_read::PublicDataRead, @@ -34,7 +34,7 @@ struct PreviousKernelDataBuilder { contract_address: Address, portal_contract_address: EthAddress, end: CombinedAccumulatedDataBuilder, - block_data: HistoricalBlockData, + block_header: BlockHeader, tx_context: TxContext, is_private: bool, proof: Proof, @@ -55,7 +55,7 @@ impl PreviousKernelDataBuilder { contract_address: fixtures::contracts::parent_contract.address, portal_contract_address: fixtures::contracts::parent_contract.portal_contract_address, end, - block_data: fixtures::HISTORICAL_BLOCK_DATA, + block_header: fixtures::BLOCK_HEADER, tx_context, is_private: true, proof: Proof {}, @@ -171,7 +171,7 @@ impl PreviousKernelDataBuilder { let public_inputs = KernelCircuitPublicInputs { end: self.end.finish(), constants: CombinedConstantData { - block_data: self.block_data, + block_header: self.block_header, tx_context: self.tx_context, }, is_private: self.is_private, diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/private_circuit_public_inputs_builder.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/private_circuit_public_inputs_builder.nr index 9a4f2939797..f000639f3df 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/private_circuit_public_inputs_builder.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/private_circuit_public_inputs_builder.nr @@ -2,7 +2,7 @@ use crate::{ abis::{ call_context::CallContext, complete_address::CompleteAddress, - 
historical_block_data::HistoricalBlockData, + block_header::BlockHeader, private_circuit_public_inputs::PrivateCircuitPublicInputs, }, contrakt::deployment_data::ContractDeploymentData, @@ -50,7 +50,7 @@ struct PrivateCircuitPublicInputsBuilder { encrypted_log_preimages_length: Field, unencrypted_log_preimages_length: Field, - historical_block_data: HistoricalBlockData, + block_header: BlockHeader, contract_deployment_data: ContractDeploymentData, @@ -101,7 +101,7 @@ impl PrivateCircuitPublicInputsBuilder { public_inputs.call_context = call_context; public_inputs.args_hash = args_hash; public_inputs.contract_deployment_data = contract_deployment_data; - public_inputs.historical_block_data = fixtures::HISTORICAL_BLOCK_DATA; + public_inputs.block_header = fixtures::BLOCK_HEADER; public_inputs.chain_id = 0; public_inputs.version = 1; @@ -131,7 +131,7 @@ impl PrivateCircuitPublicInputsBuilder { encrypted_log_preimages_length: self.encrypted_log_preimages_length, unencrypted_log_preimages_length: self.unencrypted_log_preimages_length, - historical_block_data: self.historical_block_data, + block_header: self.block_header, contract_deployment_data: self.contract_deployment_data, diff --git a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/public_circuit_public_inputs_builder.nr b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/public_circuit_public_inputs_builder.nr index 184c558a82e..7f1de0beee6 100644 --- a/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/public_circuit_public_inputs_builder.nr +++ b/yarn-project/noir-protocol-circuits/src/crates/types/src/tests/public_circuit_public_inputs_builder.nr @@ -1,7 +1,7 @@ use crate::{ abis::{ call_context::CallContext, - historical_block_data::HistoricalBlockData, + block_header::BlockHeader, public_circuit_public_inputs::PublicCircuitPublicInputs, }, address::Address, @@ -35,7 +35,7 @@ struct PublicCircuitPublicInputsBuilder { new_l2_to_l1_msgs: BoundedVec, unencrypted_logs_hash: 
[Field; NUM_FIELDS_PER_SHA256], unencrypted_log_preimages_length: Field, - historical_block_data: HistoricalBlockData, + block_header: BlockHeader, prover_address: Address, } @@ -43,7 +43,7 @@ impl PublicCircuitPublicInputsBuilder { pub fn new() -> Self { let mut public_inputs: PublicCircuitPublicInputsBuilder = dep::std::unsafe::zeroed(); public_inputs.call_context.msg_sender = fixtures::MSG_SENDER; - public_inputs.historical_block_data = fixtures::HISTORICAL_BLOCK_DATA; + public_inputs.block_header = fixtures::BLOCK_HEADER; public_inputs } @@ -60,7 +60,7 @@ impl PublicCircuitPublicInputsBuilder { new_l2_to_l1_msgs: self.new_l2_to_l1_msgs.storage, unencrypted_logs_hash: self.unencrypted_logs_hash, unencrypted_log_preimages_length: self.unencrypted_log_preimages_length, - historical_block_data: self.historical_block_data, + block_header: self.block_header, prover_address: self.prover_address, } } diff --git a/yarn-project/noir-protocol-circuits/src/index.test.ts b/yarn-project/noir-protocol-circuits/src/index.test.ts index 9b7671574b1..974269bb8f7 100644 --- a/yarn-project/noir-protocol-circuits/src/index.test.ts +++ b/yarn-project/noir-protocol-circuits/src/index.test.ts @@ -1,6 +1,7 @@ import { AggregationObject, AztecAddress, + BlockHeader, CONTRACT_TREE_HEIGHT, CallContext, CallRequest, @@ -12,7 +13,6 @@ import { FunctionData, FunctionLeafPreimage, FunctionSelector, - HistoricBlockData, KernelCircuitPublicInputs, MAX_NEW_COMMITMENTS_PER_CALL, MAX_NEW_COMMITMENTS_PER_TX, @@ -103,7 +103,7 @@ describe('Private kernel', () => { const callContext = new CallContext(AztecAddress.ZERO, contractAddress, Fr.ZERO, selector, false, false, true); - const historicBlockData = new HistoricBlockData( + const blockHeader = new BlockHeader( Fr.fromString('0x16642d9ccd8346c403aa4c3fa451178b22534a27035cdaa6ec34ae53b29c50cb'), Fr.fromString('0x0bcfa3e9f1a8922ee92c6dc964d6595907c1804a86753774322b468f69d4f278'), 
Fr.fromString('0x1864fcdaa80ff2719154fa7c8a9050662972707168d69eac9db6fd3110829f80'), @@ -130,7 +130,7 @@ describe('Private kernel', () => { [Fr.fromString('0xe3b0c44298fc1c149afbf4c8996fb924'), Fr.fromString('0x27ae41e4649b934ca495991b7852b855')], Fr.fromString('0xf8'), Fr.fromString('0x04'), - historicBlockData, + blockHeader, contractDeploymentData, Fr.ZERO, Fr.ZERO, @@ -215,7 +215,7 @@ describe('Private kernel', () => { makeTuple(MAX_PUBLIC_DATA_READS_PER_TX, () => PublicDataRead.empty()), ); - const historicBlockData = new HistoricBlockData( + const blockHeader = new BlockHeader( Fr.fromString('0x16642d9ccd8346c403aa4c3fa451178b22534a27035cdaa6ec34ae53b29c50cb'), Fr.fromString('0x0bcfa3e9f1a8922ee92c6dc964d6595907c1804a86753774322b468f69d4f278'), Fr.fromString('0x1864fcdaa80ff2719154fa7c8a9050662972707168d69eac9db6fd3110829f80'), @@ -226,7 +226,7 @@ describe('Private kernel', () => { Fr.fromString('0x200569267c0f73ac89aaa414239398db9445dd4ad3a8cf37015cd55b8d4c5e8d'), ); - const constants = new CombinedConstantData(historicBlockData, txContext); + const constants = new CombinedConstantData(blockHeader, txContext); const kernelPublicInputs = new KernelCircuitPublicInputs(combinedAccumulatedData, constants, true); @@ -275,7 +275,7 @@ describe('Private kernel', () => { false, ); - const historicBlockData = new HistoricBlockData( + const blockHeader = new BlockHeader( Fr.fromString('0x0dc1f2fbe77c0c72d329cc63f2bd88cd76a30c5802f8138814874cc328148834'), Fr.fromString('0x1861d7a76f4c8f7db95fa8aa1bcbdd5cbf576efe17455fee698f625292667070'), Fr.fromString('0x2f7255183443071e94e90651593c46342978e689e1f4f3e402616fa59633b974'), @@ -305,7 +305,7 @@ describe('Private kernel', () => { [Fr.fromString('0xe3b0c44298fc1c149afbf4c8996fb924'), Fr.fromString('0x27ae41e4649b934ca495991b7852b855')], Fr.fromString('0x04'), Fr.fromString('0x04'), - historicBlockData, + blockHeader, ContractDeploymentData.empty(), chainId, version, @@ -381,7 +381,7 @@ describe('Private kernel', () => { 
makeTuple(MAX_PUBLIC_DATA_READS_PER_TX, () => PublicDataRead.empty()), ); - const constants = new CombinedConstantData(historicBlockData, txContext); + const constants = new CombinedConstantData(blockHeader, txContext); const kernelPublicInputs = new KernelCircuitPublicInputs(combinedAccumulatedData, constants, true); diff --git a/yarn-project/noir-protocol-circuits/src/noir_test_gen.test.ts b/yarn-project/noir-protocol-circuits/src/noir_test_gen.test.ts index 9449c90f6d8..aada805f27a 100644 --- a/yarn-project/noir-protocol-circuits/src/noir_test_gen.test.ts +++ b/yarn-project/noir-protocol-circuits/src/noir_test_gen.test.ts @@ -90,7 +90,7 @@ describe('Data generation for noir tests', () => { }).toMatchSnapshot(); }); - it('Computes a private data tree', async () => { + it('Computes a note hash tree', async () => { const indexes = new Array(128).fill(null).map((_, i) => BigInt(i)); const leaves = indexes.map(i => new Fr(i + 1n).toBuffer()); diff --git a/yarn-project/noir-protocol-circuits/src/type_conversion.test.ts b/yarn-project/noir-protocol-circuits/src/type_conversion.test.ts index 93bf57ae940..f9cd72f820e 100644 --- a/yarn-project/noir-protocol-circuits/src/type_conversion.test.ts +++ b/yarn-project/noir-protocol-circuits/src/type_conversion.test.ts @@ -1,11 +1,11 @@ import { AztecAddress, + BlockHeader, ContractDeploymentData, EthAddress, Fr, FunctionData, FunctionSelector, - HistoricBlockData, Point, TxContext, } from '@aztec/circuits.js'; @@ -13,6 +13,8 @@ import { import { mapAztecAddressFromNoir, mapAztecAddressToNoir, + mapBlockHeaderFromNoir, + mapBlockHeaderToNoir, mapContractDeploymentDataFromNoir, mapContractDeploymentDataToNoir, mapEthAddressFromNoir, @@ -23,8 +25,6 @@ import { mapFunctionDataToNoir, mapFunctionSelectorFromNoir, mapFunctionSelectorToNoir, - mapHistoricalBlockDataFromNoir, - mapHistoricalBlockDataToNoir, mapPointFromNoir, mapPointToNoir, mapTxContextFromNoir, @@ -85,8 +85,8 @@ describe('Noir<>Circuits.js type conversion test suite', 
() => { expect(mapFunctionDataFromNoir(mapFunctionDataToNoir(functionData))).toEqual(functionData); }); - it('should map historical block data', () => { - const historicalBlockData = new HistoricBlockData( + it('should map block header', () => { + const blockHeader = new BlockHeader( new Fr(35n), new Fr(36n), new Fr(37n), @@ -96,9 +96,7 @@ describe('Noir<>Circuits.js type conversion test suite', () => { new Fr(41n), new Fr(42n), ); - expect(mapHistoricalBlockDataFromNoir(mapHistoricalBlockDataToNoir(historicalBlockData))).toEqual( - historicalBlockData, - ); + expect(mapBlockHeaderFromNoir(mapBlockHeaderToNoir(blockHeader))).toEqual(blockHeader); }); }); }); diff --git a/yarn-project/noir-protocol-circuits/src/type_conversion.ts b/yarn-project/noir-protocol-circuits/src/type_conversion.ts index 0ae1b87bc33..e42a3cceb56 100644 --- a/yarn-project/noir-protocol-circuits/src/type_conversion.ts +++ b/yarn-project/noir-protocol-circuits/src/type_conversion.ts @@ -2,8 +2,10 @@ import { AggregationObject, AppendOnlyTreeSnapshot, AztecAddress, + BLOCKS_TREE_HEIGHT, BaseOrMergeRollupPublicInputs, BaseRollupInputs, + BlockHeader, CallContext, CallRequest, CallerContext, @@ -19,8 +21,6 @@ import { FunctionData, FunctionSelector, GlobalVariables, - HISTORIC_BLOCKS_TREE_HEIGHT, - HistoricBlockData, KernelCircuitPublicInputs, KernelCircuitPublicInputsFinal, MAX_NEW_COMMITMENTS_PER_TX, @@ -64,6 +64,7 @@ import { import { Tuple } from '@aztec/foundation/serialize'; import { + BlockHeader as BlockHeaderNoir, CallContext as CallContextNoir, CallRequest as CallRequestNoir, CallerContext as CallerContextNoir, @@ -75,7 +76,6 @@ import { FunctionData as FunctionDataNoir, FunctionLeafMembershipWitness as FunctionLeafMembershipWitnessNoir, FunctionSelector as FunctionSelectorNoir, - HistoricalBlockData as HistoricalBlockDataNoir, KernelCircuitPublicInputs as KernelCircuitPublicInputsNoir, NewContractData as NewContractDataNoir, Address as NoirAztecAddress, @@ -112,7 +112,7 @@ import { } 
from './types/public_kernel_private_previous_types.js'; import { BaseRollupInputs as BaseRollupInputsNoir, - HistoricBlocksTreeRootMembershipWitness as HistoricBlocksTreeRootMembershipWitnessNoir, + BlocksTreeRootMembershipWitness as BlocksTreeRootMembershipWitnessNoir, NullifierLeafPreimage as NullifierLeafPreimageNoir, NullifierMembershipWitness as NullifierMembershipWitnessNoir, } from './types/rollup_base_types.js'; @@ -156,6 +156,13 @@ export function mapNumberFromNoir(number: NoirField): number { return Number(Fr.fromString(number).toBigInt()); } +/** + * + */ +export function mapNumberToNoir(number: number): NoirField { + return new Fr(BigInt(number)).toString(); +} + /** * Maps a point to a noir point. * @param point - The point. @@ -426,40 +433,40 @@ export function mapCallRequestToNoir(callRequest: CallRequest): CallRequestNoir } /** - * Maps a historical block data to a noir historical block data. - * @param historicalBlockData - The historical block data. - * @returns The noir historical block data. + * Maps a block header to a noir block header. + * @param blockHeader - The block header. + * @returns The noir block header. 
*/ -export function mapHistoricalBlockDataToNoir(historicalBlockData: HistoricBlockData): HistoricalBlockDataNoir { +export function mapBlockHeaderToNoir(blockHeader: BlockHeader): BlockHeaderNoir { return { - blocks_tree_root: mapFieldToNoir(historicalBlockData.blocksTreeRoot), + blocks_tree_root: mapFieldToNoir(blockHeader.blocksTreeRoot), block: { - note_hash_tree_root: mapFieldToNoir(historicalBlockData.noteHashTreeRoot), - nullifier_tree_root: mapFieldToNoir(historicalBlockData.nullifierTreeRoot), - contract_tree_root: mapFieldToNoir(historicalBlockData.contractTreeRoot), - l1_to_l2_messages_tree_root: mapFieldToNoir(historicalBlockData.l1ToL2MessagesTreeRoot), - public_data_tree_root: mapFieldToNoir(historicalBlockData.publicDataTreeRoot), - global_variables_hash: mapFieldToNoir(historicalBlockData.globalVariablesHash), + note_hash_tree_root: mapFieldToNoir(blockHeader.noteHashTreeRoot), + nullifier_tree_root: mapFieldToNoir(blockHeader.nullifierTreeRoot), + contract_tree_root: mapFieldToNoir(blockHeader.contractTreeRoot), + l1_to_l2_messages_tree_root: mapFieldToNoir(blockHeader.l1ToL2MessagesTreeRoot), + public_data_tree_root: mapFieldToNoir(blockHeader.publicDataTreeRoot), + global_variables_hash: mapFieldToNoir(blockHeader.globalVariablesHash), }, - private_kernel_vk_tree_root: mapFieldToNoir(historicalBlockData.privateKernelVkTreeRoot), + private_kernel_vk_tree_root: mapFieldToNoir(blockHeader.privateKernelVkTreeRoot), }; } /** - * Maps a noir historical block data to a historical block data. - * @param historicalBlockData - The noir historical block data. - * @returns The historical block data. + * Maps a noir block header to a block header. + * @param blockHeader - The noir block header. + * @returns The block header. 
*/ -export function mapHistoricalBlockDataFromNoir(historicalBlockData: HistoricalBlockDataNoir): HistoricBlockData { - return new HistoricBlockData( - mapFieldFromNoir(historicalBlockData.block.note_hash_tree_root), - mapFieldFromNoir(historicalBlockData.block.nullifier_tree_root), - mapFieldFromNoir(historicalBlockData.block.contract_tree_root), - mapFieldFromNoir(historicalBlockData.block.l1_to_l2_messages_tree_root), - mapFieldFromNoir(historicalBlockData.blocks_tree_root), - mapFieldFromNoir(historicalBlockData.private_kernel_vk_tree_root), - mapFieldFromNoir(historicalBlockData.block.public_data_tree_root), - mapFieldFromNoir(historicalBlockData.block.global_variables_hash), +export function mapBlockHeaderFromNoir(blockHeader: BlockHeaderNoir): BlockHeader { + return new BlockHeader( + mapFieldFromNoir(blockHeader.block.note_hash_tree_root), + mapFieldFromNoir(blockHeader.block.nullifier_tree_root), + mapFieldFromNoir(blockHeader.block.contract_tree_root), + mapFieldFromNoir(blockHeader.block.l1_to_l2_messages_tree_root), + mapFieldFromNoir(blockHeader.blocks_tree_root), + mapFieldFromNoir(blockHeader.private_kernel_vk_tree_root), + mapFieldFromNoir(blockHeader.block.public_data_tree_root), + mapFieldFromNoir(blockHeader.block.global_variables_hash), ); } @@ -504,7 +511,7 @@ export function mapPrivateCircuitPublicInputsToNoir( >, encrypted_log_preimages_length: mapFieldToNoir(privateCircuitPublicInputs.encryptedLogPreimagesLength), unencrypted_log_preimages_length: mapFieldToNoir(privateCircuitPublicInputs.unencryptedLogPreimagesLength), - historical_block_data: mapHistoricalBlockDataToNoir(privateCircuitPublicInputs.historicBlockData), + block_header: mapBlockHeaderToNoir(privateCircuitPublicInputs.blockHeader), contract_deployment_data: mapContractDeploymentDataToNoir(privateCircuitPublicInputs.contractDeploymentData), chain_id: mapFieldToNoir(privateCircuitPublicInputs.chainId), version: mapFieldToNoir(privateCircuitPublicInputs.version), @@ -887,7 +894,7 
@@ export function mapCombinedAccumulatedDataToNoir( */ export function mapCombinedConstantDataFromNoir(combinedConstantData: CombinedConstantDataNoir): CombinedConstantData { return new CombinedConstantData( - mapHistoricalBlockDataFromNoir(combinedConstantData.block_data), + mapBlockHeaderFromNoir(combinedConstantData.block_header), mapTxContextFromNoir(combinedConstantData.tx_context), ); } @@ -899,7 +906,7 @@ export function mapCombinedConstantDataFromNoir(combinedConstantData: CombinedCo */ export function mapCombinedConstantDataToNoir(combinedConstantData: CombinedConstantData): CombinedConstantDataNoir { return { - block_data: mapHistoricalBlockDataToNoir(combinedConstantData.blockData), + block_header: mapBlockHeaderToNoir(combinedConstantData.blockHeader), tx_context: mapTxContextToNoir(combinedConstantData.txContext), }; } @@ -1067,9 +1074,7 @@ export function mapGlobalVariablesFromNoir(globalVariables: GlobalVariablesNoir) */ export function mapConstantRollupDataToNoir(constantRollupData: ConstantRollupData): ConstantRollupDataNoir { return { - start_historic_blocks_tree_roots_snapshot: mapAppendOnlyTreeSnapshotToNoir( - constantRollupData.startHistoricBlocksTreeRootsSnapshot, - ), + start_blocks_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(constantRollupData.startBlocksTreeSnapshot), private_kernel_vk_tree_root: mapFieldToNoir(constantRollupData.privateKernelVkTreeRoot), public_kernel_vk_tree_root: mapFieldToNoir(constantRollupData.publicKernelVkTreeRoot), base_rollup_vk_hash: mapFieldToNoir(constantRollupData.baseRollupVkHash), @@ -1103,7 +1108,7 @@ export function mapPublicCircuitPublicInputsToNoir( new_l2_to_l1_msgs: publicInputs.newL2ToL1Msgs.map(mapFieldToNoir) as FixedLengthArray, unencrypted_logs_hash: publicInputs.unencryptedLogsHash.map(mapFieldToNoir) as FixedLengthArray, unencrypted_log_preimages_length: mapFieldToNoir(publicInputs.unencryptedLogPreimagesLength), - historical_block_data: 
mapHistoricalBlockDataToNoir(publicInputs.historicBlockData), + block_header: mapBlockHeaderToNoir(publicInputs.blockHeader), prover_address: mapAztecAddressToNoir(publicInputs.proverAddress), }; @@ -1115,7 +1120,7 @@ export function mapPublicCircuitPublicInputsToNoir( */ export function mapConstantRollupDataFromNoir(constantRollupData: ConstantRollupDataNoir): ConstantRollupData { return new ConstantRollupData( - mapAppendOnlyTreeSnapshotFromNoir(constantRollupData.start_historic_blocks_tree_roots_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(constantRollupData.start_blocks_tree_snapshot), mapFieldFromNoir(constantRollupData.private_kernel_vk_tree_root), mapFieldFromNoir(constantRollupData.public_kernel_vk_tree_root), mapFieldFromNoir(constantRollupData.base_rollup_vk_hash), @@ -1283,12 +1288,11 @@ export function mapRootRollupInputsToNoir(rootRollupInputs: RootRollupInputs): R start_l1_to_l2_messages_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir( rootRollupInputs.startL1ToL2MessagesTreeSnapshot, ), - start_historic_blocks_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir( - rootRollupInputs.startHistoricBlocksTreeSnapshot, - ), - new_historic_blocks_tree_sibling_path: rootRollupInputs.newHistoricBlocksTreeSiblingPath.map( - mapFieldToNoir, - ) as FixedLengthArray, + start_blocks_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(rootRollupInputs.startBlocksTreeSnapshot), + new_blocks_tree_sibling_path: rootRollupInputs.newBlocksTreeSiblingPath.map(mapFieldToNoir) as FixedLengthArray< + NoirField, + 16 + >, }; } @@ -1311,20 +1315,20 @@ export function mapRootRollupPublicInputsFromNoir( mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_contract_tree_snapshot), mapFieldFromNoir(rootRollupPublicInputs.start_public_data_tree_root), mapFieldFromNoir(rootRollupPublicInputs.end_public_data_tree_root), - mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_tree_of_historic_note_hash_tree_roots_snapshot), - 
mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_tree_of_historic_note_hash_tree_roots_snapshot), - mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_tree_of_historic_contract_tree_roots_snapshot), - mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_tree_of_historic_contract_tree_roots_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_tree_of_historical_note_hash_tree_roots_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_tree_of_historical_note_hash_tree_roots_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_tree_of_historical_contract_tree_roots_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_tree_of_historical_contract_tree_roots_snapshot), mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_l1_to_l2_messages_tree_snapshot), mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_l1_to_l2_messages_tree_snapshot), mapAppendOnlyTreeSnapshotFromNoir( - rootRollupPublicInputs.start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot, + rootRollupPublicInputs.start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot, ), mapAppendOnlyTreeSnapshotFromNoir( - rootRollupPublicInputs.end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot, + rootRollupPublicInputs.end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot, ), - mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_historic_blocks_tree_snapshot), - mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_historic_blocks_tree_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.start_blocks_tree_snapshot), + mapAppendOnlyTreeSnapshotFromNoir(rootRollupPublicInputs.end_blocks_tree_snapshot), mapTupleFromNoir(rootRollupPublicInputs.calldata_hash, 2, mapFieldFromNoir), mapTupleFromNoir(rootRollupPublicInputs.l1_to_l2_messages_hash, 2, mapFieldFromNoir), ); @@ -1377,18 +1381,18 @@ export function 
mapNullifierMembershipWitnessToNoir( } /** - * Maps a membership witness of the historic blocks tree to noir. + * Maps a membership witness of the blocks tree to noir. * @param membershipWitness - The membership witness. * @returns The noir membership witness. */ -export function mapHistoricBlocksTreeRootMembershipWitnessToNoir( - membershipWitness: MembershipWitness, -): HistoricBlocksTreeRootMembershipWitnessNoir { +export function mapBlocksTreeRootMembershipWitnessToNoir( + membershipWitness: MembershipWitness, +): BlocksTreeRootMembershipWitnessNoir { return { leaf_index: membershipWitness.leafIndex.toString(), sibling_path: membershipWitness.siblingPath.map(mapFieldToNoir) as FixedLengthArray< NoirField, - typeof HISTORIC_BLOCKS_TREE_HEIGHT + typeof BLOCKS_TREE_HEIGHT >, }; } @@ -1405,7 +1409,12 @@ export function mapBaseRollupInputsToNoir(inputs: BaseRollupInputs): BaseRollupI start_nullifier_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(inputs.startNullifierTreeSnapshot), start_contract_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(inputs.startContractTreeSnapshot), start_public_data_tree_root: mapFieldToNoir(inputs.startPublicDataTreeRoot), - start_historic_blocks_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(inputs.startHistoricBlocksTreeSnapshot), + start_blocks_tree_snapshot: mapAppendOnlyTreeSnapshotToNoir(inputs.startBlocksTreeSnapshot), + sorted_new_nullifiers: inputs.sortedNewNullifiers.map(mapFieldToNoir) as FixedLengthArray, + sorted_new_nullifiers_indexes: inputs.sortednewNullifiersIndexes.map(mapNumberToNoir) as FixedLengthArray< + NoirField, + 128 + >, low_nullifier_leaf_preimages: inputs.lowNullifierLeafPreimages.map( mapNullifierLeafPreimageToNoir, ) as FixedLengthArray, @@ -1429,9 +1438,9 @@ export function mapBaseRollupInputsToNoir(inputs: BaseRollupInputs): BaseRollupI new_public_data_reads_sibling_paths: inputs.newPublicDataReadsSiblingPaths.map(siblingPath => siblingPath.map(mapFieldToNoir), ) as FixedLengthArray, 32>, - 
historic_blocks_tree_root_membership_witnesses: inputs.historicBlocksTreeRootMembershipWitnesses.map( - mapHistoricBlocksTreeRootMembershipWitnessToNoir, - ) as FixedLengthArray, + blocks_tree_root_membership_witnesses: inputs.blocksTreeRootMembershipWitnesses.map( + mapBlocksTreeRootMembershipWitnessToNoir, + ) as FixedLengthArray, constants: mapConstantRollupDataToNoir(inputs.constants), }; } diff --git a/yarn-project/noir-protocol-circuits/src/types/private_kernel_init_types.ts b/yarn-project/noir-protocol-circuits/src/types/private_kernel_init_types.ts index 4f00c7e1953..af47e9d082f 100644 --- a/yarn-project/noir-protocol-circuits/src/types/private_kernel_init_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/private_kernel_init_types.ts @@ -74,7 +74,7 @@ export interface Block { global_variables_hash: Field; } -export interface HistoricalBlockData { +export interface BlockHeader { blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -96,7 +96,7 @@ export interface PrivateCircuitPublicInputs { unencrypted_logs_hash: FixedLengthArray; encrypted_log_preimages_length: Field; unencrypted_log_preimages_length: Field; - historical_block_data: HistoricalBlockData; + block_header: BlockHeader; contract_deployment_data: ContractDeploymentData; chain_id: Field; version: Field; @@ -210,7 +210,7 @@ export interface CombinedAccumulatedData { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } diff --git a/yarn-project/noir-protocol-circuits/src/types/private_kernel_inner_types.ts b/yarn-project/noir-protocol-circuits/src/types/private_kernel_inner_types.ts index e59e29b2b37..935af55a122 100644 --- a/yarn-project/noir-protocol-circuits/src/types/private_kernel_inner_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/private_kernel_inner_types.ts @@ -96,7 +96,7 @@ export interface Block { global_variables_hash: Field; } -export interface 
HistoricalBlockData { +export interface BlockHeader { blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -125,7 +125,7 @@ export interface TxContext { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } @@ -173,7 +173,7 @@ export interface PrivateCircuitPublicInputs { unencrypted_logs_hash: FixedLengthArray; encrypted_log_preimages_length: Field; unencrypted_log_preimages_length: Field; - historical_block_data: HistoricalBlockData; + block_header: BlockHeader; contract_deployment_data: ContractDeploymentData; chain_id: Field; version: Field; diff --git a/yarn-project/noir-protocol-circuits/src/types/private_kernel_ordering_types.ts b/yarn-project/noir-protocol-circuits/src/types/private_kernel_ordering_types.ts index 67f146a9d92..a84711e7fea 100644 --- a/yarn-project/noir-protocol-circuits/src/types/private_kernel_ordering_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/private_kernel_ordering_types.ts @@ -96,7 +96,7 @@ export interface Block { global_variables_hash: Field; } -export interface HistoricalBlockData { +export interface BlockHeader { blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -125,7 +125,7 @@ export interface TxContext { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } diff --git a/yarn-project/noir-protocol-circuits/src/types/public_kernel_private_previous_types.ts b/yarn-project/noir-protocol-circuits/src/types/public_kernel_private_previous_types.ts index 20dccc4b25c..7690aca56f5 100644 --- a/yarn-project/noir-protocol-circuits/src/types/public_kernel_private_previous_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/public_kernel_private_previous_types.ts @@ -96,7 +96,7 @@ export interface Block { global_variables_hash: Field; } -export interface HistoricalBlockData { +export interface BlockHeader { 
blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -125,7 +125,7 @@ export interface TxContext { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } @@ -180,7 +180,7 @@ export interface PublicCircuitPublicInputs { new_l2_to_l1_msgs: FixedLengthArray; unencrypted_logs_hash: FixedLengthArray; unencrypted_log_preimages_length: Field; - historical_block_data: HistoricalBlockData; + block_header: BlockHeader; prover_address: Address; } diff --git a/yarn-project/noir-protocol-circuits/src/types/public_kernel_public_previous_types.ts b/yarn-project/noir-protocol-circuits/src/types/public_kernel_public_previous_types.ts index bb0596aff1c..739dce88379 100644 --- a/yarn-project/noir-protocol-circuits/src/types/public_kernel_public_previous_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/public_kernel_public_previous_types.ts @@ -96,7 +96,7 @@ export interface Block { global_variables_hash: Field; } -export interface HistoricalBlockData { +export interface BlockHeader { blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -125,7 +125,7 @@ export interface TxContext { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } @@ -180,7 +180,7 @@ export interface PublicCircuitPublicInputs { new_l2_to_l1_msgs: FixedLengthArray; unencrypted_logs_hash: FixedLengthArray; unencrypted_log_preimages_length: Field; - historical_block_data: HistoricalBlockData; + block_header: BlockHeader; prover_address: Address; } diff --git a/yarn-project/noir-protocol-circuits/src/types/rollup_base_types.ts b/yarn-project/noir-protocol-circuits/src/types/rollup_base_types.ts index a4ac38b38eb..1c880cb2cdf 100644 --- a/yarn-project/noir-protocol-circuits/src/types/rollup_base_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/rollup_base_types.ts @@ -96,7 +96,7 @@ export 
interface Block { global_variables_hash: Field; } -export interface HistoricalBlockData { +export interface BlockHeader { blocks_tree_root: Field; block: Block; private_kernel_vk_tree_root: Field; @@ -125,7 +125,7 @@ export interface TxContext { } export interface CombinedConstantData { - block_data: HistoricalBlockData; + block_header: BlockHeader; tx_context: TxContext; } @@ -163,7 +163,7 @@ export interface NullifierMembershipWitness { sibling_path: FixedLengthArray; } -export interface HistoricBlocksTreeRootMembershipWitness { +export interface BlocksTreeRootMembershipWitness { leaf_index: Field; sibling_path: FixedLengthArray; } @@ -176,7 +176,7 @@ export interface GlobalVariables { } export interface ConstantRollupData { - start_historic_blocks_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; private_kernel_vk_tree_root: Field; public_kernel_vk_tree_root: Field; base_rollup_vk_hash: Field; @@ -190,7 +190,9 @@ export interface BaseRollupInputs { start_nullifier_tree_snapshot: AppendOnlyTreeSnapshot; start_contract_tree_snapshot: AppendOnlyTreeSnapshot; start_public_data_tree_root: Field; - start_historic_blocks_tree_snapshot: AppendOnlyTreeSnapshot; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; + sorted_new_nullifiers: FixedLengthArray; + sorted_new_nullifiers_indexes: FixedLengthArray; low_nullifier_leaf_preimages: FixedLengthArray; low_nullifier_membership_witness: FixedLengthArray; new_commitments_subtree_sibling_path: FixedLengthArray; @@ -198,7 +200,7 @@ export interface BaseRollupInputs { new_contracts_subtree_sibling_path: FixedLengthArray; new_public_data_update_requests_sibling_paths: FixedLengthArray, 32>; new_public_data_reads_sibling_paths: FixedLengthArray, 32>; - historic_blocks_tree_root_membership_witnesses: FixedLengthArray; + blocks_tree_root_membership_witnesses: FixedLengthArray; constants: ConstantRollupData; } diff --git 
a/yarn-project/noir-protocol-circuits/src/types/rollup_merge_types.ts b/yarn-project/noir-protocol-circuits/src/types/rollup_merge_types.ts index bf438c2c3cd..233624ab34d 100644 --- a/yarn-project/noir-protocol-circuits/src/types/rollup_merge_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/rollup_merge_types.ts @@ -22,7 +22,7 @@ export interface GlobalVariables { } export interface ConstantRollupData { - start_historic_blocks_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; private_kernel_vk_tree_root: Field; public_kernel_vk_tree_root: Field; base_rollup_vk_hash: Field; diff --git a/yarn-project/noir-protocol-circuits/src/types/rollup_root_types.ts b/yarn-project/noir-protocol-circuits/src/types/rollup_root_types.ts index ee37cbf72ff..63c4b01df51 100644 --- a/yarn-project/noir-protocol-circuits/src/types/rollup_root_types.ts +++ b/yarn-project/noir-protocol-circuits/src/types/rollup_root_types.ts @@ -22,7 +22,7 @@ export interface GlobalVariables { } export interface ConstantRollupData { - start_historic_blocks_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; private_kernel_vk_tree_root: Field; public_kernel_vk_tree_root: Field; base_rollup_vk_hash: Field; @@ -68,8 +68,8 @@ export interface RootRollupInputs { new_l1_to_l2_messages: FixedLengthArray; new_l1_to_l2_messages_tree_root_sibling_path: FixedLengthArray; start_l1_to_l2_messages_tree_snapshot: AppendOnlyTreeSnapshot; - start_historic_blocks_tree_snapshot: AppendOnlyTreeSnapshot; - new_historic_blocks_tree_sibling_path: FixedLengthArray; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; + new_blocks_tree_sibling_path: FixedLengthArray; } export interface RootRollupPublicInputs { @@ -83,16 +83,16 @@ export interface RootRollupPublicInputs { end_contract_tree_snapshot: AppendOnlyTreeSnapshot; start_public_data_tree_root: Field; end_public_data_tree_root: Field; - 
start_tree_of_historic_note_hash_tree_roots_snapshot: AppendOnlyTreeSnapshot; - end_tree_of_historic_note_hash_tree_roots_snapshot: AppendOnlyTreeSnapshot; - start_tree_of_historic_contract_tree_roots_snapshot: AppendOnlyTreeSnapshot; - end_tree_of_historic_contract_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_tree_of_historical_note_hash_tree_roots_snapshot: AppendOnlyTreeSnapshot; + end_tree_of_historical_note_hash_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_tree_of_historical_contract_tree_roots_snapshot: AppendOnlyTreeSnapshot; + end_tree_of_historical_contract_tree_roots_snapshot: AppendOnlyTreeSnapshot; start_l1_to_l2_messages_tree_snapshot: AppendOnlyTreeSnapshot; end_l1_to_l2_messages_tree_snapshot: AppendOnlyTreeSnapshot; - start_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot: AppendOnlyTreeSnapshot; - end_tree_of_historic_l1_to_l2_messages_tree_roots_snapshot: AppendOnlyTreeSnapshot; - start_historic_blocks_tree_snapshot: AppendOnlyTreeSnapshot; - end_historic_blocks_tree_snapshot: AppendOnlyTreeSnapshot; + start_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot: AppendOnlyTreeSnapshot; + end_tree_of_historical_l1_to_l2_messages_tree_roots_snapshot: AppendOnlyTreeSnapshot; + start_blocks_tree_snapshot: AppendOnlyTreeSnapshot; + end_blocks_tree_snapshot: AppendOnlyTreeSnapshot; calldata_hash: FixedLengthArray; l1_to_l2_messages_hash: FixedLengthArray; } diff --git a/yarn-project/p2p-bootstrap/terraform/main.tf b/yarn-project/p2p-bootstrap/terraform/main.tf index a5b2f88ffb4..43925bd5b29 100644 --- a/yarn-project/p2p-bootstrap/terraform/main.tf +++ b/yarn-project/p2p-bootstrap/terraform/main.tf @@ -1,6 +1,6 @@ # Terraform to setup a prototype network of Aztec Boot Nodes in AWS # It sets up 2 boot nodes with different ports/keys etc. 
-# Some duplication across the 2 defined services, could possibly +# Some duplication across the 2 defined services, could possibly # be refactored to use modules as and when we build out infrastructure for real terraform { @@ -48,198 +48,21 @@ data "terraform_remote_state" "aztec-network_iac" { } } - -resource "aws_cloudwatch_log_group" "aztec-bootstrap-1-log-group" { - name = "/fargate/service/${var.DEPLOY_TAG}/aztec-bootstrap-1" - retention_in_days = 14 -} - -resource "aws_service_discovery_service" "aztec-bootstrap-1" { - name = "${var.DEPLOY_TAG}-aztec-bootstrap-1" - - health_check_custom_config { - failure_threshold = 1 - } - - dns_config { - namespace_id = data.terraform_remote_state.setup_iac.outputs.local_service_discovery_id - - dns_records { - ttl = 60 - type = "A" - } - - dns_records { - ttl = 60 - type = "SRV" - } - - routing_policy = "MULTIVALUE" - } - - # Terraform just fails if this resource changes and you have registered instances. - provisioner "local-exec" { - when = destroy - command = "${path.module}/../servicediscovery-drain.sh ${self.id}" - } -} - -resource "aws_ecs_task_definition" "aztec-bootstrap-1" { - family = "${var.DEPLOY_TAG}-aztec-bootstrap-1" - requires_compatibilities = ["FARGATE"] - network_mode = "awsvpc" - cpu = "2048" - memory = "4096" - execution_role_arn = data.terraform_remote_state.setup_iac.outputs.ecs_task_execution_role_arn - task_role_arn = data.terraform_remote_state.aztec2_iac.outputs.cloudwatch_logging_ecs_role_arn - - container_definitions = <" && exit 1 + +serviceId="--service-id=$1" + +echo "Draining servicediscovery instances from $1 ..." +ids="$(aws servicediscovery list-instances $serviceId --query 'Instances[].Id' --output text | tr '\t' ' ')" + +found= +for id in $ids; do + if [ -n "$id" ]; then + echo "Deregistering $1 / $id ..." + aws servicediscovery deregister-instance $serviceId --instance-id "$id" + found=1 + fi +done + +# Yes, I'm being lazy here... 
+[ -n "$found" ] && sleep 5 || true \ No newline at end of file diff --git a/yarn-project/p2p-bootstrap/terraform/variables.tf b/yarn-project/p2p-bootstrap/terraform/variables.tf index aafaf04abe0..aead56bdd84 100644 --- a/yarn-project/p2p-bootstrap/terraform/variables.tf +++ b/yarn-project/p2p-bootstrap/terraform/variables.tf @@ -2,30 +2,29 @@ variable "DEPLOY_TAG" { type = string } -variable "BOOTNODE_1_LISTEN_PORT" { - type = string +variable "BOOTNODE_LISTEN_PORT" { + type = string + default = 40500 } variable "BOOTNODE_1_PRIVATE_KEY" { type = string } -variable "BOOTNODE_2_LISTEN_PORT" { - type = string -} - variable "BOOTNODE_2_PRIVATE_KEY" { type = string } -variable "ECR_URL" { - type = string -} - variable "P2P_MIN_PEERS" { - type = string + type = string + default = 50 } variable "P2P_MAX_PEERS" { + type = string + default = 100 +} + +variable "DOCKERHUB_ACCOUNT" { type = string } diff --git a/yarn-project/p2p/src/client/p2p_client.ts b/yarn-project/p2p/src/client/p2p_client.ts index 7880bf2310b..d8baeb7ac8f 100644 --- a/yarn-project/p2p/src/client/p2p_client.ts +++ b/yarn-project/p2p/src/client/p2p_client.ts @@ -114,7 +114,7 @@ export class P2PClient implements P2P { /** * In-memory P2P client constructor. - * @param l2BlockSource - P2P client's source for fetching existing block data. + * @param l2BlockSource - P2P client's source for fetching existing blocks. * @param txPool - The client's instance of a transaction pool. Defaults to in-memory implementation. * @param p2pService - The concrete instance of p2p networking to use. * @param log - A logger. diff --git a/yarn-project/p2p/src/service/libp2p_service.ts b/yarn-project/p2p/src/service/libp2p_service.ts index 05f3a896023..e13c7e18eda 100644 --- a/yarn-project/p2p/src/service/libp2p_service.ts +++ b/yarn-project/p2p/src/service/libp2p_service.ts @@ -39,7 +39,7 @@ const INITIAL_PEER_REFRESH_INTERVAL = 20000; * @returns The peer ID. 
*/ export async function createLibP2PPeerId(privateKey?: string) { - if (!privateKey) { + if (!privateKey?.length) { return await createSecp256k1PeerId(); } const base64 = Buffer.from(privateKey, 'hex').toString('base64'); diff --git a/yarn-project/pxe/src/contract_tree/index.ts b/yarn-project/pxe/src/contract_tree/index.ts index 8726daf8295..2c3f9de54dc 100644 --- a/yarn-project/pxe/src/contract_tree/index.ts +++ b/yarn-project/pxe/src/contract_tree/index.ts @@ -146,12 +146,14 @@ export class ContractTree { * If the witness hasn't been previously computed, this function will request the contract node * to find the contract's index and path in order to create the membership witness. * + * @param blockNumber - The block number at which to get the data. + * * @returns A Promise that resolves to the MembershipWitness object for the given contract tree. */ - public async getContractMembershipWitness() { + public async getContractMembershipWitness(blockNumber: number | 'latest' = 'latest') { const index = await this.getContractIndex(); - const siblingPath = await this.stateInfoProvider.getContractSiblingPath(index); + const siblingPath = await this.stateInfoProvider.getContractSiblingPath(blockNumber, index); return new MembershipWitness( CONTRACT_TREE_HEIGHT, index, @@ -226,7 +228,7 @@ export class ContractTree { const root = await this.getFunctionTreeRoot(); const newContractData = new NewContractData(completeAddress.address, portalContract, root); const commitment = computeContractLeaf(newContractData); - this.contractIndex = await this.stateInfoProvider.findLeafIndex(MerkleTreeId.CONTRACT_TREE, commitment); + this.contractIndex = await this.stateInfoProvider.findLeafIndex('latest', MerkleTreeId.CONTRACT_TREE, commitment); if (this.contractIndex === undefined) { throw new Error( `Failed to find contract at ${completeAddress.address} with portal ${portalContract} resulting in commitment ${commitment}.`, diff --git a/yarn-project/pxe/src/database/database.ts 
b/yarn-project/pxe/src/database/database.ts index 906bc4bcc93..72f50a1c576 100644 --- a/yarn-project/pxe/src/database/database.ts +++ b/yarn-project/pxe/src/database/database.ts @@ -1,4 +1,4 @@ -import { CompleteAddress, HistoricBlockData, PublicKey } from '@aztec/circuits.js'; +import { BlockHeader, CompleteAddress, PublicKey } from '@aztec/circuits.js'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr } from '@aztec/foundation/fields'; import { ContractDatabase, MerkleTreeId, NoteFilter } from '@aztec/types'; @@ -91,24 +91,24 @@ export interface Database extends ContractDatabase { setTreeRoots(roots: Record): Promise; /** - * Retrieve the stored Historic Block Data from the database. - * The function returns a Promise that resolves to the Historic Block Data. + * Retrieve the stored Block Header from the database. + * The function returns a Promise that resolves to the Block Header. * This data is required to reproduce block attestations. - * Throws an error if the historic block data is not available within the database. + * Throws an error if the block header is not available within the database. * * note: this data is a combination of the tree roots and the global variables hash. */ - getHistoricBlockData(): HistoricBlockData; + getBlockHeader(): BlockHeader; /** - * Set the latest Historic Block Data. + * Set the latest Block Header. * This function updates the 'global variables hash' and `tree roots` property of the instance * Note that this will overwrite any existing hash or roots in the database. * - * @param historicBlockData - An object containing the most recent historic block data. + * @param blockHeader - An object containing the most recent block header. * @returns A Promise that resolves when the hash has been successfully updated in the database. */ - setHistoricBlockData(historicBlockData: HistoricBlockData): Promise; + setBlockHeader(blockHeader: BlockHeader): Promise; /** * Adds complete address to the database. 
diff --git a/yarn-project/pxe/src/database/memory_db.ts b/yarn-project/pxe/src/database/memory_db.ts index 88f98601c3b..b0562f0ceb6 100644 --- a/yarn-project/pxe/src/database/memory_db.ts +++ b/yarn-project/pxe/src/database/memory_db.ts @@ -1,4 +1,4 @@ -import { CompleteAddress, HistoricBlockData, PublicKey } from '@aztec/circuits.js'; +import { BlockHeader, CompleteAddress, PublicKey } from '@aztec/circuits.js'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; @@ -118,12 +118,12 @@ export class MemoryDB extends MemoryContractDatabase implements Database { return Promise.resolve(); } - public getHistoricBlockData(): HistoricBlockData { + public getBlockHeader(): BlockHeader { const roots = this.getTreeRoots(); if (!this.globalVariablesHash) { throw new Error(`Global variables hash not set in memory database`); } - return new HistoricBlockData( + return new BlockHeader( roots[MerkleTreeId.NOTE_HASH_TREE], roots[MerkleTreeId.NULLIFIER_TREE], roots[MerkleTreeId.CONTRACT_TREE], @@ -135,15 +135,15 @@ export class MemoryDB extends MemoryContractDatabase implements Database { ); } - public async setHistoricBlockData(historicBlockData: HistoricBlockData): Promise { - this.globalVariablesHash = historicBlockData.globalVariablesHash; + public async setBlockHeader(blockHeader: BlockHeader): Promise { + this.globalVariablesHash = blockHeader.globalVariablesHash; await this.setTreeRoots({ - [MerkleTreeId.NOTE_HASH_TREE]: historicBlockData.noteHashTreeRoot, - [MerkleTreeId.NULLIFIER_TREE]: historicBlockData.nullifierTreeRoot, - [MerkleTreeId.CONTRACT_TREE]: historicBlockData.contractTreeRoot, - [MerkleTreeId.L1_TO_L2_MESSAGES_TREE]: historicBlockData.l1ToL2MessagesTreeRoot, - [MerkleTreeId.BLOCKS_TREE]: historicBlockData.blocksTreeRoot, - [MerkleTreeId.PUBLIC_DATA_TREE]: historicBlockData.publicDataTreeRoot, + [MerkleTreeId.NOTE_HASH_TREE]: 
blockHeader.noteHashTreeRoot, + [MerkleTreeId.NULLIFIER_TREE]: blockHeader.nullifierTreeRoot, + [MerkleTreeId.CONTRACT_TREE]: blockHeader.contractTreeRoot, + [MerkleTreeId.L1_TO_L2_MESSAGES_TREE]: blockHeader.l1ToL2MessagesTreeRoot, + [MerkleTreeId.BLOCKS_TREE]: blockHeader.blocksTreeRoot, + [MerkleTreeId.PUBLIC_DATA_TREE]: blockHeader.publicDataTreeRoot, }); } diff --git a/yarn-project/pxe/src/kernel_oracle/index.ts b/yarn-project/pxe/src/kernel_oracle/index.ts index de5794a3712..79a32c21e9b 100644 --- a/yarn-project/pxe/src/kernel_oracle/index.ts +++ b/yarn-project/pxe/src/kernel_oracle/index.ts @@ -24,7 +24,7 @@ export class KernelOracle implements ProvingDataOracle { } async getNoteMembershipWitness(leafIndex: bigint): Promise> { - const path = await this.node.getNoteHashSiblingPath(leafIndex); + const path = await this.node.getNoteHashSiblingPath('latest', leafIndex); return new MembershipWitness( path.pathSize, leafIndex, diff --git a/yarn-project/pxe/src/note_processor/note_processor.ts b/yarn-project/pxe/src/note_processor/note_processor.ts index c3a57994f48..0f07830436f 100644 --- a/yarn-project/pxe/src/note_processor/note_processor.ts +++ b/yarn-project/pxe/src/note_processor/note_processor.ts @@ -16,7 +16,7 @@ import { getAcirSimulator } from '../simulator/index.js'; */ interface ProcessedData { /** - * Holds L2 block data and associated context. + * Holds L2 block and a cache of already requested tx hashes. */ blockContext: L2BlockContext; /** diff --git a/yarn-project/pxe/src/pxe_service/pxe_service.ts b/yarn-project/pxe/src/pxe_service/pxe_service.ts index fc0bb37c78e..37f4eb7dcb3 100644 --- a/yarn-project/pxe/src/pxe_service/pxe_service.ts +++ b/yarn-project/pxe/src/pxe_service/pxe_service.ts @@ -237,13 +237,13 @@ export class PXEService implements PXE { // TODO(https://github.com/AztecProtocol/aztec-packages/issues/1386) // This can always be `uniqueSiloedNoteHash` once notes added from public also include nonces. 
const noteHashToLookUp = nonce.isZero() ? siloedNoteHash : uniqueSiloedNoteHash; - const index = await this.node.findLeafIndex(MerkleTreeId.NOTE_HASH_TREE, noteHashToLookUp); + const index = await this.node.findLeafIndex('latest', MerkleTreeId.NOTE_HASH_TREE, noteHashToLookUp); if (index === undefined) { throw new Error('Note does not exist.'); } const siloedNullifier = siloNullifier(note.contractAddress, innerNullifier!); - const nullifierIndex = await this.node.findLeafIndex(MerkleTreeId.NULLIFIER_TREE, siloedNullifier); + const nullifierIndex = await this.node.findLeafIndex('latest', MerkleTreeId.NULLIFIER_TREE, siloedNullifier); if (nullifierIndex !== undefined) { throw new Error('The note has been destroyed.'); } @@ -449,10 +449,10 @@ export class PXEService implements PXE { /** * Retrieves the simulation parameters required to run an ACIR simulation. - * This includes the contract address, function artifact, portal contract address, and historic tree roots. + * This includes the contract address, function artifact, portal contract address, and historical tree roots. * * @param execRequest - The transaction request object containing details of the contract call. - * @returns An object containing the contract address, function artifact, portal contract address, and historic tree roots. + * @returns An object containing the contract address, function artifact, portal contract address, and historical tree roots. */ async #getSimulationParameters(execRequest: FunctionCall | TxExecutionRequest) { const contractAddress = (execRequest as FunctionCall).to ?? 
(execRequest as TxExecutionRequest).origin; diff --git a/yarn-project/pxe/src/simulator_oracle/index.ts b/yarn-project/pxe/src/simulator_oracle/index.ts index 42c3edd74c4..1bb9a289c74 100644 --- a/yarn-project/pxe/src/simulator_oracle/index.ts +++ b/yarn-project/pxe/src/simulator_oracle/index.ts @@ -1,12 +1,12 @@ import { DBOracle, FunctionArtifactWithDebugMetadata, MessageLoadOracleInputs } from '@aztec/acir-simulator'; import { AztecAddress, + BlockHeader, CompleteAddress, EthAddress, Fr, FunctionSelector, GrumpkinPrivateKey, - HistoricBlockData, PublicKey, } from '@aztec/circuits.js'; import { createDebugLogger } from '@aztec/foundation/log'; @@ -115,7 +115,7 @@ export class SimulatorOracle implements DBOracle { const messageAndIndex = await this.stateInfoProvider.getL1ToL2MessageAndIndex(msgKey); const message = messageAndIndex.message.toFieldArray(); const index = messageAndIndex.index; - const siblingPath = await this.stateInfoProvider.getL1ToL2MessageSiblingPath(index); + const siblingPath = await this.stateInfoProvider.getL1ToL2MessageSiblingPath('latest', index); return { message, siblingPath: siblingPath.toFieldArray(), @@ -129,32 +129,28 @@ export class SimulatorOracle implements DBOracle { * @returns - The index of the commitment. Undefined if it does not exist in the tree. 
*/ async getCommitmentIndex(commitment: Fr) { - return await this.stateInfoProvider.findLeafIndex(MerkleTreeId.NOTE_HASH_TREE, commitment); + return await this.stateInfoProvider.findLeafIndex('latest', MerkleTreeId.NOTE_HASH_TREE, commitment); } async getNullifierIndex(nullifier: Fr) { - return await this.stateInfoProvider.findLeafIndex(MerkleTreeId.NULLIFIER_TREE, nullifier); + return await this.stateInfoProvider.findLeafIndex('latest', MerkleTreeId.NULLIFIER_TREE, nullifier); } public async findLeafIndex(blockNumber: number, treeId: MerkleTreeId, leafValue: Fr): Promise { - this.log.warn('Block number ignored in SimulatorOracle.findLeafIndex because archival node is not yet implemented'); - return await this.stateInfoProvider.findLeafIndex(treeId, leafValue); + return await this.stateInfoProvider.findLeafIndex(blockNumber, treeId, leafValue); } public async getSiblingPath(blockNumber: number, treeId: MerkleTreeId, leafIndex: bigint): Promise { - this.log.warn( - 'Block number ignored in SimulatorOracle.getSiblingPath because archival node is not yet implemented', - ); // @todo Doing a nasty workaround here because of https://github.com/AztecProtocol/aztec-packages/issues/3414 switch (treeId) { case MerkleTreeId.NULLIFIER_TREE: - return (await this.stateInfoProvider.getNullifierTreeSiblingPath(leafIndex)).toFieldArray(); + return (await this.stateInfoProvider.getNullifierTreeSiblingPath(blockNumber, leafIndex)).toFieldArray(); case MerkleTreeId.NOTE_HASH_TREE: - return (await this.stateInfoProvider.getNoteHashSiblingPath(leafIndex)).toFieldArray(); + return (await this.stateInfoProvider.getNoteHashSiblingPath(blockNumber, leafIndex)).toFieldArray(); case MerkleTreeId.BLOCKS_TREE: - return (await this.stateInfoProvider.getHistoricBlocksTreeSiblingPath(leafIndex)).toFieldArray(); + return (await this.stateInfoProvider.getBlocksTreeSiblingPath(blockNumber, leafIndex)).toFieldArray(); case MerkleTreeId.PUBLIC_DATA_TREE: - return (await 
this.stateInfoProvider.getPublicDataTreeSiblingPath(leafIndex)).toFieldArray(); + return (await this.stateInfoProvider.getPublicDataTreeSiblingPath(blockNumber, leafIndex)).toFieldArray(); default: throw new Error('Not implemented'); } @@ -179,12 +175,12 @@ export class SimulatorOracle implements DBOracle { } /** - * Retrieve the databases view of the Historic Block Data object. - * This structure is fed into the circuits simulator and is used to prove against certain historic roots. + * Retrieve the databases view of the Block Header object. + * This structure is fed into the circuits simulator and is used to prove against certain historical roots. * - * @returns A Promise that resolves to a HistoricBlockData object. + * @returns A Promise that resolves to a BlockHeader object. */ - getHistoricBlockData(): Promise { - return Promise.resolve(this.db.getHistoricBlockData()); + getBlockHeader(): Promise { + return Promise.resolve(this.db.getBlockHeader()); } } diff --git a/yarn-project/pxe/src/synchronizer/synchronizer.test.ts b/yarn-project/pxe/src/synchronizer/synchronizer.test.ts index 2179fecc524..6ee3071712c 100644 --- a/yarn-project/pxe/src/synchronizer/synchronizer.test.ts +++ b/yarn-project/pxe/src/synchronizer/synchronizer.test.ts @@ -1,4 +1,4 @@ -import { CompleteAddress, Fr, GrumpkinScalar, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, CompleteAddress, Fr, GrumpkinScalar } from '@aztec/circuits.js'; import { Grumpkin } from '@aztec/circuits.js/barretenberg'; import { TestKeyStore } from '@aztec/key-store'; import { AztecNode, INITIAL_L2_BLOCK_NUM, L2Block, MerkleTreeId } from '@aztec/types'; @@ -14,17 +14,17 @@ describe('Synchronizer', () => { let database: Database; let synchronizer: TestSynchronizer; let roots: Record; - let blockData: HistoricBlockData; + let blockHeader: BlockHeader; beforeEach(() => { - blockData = HistoricBlockData.random(); + blockHeader = BlockHeader.random(); roots = { - [MerkleTreeId.CONTRACT_TREE]: 
blockData.contractTreeRoot, - [MerkleTreeId.NOTE_HASH_TREE]: blockData.noteHashTreeRoot, - [MerkleTreeId.NULLIFIER_TREE]: blockData.nullifierTreeRoot, - [MerkleTreeId.PUBLIC_DATA_TREE]: blockData.publicDataTreeRoot, - [MerkleTreeId.L1_TO_L2_MESSAGES_TREE]: blockData.l1ToL2MessagesTreeRoot, - [MerkleTreeId.BLOCKS_TREE]: blockData.blocksTreeRoot, + [MerkleTreeId.CONTRACT_TREE]: blockHeader.contractTreeRoot, + [MerkleTreeId.NOTE_HASH_TREE]: blockHeader.noteHashTreeRoot, + [MerkleTreeId.NULLIFIER_TREE]: blockHeader.nullifierTreeRoot, + [MerkleTreeId.PUBLIC_DATA_TREE]: blockHeader.publicDataTreeRoot, + [MerkleTreeId.L1_TO_L2_MESSAGES_TREE]: blockHeader.l1ToL2MessagesTreeRoot, + [MerkleTreeId.BLOCKS_TREE]: blockHeader.blocksTreeRoot, }; aztecNode = mock(); @@ -34,7 +34,7 @@ describe('Synchronizer', () => { it('sets tree roots from aztec node on initial sync', async () => { aztecNode.getBlockNumber.mockResolvedValue(3); - aztecNode.getHistoricBlockData.mockResolvedValue(blockData); + aztecNode.getBlockHeader.mockResolvedValue(blockHeader); await synchronizer.initialSync(); @@ -55,7 +55,7 @@ describe('Synchronizer', () => { it('overrides tree roots from initial sync once current block number is larger', async () => { // Initial sync is done on block with height 3 aztecNode.getBlockNumber.mockResolvedValue(3); - aztecNode.getHistoricBlockData.mockResolvedValue(blockData); + aztecNode.getBlockHeader.mockResolvedValue(blockHeader); await synchronizer.initialSync(); const roots0 = database.getTreeRoots(); diff --git a/yarn-project/pxe/src/synchronizer/synchronizer.ts b/yarn-project/pxe/src/synchronizer/synchronizer.ts index 524fddd2a3e..98dfd30c7e6 100644 --- a/yarn-project/pxe/src/synchronizer/synchronizer.ts +++ b/yarn-project/pxe/src/synchronizer/synchronizer.ts @@ -1,4 +1,4 @@ -import { AztecAddress, Fr, HistoricBlockData, PublicKey } from '@aztec/circuits.js'; +import { AztecAddress, BlockHeader, Fr, PublicKey } from '@aztec/circuits.js'; import { computeGlobalsHash } 
from '@aztec/circuits.js/abis'; import { DebugLogger, createDebugLogger } from '@aztec/foundation/log'; import { InterruptibleSleep } from '@aztec/foundation/sleep'; @@ -68,13 +68,10 @@ export class Synchronizer { } protected async initialSync() { - const [blockNumber, historicBlockData] = await Promise.all([ - this.node.getBlockNumber(), - this.node.getHistoricBlockData(), - ]); + const [blockNumber, blockHeader] = await Promise.all([this.node.getBlockNumber(), this.node.getBlockHeader()]); this.initialSyncBlockNumber = blockNumber; this.synchedToBlock = this.initialSyncBlockNumber; - await this.db.setHistoricBlockData(historicBlockData); + await this.db.setBlockHeader(blockHeader); } protected async work(limit = 1, retryInterval = 1000): Promise { @@ -204,18 +201,18 @@ export class Synchronizer { } const globalsHash = computeGlobalsHash(latestBlock.block.globalVariables); - const blockData = new HistoricBlockData( + const blockHeader = new BlockHeader( block.endNoteHashTreeSnapshot.root, block.endNullifierTreeSnapshot.root, block.endContractTreeSnapshot.root, block.endL1ToL2MessagesTreeSnapshot.root, - block.endHistoricBlocksTreeSnapshot.root, + block.endBlocksTreeSnapshot.root, Fr.ZERO, // todo: private kernel vk tree root block.endPublicDataTreeRoot, globalsHash, ); - await this.db.setHistoricBlockData(blockData); + await this.db.setBlockHeader(blockHeader); } /** diff --git a/yarn-project/sequencer-client/src/block_builder/solo_block_builder.test.ts b/yarn-project/sequencer-client/src/block_builder/solo_block_builder.test.ts index ea943c69120..808e02f6cae 100644 --- a/yarn-project/sequencer-client/src/block_builder/solo_block_builder.test.ts +++ b/yarn-project/sequencer-client/src/block_builder/solo_block_builder.test.ts @@ -54,10 +54,10 @@ import { EmptyRollupProver } from '../prover/empty.js'; import { RollupProver } from '../prover/index.js'; import { ProcessedTx, - makeEmptyProcessedTx as makeEmptyProcessedTxFromHistoricTreeRoots, + makeEmptyProcessedTx as 
makeEmptyProcessedTxFromHistoricalTreeRoots, makeProcessedTx, } from '../sequencer/processed_tx.js'; -import { getHistoricBlockData } from '../sequencer/utils.js'; +import { getBlockHeader } from '../sequencer/utils.js'; import { RollupSimulator } from '../simulator/index.js'; import { RealRollupCircuitSimulator } from '../simulator/rollup.js'; import { SoloBlockBuilder } from './solo_block_builder.js'; @@ -115,8 +115,8 @@ describe('sequencer/solo_block_builder', () => { }, 20_000); const makeEmptyProcessedTx = async () => { - const historicTreeRoots = await getHistoricBlockData(builderDb); - return makeEmptyProcessedTxFromHistoricTreeRoots(historicTreeRoots, chainId, version); + const historicalTreeRoots = await getBlockHeader(builderDb); + return makeEmptyProcessedTxFromHistoricalTreeRoots(historicalTreeRoots, chainId, version); }; // Updates the expectedDb trees based on the new commitments, contracts, and nullifiers from these txs @@ -143,7 +143,7 @@ describe('sequencer/solo_block_builder', () => { await expectsDb.appendLeaves(MerkleTreeId.L1_TO_L2_MESSAGES_TREE, asBuffer); }; - const updateHistoricBlocksTree = async () => { + const updateBlocksTree = async () => { const blockHash = computeBlockHashWithGlobals( globalVariables, rootRollupOutput.endNoteHashTreeSnapshot.root, @@ -162,7 +162,7 @@ describe('sequencer/solo_block_builder', () => { const buildMockSimulatorInputs = async () => { const kernelOutput = makePrivateKernelPublicInputsFinal(); - kernelOutput.constants.blockData = await getHistoricBlockData(expectsDb); + kernelOutput.constants.blockHeader = await getBlockHeader(expectsDb); const tx = await makeProcessedTx( new Tx( @@ -204,8 +204,8 @@ describe('sequencer/solo_block_builder', () => { // Calculate block hash rootRollupOutput.globalVariables = globalVariables; - await updateHistoricBlocksTree(); - rootRollupOutput.endHistoricBlocksTreeSnapshot = await getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); + await updateBlocksTree(); + 
rootRollupOutput.endBlocksTreeSnapshot = await getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); const txs = [...txsLeft, ...txsRight]; @@ -235,8 +235,8 @@ describe('sequencer/solo_block_builder', () => { endPublicDataTreeRoot: rootRollupOutput.endPublicDataTreeRoot, startL1ToL2MessagesTreeSnapshot: rootRollupOutput.startL1ToL2MessagesTreeSnapshot, endL1ToL2MessagesTreeSnapshot: rootRollupOutput.endL1ToL2MessagesTreeSnapshot, - startHistoricBlocksTreeSnapshot: rootRollupOutput.startHistoricBlocksTreeSnapshot, - endHistoricBlocksTreeSnapshot: rootRollupOutput.endHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot: rootRollupOutput.startBlocksTreeSnapshot, + endBlocksTreeSnapshot: rootRollupOutput.endBlocksTreeSnapshot, newCommitments, newNullifiers, newContracts, @@ -298,7 +298,7 @@ describe('sequencer/solo_block_builder', () => { const makeBloatedProcessedTx = async (seed = 0x1) => { const tx = mockTx(seed); const kernelOutput = KernelCircuitPublicInputs.empty(); - kernelOutput.constants.blockData = await getHistoricBlockData(builderDb); + kernelOutput.constants.blockHeader = await getBlockHeader(builderDb); kernelOutput.end.publicDataUpdateRequests = makeTuple( MAX_PUBLIC_DATA_UPDATE_REQUESTS_PER_TX, i => new PublicDataUpdateRequest(fr(i), fr(0), fr(i + 10)), diff --git a/yarn-project/sequencer-client/src/block_builder/solo_block_builder.ts b/yarn-project/sequencer-client/src/block_builder/solo_block_builder.ts index da6b621773c..06976b2f209 100644 --- a/yarn-project/sequencer-client/src/block_builder/solo_block_builder.ts +++ b/yarn-project/sequencer-client/src/block_builder/solo_block_builder.ts @@ -1,12 +1,12 @@ import { AppendOnlyTreeSnapshot, + BLOCKS_TREE_HEIGHT, BaseOrMergeRollupPublicInputs, BaseRollupInputs, CONTRACT_SUBTREE_HEIGHT, CONTRACT_SUBTREE_SIBLING_PATH_LENGTH, ConstantRollupData, GlobalVariables, - HISTORIC_BLOCKS_TREE_HEIGHT, L1_TO_L2_MSG_SUBTREE_HEIGHT, L1_TO_L2_MSG_SUBTREE_SIBLING_PATH_LENGTH, MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, @@ -103,7 +103,7 
@@ export class SoloBlockBuilder implements BlockBuilder { startContractTreeSnapshot, startPublicDataTreeSnapshot, startL1ToL2MessageTreeSnapshot, - startHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot, ] = await Promise.all( [ MerkleTreeId.NOTE_HASH_TREE, @@ -127,7 +127,7 @@ export class SoloBlockBuilder implements BlockBuilder { endContractTreeSnapshot, endPublicDataTreeRoot, endL1ToL2MessagesTreeSnapshot, - endHistoricBlocksTreeSnapshot, + endBlocksTreeSnapshot, } = circuitsOutput; // Collect all new nullifiers, commitments, and contracts from all txs in this block @@ -167,8 +167,8 @@ export class SoloBlockBuilder implements BlockBuilder { endPublicDataTreeRoot, startL1ToL2MessagesTreeSnapshot: startL1ToL2MessageTreeSnapshot, endL1ToL2MessagesTreeSnapshot, - startHistoricBlocksTreeSnapshot, - endHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot, + endBlocksTreeSnapshot, newCommitments, newNullifiers, newL2ToL1Msgs, @@ -193,14 +193,14 @@ export class SoloBlockBuilder implements BlockBuilder { protected validateTxs(txs: ProcessedTx[]) { for (const tx of txs) { - for (const historicTreeRoot of [ + for (const historicalTreeRoot of [ 'noteHashTreeRoot', 'contractTreeRoot', 'nullifierTreeRoot', 'l1ToL2MessagesTreeRoot', ] as const) { - if (tx.data.constants.blockData[historicTreeRoot].isZero()) { - throw new Error(`Empty ${historicTreeRoot} for tx: ${toFriendlyJSON(tx)}`); + if (tx.data.constants.blockHeader[historicalTreeRoot].isZero()) { + throw new Error(`Empty ${historicalTreeRoot} for tx: ${toFriendlyJSON(tx)}`); } } } @@ -313,15 +313,15 @@ export class SoloBlockBuilder implements BlockBuilder { this.debug(`Updating and validating root trees`); const globalVariablesHash = computeGlobalsHash(left[0].constants.globalVariables); await this.db.updateLatestGlobalVariablesHash(globalVariablesHash); - await this.db.updateHistoricBlocksTree(globalVariablesHash); + await this.db.updateBlocksTree(globalVariablesHash); await this.validateRootOutput(rootOutput); 
return [rootOutput, rootProof]; } - async updateHistoricBlocksTree(globalVariables: GlobalVariables) { - // Calculate the block hash and add it to the historic block hashes tree + async updateBlocksTree(globalVariables: GlobalVariables) { + // Calculate the block hash and add it to the historical block hashes tree const blockHash = await this.calculateBlockHash(globalVariables); await this.db.appendLeaves(MerkleTreeId.BLOCKS_TREE, [blockHash.toBuffer()]); } @@ -364,7 +364,7 @@ export class SoloBlockBuilder implements BlockBuilder { protected async validateRootOutput(rootOutput: RootRollupPublicInputs) { await Promise.all([ this.validateTrees(rootOutput), - this.validateTree(rootOutput, MerkleTreeId.BLOCKS_TREE, 'HistoricBlocks'), + this.validateTree(rootOutput, MerkleTreeId.BLOCKS_TREE, 'Blocks'), this.validateTree(rootOutput, MerkleTreeId.L1_TO_L2_MESSAGES_TREE, 'L1ToL2Messages'), ]); } @@ -376,7 +376,7 @@ export class SoloBlockBuilder implements BlockBuilder { name: 'Contract' | 'NoteHash' | 'L1ToL2Messages', ) { const localTree = await this.getTreeSnapshot(treeId); - const simulatedTree = rootOutput[`endTreeOfHistoric${name}TreeRootsSnapshot`]; + const simulatedTree = rootOutput[`endTreeOfHistorical${name}TreeRootsSnapshot`]; this.validateSimulatedTree(localTree, simulatedTree, name, `Roots ${name}`); } @@ -414,7 +414,7 @@ export class SoloBlockBuilder implements BlockBuilder { protected validateSimulatedTree( localTree: AppendOnlyTreeSnapshot, simulatedTree: AppendOnlyTreeSnapshot, - name: 'NoteHash' | 'Contract' | 'Nullifier' | 'L1ToL2Messages' | 'HistoricBlocks', + name: 'NoteHash' | 'Contract' | 'Nullifier' | 'L1ToL2Messages' | 'Blocks', label?: string, ) { if (!simulatedTree.root.toBuffer().equals(localTree.root.toBuffer())) { @@ -466,13 +466,13 @@ export class SoloBlockBuilder implements BlockBuilder { // Get tree snapshots const startL1ToL2MessagesTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGES_TREE); - // Get historic block tree 
roots - const startHistoricBlocksTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); - const newHistoricBlocksTreeSiblingPathArray = await getRootTreeSiblingPath(MerkleTreeId.BLOCKS_TREE); + // Get blocks tree + const startBlocksTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); + const newBlocksTreeSiblingPathArray = await getRootTreeSiblingPath(MerkleTreeId.BLOCKS_TREE); - const newHistoricBlocksTreeSiblingPath = makeTuple( - HISTORIC_BLOCKS_TREE_HEIGHT, - i => (i < newHistoricBlocksTreeSiblingPathArray.length ? newHistoricBlocksTreeSiblingPathArray[i] : Fr.ZERO), + const newBlocksTreeSiblingPath = makeTuple( + BLOCKS_TREE_HEIGHT, + i => (i < newBlocksTreeSiblingPathArray.length ? newBlocksTreeSiblingPathArray[i] : Fr.ZERO), 0, ); @@ -481,8 +481,8 @@ export class SoloBlockBuilder implements BlockBuilder { newL1ToL2Messages, newL1ToL2MessagesTreeRootSiblingPath, startL1ToL2MessagesTreeSnapshot, - startHistoricBlocksTreeSnapshot, - newHistoricBlocksTreeSiblingPath, + startBlocksTreeSnapshot, + newBlocksTreeSiblingPath, }); } @@ -539,19 +539,19 @@ export class SoloBlockBuilder implements BlockBuilder { return new MembershipWitness(height, index, assertLength(path.toFieldArray(), height)); } - protected getHistoricTreesMembershipWitnessFor(tx: ProcessedTx) { - const blockData = tx.data.constants.blockData; + protected getHistoricalTreesMembershipWitnessFor(tx: ProcessedTx) { + const blockHeader = tx.data.constants.blockHeader; const { noteHashTreeRoot, nullifierTreeRoot, contractTreeRoot, l1ToL2MessagesTreeRoot, publicDataTreeRoot } = - blockData; + blockHeader; const blockHash = computeBlockHash( - blockData.globalVariablesHash, + blockHeader.globalVariablesHash, noteHashTreeRoot, nullifierTreeRoot, contractTreeRoot, l1ToL2MessagesTreeRoot, publicDataTreeRoot, ); - return this.getMembershipWitnessFor(blockHash, MerkleTreeId.BLOCKS_TREE, HISTORIC_BLOCKS_TREE_HEIGHT); + return this.getMembershipWitnessFor(blockHash, 
MerkleTreeId.BLOCKS_TREE, BLOCKS_TREE_HEIGHT); } protected async getConstantRollupData(globalVariables: GlobalVariables): Promise { @@ -560,7 +560,7 @@ export class SoloBlockBuilder implements BlockBuilder { mergeRollupVkHash: DELETE_FR, privateKernelVkTreeRoot: FUTURE_FR, publicKernelVkTreeRoot: FUTURE_FR, - startHistoricBlocksTreeRootsSnapshot: await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE), + startBlocksTreeSnapshot: await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE), globalVariables, }); } @@ -651,7 +651,7 @@ export class SoloBlockBuilder implements BlockBuilder { const startContractTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.CONTRACT_TREE); const startNoteHashTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE); const startPublicDataTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE); - const startHistoricBlocksTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); + const startBlocksTreeSnapshot = await this.getTreeSnapshot(MerkleTreeId.BLOCKS_TREE); // Get the subtree sibling paths for the circuit const newCommitmentsSubtreeSiblingPathArray = await this.getSubtreeSiblingPath( @@ -708,7 +708,12 @@ export class SoloBlockBuilder implements BlockBuilder { // Update the nullifier tree, capturing the low nullifier info for each individual operation const newNullifiers = [...left.data.end.newNullifiers, ...right.data.end.newNullifiers]; - const [nullifierWitnessLeaves, newNullifiersSubtreeSiblingPath] = await this.db.batchInsert( + const { + lowLeavesWitnessData: nullifierWitnessLeaves, + newSubtreeSiblingPath: newNullifiersSubtreeSiblingPath, + sortedNewLeaves: sortedNewNullifiers, + sortedNewLeavesIndexes: sortednewNullifiersIndexes, + } = await this.db.batchInsert( MerkleTreeId.NULLIFIER_TREE, newNullifiers.map(fr => fr.toBuffer()), NULLIFIER_SUBTREE_HEIGHT, @@ -731,7 +736,9 @@ export class SoloBlockBuilder implements BlockBuilder { startContractTreeSnapshot, startNoteHashTreeSnapshot, 
startPublicDataTreeRoot: startPublicDataTreeSnapshot.root, - startHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot, + sortedNewNullifiers: makeTuple(MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, i => Fr.fromBuffer(sortedNewNullifiers[i])), + sortednewNullifiersIndexes: makeTuple(MAX_NEW_NULLIFIERS_PER_BASE_ROLLUP, i => sortednewNullifiersIndexes[i]), newCommitmentsSubtreeSiblingPath, newContractsSubtreeSiblingPath, newNullifiersSubtreeSiblingPath: makeTuple(NULLIFIER_SUBTREE_SIBLING_PATH_LENGTH, i => @@ -754,9 +761,9 @@ export class SoloBlockBuilder implements BlockBuilder { : this.makeEmptyMembershipWitness(NULLIFIER_TREE_HEIGHT), ), kernelData: [this.getKernelDataFor(left), this.getKernelDataFor(right)], - historicBlocksTreeRootMembershipWitnesses: [ - await this.getHistoricTreesMembershipWitnessFor(left), - await this.getHistoricTreesMembershipWitnessFor(right), + blocksTreeRootMembershipWitnesses: [ + await this.getHistoricalTreesMembershipWitnessFor(left), + await this.getHistoricalTreesMembershipWitnessFor(right), ], }); } diff --git a/yarn-project/sequencer-client/src/block_builder/types.ts b/yarn-project/sequencer-client/src/block_builder/types.ts index b909dbc1721..b39f0a90a17 100644 --- a/yarn-project/sequencer-client/src/block_builder/types.ts +++ b/yarn-project/sequencer-client/src/block_builder/types.ts @@ -5,7 +5,7 @@ import { AppendOnlyTreeSnapshot, BaseOrMergeRollupPublicInputs, RootRollupPublic */ export type AllowedTreeNames = T extends RootRollupPublicInputs - ? 'NoteHash' | 'Contract' | 'Nullifier' | 'L1ToL2Messages' | 'HistoricBlocks' + ? 
'NoteHash' | 'Contract' | 'Nullifier' | 'L1ToL2Messages' | 'Blocks' : 'NoteHash' | 'Contract' | 'Nullifier'; /** diff --git a/yarn-project/sequencer-client/src/sequencer/processed_tx.ts b/yarn-project/sequencer-client/src/sequencer/processed_tx.ts index 508981f981d..b3d098acb47 100644 --- a/yarn-project/sequencer-client/src/sequencer/processed_tx.ts +++ b/yarn-project/sequencer-client/src/sequencer/processed_tx.ts @@ -1,7 +1,7 @@ import { + BlockHeader, CombinedAccumulatedData, Fr, - HistoricBlockData, Proof, PublicKernelPublicInputs, makeEmptyProof, @@ -87,13 +87,9 @@ export async function makeProcessedTx( * Makes an empty tx from an empty kernel circuit public inputs. * @returns A processed empty tx. */ -export function makeEmptyProcessedTx( - historicTreeRoots: HistoricBlockData, - chainId: Fr, - version: Fr, -): Promise { +export function makeEmptyProcessedTx(historicalTreeRoots: BlockHeader, chainId: Fr, version: Fr): Promise { const emptyKernelOutput = PublicKernelPublicInputs.empty(); - emptyKernelOutput.constants.blockData = historicTreeRoots; + emptyKernelOutput.constants.blockHeader = historicalTreeRoots; emptyKernelOutput.constants.txContext.chainId = chainId; emptyKernelOutput.constants.txContext.version = version; const emptyProof = makeEmptyProof(); diff --git a/yarn-project/sequencer-client/src/sequencer/public_processor.test.ts b/yarn-project/sequencer-client/src/sequencer/public_processor.test.ts index 621018a0459..382730e4c36 100644 --- a/yarn-project/sequencer-client/src/sequencer/public_processor.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/public_processor.test.ts @@ -2,6 +2,7 @@ import { PublicExecution, PublicExecutionResult, PublicExecutor } from '@aztec/a import { ARGS_LENGTH, AztecAddress, + BlockHeader, CallContext, CallRequest, CombinedAccumulatedData, @@ -9,7 +10,6 @@ import { Fr, FunctionData, GlobalVariables, - HistoricBlockData, MAX_PRIVATE_CALL_STACK_LENGTH_PER_TX, MAX_PUBLIC_CALL_STACK_LENGTH_PER_TX, 
PUBLIC_DATA_TREE_HEIGHT, @@ -85,7 +85,7 @@ describe('public_processor', () => { publicKernel, publicProver, GlobalVariables.empty(), - HistoricBlockData.empty(), + BlockHeader.empty(), publicContractsDB, publicWorldStateDB, ); @@ -140,7 +140,7 @@ describe('public_processor', () => { publicKernel, publicProver, GlobalVariables.empty(), - HistoricBlockData.empty(), + BlockHeader.empty(), publicContractsDB, publicWorldStateDB, ); diff --git a/yarn-project/sequencer-client/src/sequencer/public_processor.ts b/yarn-project/sequencer-client/src/sequencer/public_processor.ts index e243f5c82e5..820da1cf0c6 100644 --- a/yarn-project/sequencer-client/src/sequencer/public_processor.ts +++ b/yarn-project/sequencer-client/src/sequencer/public_processor.ts @@ -9,13 +9,13 @@ import { } from '@aztec/acir-simulator'; import { AztecAddress, + BlockHeader, CallRequest, CombinedAccumulatedData, ContractStorageRead, ContractStorageUpdateRequest, Fr, GlobalVariables, - HistoricBlockData, KernelCircuitPublicInputs, MAX_NEW_COMMITMENTS_PER_CALL, MAX_NEW_L2_TO_L1_MSGS_PER_CALL, @@ -52,7 +52,7 @@ import { PublicKernelCircuitSimulator } from '../simulator/index.js'; import { ContractsDataSourcePublicDB, WorldStateDB, WorldStatePublicDB } from '../simulator/public_executor.js'; import { RealPublicKernelCircuitSimulator } from '../simulator/public_kernel.js'; import { FailedTx, ProcessedTx, makeEmptyProcessedTx, makeProcessedTx } from './processed_tx.js'; -import { getHistoricBlockData } from './utils.js'; +import { getBlockHeader } from './utils.js'; /** * Creates new instances of PublicProcessor given the provided merkle tree db and contract data source. 
@@ -75,18 +75,18 @@ export class PublicProcessorFactory { prevGlobalVariables: GlobalVariables, globalVariables: GlobalVariables, ): Promise { - const blockData = await getHistoricBlockData(this.merkleTree, prevGlobalVariables); + const blockHeader = await getBlockHeader(this.merkleTree, prevGlobalVariables); const publicContractsDB = new ContractsDataSourcePublicDB(this.contractDataSource); const worldStatePublicDB = new WorldStatePublicDB(this.merkleTree); const worldStateDB = new WorldStateDB(this.merkleTree, this.l1Tol2MessagesDataSource); - const publicExecutor = new PublicExecutor(worldStatePublicDB, publicContractsDB, worldStateDB, blockData); + const publicExecutor = new PublicExecutor(worldStatePublicDB, publicContractsDB, worldStateDB, blockHeader); return new PublicProcessor( this.merkleTree, publicExecutor, new RealPublicKernelCircuitSimulator(), new EmptyPublicProver(), globalVariables, - blockData, + blockHeader, publicContractsDB, worldStatePublicDB, ); @@ -104,7 +104,7 @@ export class PublicProcessor { protected publicKernel: PublicKernelCircuitSimulator, protected publicProver: PublicProver, protected globalVariables: GlobalVariables, - protected blockData: HistoricBlockData, + protected blockHeader: BlockHeader, protected publicContractsDB: ContractsDataSourcePublicDB, protected publicStateDB: PublicStateDB, @@ -152,7 +152,7 @@ export class PublicProcessor { */ public makeEmptyProcessedTx(): Promise { const { chainId, version } = this.globalVariables; - return makeEmptyProcessedTx(this.blockData, chainId, version); + return makeEmptyProcessedTx(this.blockHeader, chainId, version); } protected async processTx(tx: Tx): Promise { @@ -259,7 +259,7 @@ export class PublicProcessor { protected async getPublicCircuitPublicInputs(result: PublicExecutionResult) { const publicDataTreeInfo = await this.db.getTreeInfo(MerkleTreeId.PUBLIC_DATA_TREE); - this.blockData.publicDataTreeRoot = Fr.fromBuffer(publicDataTreeInfo.root); + 
this.blockHeader.publicDataTreeRoot = Fr.fromBuffer(publicDataTreeInfo.root); const callStackPreimages = await this.getPublicCallStackPreimages(result); const publicCallStackHashes = padArrayEnd( @@ -293,7 +293,7 @@ export class PublicProcessor { publicCallStackHashes, unencryptedLogsHash, unencryptedLogPreimagesLength, - historicBlockData: this.blockData, + blockHeader: this.blockHeader, }); } diff --git a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts index 792c6236ecd..694f4ee61b7 100644 --- a/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts +++ b/yarn-project/sequencer-client/src/sequencer/sequencer.test.ts @@ -1,7 +1,7 @@ import { + BlockHeader, Fr, GlobalVariables, - HistoricBlockData, NUMBER_OF_L1_L2_MESSAGES_PER_ROLLUP, makeEmptyProof, } from '@aztec/circuits.js'; @@ -57,7 +57,7 @@ describe('sequencer', () => { publicProcessor = mock({ process: async txs => [await Promise.all(txs.map(tx => makeProcessedTx(tx))), []], - makeEmptyProcessedTx: () => makeEmptyProcessedTx(HistoricBlockData.empty(), chainId, version), + makeEmptyProcessedTx: () => makeEmptyProcessedTx(BlockHeader.empty(), chainId, version), }); publicProcessorFactory = mock({ diff --git a/yarn-project/sequencer-client/src/sequencer/utils.ts b/yarn-project/sequencer-client/src/sequencer/utils.ts index b43747fc644..62cde96601b 100644 --- a/yarn-project/sequencer-client/src/sequencer/utils.ts +++ b/yarn-project/sequencer-client/src/sequencer/utils.ts @@ -1,18 +1,18 @@ -import { Fr, GlobalVariables, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, Fr, GlobalVariables } from '@aztec/circuits.js'; import { computeGlobalsHash } from '@aztec/circuits.js/abis'; import { MerkleTreeOperations } from '@aztec/world-state'; /** - * Fetches the private, nullifier, contract tree and l1 to l2 messages tree roots from a given db and assembles a CombinedHistoricTreeRoots object. 
+ * Fetches the private, nullifier, contract tree and l1 to l2 messages tree roots from a given db and assembles a CombinedHistoricalTreeRoots object. */ -export async function getHistoricBlockData( +export async function getBlockHeader( db: MerkleTreeOperations, prevBlockGlobalVariables: GlobalVariables = GlobalVariables.empty(), ) { const prevGlobalsHash = computeGlobalsHash(prevBlockGlobalVariables); const roots = await db.getTreeRoots(); - return new HistoricBlockData( + return new BlockHeader( Fr.fromBuffer(roots.noteHashTreeRoot), Fr.fromBuffer(roots.nullifierTreeRoot), Fr.fromBuffer(roots.contractDataTreeRoot), diff --git a/yarn-project/types/src/aztec_node/rpc/aztec_node_client.ts b/yarn-project/types/src/aztec_node/rpc/aztec_node_client.ts index 1c312302357..4f2f702c21c 100644 --- a/yarn-project/types/src/aztec_node/rpc/aztec_node_client.ts +++ b/yarn-project/types/src/aztec_node/rpc/aztec_node_client.ts @@ -1,4 +1,4 @@ -import { FunctionSelector, HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader, FunctionSelector } from '@aztec/circuits.js'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { EthAddress } from '@aztec/foundation/eth-address'; import { Fr } from '@aztec/foundation/fields'; @@ -35,7 +35,7 @@ export function createAztecNodeClient(url: string, fetch = defaultFetch): AztecN ContractData, Fr, FunctionSelector, - HistoricBlockData, + BlockHeader, L2Block, L2Tx, LogId, diff --git a/yarn-project/types/src/interfaces/aztec-node.ts b/yarn-project/types/src/interfaces/aztec-node.ts index e03966ec8ae..92463e96b0f 100644 --- a/yarn-project/types/src/interfaces/aztec-node.ts +++ b/yarn-project/types/src/interfaces/aztec-node.ts @@ -1,4 +1,4 @@ -import { HistoricBlockData } from '@aztec/circuits.js'; +import { BlockHeader } from '@aztec/circuits.js'; import { L1ContractAddresses } from '@aztec/ethereum'; import { AztecAddress } from '@aztec/foundation/aztec-address'; import { Fr } from '@aztec/foundation/fields'; 
@@ -139,10 +139,10 @@ export interface AztecNode extends StateInfoProvider { getTreeRoots(): Promise>; /** - * Returns the currently committed historic block data. - * @returns The current committed block data. + * Returns the currently committed block header. + * @returns The current committed block header. */ - getHistoricBlockData(): Promise; + getBlockHeader(): Promise; /** * Simulates the public part of a transaction with the current state. diff --git a/yarn-project/types/src/interfaces/state_provider.ts b/yarn-project/types/src/interfaces/state_provider.ts index 68d01812a7e..cec3f6fed55 100644 --- a/yarn-project/types/src/interfaces/state_provider.ts +++ b/yarn-project/types/src/interfaces/state_provider.ts @@ -1,7 +1,7 @@ import { + BLOCKS_TREE_HEIGHT, CONTRACT_TREE_HEIGHT, Fr, - HISTORIC_BLOCKS_TREE_HEIGHT, L1_TO_L2_MSG_TREE_HEIGHT, NOTE_HASH_TREE_HEIGHT, NULLIFIER_TREE_HEIGHT, @@ -14,41 +14,57 @@ import { MerkleTreeId } from '../merkle_tree_id.js'; import { SiblingPath } from '../sibling_path.js'; import { NullifierMembershipWitness } from './nullifier_witness.js'; +/** Helper type for a specific L2 block number or the latest block number */ +type BlockNumber = number | 'latest'; + /** * Interface providing methods for retrieving information about content of the state trees. */ export interface StateInfoProvider { /** * Find the index of the given leaf in the given tree. + * @param blockNumber - The block number at which to get the data or 'latest' for latest data * @param treeId - The tree to search in. * @param leafValue - The value to search for * @returns The index of the given leaf in the given tree or undefined if not found. */ - findLeafIndex(treeId: MerkleTreeId, leafValue: Fr): Promise; + findLeafIndex(blockNumber: BlockNumber, treeId: MerkleTreeId, leafValue: Fr): Promise; /** * Returns a sibling path for the given index in the contract tree. + * @param blockNumber - The block number at which to get the data. 
* @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. * TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getContractSiblingPath(leafIndex: bigint): Promise>; + getContractSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Returns a sibling path for the given index in the nullifier tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. * TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getNullifierTreeSiblingPath(leafIndex: bigint): Promise>; + getNullifierTreeSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Returns a sibling path for the given index in the note hash tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - The index of the leaf for which the sibling path is required. * @returns The sibling path for the leaf index. * TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getNoteHashSiblingPath(leafIndex: bigint): Promise>; + getNoteHashSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Gets a confirmed/consumed L1 to L2 message for the given message key (throws if not found). @@ -60,46 +76,64 @@ export interface StateInfoProvider { /** * Returns a sibling path for a leaf in the committed l1 to l2 data tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. * @returns The sibling path. 
* TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getL1ToL2MessageSiblingPath(leafIndex: bigint): Promise>; + getL1ToL2MessageSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Returns a sibling path for a leaf in the committed historic blocks tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. * @returns The sibling path. * TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getHistoricBlocksTreeSiblingPath(leafIndex: bigint): Promise>; + getBlocksTreeSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Returns a sibling path for a leaf in the committed public data tree. + * @param blockNumber - The block number at which to get the data. * @param leafIndex - Index of the leaf in the tree. * @returns The sibling path. * TODO: https://github.com/AztecProtocol/aztec-packages/issues/3414 */ - getPublicDataTreeSiblingPath(leafIndex: bigint): Promise>; + getPublicDataTreeSiblingPath( + blockNumber: BlockNumber, + leafIndex: bigint, + ): Promise>; /** * Returns a nullifier membership witness for a given nullifier at a given block. - * @param blockNumber - The block number at which to get the index. + * @param blockNumber - The block number at which to get the data. * @param nullifier - Nullifier we try to find witness for. * @returns The nullifier membership witness (if found). */ - getNullifierMembershipWitness(blockNumber: number, nullifier: Fr): Promise; + getNullifierMembershipWitness( + blockNumber: BlockNumber, + nullifier: Fr, + ): Promise; /** * Returns a low nullifier membership witness for a given nullifier at a given block. - * @param blockNumber - The block number at which to get the index. + * @param blockNumber - The block number at which to get the data. * @param nullifier - Nullifier we try to find the low nullifier witness for. 
* @returns The low nullifier membership witness (if found). * @remarks Low nullifier witness can be used to perform a nullifier non-inclusion proof by leveraging the "linked * list structure" of leaves and proving that a lower nullifier is pointing to a bigger next value than the nullifier * we are trying to prove non-inclusion for. */ - getLowNullifierMembershipWitness(blockNumber: number, nullifier: Fr): Promise; + getLowNullifierMembershipWitness( + blockNumber: BlockNumber, + nullifier: Fr, + ): Promise; /** * Get a block specified by its number. diff --git a/yarn-project/types/src/l2_block.ts b/yarn-project/types/src/l2_block.ts index 1ddd1347ede..67832ef2da7 100644 --- a/yarn-project/types/src/l2_block.ts +++ b/yarn-project/types/src/l2_block.ts @@ -86,9 +86,9 @@ export class L2Block { */ public startL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot, /** - * The tree snapshot of the historic blocks tree at the start of the rollup. + * The tree snapshot of the blocks tree at the start of the rollup. */ - public startHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot = AppendOnlyTreeSnapshot.empty(), + public startBlocksTreeSnapshot: AppendOnlyTreeSnapshot = AppendOnlyTreeSnapshot.empty(), /** * The tree snapshot of the note hash tree at the end of the rollup. */ @@ -110,9 +110,9 @@ export class L2Block { */ public endL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot, /** - * The tree snapshot of the historic blocks tree at the end of the rollup. + * The tree snapshot of the blocks tree at the end of the rollup. */ - public endHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot, + public endBlocksTreeSnapshot: AppendOnlyTreeSnapshot, /** * The commitments to be inserted into the note hash tree. 
*/ @@ -216,13 +216,13 @@ export class L2Block { startContractTreeSnapshot: makeAppendOnlyTreeSnapshot(0), startPublicDataTreeRoot: Fr.random(), startL1ToL2MessagesTreeSnapshot: makeAppendOnlyTreeSnapshot(0), - startHistoricBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot(0), + startBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot(0), endNoteHashTreeSnapshot: makeAppendOnlyTreeSnapshot(newCommitments.length), endNullifierTreeSnapshot: makeAppendOnlyTreeSnapshot(newNullifiers.length), endContractTreeSnapshot: makeAppendOnlyTreeSnapshot(newContracts.length), endPublicDataTreeRoot: Fr.random(), endL1ToL2MessagesTreeSnapshot: makeAppendOnlyTreeSnapshot(1), - endHistoricBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot(1), + endBlocksTreeSnapshot: makeAppendOnlyTreeSnapshot(1), newCommitments, newNullifiers, newContracts, @@ -277,9 +277,9 @@ export class L2Block { */ startL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot; /** - * The tree snapshot of the historic blocks tree at the start of the rollup. + * The tree snapshot of the blocks tree at the start of the rollup. */ - startHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot; + startBlocksTreeSnapshot: AppendOnlyTreeSnapshot; /** * The tree snapshot of the note hash tree at the end of the rollup. */ @@ -301,9 +301,9 @@ export class L2Block { */ endL1ToL2MessagesTreeSnapshot: AppendOnlyTreeSnapshot; /** - * The tree snapshot of the historic blocks tree at the end of the rollup. + * The tree snapshot of the blocks tree at the end of the rollup. */ - endHistoricBlocksTreeSnapshot: AppendOnlyTreeSnapshot; + endBlocksTreeSnapshot: AppendOnlyTreeSnapshot; /** * The commitments to be inserted into the note hash tree. 
*/ @@ -352,13 +352,13 @@ export class L2Block { fields.startContractTreeSnapshot, fields.startPublicDataTreeRoot, fields.startL1ToL2MessagesTreeSnapshot, - fields.startHistoricBlocksTreeSnapshot, + fields.startBlocksTreeSnapshot, fields.endNoteHashTreeSnapshot, fields.endNullifierTreeSnapshot, fields.endContractTreeSnapshot, fields.endPublicDataTreeRoot, fields.endL1ToL2MessagesTreeSnapshot, - fields.endHistoricBlocksTreeSnapshot, + fields.endBlocksTreeSnapshot, fields.newCommitments, fields.newNullifiers, fields.newPublicDataWrites, @@ -387,13 +387,13 @@ export class L2Block { this.startContractTreeSnapshot, this.startPublicDataTreeRoot, this.startL1ToL2MessagesTreeSnapshot, - this.startHistoricBlocksTreeSnapshot, + this.startBlocksTreeSnapshot, this.endNoteHashTreeSnapshot, this.endNullifierTreeSnapshot, this.endContractTreeSnapshot, this.endPublicDataTreeRoot, this.endL1ToL2MessagesTreeSnapshot, - this.endHistoricBlocksTreeSnapshot, + this.endBlocksTreeSnapshot, this.newCommitments.length, this.newCommitments, this.newNullifiers.length, @@ -449,13 +449,13 @@ export class L2Block { const startContractTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); const startPublicDataTreeRoot = reader.readObject(Fr); const startL1ToL2MessagesTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); - const startHistoricBlocksTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); + const startBlocksTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); const endNoteHashTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); const endNullifierTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); const endContractTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); const endPublicDataTreeRoot = reader.readObject(Fr); const endL1ToL2MessagesTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); - const endHistoricBlocksTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); + const endBlocksTreeSnapshot = reader.readObject(AppendOnlyTreeSnapshot); 
const newCommitments = reader.readVector(Fr); const newNullifiers = reader.readVector(Fr); const newPublicDataWrites = reader.readVector(PublicDataWrite); @@ -473,13 +473,13 @@ export class L2Block { startContractTreeSnapshot, startPublicDataTreeRoot, startL1ToL2MessagesTreeSnapshot: startL1ToL2MessagesTreeSnapshot, - startHistoricBlocksTreeSnapshot, + startBlocksTreeSnapshot, endNoteHashTreeSnapshot, endNullifierTreeSnapshot, endContractTreeSnapshot, endPublicDataTreeRoot, endL1ToL2MessagesTreeSnapshot, - endHistoricBlocksTreeSnapshot, + endBlocksTreeSnapshot, newCommitments, newNullifiers, newPublicDataWrites, @@ -589,13 +589,13 @@ export class L2Block { this.startContractTreeSnapshot, this.startPublicDataTreeRoot, this.startL1ToL2MessagesTreeSnapshot, - this.startHistoricBlocksTreeSnapshot, + this.startBlocksTreeSnapshot, this.endNoteHashTreeSnapshot, this.endNullifierTreeSnapshot, this.endContractTreeSnapshot, this.endPublicDataTreeRoot, this.endL1ToL2MessagesTreeSnapshot, - this.endHistoricBlocksTreeSnapshot, + this.endBlocksTreeSnapshot, this.getCalldataHash(), this.getL1ToL2MessagesHash(), ); @@ -615,7 +615,7 @@ export class L2Block { this.startContractTreeSnapshot, this.startPublicDataTreeRoot, this.startL1ToL2MessagesTreeSnapshot, - this.startHistoricBlocksTreeSnapshot, + this.startBlocksTreeSnapshot, ); return sha256(inputValue); } @@ -632,7 +632,7 @@ export class L2Block { this.endContractTreeSnapshot, this.endPublicDataTreeRoot, this.endL1ToL2MessagesTreeSnapshot, - this.endHistoricBlocksTreeSnapshot, + this.endBlocksTreeSnapshot, ); return sha256(inputValue); } @@ -843,14 +843,14 @@ export class L2Block { `startContractTreeSnapshot: ${inspectTreeSnapshot(this.startContractTreeSnapshot)}`, `startPublicDataTreeRoot: ${this.startPublicDataTreeRoot.toString()}`, `startL1ToL2MessagesTreeSnapshot: ${inspectTreeSnapshot(this.startL1ToL2MessagesTreeSnapshot)}`, - `startHistoricBlocksTreeSnapshot: ${inspectTreeSnapshot(this.startHistoricBlocksTreeSnapshot)}`, + 
`startBlocksTreeSnapshot: ${inspectTreeSnapshot(this.startBlocksTreeSnapshot)}`, `endNoteHashTreeSnapshot: ${inspectTreeSnapshot(this.endNoteHashTreeSnapshot)}`, `endNullifierTreeSnapshot: ${inspectTreeSnapshot(this.endNullifierTreeSnapshot)}`, `endContractTreeSnapshot: ${inspectTreeSnapshot(this.endContractTreeSnapshot)}`, `endPublicDataTreeRoot: ${this.endPublicDataTreeRoot.toString()}`, `endPublicDataTreeRoot: ${this.endPublicDataTreeRoot.toString()}`, `endL1ToL2MessagesTreeSnapshot: ${inspectTreeSnapshot(this.endL1ToL2MessagesTreeSnapshot)}`, - `endHistoricBlocksTreeSnapshot: ${inspectTreeSnapshot(this.endHistoricBlocksTreeSnapshot)}`, + `endBlocksTreeSnapshot: ${inspectTreeSnapshot(this.endBlocksTreeSnapshot)}`, `newCommitments: ${inspectFrArray(this.newCommitments)}`, `newNullifiers: ${inspectFrArray(this.newNullifiers)}`, `newPublicDataWrite: ${inspectPublicDataWriteArray(this.newPublicDataWrites)}`, diff --git a/yarn-project/types/src/tx/tx.ts b/yarn-project/types/src/tx/tx.ts index 6d49a50912a..381d0a7f782 100644 --- a/yarn-project/types/src/tx/tx.ts +++ b/yarn-project/types/src/tx/tx.ts @@ -45,6 +45,7 @@ export class Tx { /** * Contracts deployed in this tx. * Note: Portal address is always set to zero in the tx's new contracts. + * TODO(#3417): Check if portal addresses are still always set to zero */ public readonly newContracts: Tuple, ) { diff --git a/yarn-project/types/src/tx_execution_request.ts b/yarn-project/types/src/tx_execution_request.ts index b944b423af6..7cfbd9aab78 100644 --- a/yarn-project/types/src/tx_execution_request.ts +++ b/yarn-project/types/src/tx_execution_request.ts @@ -15,6 +15,7 @@ export class TxExecutionRequest { public origin: AztecAddress, /** * Function data representing the function to call. + * TODO(#3417): Remove this field and replace with a function selector. 
*/ public functionData: FunctionData, /** diff --git a/yarn-project/world-state/README.md b/yarn-project/world-state/README.md index 42f095d9dbb..a14b1fa02b6 100644 --- a/yarn-project/world-state/README.md +++ b/yarn-project/world-state/README.md @@ -11,9 +11,9 @@ As of the time of writing the collection consisted of the following trees. #### Standard 'Append Only' trees - The Contract Tree. Every contract created within the system has a 'Function Tree', a tree of leaves generated from the functions on the contract. The root of the function tree is inserted as a leaf in the contracts tree. -- The Contract Tree Roots Tree. A tree whose leaves are the historic roots of the contract tree. +- The Contract Tree Roots Tree. A tree whose leaves are the historical roots of the contract tree. - The Note Hash Tree. A tree whose leaves are the note hashes of notes generated by the private contract function calls within the system. -- The Note Hash Tree Roots Tree. A tree whose leaves are the historic roots of the note hash tree. +- The Note Hash Tree Roots Tree. A tree whose leaves are the historical roots of the note hash tree. 
#### Indexed trees diff --git a/yarn-project/world-state/src/merkle-tree/merkle_tree_operations_facade.ts b/yarn-project/world-state/src/merkle-tree/merkle_tree_operations_facade.ts index 5917a863694..29ba293736d 100644 --- a/yarn-project/world-state/src/merkle-tree/merkle_tree_operations_facade.ts +++ b/yarn-project/world-state/src/merkle-tree/merkle_tree_operations_facade.ts @@ -1,5 +1,5 @@ import { Fr } from '@aztec/foundation/fields'; -import { LowLeafWitnessData } from '@aztec/merkle-tree'; +import { BatchInsertionResult } from '@aztec/merkle-tree'; import { L2Block, LeafData, MerkleTreeId, SiblingPath } from '@aztec/types'; import { CurrentTreeRoots, HandleL2BlockResult, MerkleTreeDb, MerkleTreeOperations, TreeInfo } from '../index.js'; @@ -120,8 +120,8 @@ export class MerkleTreeOperationsFacade implements MerkleTreeOperations { * @param globalVariablesHash - The hash of the current global variables to include in the block hash. * @returns Empty promise. */ - public updateHistoricBlocksTree(globalVariablesHash: Fr): Promise { - return this.trees.updateHistoricBlocksTree(globalVariablesHash, this.includeUncommitted); + public updateBlocksTree(globalVariablesHash: Fr): Promise { + return this.trees.updateBlocksTree(globalVariablesHash, this.includeUncommitted); } /** @@ -171,11 +171,11 @@ export class MerkleTreeOperationsFacade implements MerkleTreeOperations { * @param subtreeHeight - Height of the subtree. * @returns The data for the leaves to be updated when inserting the new ones. 
*/ - public batchInsert( + public batchInsert( treeId: MerkleTreeId, leaves: Buffer[], subtreeHeight: number, - ): Promise<[LowLeafWitnessData[], SiblingPath] | [undefined, SiblingPath]> { + ): Promise> { return this.trees.batchInsert(treeId, leaves, subtreeHeight); } } diff --git a/yarn-project/world-state/src/merkle-tree/merkle_tree_snapshot_operations_facade.ts b/yarn-project/world-state/src/merkle-tree/merkle_tree_snapshot_operations_facade.ts new file mode 100644 index 00000000000..93c94d19163 --- /dev/null +++ b/yarn-project/world-state/src/merkle-tree/merkle_tree_snapshot_operations_facade.ts @@ -0,0 +1,143 @@ +import { Fr } from '@aztec/circuits.js'; +import { BatchInsertionResult, IndexedTreeSnapshot, TreeSnapshot } from '@aztec/merkle-tree'; +import { LeafData, MerkleTreeId, SiblingPath } from '@aztec/types'; + +import { CurrentTreeRoots, HandleL2BlockResult, MerkleTreeDb, MerkleTreeOperations, TreeInfo } from '../index.js'; + +/** + * Merkle tree operations on readonly tree snapshots. 
+ */ +export class MerkleTreeSnapshotOperationsFacade implements MerkleTreeOperations { + #treesDb: MerkleTreeDb; + #blockNumber: number; + #treeSnapshots: ReadonlyArray = []; + + constructor(trees: MerkleTreeDb, blockNumber: number) { + this.#treesDb = trees; + this.#blockNumber = blockNumber; + } + + async #getTreeSnapshot(merkleTreeId: number): Promise { + if (this.#treeSnapshots[merkleTreeId]) { + return this.#treeSnapshots[merkleTreeId]; + } + + this.#treeSnapshots = await this.#treesDb.getSnapshot(this.#blockNumber); + return this.#treeSnapshots[merkleTreeId]!; + } + + async findLeafIndex(treeId: MerkleTreeId, value: Buffer): Promise { + const tree = await this.#getTreeSnapshot(treeId); + const numLeaves = tree.getNumLeaves(); + for (let i = 0n; i < numLeaves; i++) { + const currentValue = await tree.getLeafValue(i); + if (currentValue && currentValue.equals(value)) { + return i; + } + } + return undefined; + } + + getLatestGlobalVariablesHash(): Promise { + return Promise.reject(new Error('not implemented')); + } + + async getLeafData(treeId: MerkleTreeId.NULLIFIER_TREE, index: number): Promise { + const snapshot = (await this.#getTreeSnapshot(treeId)) as IndexedTreeSnapshot; + return snapshot.getLatestLeafDataCopy(BigInt(index)); + } + + async getLeafValue(treeId: MerkleTreeId, index: bigint): Promise { + const snapshot = await this.#getTreeSnapshot(treeId); + return snapshot.getLeafValue(BigInt(index)); + } + + getPreviousValueIndex( + _treeId: MerkleTreeId.NULLIFIER_TREE, + _value: bigint, + ): Promise<{ + /** + * The index of the found leaf. + */ + index: number; + /** + * A flag indicating if the corresponding leaf's value is equal to `newValue`. 
+ */ + alreadyPresent: boolean; + }> { + return Promise.reject(new Error('not implemented')); + } + + async getSiblingPath(treeId: MerkleTreeId, index: bigint): Promise> { + const snapshot = await this.#getTreeSnapshot(treeId); + return snapshot.getSiblingPath(index); + } + + async getTreeInfo(treeId: MerkleTreeId): Promise { + const snapshot = await this.#getTreeSnapshot(treeId); + return { + depth: snapshot.getDepth(), + root: snapshot.getRoot(), + size: snapshot.getNumLeaves(), + treeId, + }; + } + + async getTreeRoots(): Promise { + const snapshots = await Promise.all([ + this.#getTreeSnapshot(MerkleTreeId.CONTRACT_TREE), + this.#getTreeSnapshot(MerkleTreeId.NULLIFIER_TREE), + this.#getTreeSnapshot(MerkleTreeId.NOTE_HASH_TREE), + this.#getTreeSnapshot(MerkleTreeId.PUBLIC_DATA_TREE), + this.#getTreeSnapshot(MerkleTreeId.L1_TO_L2_MESSAGES_TREE), + this.#getTreeSnapshot(MerkleTreeId.BLOCKS_TREE), + ]); + + return { + blocksTreeRoot: snapshots[MerkleTreeId.BLOCKS_TREE].getRoot(), + contractDataTreeRoot: snapshots[MerkleTreeId.CONTRACT_TREE].getRoot(), + l1Tol2MessagesTreeRoot: snapshots[MerkleTreeId.L1_TO_L2_MESSAGES_TREE].getRoot(), + noteHashTreeRoot: snapshots[MerkleTreeId.NOTE_HASH_TREE].getRoot(), + nullifierTreeRoot: snapshots[MerkleTreeId.NULLIFIER_TREE].getRoot(), + publicDataTreeRoot: snapshots[MerkleTreeId.PUBLIC_DATA_TREE].getRoot(), + }; + } + + appendLeaves(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + batchInsert(): Promise< + BatchInsertionResult + > { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + updateBlocksTree(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + commit(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + handleL2Block(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + rollback(): Promise { + return 
Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + updateHistoricBlocksTree(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + updateLatestGlobalVariablesHash(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } + + updateLeaf(): Promise { + return Promise.reject(new Error('Tree snapshot operations are read-only')); + } +} diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts index 4fe162c12b0..d5e3798a7c6 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.test.ts @@ -72,13 +72,13 @@ const getMockBlock = (blockNumber: number, newContractsCommitments?: Buffer[]) = startContractTreeSnapshot: getMockTreeSnapshot(), startPublicDataTreeRoot: Fr.random(), startL1ToL2MessagesTreeSnapshot: getMockTreeSnapshot(), - startHistoricBlocksTreeSnapshot: getMockTreeSnapshot(), + startBlocksTreeSnapshot: getMockTreeSnapshot(), endNoteHashTreeSnapshot: getMockTreeSnapshot(), endNullifierTreeSnapshot: getMockTreeSnapshot(), endContractTreeSnapshot: getMockTreeSnapshot(), endPublicDataTreeRoot: Fr.random(), endL1ToL2MessagesTreeSnapshot: getMockTreeSnapshot(), - endHistoricBlocksTreeSnapshot: getMockTreeSnapshot(), + endBlocksTreeSnapshot: getMockTreeSnapshot(), newCommitments: times(MAX_NEW_COMMITMENTS_PER_TX, Fr.random), newNullifiers: times(MAX_NEW_NULLIFIERS_PER_TX, Fr.random), newContracts: newContractsCommitments?.map(x => Fr.fromBuffer(x)) ?? 
[Fr.random()], diff --git a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts index 896b85a55de..8c07bed87c4 100644 --- a/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/server_world_state_synchronizer.ts @@ -8,6 +8,7 @@ import { LevelUp } from 'levelup'; import { HandleL2BlockResult, MerkleTreeOperations, MerkleTrees } from '../index.js'; import { MerkleTreeOperationsFacade } from '../merkle-tree/merkle_tree_operations_facade.js'; +import { MerkleTreeSnapshotOperationsFacade } from '../merkle-tree/merkle_tree_snapshot_operations_facade.js'; import { WorldStateConfig } from './config.js'; import { WorldStateRunningState, WorldStateStatus, WorldStateSynchronizer } from './world_state_synchronizer.js'; @@ -52,6 +53,10 @@ export class ServerWorldStateSynchronizer implements WorldStateSynchronizer { return new MerkleTreeOperationsFacade(this.merkleTreeDb, false); } + public getSnapshot(blockNumber: number): MerkleTreeOperations { + return new MerkleTreeSnapshotOperationsFacade(this.merkleTreeDb, blockNumber); + } + public static async new( db: LevelUp, merkleTreeDb: MerkleTrees, @@ -130,16 +135,16 @@ export class ServerWorldStateSynchronizer implements WorldStateSynchronizer { /** * Forces an immediate sync * @param minBlockNumber - The minimum block number that we must sync to - * @returns A promise that resolves once the sync has completed. + * @returns A promise that resolves with the block number the world state was synced to */ - public async syncImmediate(minBlockNumber?: number): Promise { + public async syncImmediate(minBlockNumber?: number): Promise { if (this.currentState !== WorldStateRunningState.RUNNING) { throw new Error(`World State is not running, unable to perform sync`); } // If we have been given a block number to sync to and we have reached that number // then return. 
if (minBlockNumber !== undefined && minBlockNumber <= this.currentL2BlockNum) { - return; + return this.currentL2BlockNum; } const blockToSyncTo = minBlockNumber === undefined ? 'latest' : `${minBlockNumber}`; this.log(`World State at block ${this.currentL2BlockNum}, told to sync to block ${blockToSyncTo}...`); @@ -148,7 +153,7 @@ export class ServerWorldStateSynchronizer implements WorldStateSynchronizer { while (true) { // Check the block number again if (minBlockNumber !== undefined && minBlockNumber <= this.currentL2BlockNum) { - return; + return this.currentL2BlockNum; } // Poll for more blocks const numBlocks = await this.l2BlockDownloader.pollImmediate(); @@ -164,7 +169,7 @@ export class ServerWorldStateSynchronizer implements WorldStateSynchronizer { `Unable to sync to block number ${minBlockNumber}, currently synced to block ${this.currentL2BlockNum}`, ); } - return; + return this.currentL2BlockNum; } } diff --git a/yarn-project/world-state/src/synchronizer/world_state_synchronizer.ts b/yarn-project/world-state/src/synchronizer/world_state_synchronizer.ts index 39e75cb91cb..96e6885101a 100644 --- a/yarn-project/world-state/src/synchronizer/world_state_synchronizer.ts +++ b/yarn-project/world-state/src/synchronizer/world_state_synchronizer.ts @@ -48,9 +48,9 @@ export interface WorldStateSynchronizer { /** * Forces an immediate sync to an optionally provided minimum block number * @param minBlockNumber - The minimum block number that we must sync to - * @returns A promise that resolves once the sync has completed. + * @returns A promise that resolves with the block number the world state was synced to */ - syncImmediate(minBlockNumber?: number): Promise; + syncImmediate(minBlockNumber?: number): Promise; /** * Returns an instance of MerkleTreeOperations that will include uncommitted data. @@ -63,4 +63,11 @@ export interface WorldStateSynchronizer { * @returns An instance of MerkleTreeOperations that will not include uncommitted data. 
*/ getCommitted(): MerkleTreeOperations; + + /** + * Returns a readonly instance of MerkleTreeOperations where the state is as it was at the given block number + * @param block - The block number to look at + * @returns An instance of MerkleTreeOperations + */ + getSnapshot(block: number): MerkleTreeOperations; } diff --git a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts index 04f4d749fee..13c6617513d 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_tree_db.ts @@ -1,7 +1,7 @@ import { MAX_NEW_NULLIFIERS_PER_TX } from '@aztec/circuits.js'; import { Fr } from '@aztec/foundation/fields'; import { createDebugLogger } from '@aztec/foundation/log'; -import { LowLeafWitnessData } from '@aztec/merkle-tree'; +import { BatchInsertionResult, IndexedTreeSnapshot, TreeSnapshot } from '@aztec/merkle-tree'; import { L2Block, LeafData, MerkleTreeId, SiblingPath } from '@aztec/types'; /** @@ -91,7 +91,13 @@ export type MerkleTreeDb = { [Property in keyof MerkleTreeOperations as Exclude]: WithIncludeUncommitted< MerkleTreeOperations[Property] >; -} & Pick; +} & Pick & { + /** + * Returns a snapshot of the current state of the trees. + * @param block - The block number to take the snapshot at. + */ + getSnapshot(block: number): Promise>; + }; /** * Defines the interface for operations on a set of Merkle Trees. @@ -175,7 +181,7 @@ export interface MerkleTreeOperations { * This includes all of the current roots of all of the data trees and the current blocks global vars. * @param globalVariablesHash - The global variables hash to insert into the block hash. 
*/ - updateHistoricBlocksTree(globalVariablesHash: Fr): Promise; + updateBlocksTree(globalVariablesHash: Fr): Promise; /** * Updates the latest global variables hash @@ -195,11 +201,11 @@ export interface MerkleTreeOperations { * @param subtreeHeight - Height of the subtree. * @returns The witness data for the leaves to be updated when inserting the new ones. */ - batchInsert( + batchInsert( treeId: MerkleTreeId, leaves: Buffer[], subtreeHeight: number, - ): Promise<[LowLeafWitnessData[], SiblingPath] | [undefined, SiblingPath]>; + ): Promise>; /** * Handles a single L2 block (i.e. Inserts the new commitments into the merkle tree). diff --git a/yarn-project/world-state/src/world-state-db/merkle_trees.ts b/yarn-project/world-state/src/world-state-db/merkle_trees.ts index cdf1aa5c9bd..311c071d8b1 100644 --- a/yarn-project/world-state/src/world-state-db/merkle_trees.ts +++ b/yarn-project/world-state/src/world-state-db/merkle_trees.ts @@ -1,8 +1,8 @@ import { + BLOCKS_TREE_HEIGHT, CONTRACT_TREE_HEIGHT, Fr, GlobalVariables, - HISTORIC_BLOCKS_TREE_HEIGHT, L1_TO_L2_MSG_TREE_HEIGHT, NOTE_HASH_TREE_HEIGHT, NULLIFIER_SUBTREE_HEIGHT, @@ -15,8 +15,8 @@ import { SerialQueue } from '@aztec/foundation/fifo'; import { createDebugLogger } from '@aztec/foundation/log'; import { AppendOnlyTree, + BatchInsertionResult, IndexedTree, - LowLeafWitnessData, Pedersen, SparseTree, StandardIndexedTree, @@ -110,14 +110,14 @@ export class MerkleTrees implements MerkleTreeDb { `${MerkleTreeId[MerkleTreeId.L1_TO_L2_MESSAGES_TREE]}`, L1_TO_L2_MSG_TREE_HEIGHT, ); - const historicBlocksTree: AppendOnlyTree = await initializeTree( + const blocksTree: AppendOnlyTree = await initializeTree( StandardTree, this.db, hasher, `${MerkleTreeId[MerkleTreeId.BLOCKS_TREE]}`, - HISTORIC_BLOCKS_TREE_HEIGHT, + BLOCKS_TREE_HEIGHT, ); - this.trees = [contractTree, nullifierTree, noteHashTree, publicDataTree, l1Tol2MessagesTree, historicBlocksTree]; + this.trees = [contractTree, nullifierTree, noteHashTree, 
publicDataTree, l1Tol2MessagesTree, blocksTree]; this.jobQueue.start(); @@ -125,7 +125,7 @@ export class MerkleTrees implements MerkleTreeDb { if (!fromDb) { const initialGlobalVariablesHash = computeGlobalsHash(GlobalVariables.empty()); await this._updateLatestGlobalVariablesHash(initialGlobalVariablesHash); - await this._updateHistoricBlocksTree(initialGlobalVariablesHash, true); + await this._updateBlocksTree(initialGlobalVariablesHash, true); await this._commit(); } else { await this._updateLatestGlobalVariablesHash(fromDbOptions.globalVariablesHash); @@ -177,8 +177,8 @@ export class MerkleTrees implements MerkleTreeDb { * @param globalsHash - The current global variables hash. * @param includeUncommitted - Indicates whether to include uncommitted data. */ - public async updateHistoricBlocksTree(globalsHash: Fr, includeUncommitted: boolean) { - await this.synchronize(() => this._updateHistoricBlocksTree(globalsHash, includeUncommitted)); + public async updateBlocksTree(globalsHash: Fr, includeUncommitted: boolean) { + await this.synchronize(() => this._updateBlocksTree(globalsHash, includeUncommitted)); } /** @@ -401,10 +401,7 @@ export class MerkleTrees implements MerkleTreeDb { treeId: MerkleTreeId, leaves: Buffer[], subtreeHeight: SubtreeHeight, - ): Promise< - | [LowLeafWitnessData[], SiblingPath] - | [undefined, SiblingPath] - > { + ): Promise> { const tree = this.trees[treeId] as StandardIndexedTree; if (!('batchInsert' in tree)) { throw new Error('Tree does not support `batchInsert` method'); @@ -430,7 +427,7 @@ export class MerkleTrees implements MerkleTreeDb { return Promise.resolve(this.latestGlobalVariablesHash.get(includeUncommitted)); } - private async _updateHistoricBlocksTree(globalsHash: Fr, includeUncommitted: boolean) { + private async _updateBlocksTree(globalsHash: Fr, includeUncommitted: boolean) { const blockHash = await this._getCurrentBlockHash(globalsHash, includeUncommitted); await this._appendLeaves(MerkleTreeId.BLOCKS_TREE, 
[blockHash.toBuffer()]); } @@ -524,6 +521,16 @@ export class MerkleTrees implements MerkleTreeDb { this.latestGlobalVariablesHash.rollback(); } + public getSnapshot(blockNumber: number) { + return Promise.all(this.trees.map(tree => tree.getSnapshot(blockNumber))); + } + + private async _snapshot(blockNumber: number): Promise { + for (const tree of this.trees) { + await tree.snapshot(blockNumber); + } + } + /** * Handles a single L2 block (i.e. Inserts the new commitments into the merkle tree). * @param l2Block - The L2 block to handle. @@ -535,7 +542,7 @@ export class MerkleTrees implements MerkleTreeDb { [l2Block.endNoteHashTreeSnapshot.root, MerkleTreeId.NOTE_HASH_TREE], [l2Block.endPublicDataTreeRoot, MerkleTreeId.PUBLIC_DATA_TREE], [l2Block.endL1ToL2MessagesTreeSnapshot.root, MerkleTreeId.L1_TO_L2_MESSAGES_TREE], - [l2Block.endHistoricBlocksTreeSnapshot.root, MerkleTreeId.BLOCKS_TREE], + [l2Block.endBlocksTreeSnapshot.root, MerkleTreeId.BLOCKS_TREE], ] as const; const compareRoot = (root: Fr, treeId: MerkleTreeId) => { const treeRoot = this.trees[treeId].getRoot(true); @@ -576,7 +583,7 @@ export class MerkleTrees implements MerkleTreeDb { await this._updateLeaf(MerkleTreeId.PUBLIC_DATA_TREE, newValue.toBuffer(), leafIndex.value); } - // Sync and add the block to the historic blocks tree + // Sync and add the block to the blocks tree const globalVariablesHash = computeGlobalsHash(l2Block.globalVariables); await this._updateLatestGlobalVariablesHash(globalVariablesHash); this.log(`Synced global variables with hash ${globalVariablesHash}`); @@ -602,6 +609,8 @@ export class MerkleTrees implements MerkleTreeDb { } } + await this._snapshot(l2Block.number); + return { isBlockOurs: ourBlock }; } } diff --git a/yellow-paper/Dockerfile b/yellow-paper/Dockerfile new file mode 100644 index 00000000000..1d9939128e9 --- /dev/null +++ b/yellow-paper/Dockerfile @@ -0,0 +1,4 @@ +FROM node:18-alpine +WORKDIR /usr/src +COPY . . 
+RUN yarn && yarn build --no-minify \ No newline at end of file diff --git a/yellow-paper/docs/calls/enqueued-calls.md b/yellow-paper/docs/calls/enqueued-calls.md new file mode 100644 index 00000000000..069a0c0c302 --- /dev/null +++ b/yellow-paper/docs/calls/enqueued-calls.md @@ -0,0 +1,10 @@ +--- +sidebar_position: 2 +--- +# Enqueued calls + +Calls from private functions to public functions are asynchronous. Since private and public functions are executed in different domains at different times and in different contexts, as the former are run by the user on a PXE and the latter by the sequencer, it is not possible for a private function to call a public one and await its result. Instead, private functions can _enqueue_ public function calls. + +The process is analogous to [synchronous calls](./sync-calls.md), but rely on an `enqueuePublicFunctionCall` oracle call that accepts the same arguments. The returned object by the enqueue call is a `PublicCallStackItem` with a flag `is_execution_request` set and empty side effects, to reflect that the stack item has not been executed yet. As with synchronous calls, the caller is responsible for validating the function and arguments in the call stack item, and to push its hash to its public call stack, which represents the list of enqueued public function calls. + +As the transaction is received by the sequencer, the public kernel circuit begins processing the enqueued public function calls from the transaction public call stack, pushing new recursive calls as needed, until the public call stack is empty, as described in the [synchronous calls](./sync-calls.md) section. 
\ No newline at end of file diff --git a/yellow-paper/docs/calls/images/calls/pub_pvt_messaging.png b/yellow-paper/docs/calls/images/calls/pub_pvt_messaging.png new file mode 100644 index 00000000000..37254ecd22d Binary files /dev/null and b/yellow-paper/docs/calls/images/calls/pub_pvt_messaging.png differ diff --git a/yellow-paper/docs/calls/images/calls/pvt_pub_ordering.png b/yellow-paper/docs/calls/images/calls/pvt_pub_ordering.png new file mode 100644 index 00000000000..2a5b3d8e32f Binary files /dev/null and b/yellow-paper/docs/calls/images/calls/pvt_pub_ordering.png differ diff --git a/yellow-paper/docs/calls/index.md b/yellow-paper/docs/calls/index.md new file mode 100644 index 00000000000..fb6ad10355c --- /dev/null +++ b/yellow-paper/docs/calls/index.md @@ -0,0 +1,13 @@ +--- +title: Calls +--- + +# Calls + +Functions in the Aztec Network can call other functions. These calls are [synchronous](./sync-calls.md) when they occur within private functions or within public functions, but are [enqueued](./enqueued-calls.md) when done from a private to a public function. The protocol also supports alternate call methods, such as static calls. + +In addition to function calls, the protocol allows for communication via message-passing back-and-forth between L1 and L2, as well as from public to private functions. + +import DocCardList from '@theme/DocCardList'; + + diff --git a/yellow-paper/docs/calls/public_private_messaging.md b/yellow-paper/docs/calls/public_private_messaging.md new file mode 100644 index 00000000000..c418429f2e4 --- /dev/null +++ b/yellow-paper/docs/calls/public_private_messaging.md @@ -0,0 +1,52 @@ +--- +sidebar_position: 5 +--- + +# Inter-Layer Calls + +## Public-Private messaging + +:::info Disclaimer +This is a draft. These requirements need to be considered by the wider team, and might change significantly before a mainnet release.
+::: + +Private functions work by providing evidence of correct execution generated locally through kernel proofs. Public functions, on the other hand, are able to utilize the latest state to manage updates and perform alterations. As such, public state and private state are in different trees. In a private function you cannot reference or modify public state and vice versa. + +Yet, it should be possible for: +1. private functions to call private or public functions +2. public functions to call private or public functions + +For private execution, the user executed methods locally and presents evidence of correct execution as part of their transaction in the form of a kernel proof (generated locally on user device ahead of time). This way, the builder doesn't need to have knowledge of everything happening in the transaction, only the results. However, public functions are executed at the "tip" of the chain (i.e. make use of the latest updates), they can only be done by a builder who is aware of all the changes. Therefore a public function can't be executed locally by the user in the same way a private function is, as it would lead to race conditions, if the user doesn't keep track of the latest updates of the chain. If we were to build this public proof on the latest state, we would encounter problems. How can two different users build proofs at the same time, given that they will be executed one after the other by the sequencer? The simple answer is that they cannot, as race conditions would arise where one of the proofs would be invalidated by the other due to a change in the state root (which would nullify Merkle paths). + +As a result, private functions are always executed first, as they are executed on a state $S_i$, where $i \le n$, with $S_n$ representing the current state where the public functions always operate on the current state $S_n$. + +This enables private functions to enqueue calls to public functions. But vice-versa is not true. 
Since private functions execute first, they cannot "wait" on the results of any of their calls to public functions. Stated differently, any calls made across domains are unilateral in nature. + +The figure below shows the order of function calls on the left-hand side, while the right-hand side shows how the functions will be executed. Notably, the second private function call is independent of the output of the public function and merely occurs after its execution. + +![Public - Private Ordering](./images/calls/pvt_pub_ordering.png) + +## Private to Public Messaging +If a private function in an Aztec smart contract wants to call a public function, it gets pushed into a separate public call stack that is enqueued. The private kernel circuit which must prove the execution of the private function(s), then hashes each of the items in the call stack and returns that. The private kernel proof, the public inputs of the private kernel (which contain the hash of each of the public call stack items) and other transaction data (like enqueued public function calls, new commitments, nullifiers etc) get passed along to the sequencer. The sequencer then picks up the public call stack item and executes each of the functions. The Public VM which executes the methods then verifies that the hash provided by the private kernel matches the current call stack item. + +This way, you can destroy your private state and create them in public within the same transaction or indirectly assert constraints on the execution of the private functions with latest data. + +### Handling Privacy Leakage and `msg.sender` +In the above design, the sequencer only sees the public part of the call stack along with any new commitments, nullifiers etc that were created in the private transaction i.e. it should learn nothing more of the private transaction (such as its origin, execution logic etc).
+ +But what if the enqueued public function makes use of `msg_sender`, which would ordinarily identify the private caller? Revealing it would leak private information, so it must be masked. + +Specifically, when the call stack is passed to the kernel circuit, the kernel should assert the `msg_sender` is 0 and hash appropriately. `msg_sender` could be the contract address too instead of `0`, but it leaks which contract is calling the public method and therefore leaks which contract the user was interacting with in private land. + +### Reverts + +If the private part of the transaction reverts, then public calls are never enqueued. But if the public part of the transaction reverts, it should still revert the entire transaction i.e. the sequencer should drop the execution results of the private part of the transaction and not include those in the state transitioner smart contract. However, since the sequencer had to execute your transaction, an appropriate fee will be charged. Reverting in public, causing the whole transaction to be dropped, enables existing paradigms of Ethereum where your valid transaction can revert because of altered state e.g., trade incurring too much slippage. + +## Public to Private Messaging +Since public functions execute after private functions, it isn't possible for public to call a private function in the same transaction. Nevertheless, it is quite useful for public functions to have a message passing system to private. A public function could add messages to an append only merkle tree to save messages from a public function call, that can later be executed by a private function. Note, only a transaction coming after the one including the message from a public function can consume it. In practice this means that unless you are the sequencer it will not be within the same rollup. + +To elaborate, a public function may not have read access to encrypted private state in the note hash tree, but it can write to it.
You could create a note in the public domain, compute its note hash which gets passed to the inputs of the public VM which adds the hash to the note hash tree. The user who wants to redeem the note can add the note preimage to their PXE and then redeem/nullify the note in the private domain at a later time. + +In the picture below, it is worth noting that all data reads performed by private functions are historical in nature, and that private functions are not capable of modifying public storage. Conversely, public functions have the capacity to manipulate private storage (e.g., inserting new commitments, potentially as part of transferring funds from the public domain to the private domain). + +![Public - Private Messaging](./images/calls/pub_pvt_messaging.png) diff --git a/yellow-paper/docs/calls/static-calls.md b/yellow-paper/docs/calls/static-calls.md new file mode 100644 index 00000000000..6daff979909 --- /dev/null +++ b/yellow-paper/docs/calls/static-calls.md @@ -0,0 +1,20 @@ +--- +sidebar_position: 3 +--- +# Static calls + +[Synchronous calls](./sync-calls.md), both private and public, can be executed as _static_ calls. This means that the called function, and all nested calls within, cannot emit any modifying side effects, such as creating or consuming notes, writing to storage, or emitting events. The purpose of a static call is to query another contract while ensuring that the call will not modify state. Static calls are based on [EIP214](https://eips.ethereum.org/EIPS/eip-214). + +In particular, the following fields of the returned `CallStackItem` must be zero or empty in a static call: +- `new_commitments` +- `new_nullifiers` +- `nullified_commitments` +- `new_l2_to_l1_msgs` +- `encrypted_logs_hash` +- `unencrypted_logs_hash` +- `encrypted_log_preimages_length` +- `unencrypted_log_preimages_length` + +At the protocol level, a static call is identified by an `is_static_call` flag in the `CircuitPublicInputs` of the `CallStackItem`.
The kernel is responsible for asserting that the call and all nested calls do not emit any forbidden side effects. + +At the contract level, a caller can initiate a static call via a `staticCallPrivateFunction` or `staticCallPublicFunction` oracle call. The caller is responsible for asserting that the returned `CallStackItem` has the `is_static_call` flag correctly set. diff --git a/yellow-paper/docs/calls/sync-calls.md b/yellow-paper/docs/calls/sync-calls.md new file mode 100644 index 00000000000..7c0ecbdfc06 --- /dev/null +++ b/yellow-paper/docs/calls/sync-calls.md @@ -0,0 +1,35 @@ +--- +sidebar_position: 1 +--- +# Synchronous calls + +Calls from a private function to another private function, as well as calls from a public function to another public function, are *synchronous*. When a synchronous function call is found during execution, execution jumps to the target of the call, and returns to the caller with a return value from the function called. This allows easy composability across contracts. + +At the protocol level, each call is represented as a `CallStackItem`, which includes the contract address and function being called, as well as the public inputs `PrivateCircuitPublicInputs` or `PublicCircuitPublicInputs` that are outputted by the execution of the called function. These public inputs include information on the call context, the side effects of the execution, and the block header. + +At the contract level, a call is executed via an oracle call `callPrivateFunction` or `callPublicFunction`, both of which accept the contract address to call, the function selector, and a hash of the arguments. The oracle call prompts the executor to pause the current frame, jump to the target of the call, and return its result. The result is a `CallStackItem` that represents the nested execution. 
+ +The caller is responsible for asserting that the function and arguments in the returned `CallStackItem` match the requested ones, otherwise a malicious oracle could return a `CallStackItem` for a different execution. The caller must also push the hash of the returned `CallStackItem` into the private or public call stack of the current execution context, which is returned as part of the `CircuitPublicInputs` output. The end result is a top-level entrypoint `CallStackItem`, with a stack of nested call stack items to process. + +The kernel circuit is then responsible for iteratively processing each `CallStackItem`, pushing new items into the stack as it encounters nested calls, until the stack is empty. The private kernel circuit processes private function calls locally in the PXE, whereas the public kernel circuit processes public function calls on the sequencer. + +The private kernel circuit iterations begin with the entrypoint execution, empty output and proof. The public kernel circuit starts with the public call stack in the transaction object, and builds on top of the output and proof of the private kernel circuit. + +``` +let call_stack, kernel_public_inputs, proof +if is_private(): + call_stack = [top_level_execution] + kernel_public_inputs = empty_inputs + proof = empty_proof +else: + call_stack = tx.public_call_stack + kernel_public_inputs = tx.kernel_public_inputs + proof = tx.proof + +while call_stack is not empty: + let call_stack_item = call_stack.pop() + call_stack.push(...call_stack_item.call_stack) + kernel_public_inputs, proof = kernel_circuit(call_stack_item, kernel_public_inputs, proof) +``` + +The kernel circuit asserts that nested functions and their side effects are processed in order, and that the hash of each nested execution matches the corresponding hash outputted in the call stack by each `CircuitPublicInputs`. 
\ No newline at end of file diff --git a/yellow-paper/docs/gas-and-fees/gas-and-fees.md b/yellow-paper/docs/gas-and-fees/gas-and-fees.md index 7626094a15b..6c70c3a5d82 100644 --- a/yellow-paper/docs/gas-and-fees/gas-and-fees.md +++ b/yellow-paper/docs/gas-and-fees/gas-and-fees.md @@ -158,7 +158,7 @@ This would appear to introduce a circular dependency whereby an appropriate fee Initially, the values of transaction gas limits can be set to a very high number, the base gas limits set to values corresponding to the user's chosen amortization level and the fees aet to 0. The transaction can be simulated under these conditions and simulation will provide actual gas consumption figures. Simulation can then be repeated with more realistic values of gas limits and the updated gas consumption figures will be reported. A few iterations of this process will enable the user to establish and prepare an appropriate fee. -Simulation of the transaction will provide feedback as to it's gas consumption, this can then be repeated to converge on the optimum fee to be prepared. The private portion of the transaction will be proven via the private kernel circuit resulting in a number of fee related public inputs: +Simulation of the transaction will provide feedback as to it's gas consumption, this can be repeated to converge on the optimum fee to be prepared. The private portion of the transaction will be proven via the private kernel circuit resulting in a number of fee related public inputs: - **feeCommitments** - New commitments generated as part of fee preparation - **feeNullifiers** - New nullifiers generated as part of fee preparation @@ -255,6 +255,9 @@ This next example differs in that the refund is performed privately using partia ![Private Refund](../gas-and-fees/images/gas-and-fees/private-refund.jpg) +In both of these examples the fee is effectively escrowed as part of the private portion of fee preparation. 
The enqueued public function is simply an instruction to increase the balance of the payment asset held by the fee payment contract. The sequencer should be able to inspect the public call instruction, consisting of contract address, function selector and arguments and be confident that this function will not fail. Provided the logic of the fee payment contract is defined correctly, once escrowed, the fee can't be modified by the user's transaction payload. This gives the sequencer the guarantee that they will be paid for the work they perform. Finally, the fee distribution function in either of these examples can be written such that the sequencer can be confident of success. This function simply needs to take the securely escrowed fee, compute the actual fee and subsequent refund before increasing the balance of the 2 parties within the payment asset. + + ### Paying Via L1 As a further option, it would be possible to facilitate payments directly from L1. Here, a mechanism similar to L1 -> L2 messaging would be used to transmit the payment to the sequencer. diff --git a/yellow-paper/docs/private-message-delivery/note-discovery.md b/yellow-paper/docs/private-message-delivery/note-discovery.md index d6b1d478145..9894f6b13c2 100644 --- a/yellow-paper/docs/private-message-delivery/note-discovery.md +++ b/yellow-paper/docs/private-message-delivery/note-discovery.md @@ -6,9 +6,9 @@ sidebar_position: 3 ## Requirements -When users interact with contracts they will generate and publish encrypted notes for other network participants. In order for a user to consume those notes, they need to identify, retrieve and decrypt them. The total number of encrypted notes published by the network will be substantial, making it infeasible for some users to simply retrieve every note and attempt a naive brute-force decryption. 
For this reason, those users will want to utilize a note discovery protocol to privately identify and provide a much smaller subset of notes for the user to decrypt. +When users interact with contracts they will generate and publish encrypted notes for other network participants. In order for a user to consume notes that belong to them, they need to identify, retrieve and decrypt them. A simple, privacy-preserving approach to this would be to download all of the notes and attempt decryption. However, the total number of encrypted notes published by the network will be substantial, making it infeasible for some users to do this. Those users will want to utilize a note discovery protocol to privately identify their notes. -A number of techniques currently exist to perform this task with various compromises of levels of privacy and the required amounts of computational effort and/or network bandwidth. This is a field into which a lot of research if being conducted so our approach is not to dictate a specific technique but to put in place the necessary abstractions such that users can select their preferred protocol and new techniques can be integrated in the future. +A number of techniques currently exist to help with this and it is a field into which a lot of research is being conducted. Therefore, our approach is not to dictate or enshrine a specific note discovery mechanism but to put in place the necessary abstractions such that users can freely choose. Additionally, through this approach we allow for integration of new or improved protocols in the future. 
## Tag Abstraction diff --git a/yellow-paper/docs/private-message-delivery/private-message-delivery.md b/yellow-paper/docs/private-message-delivery/private-message-delivery.md index 92564a139e5..c379a60bc30 100644 --- a/yellow-paper/docs/private-message-delivery/private-message-delivery.md +++ b/yellow-paper/docs/private-message-delivery/private-message-delivery.md @@ -12,7 +12,7 @@ Maintaining the core tenet of privacy within the Aztec Network imposes a number 2. Alice will need to broadcast the encrypted state so as to make it available for Bob to retrieve. 3. Alice will need to broadcast a 'tag' alongside the encrypted state. This tag must be identifiable by Bob's chosen [note discovery protocol](./note-discovery.md) but not identifiable by any third party. -Fulfilling these requirements will enable users to privately identify, retrieve, decrypt and spend their application state. +Fulfilling these requirements will enable users to privately identify, retrieve, decrypt and consume their application state. Individual pieces of application state transmitted in this way are termed 'notes'. ## Constraining Message Delivery @@ -34,5 +34,5 @@ Constraining publication to the correct data availability layer will be performe ## User Handshaking -One function that is useful regardless of the preferred note discovery and encryption schemes is for user's to be notified when they have been sent a note from another user for the first time. To achieve this we will deploy a 'user handshaking' contract that can be used to create a private note for a recipient containing the sender's details (e.g. public key). Network participants will be able to retrieve these notes, decrypt them and use the contents to guide them in the generation of tags of notes to retrieve. +One function that is useful regardless of a user's preferred note discovery and encryption scheme is for users to be notified when they have been sent a note from another user for the first time. 
To facilitate this we will deploy a 'handshaking' contract that can be used to create a private note for a recipient containing the sender's information (e.g. public key). The notes generated by this contract will be easy to identify enabling users to retrieve these notes, decrypt them and use the contents in any deterministic tag generation used by their chosen note discovery protocol. Trial decryption of these notes alone should not put too high a burden on end users. diff --git a/yellow-paper/docs/public-vm/InstructionSet.mdx b/yellow-paper/docs/public-vm/InstructionSet.mdx new file mode 100644 index 00000000000..141e435dd25 --- /dev/null +++ b/yellow-paper/docs/public-vm/InstructionSet.mdx @@ -0,0 +1,5 @@ +# Instruction Set + +import GeneratedInstructionSet from './gen/_InstructionSet.mdx'; + + \ No newline at end of file diff --git a/yellow-paper/docs/public-vm/Types.mdx b/yellow-paper/docs/public-vm/Types.mdx new file mode 100644 index 00000000000..e69de29bb2d diff --git a/yellow-paper/docs/public-vm/_category_.json b/yellow-paper/docs/public-vm/_category_.json new file mode 100644 index 00000000000..b71bfdd8d04 --- /dev/null +++ b/yellow-paper/docs/public-vm/_category_.json @@ -0,0 +1,8 @@ +{ + "label": "AVM: Aztec's Public VM", + "position": 5, + "link": { + "type": "generated-index", + "description": "Aztec's Public VM..." + } +} diff --git a/yellow-paper/docs/public-vm/alu.md b/yellow-paper/docs/public-vm/alu.md new file mode 100644 index 00000000000..bd83dde6a23 --- /dev/null +++ b/yellow-paper/docs/public-vm/alu.md @@ -0,0 +1,35 @@ +# Algebraic Logic Unit + +The algebraic logic unit performs operations analogous to an arithmetic unit in a CPU. + +This component of the VM circuit evaluates both base-2 arithmetic operations and prime-field operation. It takes its input/output from the intermediate registers in the state controller. 
+ +The following block diagram maps out a draft of the internal components of the "ALU" + +![](./gen/images/alu/alu.png) + +Notes: + +For logic operations (e.g. AND/OR) we use lookup tables. The max. size of each lookup table cannot grow too large as the Prover pays a constant cost linear with the size of the lookup table. + +To this end we use lookup tables for logic operations that take _8-bit input operands_ for a total table size of 2^16. + +i.e. the table contains the output for every possible 8-bit combination of 2 input operands. + +#### Slice registers + +We need to slice our inputs into 8-bit chunks for logic operations, in order to index the lookup tables. + +As a simplification, we can say that _any_ operation that requires range-constraints will split the input operands into 8-bit slices, as we can then apply consistent range-checking logic. + +#### Carry flag + +Used to test for overflows. If we want the high-level instruction set to have "add with carry" we need to expose the carry flag to the state controller. + +## Example operation: 32-bit ADD(a, b, c) + +Assume we start with `a` in intermediate register `R1`, `b` in intermediate register `R2`, and `c` in intermediate register `R3` + +1. Store the first 32 bits of `a + b` in slice registers `s1, s2, s3, s4`, with the carry bit in `carry` +2. Validate $a + b = s_1 + 2^8s_2 + 2^{16}s_3 + 2^{24}s_4 + 2^{32}\text{carry}$ +3. Validate $c = s_1 + 2^8s_2 + 2^{16}s_3 + 2^{24}s_4$ diff --git a/yellow-paper/docs/public-vm/bytecode-validation-circuit.md b/yellow-paper/docs/public-vm/bytecode-validation-circuit.md new file mode 100644 index 00000000000..ccdd88cadc5 --- /dev/null +++ b/yellow-paper/docs/public-vm/bytecode-validation-circuit.md @@ -0,0 +1,190 @@ +# Bytecode Validation Circuit + +Goal: Validate that a polynomial commitment to AVM program opcodes maps to the bytecode representation of an AVM program of maximum size $n$.
+ +# Definitions - Curves and Fields + +The bytecode validation circuit is implemented over the BN254 elliptic curve with group elements defined via $\mathbb{G}_{bn254}$. + +The field $\mathbb{F}$ represents the finite field whose characteristic equals the number of points on the BN254 curve. + +# Bytecode representation + +Each opcode in the AVM can be described by an integer in the range $[0, \ldots, 256^{31}]$ (i.e. 31 bytes of data). All opcodes excluding `SET` require much less data than 31 bytes. + +In the AVM circuit architecture, multiple columns are used to define a VM operation. These columns describe the following quantities: + +1. the opcode to be executed (1 byte of data) +2. three parameter columns that define either literal values or memory indexes +3. three "flag" columns that define metadata associated with each parameter (e.g. whether the parameter a should be interpreted as a literal value or a memory index, or an indirect memory index) + +To both minimize the amount of information used to _define_ a given AVM program, the AVM posesses an additional column that describes the _packed_ opcode. i.e. the integer concatenation of all 6 of the above column values. We define this column via the vector of field elements $\mathbf{op} \in \mathbb{F}^n$ (where $n$ is the number of opcodes in the program). $\mathbf{op}$ is defined as the _column representation_ of an AVM program. + +## Packed bytecode representation + +When _broadcasting_ the data for AVM programs, we desire an encoding that minimizes the raw number of bytes broadcast, we call this the _packed representation_ of the program. + +The number of bytes required to represent an element in $\mathbf{op}$ in the AVM can be derived from the value of the 1st byte (e.g `ADD` requires 7 bytes of data - the ADD opcode (1 byte) and three memory indices (each of size 2 bytes)). + +See (ref: TODO!) for a table that describes the amount of data required for each opcode. 
+ +Each field element in a BN254 circuit can represent _31_ bytes of bytecode data. The packed representation of an AVM program $\mathbf{b} \in \mathbb{F}^n$ is defined as the concatenation of $\mathbf{op}$ into 31-byte chunks, represented as field elements. + +There exists a mapping function $g$ that, given the packed representation $\mathbf{b}$, will produce the column representation $\mathbf{op}$. + +$$ +g(\mathbf{b}) = \mathbf{op} +$$ + +A full description of $g$ is provided [further down in this document](#Definition-of-mapping-function-g). + +## Committed representation + +The committed representation of an AVM program is an elliptic curve polynomial commitment $[P] \in \mathbb{G}_{bn254}$, created via the KZG polynomial commitment scheme (ref). + +$[P]$ is a commitment to $P(X) \in \mathbb{F}[X]^n$ where $P(X) = \sum_{i=0}^{n-1} op_i X^i$ + +# Bytecode validation logic + +Given inputs $\mathbf{b} \in \mathbb{F}^n$ and $[P] \in \mathbb{G}_{bn254}$, we must validate that $[P] = \text{commit}_{KZG}(g(\mathbf{b}))$. + +This requires the following _high level_ steps: + +1. For all $i \in [0, \ldots, n - 1]$, validate that $b_i < 256^{31} - 1$ +2. Compute $\mathbf{op} = g(\mathbf{b})$ +3. Perform a _polynomial consistency check_ between $\mathbb{op}$ and $[P]$ + +# Polynomial Consistency Check + +> The most straightforward way of validating $\mathbb{op}, [P]$ would be to directly construct $[P]$ from $\mathbb{op}$. +> We do not do this, as this would require a large multiscalar multiplication over the BN254 curve. This could only be performed efficiently over a Grumpkin SNARK circuit, which would add downstream complexity to the Aztec architecture (currently the only Grumpkin proofs being accumulated are elliptic-curve-virtual-machine circuits). The rollup circuit architecture already supports efficient recursive aggregation of BN254 proofs - the desire is for the bytecode validation circuit to be a canonical Honk SNARK over the BN254 field. 
+ +To perform a polynomial consistency check between $\mathbb{op}$ and $[P]$, we perform the following: + +1. Generate a challenge $z \in \mathbb{F}$ by computing the Poseidon hash of $H(op_0, \ldots, op_{n-1}, [P])$ +2. Compute $\sum_{i=0}^{n-1} op_i z^i = r \in \mathbb{F}$ +3. Validate via a KZG opening proof that $[P]$ commits to a polynomial $P(X)$ such that $P(z) = r$ + +In the same manner that Honk pairings can be deferred via aggregating pairing inputs into an accumulator, the pairing required to validate the KZG opening proof can also be deferred. + +## Evaluating the polynomial consistency check within a circuit + +The direct computation of $r = \sum_{i=0}^{n-1} op_i z^i$ is trivial as the field is native to a BN254 SNARK circuit, and will require approx. 2 constraints per opcode. + +Validating a KZG opening proof will require approx. 3 non-native elliptic curve scalar multiplications, which will have a cost of approx. 30,000 constraints if using `stdlib::biggroup` from the PLONK standard library. + +The major cost of the consistency check is the Poseidon hash of the packed bytecode vector $\mathbb{b}$ and the commitment $[P]$ - this will incur approx. 22 constraints per element in $\mathbb{b}$ + +# Definition of mapping function $g$ + +The following is a pseudocode description of $g$, which can efficiently be described in a Honk circuit (i.e. no branches). + +We define a function `slice(element, idx, length)`. `element` is a field element interpreted as a length-31 byte array. `slice` computes the byte array `element[idx] : element[idx + length]`, converts into a field element and returns it. + +We define a size-256 lookup table `c` that maps an avm instruction byte to the byte length required to represent its respective opcode. 
+ +``` +g(b) { + let i := 0; // index into bytecode array `b` + let j := 0; // byte offset of current bytecode element + let op := []; // vector of opcode values we need to populate + for k in [0, n]: + { + let f := b[i]; + let instruction_byte := f.slice(j, 1); + let opcode_length := c[instruction_byte]; + let bytes_remaining_in_f := 30 - j; + let op_split := opcode_length > bytes_remaining_in_f; + let bytes_from_f := op_split ? bytes_remaining_in_f : opcode_length; + let op_hi := f.slice(j, bytes_from_f); + + let f' := b[i+1]; + let bytes_from_f' := opcode_length - bytes_from_f; + let op_lo := f'.slice(0, bytes_from_f'); + + op[k] := op_lo + (op_hi << (bytes_from_f' * 8)); + i := i + op_split; + j := op_split ? bytes_from_f' : j + opcode_length; + } + return op; +} +``` + +Pseudocode definition of `slice` function constraints: + +We define `pow(x)` to be a size-31 lookup table that maps an input $x \in [0, \ldots, 31]$ into the value $2^{8x}$ + +We require the Prover has computed witness field elements `f_lo`, `f_hi`, `result` that satisfy the following constraints: + +``` +slice(f, index, length) +{ + assert(f_hi < pow(index)); + assert(f_lo < pow(31 - index - length)); + assert(result < pow(length)); + assert(f == f_lo + result * pow(31 - index - length) + f_hi * pow(31 - index)); + return result; +} +``` + +## Evaluating `g` within a Honk circuit + +The `g` function requires the contents of $\mathbf{b}$ be present via a lookup table. We can achieve this by instantiating elements of $\mathbf{b}$ via the ROM abstraction present in the Plonk standard library (table initialisation costs 2 constraints per element, table reads cost 2 constraints per element) + +We can instantiate tables `c` , `pow` as lookup tables via the same mechanism. + +The `slice` function requires 3 variable-length range checks. In Honk circuits we can only support fixed-length range checks.
+ +The following pseudocode defines how a variable-length range check can be composed of fixed-length range checks. Here we assume we have previously constrained all inputs to be less than $2^{248} - 1$ + +``` +less_than(a, b) { + // this block is not constrained and defines witness generation + let a_lo := a & (2^{124} - 1) + let b_lo := b & (2^{124} - 1) + let a_hi := (a >> 124) + let b_hi := (b >> 124) + let borrow := b_lo < a_lo + let r_lo := b_lo - a_lo + borrow*2^124 + let r_hi := b_hi - a_hi - borrow + + // this block defines constraints + assert(a_lo < 2^124) + assert(a_hi < 2^124) + assert(b_lo < 2^124) + assert(b_hi < 2^124) + assert(r_lo < 2^124) + assert(r_hi < 2^124) + assert(borrow*borrow - borrow = 0) // bool check + assert(a_lo + 2^{124}a_hi = a) + assert(b_lo + 2^{124}b_hi = b) + assert(r_lo = b_lo - a_lo + borrow*2^124) + assert(r_hi = b_hi - a_hi - borrow) +} +``` + +Each `slice` call requires three `less_than` calls, and each iteration of `g` requires 3 `slice` calls. In total this produces 36 size-124 range checks per iteration of `g`. Each size-124 range check requires approx. 5 constraints, producing 180 constraints of range checks per opcode processed. + +A rough estimate of the total constraints per opcode processed by the `g` function would be 200 constraints per opcode. + +# Bytecode Validation Circuit Summary + +The bytecode validation circuit takes, as public inputs, the packed bytecode array $\mathbf{b} \in \mathbb{F}^n$ and the bytecode commitment $[P] \in \mathbb{G}_{bn254}$ (represented via field elements). + +The circuit evaluates the following: + +1. For all $i \in [0, \ldots, n - 1]$, validate that $b_i < 256^{31} - 1$ +2. Compute $\mathbf{op} = g(\mathbf{b})$ +3. Perform a _polynomial consistency check_ between $\mathbf{op}$ and $[P]$ + +### Summary of main circuit costs + +The polynomial consistency check requires a Poseidon hash that includes the packed bytecode array $\mathbf{b}$. This requires approx.
22 Honk constraints per 31 bytes of bytecode. + +The `g` function will cost approx. 200 constraints per opcode. + +For a given length `n` , the approx. number of constraints required will be approx `222n`. + +A circuit of size 2^21 (2 million constraints) will be able to process a program containing approximately $n = 9,400$ steps. In contrast, a Solidity program can contain a maximum of 24kb of bytecode. + +Note: unless the efficiency of the validation circuit can be improved by a factor of ~4x, it will not be possible to construct bytecode validation proofs client-side in a web browser. Delegating proof construction to a 3rd party would be acceptable in this context because the 3rd party is untrusted and no secret information is leaked. diff --git a/yellow-paper/docs/public-vm/control-flow.md b/yellow-paper/docs/public-vm/control-flow.md new file mode 100644 index 00000000000..707e78ecf8b --- /dev/null +++ b/yellow-paper/docs/public-vm/control-flow.md @@ -0,0 +1,32 @@ +# VM Control Flow + High-level architecture + +This document breaks down the VM into internal components and defines how these components interact with one another. + +This can be considered an intermediate abstraction layer between the high-level VM definition and the explicit circuit architecture. The intention is for this abstraction to be analogous to how CPU chips are broken down into discrete components. + +# Sub-operations + +> Notation note: I use the term "clock cycle" in a way that is analogous to "row in the execution trace". + +We wish to define a set of sub-operations our VM can execute. Multiple sub-operations can be executed per clock cycle. Each instruction in the instruction set exposed by the VM is composed of 1 or more sub-operations. + +The intention is for sub-operations to be implementable as independent VM circuit relations.
+ +# Control flow + +![](./gen/images/control-flow/avm-control-flow.png) + +> Notation note: whenever the VM "sends a signal" to one or more VM components, this is analogous to defining a boolean column in the execution trace that toggles on/off specific functionality + +- The instruction controller uses a program counter to read the current opcode to be executed, and send signals to downstream components to execute the opcode +- The state controller reads data from memory into the intermediate registers, using the opcode parameter values +- The VM "algebraic logic unit" executes the opcode given the intermediate register values, and writes output into the intermediate registers +- The state controller writes the output from an intermediate register into memory + +## Chiplets + +This borrows the chiplet nomenclature from the Miden VM - these components encapsulate functionality that can be effectively defined via an independent sub-circuit (analog in real world = specific part of a CPU chip die) + +Chiplets can be developed iteratively i.e. first draft of the AVM only needs a barebones algebraic logic unit. + +> We want to apply the following design heuristic to chiplets: they are _loosely coupled_ to other AVM components. It should be possible to remove a chiplet and the AVM opcodes it implements, without requiring any upstream changes to the AVM architecture/implementation diff --git a/yellow-paper/docs/public-vm/gen/_InstructionSet.mdx b/yellow-paper/docs/public-vm/gen/_InstructionSet.mdx new file mode 100644 index 00000000000..12b44617024 --- /dev/null +++ b/yellow-paper/docs/public-vm/gen/_InstructionSet.mdx @@ -0,0 +1,1451 @@ +[comment]: # (THIS IS A GENERATED FILE! DO NOT EDIT!)
+[comment]: # (Generated via `yarn preprocess`) + +[comment]: # (Generated by InstructionSetMarkdownGen.tsx and InstructionSet.js) + +import Markdown from 'react-markdown' +import CodeBlock from '@theme/CodeBlock' + + +## Instructions Table + +Click on an instruction name to jump to its section. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
OpcodeNameSummaryBit-sizeExpression
0x00 [`ADD`](#isa-section-add)Addition (a + b)96{ + `M[dstOffset] = M[aOffset] + M[bOffset] mod 2^k` + }
0x01 [`SUB`](#isa-section-sub)Subtraction (a - b)96{ + `M[dstOffset] = M[aOffset] - M[bOffset] mod 2^k` + }
0x02 [`DIV`](#isa-section-div)Unsigned division (a / b)96{ + `M[dstOffset] = M[aOffset] / M[bOffset]` + }
0x03 [`EQ`](#isa-section-eq)Equality check (a == b)96{ + `M[dstOffset] = M[aOffset] == M[bOffset] ? 1 : 0` + }
0x04 [`LT`](#isa-section-lt)Less-than check (a < b)96{ + `M[dstOffset] = M[aOffset] < M[bOffset] ? 1 : 0` + }
0x05 [`LTE`](#isa-section-lte)Less-than-or-equals check (a <= b)96{ + `M[dstOffset] = M[aOffset] <= M[bOffset] ? 1 : 0` + }
0x06 [`AND`](#isa-section-and)Bitwise AND (a & b)96{ + `M[dstOffset] = M[aOffset] AND M[bOffset]` + }
0x07 [`OR`](#isa-section-or)Bitwise OR (a | b)96{ + `M[dstOffset] = M[aOffset] OR M[bOffset]` + }
0x08 [`XOR`](#isa-section-xor)Bitwise XOR (a ^ b)96{ + `M[dstOffset] = M[aOffset] XOR M[bOffset]` + }
0x09 [`NOT`](#isa-section-not)Bitwise NOT (inversion)72{ + `M[dstOffset] = NOT M[aOffset]` + }
0x0a [`SHL`](#isa-section-shl)Bitwise leftward shift (a << b)96{ + `M[dstOffset] = M[aOffset] << M[bOffset]` + }
0x0b [`SHR`](#isa-section-shr)Bitwise rightward shift (a >> b)96{ + `M[dstOffset] = M[aOffset] >> M[bOffset]` + }
0x0c [`CAST`](#isa-section-cast)Type cast72{ + `M[dstOffset] = cast(M[aOffset])` + }
0x0d [`SET`](#isa-section-set)Set a memory word from a constant in the bytecode.48+N{ + `M[dstOffset] = const` + }
0x0e [`MOV`](#isa-section-mov)Move a word from source memory location to destination`.64{ + `M[dstOffset] = M[srcOffset]` + }
0x0f [`CMOV`](#isa-section-cmov)Move a word (conditionally chosen) from one memory location to another (`d = cond > 0 ? a : b`).112{ + `M[dstOffset] = M[condOffset] > 0 ? M[aOffset] : M[bOffset]` + }
0x10 [`CALLDATACOPY`](#isa-section-calldatacopy)Copy calldata into memory.88{ + `M[dstOffset:dstOffset+size] = calldata[cdOffset:cdOffset+size]` + }
0x11 [`SLOAD`](#isa-section-sload)Load a word from storage.64{ + `M[dstOffset] = storage[M[slotOffset]]` + }
0x12 [`SSTORE`](#isa-section-sstore)Write a word to storage.64{ + `storage[M[slotOffset]] = M[srcOffset]` + }
0x13 [`EMITNOTEHASH`](#isa-section-emitnotehash)Emit a new note hash to be inserted into the notes tree40emitNoteHash(M[contentOffset])
0x14 [`EMITNULLIFIER`](#isa-section-emitnullifier)Emit a new nullifier to be inserted into the nullifier tree40emitNullifier(M[nullifierOffset])
0x15 [`SENDL2TOL1MSG`](#isa-section-sendl2tol1msg)Send an L2-to-L1 message40sendL2ToL1Message(M[contentOffset])
0x16 [`JUMP`](#isa-section-jump)Jump to a location in the bytecode.32{ + `PC = loc` + }
0x17 [`JUMPI`](#isa-section-jumpi)Conditionally jump to a location in the bytecode.64{ + `PC = M[condOffset] > 0 ? loc : PC` + }
0x18 [`RETURN`](#isa-section-return)Halt execution with `success`, optionally returning some data.64{ + `return(M[offset:offset+size])` + }
0x19 [`REVERT`](#isa-section-revert)Halt execution with `failure`, reverting state changes and optionally returning some data.64{ + `revert(M[offset:offset+size])` + }
0x1a [`CALL`](#isa-section-call)Call into another contract.208 +{`M[successOffset] = call( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize])`} +
0x1b [`STATICCALL`](#isa-section-staticcall)Call into another contract, disallowing persistent state modifications.208 +{`M[successOffset] = staticcall( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize])`} +
0x1c [`ULOG`](#isa-section-ulog)Emit an unencrypted log with data from the `field` memory page64{ + `ulog(M[offset:offset+size])` + }
0x1d [`CHAINID`](#isa-section-chainid)Get this rollup's L1 chain ID40{ + `M[dstOffset] = Globals.chainId` + }
0x1e [`VERSION`](#isa-section-version)Get this rollup's L2 version ID40{ + `M[dstOffset] = Globals.version` + }
0x1f [`BLOCKNUMBER`](#isa-section-blocknumber)Get this block's number40{ + `M[dstOffset] = Globals.blocknumber` + }
0x20 [`TIMESTAMP`](#isa-section-timestamp)Get this L2 block's timestamp40{ + `M[dstOffset] = Globals.timestamp` + }
0x21 [`COINBASE`](#isa-section-coinbase)Get the block's beneficiary address40{ + `M[dstOffset] = Globals.coinbase` + }
0x22 [`BLOCKL1GASLIMIT`](#isa-section-blockl1gaslimit)Total amount of "L1 gas" that a block can consume40{ + `M[dstOffset] = Globals.l1GasLimit` + }
0x23 [`BLOCKL2GASLIMIT`](#isa-section-blockl2gaslimit)Total amount of "L2 gas" that a block can consume40{ + `M[dstOffset] = Globals.l2GasLimit` + }
0x24 [`NOTESROOT`](#isa-section-notesroot)Get the historical note-hash tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root` + }
0x25 [`NULLIFIERSROOT`](#isa-section-nullroot)Get the historical nullifier tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].nullifier_tree_root` + }
0x26 [`CONTRACTSROOT`](#isa-section-contractsroot)Get the historical contracts tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].contracts_tree_root` + }
0x27 [`MSGSROOT`](#isa-section-msgsroot)Get the historical l1-to-l2 messages tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].l1_to_l2_messages_tree_root` + }
0x28 [`NOTESROOT`](#isa-section-notesroot)Get the historical note-hash tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root` + }
0x29 [`PUBLICDATAROOT`](#isa-section-publicdataroot)Get the historical public data tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].public_data_tree_root` + }
0x2a [`GLOBALSHASH`](#isa-section-globalshash)Get the historical global variables hash as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].global_variables_hash` + }
0x2b [`BLOCKSROOT`](#isa-section-blocksroot)Get the historical blocks tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].blocks_tree_root` + }
0x2c [`GRANDROOT`](#isa-section-grandroot)Get the historical grandfather tree root as of the specified block number.64{ + `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].grandfather_tree_root` + }
0x2d [`ORIGIN`](#isa-section-origin)Get the transaction's origination address40{ + `M[dstOffset] = TxContext.origin` + }
0x2e [`REFUNDEE`](#isa-section-refundee)The recipient of fee refunds for this transaction40{ + `M[dstOffset] = TxContext.refundee` + }
0x2f [`FEEPERL1GAS`](#isa-section-feeperl1gas)The fee to be paid per "L1 gas" - set by the transaction's original caller40{ + `M[dstOffset] = TxContext.feePerL1Gas` + }
0x30 [`FEEPERL2GAS`](#isa-section-feeperl2gas)The fee to be paid per "L2 gas" - set by the transaction's original caller40{ + `M[dstOffset] = TxContext.feePerL2Gas` + }
0x31 [`CALLER`](#isa-section-caller)Get the address of the sender (the caller's context)40{ + `M[dstOffset] = CallContext.sender` + }
0x32 [`ADDRESS`](#isa-section-address)Get the address of the currently executing l2 contract40{ + `M[dstOffset] = CallContext.storageContractAddress` + }
0x33 [`PORTAL`](#isa-section-portal)Get the address of the l1 portal contract40{ + `M[dstOffset] = CallContext.portalAddress` + }
0x34 [`CALLDEPTH`](#isa-section-calldepth)Get how many calls deep the current call context is40{ + `M[dstOffset] = CallContext.calldepth` + }
0x35 [`L1GAS`](#isa-section-l1gas)Remaining "L1 gas" for this call (after this instruction).40{ + `M[dstOffset] = LatestContext.l1Gas` + }
0x36 [`L2GAS`](#isa-section-l2gas)Remaining "L2 gas" for this call (after this instruction).40{ + `M[dstOffset] = LatestContext.l2Gas` + }
+ + +## Instructions + +### `ADD` (0x00) +Addition (a + b) + +[See in table.](#isa-table-add) + +- **Category**: arithmetic +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] + M[bOffset] mod 2^k` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/ADD.png) + +### `SUB` (0x01) +Subtraction (a - b) + +[See in table.](#isa-table-sub) + +- **Category**: arithmetic +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] - M[bOffset] mod 2^k` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/SUB.png) + +### `DIV` (0x02) +Unsigned division (a / b) + +[See in table.](#isa-table-div) + +- **Category**: arithmetic +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] / M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/DIV.png) + +### `EQ` (0x03) +Equality check (a == b) + +[See in table.](#isa-table-eq) + +- **Category**: conditional +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] == M[bOffset] ? 1 : 0` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/EQ.png) + +### `LT` (0x04) +Less-than check (a < b) + +[See in table.](#isa-table-lt) + +- **Category**: conditional +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] < M[bOffset] ? 1 : 0` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/LT.png) + +### `LTE` (0x05) +Less-than-or-equals check (a <= b) + +[See in table.](#isa-table-lte) + +- **Category**: conditional +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] <= M[bOffset] ? 1 : 0` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/LTE.png) + +### `AND` (0x06) +Bitwise AND (a & b) + +[See in table.](#isa-table-and) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] AND M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/AND.png) + +### `OR` (0x07) +Bitwise OR (a | b) + +[See in table.](#isa-table-or) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] OR M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/OR.png) + +### `XOR` (0x08) +Bitwise XOR (a ^ b) + +[See in table.](#isa-table-xor) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] XOR M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/XOR.png) + +### `NOT` (0x09) +Bitwise NOT (inversion) + +[See in table.](#isa-table-not) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = NOT M[aOffset]` +- **Tag checks**: `T[aOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 72 + +![](./images/bit-formats/NOT.png) + +### `SHL` (0x0a) +Bitwise leftward shift (a << b) + +[See in table.](#isa-table-shl) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. +- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] << M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/SHL.png) + +### `SHR` (0x0b) +Bitwise rightward shift (a >> b) + +[See in table.](#isa-table-shr) + +- **Category**: bitwise +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. 
+- **Args**: + - **aOffset**: memory offset of the operation's left input + - **bOffset**: memory offset of the operation's right input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[aOffset] >> M[bOffset]` +- **Tag checks**: `T[aOffset] == T[bOffset] == op-type` +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 96 + +![](./images/bit-formats/SHR.png) + +### `CAST` (0x0c) +Type cast + +[See in table.](#isa-table-cast) + +- **Category**: types +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **dest-type**: The [type/size](./Types) to tag the output with when different from `op-type`. +- **Args**: + - **aOffset**: memory offset of word to cast + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = cast(M[aOffset])` +- **Details**: Cast a word in memory based on the `dest-type` specified in the bytecode. Truncates when casting to a smaller type, left-zero-pads when casting to a larger type. +- **Tag updates**: `T[dstOffset] = dest-type` +- **Bit-size**: 72 + +![](./images/bit-formats/CAST.png) + +### `SET` (0x0d) +Set a memory word from a constant in the bytecode. + +[See in table.](#isa-table-set) + +- **Category**: memory +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. + - **op-type**: The [type/size](./Types) to check inputs against and tag the output with. `field` type is NOT supported for SET. 
+- **Args**: + - **const**: an N-bit constant value from the bytecode to store in memory (any type except `field`) + - **dstOffset**: memory offset specifying where to store the constant +- **Expression**: `M[dstOffset] = const` +- **Details**: Set memory word at `dstOffset` to `const`'s immediate value. `const`'s bit-size (N) can be 8, 16, 32, 64, or 128 based on `op-type`. It _cannot be 254 (`field` type)_! +- **Tag updates**: `T[dstOffset] = op-type` +- **Bit-size**: 48+N + +![](./images/bit-formats/SET.png) + +### `MOV` (0x0e) +Move a word from source memory location to destination`. + +[See in table.](#isa-table-mov) + +- **Category**: memory +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **srcOffset**: memory offset of word to move + - **dstOffset**: memory offset specifying where to store that word +- **Expression**: `M[dstOffset] = M[srcOffset]` +- **Tag updates**: `T[dstOffset] = T[srcOffset]` +- **Bit-size**: 64 + +![](./images/bit-formats/MOV.png) + +### `CMOV` (0x0f) +Move a word (conditionally chosen) from one memory location to another (`d = cond > 0 ? a : b`). + +[See in table.](#isa-table-cmov) + +- **Category**: memory +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **aOffset**: memory offset of word 'a' to conditionally move + - **bOffset**: memory offset of word 'b' to conditionally move + - **condOffset**: memory offset of the operations 'conditional' input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = M[condOffset] > 0 ? 
M[aOffset] : M[bOffset]` +- **Details**: One of two source memory locations is chosen based on the condition. `T[condOffset]` is not checked because the greater-than-zero suboperation is the same regardless of type. +- **Tag updates**: `T[dstOffset] = M[condOffset] > 0 ? T[aOffset] : T[bOffset]` +- **Bit-size**: 112 + +![](./images/bit-formats/CMOV.png) + +### `CALLDATACOPY` (0x10) +Copy calldata into memory. + +[See in table.](#isa-table-calldatacopy) + +- **Category**: contract calls +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **cdOffset**: offset into calldata to copy from + - **size**: number of words to copy + - **dstOffset**: memory offset specifying where to copy the first word to +- **Expression**: `M[dstOffset:dstOffset+size] = calldata[cdOffset:cdOffset+size]` +- **Details**: Calldata is read-only and cannot be directly operated on by other instructions. This instruction moves words from calldata into memory so they can be operated on normally. +- **Tag updates**: `T[dstOffset:dstOffset+size] = field` +- **Bit-size**: 88 + +![](./images/bit-formats/CALLDATACOPY.png) + +### `SLOAD` (0x11) +Load a word from storage. + +[See in table.](#isa-table-sload) + +- **Category**: storage & messaging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **slotOffset**: memory offset of the storage slot to load from + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = storage[M[slotOffset]]` +- **Details**: Load a word from this contract's persistent public storage into memory. 
+- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/SLOAD.png) + +### `SSTORE` (0x12) +Write a word to storage. + +[See in table.](#isa-table-sstore) + +- **Category**: storage & messaging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **srcOffset**: memory offset of the word to store + - **slotOffset**: memory offset containing the storage slot to store to +- **Expression**: `storage[M[slotOffset]] = M[srcOffset]` +- **Details**: Store a word from memory into this contract's persistent public storage. +- **Bit-size**: 64 + +![](./images/bit-formats/SSTORE.png) + +### `EMITNOTEHASH` (0x13) +Emit a new note hash to be inserted into the notes tree + +[See in table.](#isa-table-emitnotehash) + +- **Category**: storage & messaging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **noteHashOffset**: memory offset of the note hash +- **Expression**: emitNoteHash(M[contentOffset]) +- **Bit-size**: 40 + +![](./images/bit-formats/EMITNOTEHASH.png) + +### `EMITNULLIFIER` (0x14) +Emit a new nullifier to be inserted into the nullifier tree + +[See in table.](#isa-table-emitnullifier) + +- **Category**: storage & messaging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **nullifierOffset**: memory offset of nullifier +- **Expression**: emitNullifier(M[nullifierOffset]) +- **Bit-size**: 40 + +![](./images/bit-formats/EMITNULLIFIER.png) + +### `SENDL2TOL1MSG` (0x15) +Send an L2-to-L1 message + +[See in table.](#isa-table-sendl2tol1msg) + +- **Category**: storage & messaging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **contentOffset**: memory offset of the message content +- **Expression**: sendL2ToL1Message(M[contentOffset]) +- **Bit-size**: 40 + +![](./images/bit-formats/SENDL2TOL1MSG.png) + +### `JUMP` (0x16) +Jump to a location in the bytecode. + +[See in table.](#isa-table-jump) + +- **Category**: control +- **Args**: + - **loc**: target location to jump to +- **Expression**: `PC = loc` +- **Details**: Target location is an immediate value (a constant in the bytecode). +- **Bit-size**: 32 + +![](./images/bit-formats/JUMP.png) + +### `JUMPI` (0x17) +Conditionally jump to a location in the bytecode. + +[See in table.](#isa-table-jumpi) + +- **Category**: control +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **loc**: target location conditionally jump to + - **condOffset**: memory offset of the operations 'conditional' input +- **Expression**: `PC = M[condOffset] > 0 ? loc : PC` +- **Details**: Target location is an immediate value (a constant in the bytecode). `T[condOffset]` is not checked because the greater-than-zero suboperation is the same regardless of type. 
+- **Bit-size**: 64 + +![](./images/bit-formats/JUMPI.png) + +### `RETURN` (0x18) +Halt execution with `success`, optionally returning some data. + +[See in table.](#isa-table-return) + +- **Category**: contract calls +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **offset**: memory offset of first word to return + - **size**: number of words to return +- **Expression**: `return(M[offset:offset+size])` +- **Details**: Return control flow to the calling context/contract. +- **Bit-size**: 64 + +![](./images/bit-formats/RETURN.png) + +### `REVERT` (0x19) +Halt execution with `failure`, reverting state changes and optionally returning some data. + +[See in table.](#isa-table-revert) + +- **Category**: contract calls +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **offset**: memory offset of first word to return + - **size**: number of words to return +- **Expression**: `revert(M[offset:offset+size])` +- **Details**: Return control flow to the calling context/contract. +- **Bit-size**: 64 + +![](./images/bit-formats/REVERT.png) + +### `CALL` (0x1a) +Call into another contract. + +[See in table.](#isa-table-call) + +- **Category**: contract calls +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **l1GasOffset**: amount of L1 gas to provide to the callee + - **l2GasOffset**: amount of L2 gas to provide to the callee + - **addrOffset**: address of the contract to call + - **argsOffset**: memory offset to args (will become the callee's calldata) + - **argsSize**: number of words to pass via callee's calldata + - **retOffset**: destination memory offset specifying where to store the data returned from the callee + - **retSize**: number of words to copy from data returned by callee + - **successOffset**: destination memory offset specifying where to store the call's success (0: failure, 1: success) +- **Expression**: + +{`M[successOffset] = call( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize])`} + +- **Details**: Creates a new CallContext, triggers execution of the corresponding contract code, + and then resumes execution in the current CallContext. A non-existent contract or one + with no code will return success. Nested call has an incremented `CallContext.calldepth`. +- **Tag checks**: `T[l1GasOffset] == T[l2GasOffset] == u32` +- **Tag updates**: + +{`T[successOffset] = u8 +T[retOffset:retOffset+retSize] = field`} + +- **Bit-size**: 208 + +![](./images/bit-formats/CALL.png) + +### `STATICCALL` (0x1b) +Call into another contract, disallowing persistent state modifications. + +[See in table.](#isa-table-staticcall) + +- **Category**: contract calls +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **l1GasOffset**: amount of L1 gas to provide to the callee + - **l2GasOffset**: amount of L2 gas to provide to the callee + - **addrOffset**: address of the contract to call + - **argsOffset**: memory offset to args (will become the callee's calldata) + - **argsSize**: number of words to pass via callee's calldata + - **retOffset**: destination memory offset specifying where to store the data returned from the callee + - **retSize**: number of words to copy from data returned by callee + - **successOffset**: destination memory offset specifying where to store the call's success (0: failure, 1: success) +- **Expression**: + +{`M[successOffset] = staticcall( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize])`} + +- **Details**: Same as `CALL`, but the callee is cannot modify persistent state. Disallowed instructions are `SSTORE`, `ULOG`, `CALL`. +- **Tag checks**: `T[l1GasOffset] == T[l2GasOffset] == u32` +- **Tag updates**: + +{`T[successOffset] = u8 +T[retOffset:retOffset+retSize] = field`} + +- **Bit-size**: 208 + +![](./images/bit-formats/STATICCALL.png) + +### `ULOG` (0x1c) +Emit an unencrypted log with data from the `field` memory page + +[See in table.](#isa-table-ulog) + +- **Category**: logging +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **offset**: memory offset of the data to log + - **size**: number of words to log +- **Expression**: `ulog(M[offset:offset+size])` +- **Bit-size**: 64 + +![](./images/bit-formats/ULOG.png) + +### `CHAINID` (0x1d) +Get this rollup's L1 chain ID + +[See in table.](#isa-table-chainid) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 
0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.chainId` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/CHAINID.png) + +### `VERSION` (0x1e) +Get this rollup's L2 version ID + +[See in table.](#isa-table-version) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.version` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/VERSION.png) + +### `BLOCKNUMBER` (0x1f) +Get this block's number + +[See in table.](#isa-table-blocknumber) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.blocknumber` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/BLOCKNUMBER.png) + +### `TIMESTAMP` (0x20) +Get this L2 block's timestamp + +[See in table.](#isa-table-timestamp) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. 
Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.timestamp` +- **Tag updates**: `T[dstOffset] = u64` +- **Bit-size**: 40 + +![](./images/bit-formats/TIMESTAMP.png) + +### `COINBASE` (0x21) +Get the block's beneficiary address + +[See in table.](#isa-table-coinbase) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.coinbase` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/COINBASE.png) + +### `BLOCKL1GASLIMIT` (0x22) +Total amount of "L1 gas" that a block can consume + +[See in table.](#isa-table-blockl1gaslimit) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.l1GasLimit` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/BLOCKL1GASLIMIT.png) + +### `BLOCKL2GASLIMIT` (0x23) +Total amount of "L2 gas" that a block can consume + +[See in table.](#isa-table-blockl2gaslimit) + +- **Category**: block info +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. 
Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = Globals.l2GasLimit` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/BLOCKL2GASLIMIT.png) + +### `NOTESROOT` (0x24) +Get the historical note-hash tree root as of the specified block number. + +[See in table.](#isa-table-notesroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/NOTESROOT.png) + +### `NULLIFIERSROOT` (0x25) +Get the historical nullifier tree root as of the specified block number. + +[See in table.](#isa-table-nullroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].nullifier_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/NULLIFIERSROOT.png) + +### `CONTRACTSROOT` (0x26) +Get the historical contracts tree root as of the specified block number. + +[See in table.](#isa-table-contractsroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].contracts_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/CONTRACTSROOT.png) + +### `MSGSROOT` (0x27) +Get the historical l1-to-l2 messages tree root as of the specified block number. + +[See in table.](#isa-table-msgsroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].l1_to_l2_messages_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/MSGSROOT.png) + +### `NOTESROOT` (0x28) +Get the historical note-hash tree root as of the specified block number. + +[See in table.](#isa-table-notesroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/NOTESROOT.png) + +### `PUBLICDATAROOT` (0x29) +Get the historical public data tree root as of the specified block number. + +[See in table.](#isa-table-publicdataroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].public_data_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/PUBLICDATAROOT.png) + +### `GLOBALSHASH` (0x2a) +Get the historical global variables hash as of the specified block number. + +[See in table.](#isa-table-globalshash) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].global_variables_hash` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/GLOBALSHASH.png) + +### `BLOCKSROOT` (0x2b) +Get the historical blocks tree root as of the specified block number. + +[See in table.](#isa-table-blocksroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].blocks_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/BLOCKSROOT.png) + +### `GRANDROOT` (0x2c) +Get the historical grandfather tree root as of the specified block number. + +[See in table.](#isa-table-grandroot) + +- **Category**: historical access +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **blockNumOffset**: memory offset of the block number input + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].grandfather_tree_root` +- **Tag updates**: `T[dstOffset] = field` +- **Bit-size**: 64 + +![](./images/bit-formats/GRANDROOT.png) + +### `ORIGIN` (0x2d) +Get the transaction's origination address + +[See in table.](#isa-table-origin) + +- **Category**: tx context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = TxContext.origin` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/ORIGIN.png) + +### `REFUNDEE` (0x2e) +The recipient of fee refunds for this transaction + +[See in table.](#isa-table-refundee) + +- **Category**: tx context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = TxContext.refundee` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/REFUNDEE.png) + +### `FEEPERL1GAS` (0x2f) +The fee to be paid per "L1 gas" - set by the transaction's original caller + +[See in table.](#isa-table-feeperl1gas) + +- **Category**: tx context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = TxContext.feePerL1Gas` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/FEEPERL1GAS.png) + +### `FEEPERL2GAS` (0x30) +The fee to be paid per "L2 gas" - set by the transaction's original caller + +[See in table.](#isa-table-feeperl2gas) + +- **Category**: tx context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = TxContext.feePerL2Gas` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/FEEPERL2GAS.png) + +### `CALLER` (0x31) +Get the address of the sender (the caller's context) + +[See in table.](#isa-table-caller) + +- **Category**: call context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = CallContext.sender` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/CALLER.png) + +### `ADDRESS` (0x32) +Get the address of the currently executing l2 contract + +[See in table.](#isa-table-address) + +- **Category**: call context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = CallContext.storageContractAddress` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/ADDRESS.png) + +### `PORTAL` (0x33) +Get the address of the l1 portal contract + +[See in table.](#isa-table-portal) + +- **Category**: call context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. 
+- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = CallContext.portalAddress` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/PORTAL.png) + +### `CALLDEPTH` (0x34) +Get how many calls deep the current call context is + +[See in table.](#isa-table-calldepth) + +- **Category**: call context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = CallContext.calldepth` +- **Details**: Note: security issues with EVM's tx.origin can be resolved by asserting the `calldepth == 0`. +- **Tag updates**: `T[dstOffset] = u8` +- **Bit-size**: 40 + +![](./images/bit-formats/CALLDEPTH.png) + +### `L1GAS` (0x35) +Remaining "L1 gas" for this call (after this instruction). + +[See in table.](#isa-table-l1gas) + +- **Category**: latest context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = LatestContext.l1Gas` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/L1GAS.png) + +### `L2GAS` (0x36) +Remaining "L2 gas" for this call (after this instruction). + +[See in table.](#isa-table-l2gas) + +- **Category**: latest context +- **Flags**: + - **indirect**: Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. 
Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`. +- **Args**: + - **dstOffset**: memory offset specifying where to store operation's result +- **Expression**: `M[dstOffset] = LatestContext.l2Gas` +- **Tag updates**: `T[dstOffset] = u32` +- **Bit-size**: 40 + +![](./images/bit-formats/L2GAS.png) diff --git a/yellow-paper/docs/public-vm/gen/images/alu/alu.png b/yellow-paper/docs/public-vm/gen/images/alu/alu.png new file mode 100644 index 00000000000..b0aca4fe1e9 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/alu/alu.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/ADD.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/ADD.png new file mode 100644 index 00000000000..4dd6dcc56b3 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/ADD.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/ADDRESS.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/ADDRESS.png new file mode 100644 index 00000000000..2c128e55cc3 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/ADDRESS.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/AND.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/AND.png new file mode 100644 index 00000000000..28699de3959 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/AND.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL1GASLIMIT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL1GASLIMIT.png new file mode 100644 index 00000000000..f9cbd7a4b40 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL1GASLIMIT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL2GASLIMIT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL2GASLIMIT.png new file mode 100644 index 00000000000..9e1160f7e36 Binary files 
/dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKL2GASLIMIT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKNUMBER.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKNUMBER.png new file mode 100644 index 00000000000..55de72ff390 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKNUMBER.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKSROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKSROOT.png new file mode 100644 index 00000000000..cb678fd654b Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/BLOCKSROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CALL.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALL.png new file mode 100644 index 00000000000..c4a26fb27ef Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALL.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDATACOPY.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDATACOPY.png new file mode 100644 index 00000000000..305917efe37 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDATACOPY.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDEPTH.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDEPTH.png new file mode 100644 index 00000000000..382668bfe4a Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLDEPTH.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLER.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLER.png new file mode 100644 index 00000000000..a594244415a Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CALLER.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CAST.png 
b/yellow-paper/docs/public-vm/gen/images/bit-formats/CAST.png new file mode 100644 index 00000000000..529a236c900 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CAST.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CHAINID.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CHAINID.png new file mode 100644 index 00000000000..2c468671c3f Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CHAINID.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CMOV.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CMOV.png new file mode 100644 index 00000000000..7c1ac378dd3 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CMOV.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/COINBASE.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/COINBASE.png new file mode 100644 index 00000000000..f2e8eb1ff8e Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/COINBASE.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/CONTRACTSROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/CONTRACTSROOT.png new file mode 100644 index 00000000000..ddb3fc79680 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/CONTRACTSROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/DIV.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/DIV.png new file mode 100644 index 00000000000..2a30fad2d28 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/DIV.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNOTEHASH.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNOTEHASH.png new file mode 100644 index 00000000000..3f5f66fc40a Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNOTEHASH.png differ diff --git 
a/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNULLIFIER.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNULLIFIER.png new file mode 100644 index 00000000000..d6e841d1c03 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/EMITNULLIFIER.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/EQ.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/EQ.png new file mode 100644 index 00000000000..18bd16ed228 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/EQ.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL1GAS.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL1GAS.png new file mode 100644 index 00000000000..fde3846b150 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL1GAS.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL2GAS.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL2GAS.png new file mode 100644 index 00000000000..d0c1443e816 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/FEEPERL2GAS.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/GLOBALSHASH.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/GLOBALSHASH.png new file mode 100644 index 00000000000..b1fd91771f0 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/GLOBALSHASH.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/GRANDROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/GRANDROOT.png new file mode 100644 index 00000000000..13638898ac8 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/GRANDROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMP.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMP.png new file mode 100644 index 00000000000..d2048dc928c Binary files /dev/null and 
b/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMP.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMPI.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMPI.png new file mode 100644 index 00000000000..1c03b6a42de Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/JUMPI.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/L1GAS.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/L1GAS.png new file mode 100644 index 00000000000..66a53c795e6 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/L1GAS.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/L2GAS.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/L2GAS.png new file mode 100644 index 00000000000..1b9668c46bd Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/L2GAS.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/LT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/LT.png new file mode 100644 index 00000000000..f6fcdf112fa Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/LT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/LTE.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/LTE.png new file mode 100644 index 00000000000..1af3baea07c Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/LTE.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/MOV.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/MOV.png new file mode 100644 index 00000000000..649465c7d04 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/MOV.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/MSGSROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/MSGSROOT.png new file mode 100644 index 00000000000..62aef1a1e30 Binary files /dev/null and 
b/yellow-paper/docs/public-vm/gen/images/bit-formats/MSGSROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/NOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/NOT.png new file mode 100644 index 00000000000..05ddd512807 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/NOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/NOTESROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/NOTESROOT.png new file mode 100644 index 00000000000..9753651fe35 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/NOTESROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/NULLIFIERSROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/NULLIFIERSROOT.png new file mode 100644 index 00000000000..e8de85ffe1c Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/NULLIFIERSROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/OR.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/OR.png new file mode 100644 index 00000000000..ebc64c28a15 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/OR.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/ORIGIN.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/ORIGIN.png new file mode 100644 index 00000000000..bcedd1c0a56 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/ORIGIN.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/PORTAL.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/PORTAL.png new file mode 100644 index 00000000000..19957ac6c8e Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/PORTAL.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/PUBLICDATAROOT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/PUBLICDATAROOT.png new file mode 100644 
index 00000000000..21a2fe9b538 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/PUBLICDATAROOT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/REFUNDEE.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/REFUNDEE.png new file mode 100644 index 00000000000..f4f89b595f6 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/REFUNDEE.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/RETURN.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/RETURN.png new file mode 100644 index 00000000000..c81b8566e8f Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/RETURN.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/REVERT.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/REVERT.png new file mode 100644 index 00000000000..f8de1fe51b4 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/REVERT.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SENDL2TOL1MSG.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SENDL2TOL1MSG.png new file mode 100644 index 00000000000..0e0e529f3ab Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SENDL2TOL1MSG.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SET.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SET.png new file mode 100644 index 00000000000..90b07c3bcbe Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SET.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SHL.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SHL.png new file mode 100644 index 00000000000..78a1ca4e8e8 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SHL.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SHR.png 
b/yellow-paper/docs/public-vm/gen/images/bit-formats/SHR.png new file mode 100644 index 00000000000..0260c3e29e4 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SHR.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SLOAD.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SLOAD.png new file mode 100644 index 00000000000..edb4539d8ca Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SLOAD.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SSTORE.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SSTORE.png new file mode 100644 index 00000000000..efc6e0272ec Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SSTORE.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/STATICCALL.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/STATICCALL.png new file mode 100644 index 00000000000..4c48dbd0eeb Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/STATICCALL.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/SUB.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/SUB.png new file mode 100644 index 00000000000..1a58fee072d Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/SUB.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/TIMESTAMP.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/TIMESTAMP.png new file mode 100644 index 00000000000..2031f2f6306 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/TIMESTAMP.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/ULOG.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/ULOG.png new file mode 100644 index 00000000000..c0f17fdb67b Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/ULOG.png differ diff --git 
a/yellow-paper/docs/public-vm/gen/images/bit-formats/VERSION.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/VERSION.png new file mode 100644 index 00000000000..02b61144468 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/VERSION.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/bit-formats/XOR.png b/yellow-paper/docs/public-vm/gen/images/bit-formats/XOR.png new file mode 100644 index 00000000000..7fbb6381452 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/bit-formats/XOR.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/control-flow/avm-control-flow.png b/yellow-paper/docs/public-vm/gen/images/control-flow/avm-control-flow.png new file mode 100644 index 00000000000..1fc3e034208 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/control-flow/avm-control-flow.png differ diff --git a/yellow-paper/docs/public-vm/gen/images/state-model/memory.png b/yellow-paper/docs/public-vm/gen/images/state-model/memory.png new file mode 100644 index 00000000000..c19910b1730 Binary files /dev/null and b/yellow-paper/docs/public-vm/gen/images/state-model/memory.png differ diff --git a/yellow-paper/docs/public-vm/state-model.md b/yellow-paper/docs/public-vm/state-model.md new file mode 100644 index 00000000000..615260cd686 --- /dev/null +++ b/yellow-paper/docs/public-vm/state-model.md @@ -0,0 +1,105 @@ +# The Aztec VM State Model + +The goal of this note is to describe the VM state model and to specify "internal" VM abstractions that can be mapped to circuit designs. + +# A memory-only state model + +The AVM possesses three distinct data regions, accessed via distinct VM instructions: memory, calldata and returndata + +![](./gen/images/state-model/memory.png) + +All data regions are linear blocks of memory where each memory cell stores a finite field element. + +#### Main Memory + +Main memory stores the internal state of the current program being executed. 
+Can be written to as well as read. + +The main memory region stores _type tags_ alongside data values. [Type tags are explained further on in this document](#tagged-memory). + +#### Calldata + +Read-only data structure that stores the input data when executing a public function. + +#### Returndata + +When a function is called from within the public VM, the return parameters of the called function are present in returndata. + +### Registers (and their absence in the AVM) + +The AVM does not have external registers, i.e. a register that holds a persistent value that can be operated on from one opcode to the next. + +For example, in the x86 architecture, there exist 8 registers (%rax, %rbx etc). Instructions can operate either directly on register values (e.g. `add %rax %rbx`) or on values in memory that the register values point to (e.g. `add (%rax) (%rbx)`). + +> The AVM does not support registers as this would require each register to exist as a column in the VM execution trace. "registers" can be implemented as a higher-level abstraction by a compiler producing AVM bytecode, by reserving fixed regions of memory to represent registers. + +### Memory addressing mode + +In the AVM, an instruction operand `X` can refer to one of three quantities: + +1. A literal value `X` +2. A memory address `M[X]` +3. An indirect memory address `M[M[X]]` + +Indirect memory addressing is required in order to support read/writes into dynamically-sized data structures (the address parameter `X` is part of the program bytecode, which is insufficient to describe the location in memory of a dynamically-sized data structure). + +Memory addresses must be tagged to be a `u32` type. + +# Tagged memory + +We define a `tag` to refer to the potential maximum value of a cell of main memory.
The following tags are supported: + +| tag value | maximum memory cell value | +| --------- | ------------------------- | +| 0 | 0 | +| 1 | $2^8 - 1$ | +| 2 | $2^{16} - 1$ | +| 3 | $2^{32} - 1$ | +| 4 | $2^{64} - 1$ | +| 5 | $2^{128} - 1$ | +| 6 | $p - 1$ | + +Note: $p$ describes the modulus of the finite field that the AVM circuit is defined over (i.e. number of points on the BN254 curve). + +The purpose of a tag is to inform the VM of the maximum possible length of an operand value that has been loaded from memory. + +Multiple AVM instructions explicitly operate over range-constrained input parameters (e.g. ADD32). The maximum allowable value for an instruction's input parameters is defined via an _instruction tag_. Two potential scenarios result: + +1. A VM instruction's tag value matches the input parameter tag values +2. A VM instruction's tag value does not match the input parameter tag values + +If case 2 is triggered, an error flag is raised. + +--- + +### Writing into memory + +It is required that all VM instructions that write into main memory explicitly define the tag of the output value and ensure the value is appropriately constrained to be consistent with the assigned tag. + +--- + +### MOV and tag conversions + +The MOV instruction copies data between memory cells, preserving tags. + +The only VM instruction that can be used to cast between tags is CAST. There are 2 modes to CAST: + +1. The destination tag describes a maximum value that is _less than_ the source tag +2. The destination tag describes a maximum value that is _greater than or equal to_ the source tag + +For Case 1, range constraints must be applied to ensure the destination value is consistent with the source value after tag truncations have been applied. + +Case 2 is trivial as no additional consistency checks must be performed between source and destination values.
+ +--- + +### Calldata/returndata and tag conversions + +All elements in calldata/returndata are implicitly tagged as field elements (i.e. maximum value is $p - 1$). To perform a tag conversion, calldata/returndata must be copied into main memory, followed by an appropriate MOV instruction. + +## VM threat model, security requirements + +TODO: move this somewhere else, doesn't quite fit. + +An honest Prover must always be able to construct a satisfiable proof for an AVM program, even if the program throws an error. +This implies constraints produced by the AVM **must** be satisfiable. diff --git a/yellow-paper/docs/public-vm/tagged-memory.md b/yellow-paper/docs/public-vm/tagged-memory.md new file mode 100644 index 00000000000..a72c693da93 --- /dev/null +++ b/yellow-paper/docs/public-vm/tagged-memory.md @@ -0,0 +1,60 @@ +# Tagged Memory - An instruction-set centric explanation + +## Explanation of Tagged Memory +Every word in memory will have an associated `type-tag` (unset, u8, u16, u32, u64, u128, field). For memory address `a`, we refer to the corresponding memory word's `type-tag` as `T[a]`. + +Every instruction will be flagged with an `op-type` in bytecode (u8, u16, u32, u64, u128, field). + +If an instruction uses a "source operand" as a memory location (e.g. `z = M[s0] + y`), the VM first retrieves the `type-tag` referenced by the operand (`T[s0]`) and enforces that it matches `op-type`. The VM enforces this for all source operands used for direct memory reads. + +If an instruction uses a "dest operand" as a memory location (e.g. `M[d0] = x + y`), when the VM assigns a word to that memory location, it also assigns the corresponding `type-tag` (`T[d0] = op-type`). The VM performs this tag assignment for all dest operands used for direct memory writes. + +**If an instruction fails any of its operand type-tag-checks, the current call's execution reverts!** + +### `ADD<32>` example +`ADD<32>` is an `ADD` instruction with `op-type` u32.
As elaborated on later, an `ADD` performs `M[d0] = M[s0] + M[s1]`. In this case, both `s0` and `s1` are "source operands" used for direct memory reads to retrieve inputs to an addition. So, the VM enforces the `op-type(u32) == T[s0] == T[s1]`. `d0` here is a "dest operand" used for a direct memory write to store the output of the addition. So, the VM tags memory location `d0` with `type-tag` of u32: `T[d0] = op-type(u32)`. + +Here is a summary of what is happening for `ADD<32>`: +``` +assert T[s0] == u32 // enforce that source memory locations' type-tags == op-type +assert T[s1] == u32 +T[d0] = u32 // tag destination memory location as op-type +M[d0] = M[s0] + M[s1] +``` + + +### Type tags and `CASTs` + +`CAST` is different from other instructions in that it will be flagged with an additional `dest-type`. So, a `CAST` will assign `dest-type` (instead of `op-type`) to the memory location specified by its "dest operand" `d0`. `CAST<32, 64>` enforces that `T[s0]` matches u32 (the `op-type`) and assigns `T[d0] = u64` (the `dest-type`). + +Here is a summary of what is happening for a `CAST<32, 64>`: +``` +assert T[s0] == u32 // enforce that source memory location's type-tags == op-type +T[d0] = u64 // tag destination memory location as dest-type +M[d0] = M[s0] +``` + +### Type tags and indirect `MOVs` +A basic `MOV` instruction performs direct memory accesses and operates in the same way as a simple `ADD` instruction as outlined above. A simple `MOV<64>` would do: +``` +assert T[s0] == u64 // enforce that source memory location's type-tag == op-type +T[d0] = u64 // tag destination memory location with op-type +M[d0] = M[s0] +``` + +Consider a `MOV<64, s0-indirect>`, which treats s0 as an indirect memory pointer to perform `M[d0] = M[M[s0]]`.
Here, the VM first needs to enforce that `M[s0]` is a valid memory address (has type u32), and it then needs to perform the standard check that resulting word has type matching `op-type`: +``` +assert T[s0] == u32 // enforce that the direct source memory location contains a valid address (type-tag == u32) +assert T[M[s0]] == u64 // enforce that the indirect source memory location's type-tag == op-type +T[d0] = u64 // tag destination memory location with op-type +M[d0] = M[M[s0]] +``` + +Similarly, a `MOV<64, d0-indirect>` treats d0 as an indirect memory pointer to perform `M[M[d0]] = M[s0]`, and here the VM first needs to enforce that `M[d0]` is a valid memory address (has type u32) before assigning the destination location its type tag: +``` +assert T[s0] == u64 // enforce that source memory location's type-tag == op-type +assert T[d0] == u32 // enforce that the direct destination memory location contains a valid address (type-tag == u32) +T[M[d0]] = u64 // tag indirect destination memory location with op-type +M[M[d0]] = M[s0] +``` + diff --git a/yellow-paper/docs/transactions/index.md b/yellow-paper/docs/transactions/index.md new file mode 100644 index 00000000000..d9d7bbb3e46 --- /dev/null +++ b/yellow-paper/docs/transactions/index.md @@ -0,0 +1,15 @@ +--- +title: Transactions +--- + +# Transactions + +A transaction is the minimal action that changes the state of the network. Transactions in Aztec have a private and a public component, where the former is executed in the user's private execution environment (PXE) and the latter by the sequencer. + +A transaction is also split into three phases to [support authorization abstraction and fee payments](../gas-and-fees/gas-and-fees.md#fees): a validation and fee preparation phase, a main execution phase, and fee distribution phase. 
+ +Users initiate a transaction by sending a _transaction request_ to their local PXE, which [locally simulates and proves the transaction](./local-execution.md) and returns a [_transaction_ object](./tx-object.md) identified by a [_transaction hash_](./tx-object.md#transaction-hash). This transaction object is then broadcasted to the network via an Aztec Node, which checks its [validity](./validity.md), and eventually picked up by a sequencer who [executes the public component of the transaction](./public-execution.md) and includes it in a block. + +import DocCardList from '@theme/DocCardList'; + + diff --git a/yellow-paper/docs/transactions/local-execution.md b/yellow-paper/docs/transactions/local-execution.md new file mode 100644 index 00000000000..154655706d5 --- /dev/null +++ b/yellow-paper/docs/transactions/local-execution.md @@ -0,0 +1,31 @@ +# Local Execution + +Transactions are initiated via a _transaction execution request_ sent from the user to their local _private execution environment_ (PXE). The PXE first executes the transaction locally in a _simulation_ step, and then generates a _zero-knowledge proof_ of correct execution. The PXE is then responsible for converting a _transaction execution request_ into a [_transaction_](./tx-object.md) ready to be broadcasted to the network. + +## Execution request + +A transaction execution request has the following structure. Note that, since Aztec uses full native account abstraction where every account is backed by a contract, a transaction execution request only needs to provide the contract address, function, and arguments of the initial call; nonces and signatures are arguments to the call, and thus opaque to the protocol. + +| Field | Type | Description | +|----------|----------|----------| +| origin | AztecAddress | Address of the contract where the transaction is initiated. | +| functionSelector | Field | Selector (identifier) of the function to be called as entrypoint in the origin contract. 
| +| argsHash | Field | Hash of the arguments to be used for calling the entrypoint function. | +| txContext | TxContext | Includes contract deployment data (if this tx is used to deploy a contract), chain id, and protocol version. | +| packedArguments | PackedArguments[] | Preimages for argument hashes. When executing a function call with the hash of the arguments, the PXE will look for the preimage of that hash in this list, and expand the arguments to execute the call. | +| authWitnesses | AuthWitness[] | Authorization witnesses. When authorizing an action identified by a hash, the PXE will look for the authorization witness identified by that hash and provide that value to the account contract. | + +## Simulation step + +Upon receiving a transaction execution request to _simulate_, the PXE will locally execute the function identified by the given `functionSelector` in the given `origin` contract with the arguments committed to by `argsHash`. We refer to this function as the _entrypoint_. During execution, contracts may request authorization witnesses or expanded arguments from the _execution oracle_, which are answered with the `packedArguments` and `authWitnesses` from the request. + +The _entrypoint_ may enqueue additional function calls, either private or public, and so forth. The simulation step will always execute all private functions in the call stack until emptied. The result of the simulation is a [_transaction_](./tx-object.md) object without an associated _proof_ which is returned to the application that requested the simulation. + +In terms of circuitry, the simulation step must execute all application circuits that correspond to private function calls, and then execute the private kernel circuit until the private call stack is empty. Note that circuits are only executed, there is no witness generation or proving involved. 
+ +## Proving step + +The proving step is similar to the simulation step, though witnesses are generated for all circuits and proven. Note that it is not necessary to execute the simulation step before the proving step, though it is desirable in order to provide the user with info on their transaction and catch any failed assertions early. + +The output of the proving step is a [_transaction_](./tx-object.md) object with a valid _proof_ associated, ready to be broadcasted to the network. + diff --git a/yellow-paper/docs/transactions/public-execution.md b/yellow-paper/docs/transactions/public-execution.md new file mode 100644 index 00000000000..e281acad277 --- /dev/null +++ b/yellow-paper/docs/transactions/public-execution.md @@ -0,0 +1,16 @@ +# Public execution + +Transactions have a _public execution_ component. Once a transaction is picked up by a sequencer to be included in a block, the sequencer is responsible for executing all enqueued public function calls in the transaction. These are defined by the `data.accumulatedData.publicCallStack` field of the [transaction object](./tx-object.md), which are commitments to the preimages of the `enqueuedPublicFunctionCalls` in the transaction. The sequencer pops function calls from the stack, and pushes new ones as needed, until the public call stack is empty. + +## Bytecode + +Unlike private functions, which are native circuits, public functions in the Aztec Network are specified in Brillig, a zkVM-friendly bytecode. This bytecode is executed and proven in the Brillig public virtual machine. Each function call is a run of the virtual machine, and a _public kernel circuit_ aggregates these calls and produces a final proof for the transaction, which also includes the _private kernel circuit_ proof of the transaction generated during [local execution](./local-execution.md). 
+ +## State + +Since public execution is run by the sequencer, it is run on the state of the chain as it is when the transaction is included in the block. Public functions operate on _public state_, an updateable key-value mapping, instead of notes. + +## Reverts + +Note that, unlike local private execution, public execution can _revert_ due to a failed assertion, running out of gas, trying to call a non-existing function, or other failures. If this happens, the sequencer halts execution and discards all side effects from the [transaction payload phase](../gas-and-fees/gas-and-fees.md#transaction-payload). The transaction is still included in the block and pays fees, but is flagged as reverted. + diff --git a/yellow-paper/docs/transactions/tx-object.md b/yellow-paper/docs/transactions/tx-object.md new file mode 100644 index 00000000000..ccfa2f7d4a7 --- /dev/null +++ b/yellow-paper/docs/transactions/tx-object.md @@ -0,0 +1,85 @@ +# Transaction object + +The transaction object is the struct broadcasted to the p2p network, generated by [_local execution_](./local-execution.md) by the user's PXE. Sequencers pick up transactions from the p2p network to include in a block. + +## Transaction object struct + +The fields of a transaction object are the following: + +| Field | Type | Description | +|----------|----------|----------| +| data | PrivateKernelPublicInputsFinal | Public inputs (ie output) of the last iteration of the private kernel circuit for this transaction. | +| proof | Buffer | Zero-knowledge honk proof for the last iteration of the private kernel circuit for this transaction. | +| encryptedLogs | Buffer[][] | Encrypted logs emitted per function in this transaction. Position `i` contains the encrypted logs emitted by the `i`-th function execution. | +| unencryptedLogs | Buffer[][] | Equivalent to the above but for unencrypted logs. | +| enqueuedPublicFunctionCalls | PublicCallRequest[] | List of public function calls to run during public execution. 
| +| newContracts | ExtendedContractData[] | List of new contracts to be deployed as part of this transaction. | + +### Private kernel public inputs final + +Output of the last iteration of the private kernel circuit. Includes _accumulated data_ after recursing through all private function calls, as well as _constant data_ composed of _block header_ reflecting the state of the chain when such functions were executed, and the global _transaction context_. Refer to the circuits section for more info. + +**Accumulated data** + +| Field | Type | Description | +|-------|------|-------------| +| aggregationObject | AggregationObject | Aggregated proof of all the previous kernel iterations. | +| newCommitments | Field[] | The new commitments made in this transaction. | +| newNullifiers | Field[] | The new nullifiers made in this transaction. | +| nullifiedCommitments | Field[] | The commitments which are nullified by a nullifier in the above list. | +| privateCallStack | Field[] | Current private call stack. | +| publicCallStack | Field[] | Current public call stack. | +| newL2ToL1Msgs | Field[] | All the new L2 to L1 messages created in this transaction. | +| encryptedLogsHash | Field[] | Accumulated encrypted logs hash from all the previous kernel iterations. | +| unencryptedLogsHash | Field[] | Accumulated unencrypted logs hash from all the previous kernel iterations. | +| encryptedLogPreimagesLength | Field | Total accumulated length of the encrypted log preimages emitted in all the previous kernel iterations. | +| unencryptedLogPreimagesLength | Field | Total accumulated length of the unencrypted log preimages emitted in all the previous kernel iterations. | +| newContracts | NewContractData[] | All the new contracts deployed in this transaction. | +| maxBlockNum | Field | Maximum block number (inclusive) for inclusion of this transaction in a block. 
| + +**Block header** + +| Field | Type | Description | +|-------|------|-------------| +| noteHashTreeRoot | Field | Root of the note hash tree at the time of when this information was assembled. | +| nullifierTreeRoot | Field | Root of the nullifier tree at the time of when this information was assembled. | +| contractTreeRoot | Field | Root of the contract tree at the time of when this information was assembled. | +| l1ToL2MessagesTreeRoot | Field | Root of the L1 to L2 messages tree at the time of when this information was assembled. | +| blocksTreeRoot | Field | Root of the historic blocks tree at the time of when this information was assembled. | +| privateKernelVkTreeRoot | Field | Root of the private kernel VK tree at the time of when this information was assembled (future enhancement). | +| publicDataTreeRoot | Field | Current public state tree hash. | +| globalVariablesHash | Field | Previous globals hash, this value is used to recalculate the block hash. | + +### Public call request + +Each _public call request_ is the preimage of a public call stack item in the transaction's `data`, and has the following fields: + +| Field | Type | Description | +|----------|----------|----------| +| contractAddress | AztecAddress | Address of the contract on which the function is invoked. | +| callContext | CallContext | Includes function selector and caller. | +| args | Field[] | Arguments to the function call. | +| sideEffectCounter | number? | Optional counter for ordering side effects of this function call. | + +### Extended contract data + +Each _extended contract data_ corresponds to a contract being deployed by the transaction, and has the following fields: + +| Field | Type | Description | +|----------|----------|----------| +| address | AztecAddress | Address where the contract is to be deployed. | +| portalAddress | EthereumAddress | Portal address on L1 for this contract (zero if none). 
+| bytecode | Buffer | Encoded Brillig bytecode for all public functions in the contract. | +| publicKey | PublicKey | Master public encryption key for this contract (zero if none). | +| partialAddress | Field | Hash of the constructor arguments, salt, and bytecode. | + +## Transaction hash + +A transaction is identified by its _transaction hash_. In order to be able to identify a transaction before it has been locally executed, the hash is computed from its [_transaction execution request_](./local-execution.md#execution-request) by hashing: + +- `origin` +- `functionSelector` +- `argsHash` +- `txContent` + +The resulting transaction hash is always emitted during local execution as the first nullifier of the transaction, in order to prevent replay attacks. This is enforced by the private kernel circuit. \ No newline at end of file diff --git a/yellow-paper/docs/transactions/validity.md b/yellow-paper/docs/transactions/validity.md new file mode 100644 index 00000000000..9d464e0d858 --- /dev/null +++ b/yellow-paper/docs/transactions/validity.md @@ -0,0 +1,18 @@ +# Validity conditions + +The _validity conditions_ of a transaction define when a [_transaction object_](./tx-object.md) is valid. Nodes should check the validity of a transaction when they receive it either directly or through the p2p pool, and if they find it invalid, should drop it immediately and not broadcast it. + +In addition to being well-formed, the transaction object needs to pass the following checks: + +- **Proof is valid**: The `proof` for the given public `data` should be valid according to a protocol-wide verification key for the final private kernel circuit. +- **No double-spends**: No `nullifier` in the transaction `data` should be already present in the nullifier tree. +- **No pending private function calls**: The `data` private call stack should be empty. +- **Valid historic data**: The tree roots in the block header of `data` must match the tree roots of a block in the chain. 
+- **Maximum block number not exceeded**: The transaction must be included in a block with height no greater than the value specified in `maxBlockNum` within the transaction's `data`. +- **Preimages must match commitments in `data`**: The expanded fields in the transaction object should match the commitments (hashes) to them in the public `data`. + - The `encryptedLogs` should match the `encryptedLogsHash` and `encryptedLogPreimagesLength` in the transaction `data`. + - The `unencryptedLogs` should match the `unencryptedLogsHash` and `unencryptedLogPreimagesLength` in the transaction `data`. + - Each public call stack item in the transaction `data` should have a corresponding preimage in the `enqueuedPublicFunctionCalls`. + - Each new contract data in transaction `data` should have a corresponding preimage in the `newContracts`. + +Note that all checks but the last one are enforced by the base rollup circuit when the transaction is included in a block. \ No newline at end of file diff --git a/yellow-paper/docusaurus.config.js b/yellow-paper/docusaurus.config.js index 9cf437f4e02..e8ca3ea0488 100644 --- a/yellow-paper/docusaurus.config.js +++ b/yellow-paper/docusaurus.config.js @@ -36,6 +36,11 @@ const config = { locales: ["en"], }, + markdown: { + mermaid: true, + }, + themes: ["@docusaurus/theme-mermaid"], + presets: [ [ "classic", diff --git a/yellow-paper/package.json b/yellow-paper/package.json index ad92226d9ad..ad63a831f4f 100644 --- a/yellow-paper/package.json +++ b/yellow-paper/package.json @@ -4,13 +4,14 @@ "private": true, "scripts": { "docusaurus": "docusaurus", - "start": "docusaurus start", - "start:dev": "docusaurus start --host 0.0.0.0", + "start": "yarn preprocess && docusaurus start", + "start:dev": "yarn preprocess && docusaurus start --host 0.0.0.0", "build": "docusaurus build", "swizzle": "docusaurus swizzle", "deploy": "docusaurus deploy", "clear": "docusaurus clear", "serve": "docusaurus serve", + "preprocess": "yarn node 
./src/preprocess/index.js", "write-translations": "docusaurus write-translations", "write-heading-ids": "docusaurus write-heading-ids", "typecheck": "tsc" @@ -18,11 +19,13 @@ "dependencies": { "@docusaurus/core": "2.4.3", "@docusaurus/preset-classic": "2.4.3", + "@docusaurus/theme-mermaid": "^2.4.3", "@mdx-js/react": "^1.6.22", "clsx": "^1.2.1", "prism-react-renderer": "^1.3.5", "react": "^17.0.2", "react-dom": "^17.0.2", + "react-markdown": "6.0.0", "rehype-katex": "5", "remark-math": "3" }, diff --git a/yellow-paper/src/preprocess/InstructionSet/InstructionSet.js b/yellow-paper/src/preprocess/InstructionSet/InstructionSet.js new file mode 100644 index 00000000000..578ac938aaf --- /dev/null +++ b/yellow-paper/src/preprocess/InstructionSet/InstructionSet.js @@ -0,0 +1,1102 @@ +const {instructionSize} = require('./InstructionSize'); + +const TOPICS_IN_TABLE = [ + "Name", "Summary", "Bit-size", "Expression", +]; +const TOPICS_IN_SECTIONS = [ + "Name", "Summary", "Category", "Flags", "Args", "Expression", "Details", "Tag checks", "Tag updates", "Bit-size", +]; + +const OP_TYPE_DESCRIPTION = "The [type/size](./Types) to check inputs against and tag the output with."; +const DEST_TYPE_DESCRIPTION = "The [type/size](./Types) to tag the output with when different from `op-type`."; +const INDIRECT_FLAG_DESCRIPTION = "Toggles whether each memory-offset argument is an indirect offset. 0th bit corresponds to 0th offset arg, etc. 
Indirect offsets result in memory accesses like `M[M[offset]]` instead of the more standard `M[offset]`."; + +const INSTRUCTION_SET_RAW = [ + { + "id": "add", + "Name": "`ADD`", + "Category": "arithmetic", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] + M[bOffset] mod 2^k`", + "Summary": "Addition (a + b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "sub", + "Name": "`SUB`", + "Category": "arithmetic", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] - M[bOffset] mod 2^k`", + "Summary": "Subtraction (a - b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "div", + "Name": "`DIV`", + "Category": "arithmetic", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the 
operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] / M[bOffset]`", + "Summary": "Unsigned division (a / b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "eq", + "Name": "`EQ`", + "Category": "conditional", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result", "type": "u8"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] == M[bOffset] ? 1 : 0`", + "Summary": "Equality check (a == b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "lt", + "Name": "`LT`", + "Category": "conditional", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result", "type": "u8"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] < M[bOffset] ? 
1 : 0`", + "Summary": "Less-than check (a < b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "lte", + "Name": "`LTE`", + "Category": "conditional", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result", "type": "u8"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] <= M[bOffset] ? 1 : 0`", + "Summary": "Less-than-or-equals check (a <= b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "and", + "Name": "`AND`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] AND M[bOffset]`", + "Summary": "Bitwise AND (a & b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "or", + "Name": "`OR`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": 
"aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] OR M[bOffset]`", + "Summary": "Bitwise OR (a | b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "xor", + "Name": "`XOR`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] XOR M[bOffset]`", + "Summary": "Bitwise XOR (a ^ b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "not", + "Name": "`NOT`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = NOT M[aOffset]`", + "Summary": "Bitwise NOT (inversion)", + "Details": "", + "Tag checks": "`T[aOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "shl", + "Name": "`SHL`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", 
"description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] << M[bOffset]`", + "Summary": "Bitwise leftward shift (a << b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "shr", + "Name": "`SHR`", + "Category": "bitwise", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": OP_TYPE_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of the operation's left input"}, + {"name": "bOffset", "description": "memory offset of the operation's right input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[aOffset] >> M[bOffset]`", + "Summary": "Bitwise rightward shift (a >> b)", + "Details": "", + "Tag checks": "`T[aOffset] == T[bOffset] == op-type`", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "cast", + "Name": "`CAST`", + "Category": "types", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "dest-type", "description": DEST_TYPE_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of word to cast"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = cast(M[aOffset])`", + "Summary": "Type cast", + "Details": "Cast a word in memory 
based on the `dest-type` specified in the bytecode. Truncates when casting to a smaller type, left-zero-pads when casting to a larger type.", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = dest-type`", + }, + { + "id": "set", + "Name": "`SET`", + "Category": "memory", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + {"name": "op-type", "description": "The [type/size](./Types) to check inputs against and tag the output with. `field` type is NOT supported for SET."}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "const", "description": "an N-bit constant value from the bytecode to store in memory (any type except `field`)", "mode": "immediate"}, + {"name": "dstOffset", "description": "memory offset specifying where to store the constant"}, + ], + "Expression": "`M[dstOffset] = const`", + "Summary": "Set a memory word from a constant in the bytecode.", + "Details": "Set memory word at `dstOffset` to `const`'s immediate value. `const`'s bit-size (N) can be 8, 16, 32, 64, or 128 based on `op-type`. 
It _cannot be 254 (`field` type)_!", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = op-type`", + }, + { + "id": "mov", + "Name": "`MOV`", + "Category": "memory", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "srcOffset", "description": "memory offset of word to move"}, + {"name": "dstOffset", "description": "memory offset specifying where to store that word"}, + ], + "Expression": "`M[dstOffset] = M[srcOffset]`", + "Summary": "Move a word from source memory location to destination`.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = T[srcOffset]`", + }, + { + "id": "cmov", + "Name": "`CMOV`", + "Category": "memory", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "3", + "#memwrites": "1", + "Args": [ + {"name": "aOffset", "description": "memory offset of word 'a' to conditionally move"}, + {"name": "bOffset", "description": "memory offset of word 'b' to conditionally move"}, + {"name": "condOffset", "description": "memory offset of the operations 'conditional' input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = M[condOffset] > 0 ? M[aOffset] : M[bOffset]`", + "Summary": "Move a word (conditionally chosen) from one memory location to another (`d = cond > 0 ? a : b`).", + "Details": "One of two source memory locations is chosen based on the condition. `T[condOffset]` is not checked because the greater-than-zero suboperation is the same regardless of type.", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = M[condOffset] > 0 ? 
T[aOffset] : T[bOffset]`", + }, + { + "id": "calldatacopy", + "Name": "`CALLDATACOPY`", + "Category": "contract calls", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "`s1`", + "#memwrites": "`s1`", + "Args": [ + {"name": "cdOffset", "description": "offset into calldata to copy from"}, + {"name": "size", "description": "number of words to copy", "mode": "immediate", "type": "u24"}, + {"name": "dstOffset", "description": "memory offset specifying where to copy the first word to"}, + ], + "Expression": "`M[dstOffset:dstOffset+size] = calldata[cdOffset:cdOffset+size]`", + "Summary": "Copy calldata into memory.", + "Details": "Calldata is read-only and cannot be directly operated on by other instructions. This instruction moves words from calldata into memory so they can be operated on normally.", + "Tag checks": "", + "Tag updates": "`T[dstOffset:dstOffset+size] = field`", + }, + { + "id": "sload", + "Name": "`SLOAD`", + "Category": "storage & messaging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "1", + "Args": [ + {"name": "slotOffset", "description": "memory offset of the storage slot to load from"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = storage[M[slotOffset]]`", + "Summary": "Load a word from storage.", + "Details": "Load a word from this contract's persistent public storage into memory.", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "sstore", + "Name": "`SSTORE`", + "Category": "storage & messaging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "2", + "#memwrites": "0", + "Args": [ + {"name": "srcOffset", "description": "memory offset of the word to store"}, + {"name": "slotOffset", "description": "memory offset containing the storage slot to store 
to"}, + ], + "Expression": "`storage[M[slotOffset]] = M[srcOffset]`", + "Summary": "Write a word to storage.", + "Details": "Store a word from memory into this contract's persistent public storage.", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "emitnotehash", + "Name": "`EMITNOTEHASH`", + "Category": "storage & messaging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "0", + "Args": [ + {"name": "noteHashOffset", "description": "memory offset of the note hash"}, + ], + "Expression": `emitNoteHash(M[contentOffset])`, + "Summary": "Emit a new note hash to be inserted into the notes tree", + "Details": "", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "emitnullifier", + "Name": "`EMITNULLIFIER`", + "Category": "storage & messaging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "0", + "Args": [ + {"name": "nullifierOffset", "description": "memory offset of nullifier"}, + ], + "Expression": `emitNullifier(M[nullifierOffset])`, + "Summary": "Emit a new nullifier to be inserted into the nullifier tree", + "Details": "", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "sendl2tol1msg", + "Name": "`SENDL2TOL1MSG`", + "Category": "storage & messaging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "0", + "Args": [ + {"name": "contentOffset", "description": "memory offset of the message content"}, + ], + "Expression": `sendL2ToL1Message(M[contentOffset])`, + "Summary": "Send an L2-to-L1 message", + "Details": "", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "jump", + "Name": "`JUMP`", + "Category": "control", + "Flags": [], + "#memreads": "0", + "#memwrites": "0", + "Args": [ + {"name": "loc", "description": "target location to jump to", "mode": "immediate", "type": "u24"}, + ], + "Expression": "`PC = loc`", + 
"Summary": "Jump to a location in the bytecode.", + "Details": "Target location is an immediate value (a constant in the bytecode).", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "jumpi", + "Name": "`JUMPI`", + "Category": "control", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "3", + "#memwrites": "0", + "Args": [ + {"name": "loc", "description": "target location conditionally jump to", "mode": "immediate", "type": "u24"}, + {"name": "condOffset", "description": "memory offset of the operations 'conditional' input"}, + ], + "Expression": "`PC = M[condOffset] > 0 ? loc : PC`", + "Summary": "Conditionally jump to a location in the bytecode.", + "Details": "Target location is an immediate value (a constant in the bytecode). `T[condOffset]` is not checked because the greater-than-zero suboperation is the same regardless of type.", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "return", + "Name": "`RETURN`", + "Category": "contract calls", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "`s1`", + "#memwrites": "0", + "Args": [ + {"name": "offset", "description": "memory offset of first word to return"}, + {"name": "size", "description": "number of words to return", "mode": "immediate", "type": "u24"}, + ], + "Expression": "`return(M[offset:offset+size])`", + "Summary": "Halt execution with `success`, optionally returning some data.", + "Details": "Return control flow to the calling context/contract.", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "revert", + "Name": "`REVERT`", + "Category": "contract calls", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "`s1`", + "#memwrites": "0", + "Args": [ + {"name": "offset", "description": "memory offset of first word to return"}, + {"name": "size", "description": "number of words to return", "mode": "immediate", "type": "u24"}, + ], + 
"Expression": "`revert(M[offset:offset+size])`", + "Summary": "Halt execution with `failure`, reverting state changes and optionally returning some data.", + "Details": "Return control flow to the calling context/contract.", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "call", + "Name": "`CALL`", + "Category": "contract calls", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "5", + "#memwrites": "`1+retSize`", + "Args": [ + {"name": "l1GasOffset", "description": "amount of L1 gas to provide to the callee"}, + {"name": "l2GasOffset", "description": "amount of L2 gas to provide to the callee"}, + {"name": "addrOffset", "description": "address of the contract to call"}, + {"name": "argsOffset", "description": "memory offset to args (will become the callee's calldata)"}, + {"name": "argsSize", "description": "number of words to pass via callee's calldata", "mode": "immediate", "type": "u24"}, + {"name": "retOffset", "description": "destination memory offset specifying where to store the data returned from the callee"}, + {"name": "retSize", "description": "number of words to copy from data returned by callee", "mode": "immediate", "type": "u24"}, + {"name": "successOffset", "description": "destination memory offset specifying where to store the call's success (0: failure, 1: success)", "type": "u8"}, + ], + "Expression":` +M[successOffset] = call( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize]) +`, + "Summary": "Call into another contract.", + "Details": `Creates a new CallContext, triggers execution of the corresponding contract code, + and then resumes execution in the current CallContext. A non-existent contract or one + with no code will return success. 
Nested call has an incremented \`CallContext.calldepth\`.`, + "Tag checks": "`T[l1GasOffset] == T[l2GasOffset] == u32`", + "Tag updates": ` +T[successOffset] = u8 +T[retOffset:retOffset+retSize] = field +`, + }, + { + "id": "staticcall", + "Name": "`STATICCALL`", + "Category": "contract calls", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "5", + "#memwrites": "`1+retSize`", + "Args": [ + {"name": "l1GasOffset", "description": "amount of L1 gas to provide to the callee"}, + {"name": "l2GasOffset", "description": "amount of L2 gas to provide to the callee"}, + {"name": "addrOffset", "description": "address of the contract to call"}, + {"name": "argsOffset", "description": "memory offset to args (will become the callee's calldata)"}, + {"name": "argsSize", "description": "number of words to pass via callee's calldata", "mode": "immediate", "type": "u24"}, + {"name": "retOffset", "description": "destination memory offset specifying where to store the data returned from the callee"}, + {"name": "retSize", "description": "number of words to copy from data returned by callee", "mode": "immediate", "type": "u24"}, + {"name": "successOffset", "description": "destination memory offset specifying where to store the call's success (0: failure, 1: success)", "type": "u8"}, + ], + "Expression": ` +M[successOffset] = staticcall( + M[l1GasOffset], M[l2GasOffset], M[addrOffset], + M[argsOffset], M[argsSize], + M[retOffset], M[retSize]) +`, + "Summary": "Call into another contract, disallowing persistent state modifications.", + "Details": "Same as `CALL`, but the callee is cannot modify persistent state. 
Disallowed instructions are `SSTORE`, `ULOG`, `CALL`.", + "Tag checks": "`T[l1GasOffset] == T[l2GasOffset] == u32`", + "Tag updates": ` +T[successOffset] = u8 +T[retOffset:retOffset+retSize] = field +`, + }, + { + "id": "ulog", + "Name": "`ULOG`", + "Category": "logging", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "`s1`", + "#memwrites": "0", + "Args": [ + {"name": "offset", "description": "memory offset of the data to log"}, + {"name": "size", "description": "number of words to log", "mode": "immediate", "type": "u24"}, + ], + "Expression": "`ulog(M[offset:offset+size])`", + "Summary": "Emit an unencrypted log with data from the `field` memory page", + "Details": "", + "Tag checks": "", + "Tag updates": "", + }, + { + "id": "chainid", + "Name": "`CHAINID`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.chainId`", + "Summary": "Get this rollup's L1 chain ID", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "version", + "Name": "`VERSION`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.version`", + "Summary": "Get this rollup's L2 version ID", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "blocknumber", + "Name": "`BLOCKNUMBER`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + 
{"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.blocknumber`", + "Summary": "Get this block's number", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "timestamp", + "Name": "`TIMESTAMP`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.timestamp`", + "Summary": "Get this L2 block's timestamp", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u64`", + }, + { + "id": "coinbase", + "Name": "`COINBASE`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.coinbase`", + "Summary": "Get the block's beneficiary address", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "blockl1gaslimit", + "Name": "`BLOCKL1GASLIMIT`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.l1GasLimit`", + "Summary": "Total amount of \"L1 gas\" that a block can consume", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "blockl2gaslimit", + "Name": "`BLOCKL2GASLIMIT`", + "Category": "block info", + "Flags": [ + {"name": "indirect", "description": 
INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = Globals.l2GasLimit`", + "Summary": "Total amount of \"L2 gas\" that a block can consume", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "notesroot", + "Name": "`NOTESROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root`", + "Summary": "Get the historical note-hash tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "nullroot", + "Name": "`NULLIFIERSROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].nullifier_tree_root`", + "Summary": "Get the historical nullifier tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "contractsroot", + "Name": "`CONTRACTSROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + 
"Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].contracts_tree_root`", + "Summary": "Get the historical contracts tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "msgsroot", + "Name": "`MSGSROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].l1_to_l2_messages_tree_root`", + "Summary": "Get the historical l1-to-l2 messages tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "notesroot", + "Name": "`NOTESROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].note_hash_tree_root`", + "Summary": "Get the historical note-hash tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "publicdataroot", + "Name": "`PUBLICDATAROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", 
"description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].public_data_tree_root`", + "Summary": "Get the historical public data tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "globalshash", + "Name": "`GLOBALSHASH`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].global_variables_hash`", + "Summary": "Get the historical global variables hash as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "blocksroot", + "Name": "`BLOCKSROOT`", + "Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].blocks_tree_root`", + "Summary": "Get the historical blocks tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "grandroot", + "Name": "`GRANDROOT`", + 
"Category": "historical access", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "1", + "#memwrites": "1", + "Args": [ + {"name": "blockNumOffset", "description": "memory offset of the block number input"}, + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = HistoricalBlockData[M[blockNumOffset]].grandfather_tree_root`", + "Summary": "Get the historical grandfather tree root as of the specified block number.", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = field`", + }, + { + "id": "origin", + "Name": "`ORIGIN`", + "Category": "tx context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = TxContext.origin`", + "Summary": "Get the transaction's origination address", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "refundee", + "Name": "`REFUNDEE`", + "Category": "tx context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = TxContext.refundee`", + "Summary": "The recipient of fee refunds for this transaction", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "feeperl1gas", + "Name": "`FEEPERL1GAS`", + "Category": "tx context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + 
"Expression": "`M[dstOffset] = TxContext.feePerL1Gas`", + "Summary": "The fee to be paid per \"L1 gas\" - set by the transaction's original caller", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "feeperl2gas", + "Name": "`FEEPERL2GAS`", + "Category": "tx context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = TxContext.feePerL2Gas`", + "Summary": "The fee to be paid per \"L2 gas\" - set by the transaction's original caller", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "caller", + "Name": "`CALLER`", + "Category": "call context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = CallContext.sender`", + "Summary": "Get the address of the sender (the caller's context)", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "address", + "Name": "`ADDRESS`", + "Category": "call context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = CallContext.storageContractAddress`", + "Summary": "Get the address of the currently executing l2 contract", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "portal", + "Name": "`PORTAL`", + "Category": "call context", + "Flags": [ + {"name": "indirect", "description": 
INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = CallContext.portalAddress`", + "Summary": "Get the address of the l1 portal contract", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "calldepth", + "Name": "`CALLDEPTH`", + "Category": "call context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = CallContext.calldepth`", + "Summary": "Get how many calls deep the current call context is", + "Details": "Note: security issues with EVM's tx.origin can be resolved by asserting the `calldepth == 0`.", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u8`", + }, + { + "id": "l1gas", + "Name": "`L1GAS`", + "Category": "latest context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = LatestContext.l1Gas`", + "Summary": "Remaining \"L1 gas\" for this call (after this instruction).", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, + { + "id": "l2gas", + "Name": "`L2GAS`", + "Category": "latest context", + "Flags": [ + {"name": "indirect", "description": INDIRECT_FLAG_DESCRIPTION}, + ], + "#memreads": "0", + "#memwrites": "1", + "Args": [ + {"name": "dstOffset", "description": "memory offset specifying where to store operation's result"}, + ], + "Expression": "`M[dstOffset] = LatestContext.l2Gas`", + "Summary": "Remaining \"L2 gas\" for this call (after this 
instruction).", + "Details": "", + "Tag checks": "", + "Tag updates": "`T[dstOffset] = u32`", + }, +]; +const INSTRUCTION_SET = INSTRUCTION_SET_RAW.map((instr) => {instr['Bit-size'] = instructionSize(instr); return instr;}); + +module.exports = { + TOPICS_IN_TABLE, + TOPICS_IN_SECTIONS, + INSTRUCTION_SET, +}; diff --git a/yellow-paper/src/preprocess/InstructionSet/InstructionSetMarkdownGen.js b/yellow-paper/src/preprocess/InstructionSet/InstructionSetMarkdownGen.js new file mode 100644 index 00000000000..b9a689749cd --- /dev/null +++ b/yellow-paper/src/preprocess/InstructionSet/InstructionSetMarkdownGen.js @@ -0,0 +1,138 @@ +const fs = require("fs"); +const path = require("path"); + +const { + TOPICS_IN_TABLE, + TOPICS_IN_SECTIONS, + INSTRUCTION_SET, + instructionSize +} = require('./InstructionSet'); + +function escapeBraces(str) { + return str.replace(//g, ">"); +} + +function stripBraces(str) { + return str.replace(/[<>]/g, ''); +} + +function instructionSetPreface() { + let preface = "[comment]: # (THIS IS A GENERATED FILE! 
DO NOT EDIT!)\n"; + preface += "[comment]: # (Generated via `yarn preprocess`)\n\n"; + preface += "[comment]: # (Generated by InstructionSetMarkdownGen.tsx and InstructionSet.js)\n\n"; + preface += "import Markdown from 'react-markdown'\n"; + preface += "import CodeBlock from '@theme/CodeBlock'\n\n"; + return preface; +} + +function toOpcode(index) { + return '0x' + index.toString(16).padStart(2, '0'); +} + +function htmlInstructionSetTable() { + let table = "## Instructions Table\n"; + table += "\nClick on an instruction name to jump to its section.\n"; + table += "\n\n"; + let header = ""; + for (let t = 0; t < TOPICS_IN_TABLE.length; t++) { + header += ``; + } + table += `${header}\n`; + + for (let i = 0; i < INSTRUCTION_SET.length; i++) { + const instr = INSTRUCTION_SET[i]; + const name = instr['Name']; + let row = `\n`; + row += `\t`; + row += `\t`; + + for (let t = 0; t < TOPICS_IN_TABLE.length; t++) { + const topic = TOPICS_IN_TABLE[t]; + + if (topic == 'Name') continue; // skip + let cell = instr[topic]; + if (cell[0] == '\n') { // if string starts with newline, assume it's a multi-line code block + cell = `\n{\`${cell.trim()}\`}\n\t`; + } else if (cell[0] == '`' && topic != 'Name') { + cell = `{\n\t\t\`${cell.replace(/`/g, '')}\`\n\t}`; + } else { + cell = escapeBraces(cell); // escape html + cell = `${cell}`; + } + row += `\n\t`; + } + row += "\n"; + table += `${row}\n`; + } + table += "
Opcode${TOPICS_IN_TABLE[t]}
${toOpcode(i)}[${stripBraces(name)}](#isa-section-${instr['id']})${cell}
\n"; + return table; +} + +function markdownSublist(items) { + let markdown = ""; + for (let i = 0; i < items.length; i++) { + let item = items[i]; + if (typeof item === 'string') { + markdown += `\n\t- ${item}`; + } else { + markdown += `\n\t- **${item['name']}**: ${item['description']}`; + } + } + return markdown; +} + +function markdownInstructionSetSection(pathToGenDir) { + let markdown = "## Instructions\n"; + for (let i = 0; i < INSTRUCTION_SET.length; i++) { + const instr = INSTRUCTION_SET[i]; + const name = instr['Name']; + let subsection = `###
${name} (${toOpcode(i)})\n`; + subsection += `${instr['Summary']}\n\n`; + subsection += `[See in table.](#isa-table-${instr['id']})\n\n`; + for (let t = 0; t < TOPICS_IN_SECTIONS.length; t++) { + const topic = TOPICS_IN_SECTIONS[t]; + let field = instr[topic]; + if (topic == 'Name' || topic == 'Summary' || !field || field.length == 0) continue; // skip + + let item = `- **${topic}**: ` + if (Array.isArray(field) ) { + item += markdownSublist(field); + } else if (field[0] == '\n') { // if string starts with newline, assume it's a multi-line code block + item += `\n\n{\`${field.trim()}\`}\n`; + } else { + item += field; + } + subsection += `${item}\n`; + } + const bitFormatPath = `./images/bit-formats/${name.replace(/`/g, '')}.png`; + if (fs.existsSync(`${pathToGenDir}/${bitFormatPath}`)) { + subsection += `\n![](${bitFormatPath})`; + } + markdown += `\n${subsection}\n`; + } + return markdown; +} + +async function generateInstructionSet() { + const rootDir = path.join(__dirname, "../../../"); + const docsDir = path.join(rootDir, "docs", "docs"); + + const relPath = path.relative(docsDir, "docs/public-vm/gen/_InstructionSet.mdx"); + const docsFilePath = path.resolve(docsDir, relPath); + const docsDirName = path.dirname(docsFilePath); + if (!fs.existsSync(docsDirName)) { + fs.mkdirSync(docsDirName, { recursive: true }); + } + + const preface = instructionSetPreface(); + const table = htmlInstructionSetTable(); + const section = markdownInstructionSetSection(docsDirName); + const doc = `${preface}\n${table}\n\n${section}`; + fs.writeFileSync(docsFilePath, doc); + + console.log("Preprocessing complete."); +} + +module.exports = { + generateInstructionSet, +}; \ No newline at end of file diff --git a/yellow-paper/src/preprocess/InstructionSet/InstructionSize.js b/yellow-paper/src/preprocess/InstructionSet/InstructionSize.js new file mode 100644 index 00000000000..0cefffcf327 --- /dev/null +++ b/yellow-paper/src/preprocess/InstructionSet/InstructionSize.js @@ -0,0 +1,86 @@ 
+const OPCODE_SIZE = 8; +const FLAG_SIZE = 8; + +const DEFAULT_OPERAND_SIZE = 24; // for direct/indirect memory offsets + +function argSize(arg) { + if (arg['mode'] && arg['mode'] == 'immediate') { + if (arg['type']) { + return Number(arg['type'].replace(/u/, '')); + } else { + return undefined; // none specified! + } + } else { + return DEFAULT_OPERAND_SIZE; + } +} + +function toOpcode(index) { + return '0x' + index.toString(16).padStart(2, '0'); +} + +/* Compute bit-size of instruction based on flags and number of operands, + * whether they are immediate (and op-type if so) + * + * All instructions have: + * - 1 byte for opcode + * - 1 byte to toggle indirect mode for up to 8 non-immediate args + * 24 bits per-arg (for non-immediates) + * N bits per immediate arg, where N is 8, 16, 32, 64, or 128 based on type + * 1 byte for op-type + * 1 byte for dest-type + */ +function instructionSize(instr) { + let size = OPCODE_SIZE; + let numUntypedImmediates = 0; + for (let arg of instr['Args']) { + const aSize = argSize(arg); + if (aSize === undefined) { + numUntypedImmediates++; + } else { + size += aSize; + } + } + if (instr['Flags']) { + // assigns each flag a byte (indirect, op-type, dest-type) + size += instr['Flags'].length * FLAG_SIZE; + } + let sizeStr = size.toString(); + if (numUntypedImmediates > 0) { + sizeStr += '+N'; + } + return sizeStr; +} + +function instructionBitFormat(instr, index) { + let bitFormat = { + 'Name': instr['Name'], + 'Opcode': { + 'code': toOpcode(index), + 'size': OPCODE_SIZE, + }, + 'Args': [], + 'Flags': [], + }; + + //for (let arg of instr['Args']) { + for (let a = 0; a < instr['Args'].length; a++) { + const arg = instr['Args'][a]; + const aSize = argSize(arg); + if (aSize === undefined) { + bitFormat['Args'][a] = {"name": arg['name'], "size": 'N'}; + } else { + bitFormat['Args'][a] = {"name": arg['name'], "size": aSize}; + } + } + for (let f = 0; f < instr['Flags'].length; f++) { + const flag = instr['Flags'][f]; + 
bitFormat['Flags'][f] = {"name": flag['name'], "size": FLAG_SIZE}; + } + return bitFormat; +} + +module.exports = { + instructionSize, + instructionBitFormat, +}; \ No newline at end of file diff --git a/yellow-paper/src/preprocess/InstructionSet/genBitFormats.js b/yellow-paper/src/preprocess/InstructionSet/genBitFormats.js new file mode 100644 index 00000000000..99bdc4c34f8 --- /dev/null +++ b/yellow-paper/src/preprocess/InstructionSet/genBitFormats.js @@ -0,0 +1,15 @@ +const fs = require("fs"); + +const {instructionBitFormat} = require('./InstructionSize'); +const {INSTRUCTION_SET} = require('./InstructionSet'); + +function run() { + const formats = []; + for (let i = 0; i < INSTRUCTION_SET.length; i++) { + const instr = INSTRUCTION_SET[i]; + const bitFormat = instructionBitFormat(instr, i); + formats.push(bitFormat); + } + fs.writeFileSync('./InstructionBitFormats.json', JSON.stringify(formats)); +} +run(); \ No newline at end of file diff --git a/yellow-paper/src/preprocess/index.js b/yellow-paper/src/preprocess/index.js new file mode 100644 index 00000000000..71c4227df06 --- /dev/null +++ b/yellow-paper/src/preprocess/index.js @@ -0,0 +1,6 @@ +const {generateInstructionSet} = require('./InstructionSet/InstructionSetMarkdownGen'); + +async function run() { + await generateInstructionSet(); +} +run(); \ No newline at end of file diff --git a/yellow-paper/yarn.lock b/yellow-paper/yarn.lock index 3e351ffa146..7e3c4614e87 100644 --- a/yellow-paper/yarn.lock +++ b/yellow-paper/yarn.lock @@ -1220,6 +1220,11 @@ "@babel/helper-validator-identifier" "^7.22.20" to-fast-properties "^2.0.0" +"@braintree/sanitize-url@^6.0.0": + version "6.0.4" + resolved "https://registry.yarnpkg.com/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz#923ca57e173c6b232bbbb07347b1be982f03e783" + integrity sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A== + "@colors/colors@1.5.0": version "1.5.0" resolved 
"https://registry.yarnpkg.com/@colors/colors/-/colors-1.5.0.tgz#bb504579c1cae923e6576a4f5da43d25f97bdbd9" @@ -1572,6 +1577,20 @@ use-sync-external-store "^1.2.0" utility-types "^3.10.0" +"@docusaurus/theme-mermaid@^2.4.3": + version "2.4.3" + resolved "https://registry.yarnpkg.com/@docusaurus/theme-mermaid/-/theme-mermaid-2.4.3.tgz#b40194fb4f46813a18d1350a188d43b68a8192dd" + integrity sha512-S1tZ3xpowtFiTrpTKmvVbRHUYGOlEG5CnPzWlO4huJT1sAwLR+pD6f9DYUlPv2+9NezF3EfUrUyW9xLH0UP58w== + dependencies: + "@docusaurus/core" "2.4.3" + "@docusaurus/module-type-aliases" "2.4.3" + "@docusaurus/theme-common" "2.4.3" + "@docusaurus/types" "2.4.3" + "@docusaurus/utils-validation" "2.4.3" + "@mdx-js/react" "^1.6.22" + mermaid "^9.2.2" + tslib "^2.4.0" + "@docusaurus/theme-search-algolia@2.4.3": version "2.4.3" resolved "https://registry.yarnpkg.com/@docusaurus/theme-search-algolia/-/theme-search-algolia-2.4.3.tgz#32d4cbefc3deba4112068fbdb0bde11ac51ece53" @@ -3024,6 +3043,11 @@ comma-separated-tokens@^1.0.0: resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz#632b80b6117867a158f1080ad498b2fbe7e3f5ea" integrity sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw== +commander@7, commander@^7.2.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" + integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== + commander@^2.20.0: version "2.20.3" resolved "https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" @@ -3034,11 +3058,6 @@ commander@^5.1.0: resolved "https://registry.yarnpkg.com/commander/-/commander-5.1.0.tgz#46abbd1652f8e059bddaef99bbdcb2ad9cf179ae" integrity sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg== -commander@^7.2.0: - version "7.2.0" - resolved 
"https://registry.yarnpkg.com/commander/-/commander-7.2.0.tgz#a36cb57d0b501ce108e4d20559a150a391d97ab7" - integrity sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw== - commander@^8.0.0, commander@^8.3.0: version "8.3.0" resolved "https://registry.yarnpkg.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" @@ -3172,6 +3191,20 @@ core-util-is@~1.0.0: resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== +cose-base@^1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/cose-base/-/cose-base-1.0.3.tgz#650334b41b869578a543358b80cda7e0abe0a60a" + integrity sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg== + dependencies: + layout-base "^1.0.0" + +cose-base@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cose-base/-/cose-base-2.2.0.tgz#1c395c35b6e10bb83f9769ca8b817d614add5c01" + integrity sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g== + dependencies: + layout-base "^2.0.0" + cosmiconfig@^6.0.0: version "6.0.0" resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-6.0.0.tgz#da4fee853c52f6b1e6935f41c1a2fc50bd4a9982" @@ -3369,6 +3402,285 @@ csstype@^3.0.2: resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.2.tgz#1d4bf9d572f11c14031f0436e1c10bc1f571f50b" integrity sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ== +cytoscape-cose-bilkent@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/cytoscape-cose-bilkent/-/cytoscape-cose-bilkent-4.1.0.tgz#762fa121df9930ffeb51a495d87917c570ac209b" + integrity sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ== + dependencies: + cose-base "^1.0.0" + 
+cytoscape-fcose@^2.1.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/cytoscape-fcose/-/cytoscape-fcose-2.2.0.tgz#e4d6f6490df4fab58ae9cea9e5c3ab8d7472f471" + integrity sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ== + dependencies: + cose-base "^2.2.0" + +cytoscape@^3.23.0: + version "3.27.0" + resolved "https://registry.yarnpkg.com/cytoscape/-/cytoscape-3.27.0.tgz#5141cd694570807c91075b609181bce102e0bb88" + integrity sha512-pPZJilfX9BxESwujODz5pydeGi+FBrXq1rcaB1mfhFXXFJ9GjE6CNndAk+8jPzoXGD+16LtSS4xlYEIUiW4Abg== + dependencies: + heap "^0.2.6" + lodash "^4.17.21" + +"d3-array@2 - 3", "d3-array@2.10.0 - 3", "d3-array@2.5.0 - 3", d3-array@3, d3-array@^3.2.0: + version "3.2.4" + resolved "https://registry.yarnpkg.com/d3-array/-/d3-array-3.2.4.tgz#15fec33b237f97ac5d7c986dc77da273a8ed0bb5" + integrity sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg== + dependencies: + internmap "1 - 2" + +d3-axis@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-axis/-/d3-axis-3.0.0.tgz#c42a4a13e8131d637b745fc2973824cfeaf93322" + integrity sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw== + +d3-brush@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-brush/-/d3-brush-3.0.0.tgz#6f767c4ed8dcb79de7ede3e1c0f89e63ef64d31c" + integrity sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ== + dependencies: + d3-dispatch "1 - 3" + d3-drag "2 - 3" + d3-interpolate "1 - 3" + d3-selection "3" + d3-transition "3" + +d3-chord@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-chord/-/d3-chord-3.0.1.tgz#d156d61f485fce8327e6abf339cb41d8cbba6966" + integrity sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g== + dependencies: + d3-path "1 - 3" + +"d3-color@1 - 3", d3-color@3: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/d3-color/-/d3-color-3.1.0.tgz#395b2833dfac71507f12ac2f7af23bf819de24e2" + integrity sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA== + +d3-contour@4: + version "4.0.2" + resolved "https://registry.yarnpkg.com/d3-contour/-/d3-contour-4.0.2.tgz#bb92063bc8c5663acb2422f99c73cbb6c6ae3bcc" + integrity sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA== + dependencies: + d3-array "^3.2.0" + +d3-delaunay@6: + version "6.0.4" + resolved "https://registry.yarnpkg.com/d3-delaunay/-/d3-delaunay-6.0.4.tgz#98169038733a0a5babbeda55054f795bb9e4a58b" + integrity sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A== + dependencies: + delaunator "5" + +"d3-dispatch@1 - 3", d3-dispatch@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-dispatch/-/d3-dispatch-3.0.1.tgz#5fc75284e9c2375c36c839411a0cf550cbfc4d5e" + integrity sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg== + +"d3-drag@2 - 3", d3-drag@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-drag/-/d3-drag-3.0.0.tgz#994aae9cd23c719f53b5e10e3a0a6108c69607ba" + integrity sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg== + dependencies: + d3-dispatch "1 - 3" + d3-selection "3" + +"d3-dsv@1 - 3", d3-dsv@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-dsv/-/d3-dsv-3.0.1.tgz#c63af978f4d6a0d084a52a673922be2160789b73" + integrity sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q== + dependencies: + commander "7" + iconv-lite "0.6" + rw "1" + +"d3-ease@1 - 3", d3-ease@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-ease/-/d3-ease-3.0.1.tgz#9658ac38a2140d59d346160f1f6c30fda0bd12f4" + integrity 
sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w== + +d3-fetch@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-fetch/-/d3-fetch-3.0.1.tgz#83141bff9856a0edb5e38de89cdcfe63d0a60a22" + integrity sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw== + dependencies: + d3-dsv "1 - 3" + +d3-force@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-force/-/d3-force-3.0.0.tgz#3e2ba1a61e70888fe3d9194e30d6d14eece155c4" + integrity sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg== + dependencies: + d3-dispatch "1 - 3" + d3-quadtree "1 - 3" + d3-timer "1 - 3" + +"d3-format@1 - 3", d3-format@3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-format/-/d3-format-3.1.0.tgz#9260e23a28ea5cb109e93b21a06e24e2ebd55641" + integrity sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA== + +d3-geo@3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-geo/-/d3-geo-3.1.0.tgz#74fd54e1f4cebd5185ac2039217a98d39b0a4c0e" + integrity sha512-JEo5HxXDdDYXCaWdwLRt79y7giK8SbhZJbFWXqbRTolCHFI5jRqteLzCsq51NKbUoX0PjBVSohxrx+NoOUujYA== + dependencies: + d3-array "2.5.0 - 3" + +d3-hierarchy@3: + version "3.1.2" + resolved "https://registry.yarnpkg.com/d3-hierarchy/-/d3-hierarchy-3.1.2.tgz#b01cd42c1eed3d46db77a5966cf726f8c09160c6" + integrity sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA== + +"d3-interpolate@1 - 3", "d3-interpolate@1.2.0 - 3", d3-interpolate@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-interpolate/-/d3-interpolate-3.0.1.tgz#3c47aa5b32c5b3dfb56ef3fd4342078a632b400d" + integrity sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g== + dependencies: + d3-color "1 - 3" + +"d3-path@1 - 3", d3-path@3, d3-path@^3.1.0: + version "3.1.0" + resolved 
"https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" + integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== + +d3-polygon@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-polygon/-/d3-polygon-3.0.1.tgz#0b45d3dd1c48a29c8e057e6135693ec80bf16398" + integrity sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg== + +"d3-quadtree@1 - 3", d3-quadtree@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-quadtree/-/d3-quadtree-3.0.1.tgz#6dca3e8be2b393c9a9d514dabbd80a92deef1a4f" + integrity sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw== + +d3-random@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-random/-/d3-random-3.0.1.tgz#d4926378d333d9c0bfd1e6fa0194d30aebaa20f4" + integrity sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ== + +d3-scale-chromatic@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-scale-chromatic/-/d3-scale-chromatic-3.0.0.tgz#15b4ceb8ca2bb0dcb6d1a641ee03d59c3b62376a" + integrity sha512-Lx9thtxAKrO2Pq6OO2Ua474opeziKr279P/TKZsMAhYyNDD3EnCffdbgeSYN5O7m2ByQsxtuP2CSDczNUIZ22g== + dependencies: + d3-color "1 - 3" + d3-interpolate "1 - 3" + +d3-scale@4: + version "4.0.2" + resolved "https://registry.yarnpkg.com/d3-scale/-/d3-scale-4.0.2.tgz#82b38e8e8ff7080764f8dcec77bd4be393689396" + integrity sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ== + dependencies: + d3-array "2.10.0 - 3" + d3-format "1 - 3" + d3-interpolate "1.2.0 - 3" + d3-time "2.1.1 - 3" + d3-time-format "2 - 4" + +"d3-selection@2 - 3", d3-selection@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-selection/-/d3-selection-3.0.0.tgz#c25338207efa72cc5b9bd1458a1a41901f1e1b31" + integrity 
sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ== + +d3-shape@3: + version "3.2.0" + resolved "https://registry.yarnpkg.com/d3-shape/-/d3-shape-3.2.0.tgz#a1a839cbd9ba45f28674c69d7f855bcf91dfc6a5" + integrity sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA== + dependencies: + d3-path "^3.1.0" + +"d3-time-format@2 - 4", d3-time-format@4: + version "4.1.0" + resolved "https://registry.yarnpkg.com/d3-time-format/-/d3-time-format-4.1.0.tgz#7ab5257a5041d11ecb4fe70a5c7d16a195bb408a" + integrity sha512-dJxPBlzC7NugB2PDLwo9Q8JiTR3M3e4/XANkreKSUxF8vvXKqm1Yfq4Q5dl8budlunRVlUUaDUgFt7eA8D6NLg== + dependencies: + d3-time "1 - 3" + +"d3-time@1 - 3", "d3-time@2.1.1 - 3", d3-time@3: + version "3.1.0" + resolved "https://registry.yarnpkg.com/d3-time/-/d3-time-3.1.0.tgz#9310db56e992e3c0175e1ef385e545e48a9bb5c7" + integrity sha512-VqKjzBLejbSMT4IgbmVgDjpkYrNWUYJnbCGo874u7MMKIWsILRX+OpX/gTk8MqjpT1A/c6HY2dCA77ZN0lkQ2Q== + dependencies: + d3-array "2 - 3" + +"d3-timer@1 - 3", d3-timer@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-timer/-/d3-timer-3.0.1.tgz#6284d2a2708285b1abb7e201eda4380af35e63b0" + integrity sha512-ndfJ/JxxMd3nw31uyKoY2naivF+r29V+Lc0svZxe1JvvIRmi8hUsrMvdOwgS1o6uBHmiz91geQ0ylPP0aj1VUA== + +"d3-transition@2 - 3", d3-transition@3: + version "3.0.1" + resolved "https://registry.yarnpkg.com/d3-transition/-/d3-transition-3.0.1.tgz#6869fdde1448868077fdd5989200cb61b2a1645f" + integrity sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w== + dependencies: + d3-color "1 - 3" + d3-dispatch "1 - 3" + d3-ease "1 - 3" + d3-interpolate "1 - 3" + d3-timer "1 - 3" + +d3-zoom@3: + version "3.0.0" + resolved "https://registry.yarnpkg.com/d3-zoom/-/d3-zoom-3.0.0.tgz#d13f4165c73217ffeaa54295cd6969b3e7aee8f3" + integrity sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw== + dependencies: + 
d3-dispatch "1 - 3" + d3-drag "2 - 3" + d3-interpolate "1 - 3" + d3-selection "2 - 3" + d3-transition "2 - 3" + +d3@^7.4.0, d3@^7.8.2: + version "7.8.5" + resolved "https://registry.yarnpkg.com/d3/-/d3-7.8.5.tgz#fde4b760d4486cdb6f0cc8e2cbff318af844635c" + integrity sha512-JgoahDG51ncUfJu6wX/1vWQEqOflgXyl4MaHqlcSruTez7yhaRKR9i8VjjcQGeS2en/jnFivXuaIMnseMMt0XA== + dependencies: + d3-array "3" + d3-axis "3" + d3-brush "3" + d3-chord "3" + d3-color "3" + d3-contour "4" + d3-delaunay "6" + d3-dispatch "3" + d3-drag "3" + d3-dsv "3" + d3-ease "3" + d3-fetch "3" + d3-force "3" + d3-format "3" + d3-geo "3" + d3-hierarchy "3" + d3-interpolate "3" + d3-path "3" + d3-polygon "3" + d3-quadtree "3" + d3-random "3" + d3-scale "4" + d3-scale-chromatic "3" + d3-selection "3" + d3-shape "3" + d3-time "3" + d3-time-format "4" + d3-timer "3" + d3-transition "3" + d3-zoom "3" + +dagre-d3-es@7.0.9: + version "7.0.9" + resolved "https://registry.yarnpkg.com/dagre-d3-es/-/dagre-d3-es-7.0.9.tgz#aca12fccd9d09955a4430029ba72ee6934542a8d" + integrity sha512-rYR4QfVmy+sR44IBDvVtcAmOReGBvRCWDpO2QjYwqgh9yijw6eSHBqaPG/LIOEy7aBsniLvtMW6pg19qJhq60w== + dependencies: + d3 "^7.8.2" + lodash-es "^4.17.21" + +dayjs@^1.11.7: + version "1.11.10" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.10.tgz#68acea85317a6e164457d6d6947564029a6a16a0" + integrity sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ== + debug@2.6.9, debug@^2.6.0: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" @@ -3376,7 +3688,7 @@ debug@2.6.9, debug@^2.6.0: dependencies: ms "2.0.0" -debug@4, debug@^4.1.0, debug@^4.1.1: +debug@4, debug@^4.0.0, debug@^4.1.0, debug@^4.1.1: version "4.3.4" resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== @@ -3449,6 
+3761,13 @@ del@^6.1.1: rimraf "^3.0.2" slash "^3.0.0" +delaunator@5: + version "5.0.0" + resolved "https://registry.yarnpkg.com/delaunator/-/delaunator-5.0.0.tgz#60f052b28bd91c9b4566850ebf7756efe821d81b" + integrity sha512-AyLvtyJdbv/U1GkiS6gUUzclRoAY4Gs75qkMygJJhU75LW4DNuSF2RMzpxs9jw9Oz1BobHjTdkG3zdP55VxAqw== + dependencies: + robust-predicates "^3.0.0" + depd@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" @@ -3555,6 +3874,11 @@ domhandler@^5.0.2, domhandler@^5.0.3: dependencies: domelementtype "^2.3.0" +dompurify@2.4.3: + version "2.4.3" + resolved "https://registry.yarnpkg.com/dompurify/-/dompurify-2.4.3.tgz#f4133af0e6a50297fc8874e2eaedc13a3c308c03" + integrity sha512-q6QaLcakcRjebxjg8/+NP+h0rPfatOgOzc46Fst9VAA3jF2ApfKBNKMzdP4DYTqtUMXSCd5pRS/8Po/OmoCHZQ== + domutils@^2.5.2, domutils@^2.8.0: version "2.8.0" resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.8.0.tgz#4437def5db6e2d1f5d6ee859bd95ca7d02048135" @@ -3613,6 +3937,11 @@ electron-to-chromium@^1.4.535: resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.549.tgz#ab223f5d85c55a9def358db163bc8cacba72df69" integrity sha512-gpXfJslSi4hYDkA0mTLEpYKRv9siAgSUgZ+UWyk+J5Cttpd1ThCVwdclzIwQSclz3hYn049+M2fgrP1WpvF8xg== +elkjs@^0.8.2: + version "0.8.2" + resolved "https://registry.yarnpkg.com/elkjs/-/elkjs-0.8.2.tgz#c37763c5a3e24e042e318455e0147c912a7c248e" + integrity sha512-L6uRgvZTH+4OF5NE/MBbzQx/WYpru1xCBE9respNj6qznEewGUIfhzmm7horWWxbNO2M0WckQypGctR8lH79xQ== + emoji-regex@^8.0.0: version "8.0.0" resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" @@ -4353,6 +4682,11 @@ he@^1.2.0: resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== +heap@^0.2.6: + version "0.2.7" + resolved 
"https://registry.yarnpkg.com/heap/-/heap-0.2.7.tgz#1e6adf711d3f27ce35a81fe3b7bd576c2260a8fc" + integrity sha512-2bsegYkkHO+h/9MGbn6KWcE45cHZgPANo5LXF7EvWdT0yT2EguSVO1nDgU5c8+ZOPwp2vMNa7YFsJhVcDR9Sdg== + history@^4.9.0: version "4.10.1" resolved "https://registry.yarnpkg.com/history/-/history-4.10.1.tgz#33371a65e3a83b267434e2b3f3b1b4c58aad4cf3" @@ -4509,6 +4843,13 @@ iconv-lite@0.4.24: dependencies: safer-buffer ">= 2.1.2 < 3" +iconv-lite@0.6: + version "0.6.3" + resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + icss-utils@^5.0.0, icss-utils@^5.1.0: version "5.1.0" resolved "https://registry.yarnpkg.com/icss-utils/-/icss-utils-5.1.0.tgz#c6be6858abd013d768e98366ae47e25d5887b1ae" @@ -4592,6 +4933,11 @@ inline-style-parser@0.1.1: resolved "https://registry.yarnpkg.com/inline-style-parser/-/inline-style-parser-0.1.1.tgz#ec8a3b429274e9c0a1f1c4ffa9453a7fef72cea1" integrity sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q== +"internmap@1 - 2": + version "2.0.3" + resolved "https://registry.yarnpkg.com/internmap/-/internmap-2.0.3.tgz#6685f23755e43c524e251d29cbc97248e3061009" + integrity sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg== + interpret@^1.0.0: version "1.4.0" resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" @@ -4942,6 +5288,11 @@ keyv@^3.0.0: dependencies: json-buffer "3.0.0" +khroma@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/khroma/-/khroma-2.1.0.tgz#45f2ce94ce231a437cf5b63c2e886e6eb42bbbb1" + integrity sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw== + kind-of@^6.0.0, kind-of@^6.0.2: version "6.0.3" resolved 
"https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" @@ -4967,6 +5318,16 @@ launch-editor@^2.6.0: picocolors "^1.0.0" shell-quote "^1.8.1" +layout-base@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/layout-base/-/layout-base-1.0.2.tgz#1291e296883c322a9dd4c5dd82063721b53e26e2" + integrity sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg== + +layout-base@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/layout-base/-/layout-base-2.0.1.tgz#d0337913586c90f9c2c075292069f5c2da5dd285" + integrity sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg== + leven@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" @@ -5023,6 +5384,11 @@ locate-path@^6.0.0: dependencies: p-locate "^5.0.0" +lodash-es@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash-es/-/lodash-es-4.17.21.tgz#43e626c46e6591b7750beb2b50117390c609e3ee" + integrity sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw== + lodash.curry@^4.0.1: version "4.1.1" resolved "https://registry.yarnpkg.com/lodash.curry/-/lodash.curry-4.1.1.tgz#248e36072ede906501d75966200a86dab8b23170" @@ -5142,6 +5508,17 @@ mdast-util-definitions@^4.0.0: dependencies: unist-util-visit "^2.0.0" +mdast-util-from-markdown@^0.8.0: + version "0.8.5" + resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.5.tgz#d1ef2ca42bc377ecb0463a987910dae89bd9a28c" + integrity sha512-2hkTXtYYnr+NubD/g6KGBS/0mFmBcifAsI0yIWRiRo0PjVs6SSOSOdtzbp6kSGnShDN6G5aWZpKQ2lWRy27mWQ== + dependencies: + "@types/mdast" "^3.0.0" + mdast-util-to-string "^2.0.0" + micromark "~2.11.0" + parse-entities "^2.0.0" + unist-util-stringify-position "^2.0.0" + mdast-util-to-hast@10.0.1: version "10.0.1" resolved 
"https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz#0cfc82089494c52d46eb0e3edb7a4eb2aea021eb" @@ -5156,6 +5533,20 @@ mdast-util-to-hast@10.0.1: unist-util-position "^3.0.0" unist-util-visit "^2.0.0" +mdast-util-to-hast@^10.2.0: + version "10.2.0" + resolved "https://registry.yarnpkg.com/mdast-util-to-hast/-/mdast-util-to-hast-10.2.0.tgz#61875526a017d8857b71abc9333942700b2d3604" + integrity sha512-JoPBfJ3gBnHZ18icCwHR50orC9kNH81tiR1gs01D8Q5YpV6adHNO9nKNuFBCJQ941/32PT1a63UF/DitmS3amQ== + dependencies: + "@types/mdast" "^3.0.0" + "@types/unist" "^2.0.0" + mdast-util-definitions "^4.0.0" + mdurl "^1.0.0" + unist-builder "^2.0.0" + unist-util-generated "^1.0.0" + unist-util-position "^3.0.0" + unist-util-visit "^2.0.0" + mdast-util-to-string@^2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz#b8cfe6a713e1091cb5b728fc48885a4767f8b97b" @@ -5198,11 +5589,41 @@ merge2@^1.3.0, merge2@^1.4.1: resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== +mermaid@^9.2.2: + version "9.4.3" + resolved "https://registry.yarnpkg.com/mermaid/-/mermaid-9.4.3.tgz#62cf210c246b74972ea98c19837519b6f03427f2" + integrity sha512-TLkQEtqhRSuEHSE34lh5bCa94KATCyluAXmFnNI2PRZwOpXFeqiJWwZl+d2CcemE1RS6QbbueSSq9QIg8Uxcyw== + dependencies: + "@braintree/sanitize-url" "^6.0.0" + cytoscape "^3.23.0" + cytoscape-cose-bilkent "^4.1.0" + cytoscape-fcose "^2.1.0" + d3 "^7.4.0" + dagre-d3-es "7.0.9" + dayjs "^1.11.7" + dompurify "2.4.3" + elkjs "^0.8.2" + khroma "^2.0.0" + lodash-es "^4.17.21" + non-layered-tidy-tree-layout "^2.0.2" + stylis "^4.1.2" + ts-dedent "^2.2.0" + uuid "^9.0.0" + web-worker "^1.2.0" + methods@~1.1.2: version "1.1.2" resolved "https://registry.yarnpkg.com/methods/-/methods-1.1.2.tgz#5529a4d67654134edcc5266656835b0f851afcee" integrity 
sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w== +micromark@~2.11.0: + version "2.11.4" + resolved "https://registry.yarnpkg.com/micromark/-/micromark-2.11.4.tgz#d13436138eea826383e822449c9a5c50ee44665a" + integrity sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA== + dependencies: + debug "^4.0.0" + parse-entities "^2.0.0" + micromatch@^4.0.2, micromatch@^4.0.4, micromatch@^4.0.5: version "4.0.5" resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" @@ -5349,6 +5770,11 @@ node-releases@^2.0.13: resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.13.tgz#d5ed1627c23e3461e819b02e57b75e4899b1c81d" integrity sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ== +non-layered-tidy-tree-layout@^2.0.2: + version "2.0.2" + resolved "https://registry.yarnpkg.com/non-layered-tidy-tree-layout/-/non-layered-tidy-tree-layout-2.0.2.tgz#57d35d13c356643fc296a55fb11ac15e74da7804" + integrity sha512-gkXMxRzUH+PB0ax9dUN0yYF0S25BqeAYqhgMaLUFmpXLEk7Fcu8f4emJuOAY0V8kjDICxROIKsTAKsV/v355xw== + normalize-path@^3.0.0, normalize-path@~3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" @@ -6202,6 +6628,11 @@ react-is@^16.13.1, react-is@^16.6.0, react-is@^16.7.0: resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== +react-is@^17.0.0: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== + react-json-view@^1.21.3: version "1.21.3" resolved 
"https://registry.yarnpkg.com/react-json-view/-/react-json-view-1.21.3.tgz#f184209ee8f1bf374fb0c41b0813cff54549c475" @@ -6224,6 +6655,24 @@ react-loadable-ssr-addon-v5-slorber@^1.0.1: dependencies: "@babel/runtime" "^7.10.3" +react-markdown@6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/react-markdown/-/react-markdown-6.0.0.tgz#e63cd32d095e864384d524986c44c34c919de517" + integrity sha512-MC+zljUJeoLb4RbDm/wRbfoQFEZGz4TDOt/wb4dEehdaJWxLMn/T2IgwhQy0VYhuPEd2fhd7iOayE8lmENU0FA== + dependencies: + "@types/hast" "^2.0.0" + "@types/unist" "^2.0.3" + comma-separated-tokens "^1.0.0" + prop-types "^15.7.2" + property-information "^5.0.0" + react-is "^17.0.0" + remark-parse "^9.0.0" + remark-rehype "^8.0.0" + space-separated-tokens "^1.1.0" + style-to-object "^0.3.0" + unified "^9.0.0" + unist-util-visit "^2.0.0" + react-router-config@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/react-router-config/-/react-router-config-5.1.1.tgz#0f4263d1a80c6b2dc7b9c1902c9526478194a988" @@ -6461,6 +6910,20 @@ remark-parse@8.0.3: vfile-location "^3.0.0" xtend "^4.0.1" +remark-parse@^9.0.0: + version "9.0.0" + resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-9.0.0.tgz#4d20a299665880e4f4af5d90b7c7b8a935853640" + integrity sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw== + dependencies: + mdast-util-from-markdown "^0.8.0" + +remark-rehype@^8.0.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/remark-rehype/-/remark-rehype-8.1.0.tgz#610509a043484c1e697437fa5eb3fd992617c945" + integrity sha512-EbCu9kHgAxKmW1yEYjx3QafMyGY3q8noUbNUI5xyKbaFP89wbhDrKxyIQNukNYthzjNHZu6J7hwFg7hRm1svYA== + dependencies: + mdast-util-to-hast "^10.2.0" + remark-squeeze-paragraphs@4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz#76eb0e085295131c84748c8e43810159c5653ead" @@ -6542,6 +7005,11 @@ rimraf@^3.0.2: dependencies: glob "^7.1.3" 
+robust-predicates@^3.0.0: + version "3.0.2" + resolved "https://registry.yarnpkg.com/robust-predicates/-/robust-predicates-3.0.2.tgz#d5b28528c4824d20fc48df1928d41d9efa1ad771" + integrity sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg== + rtl-detect@^1.0.4: version "1.0.4" resolved "https://registry.yarnpkg.com/rtl-detect/-/rtl-detect-1.0.4.tgz#40ae0ea7302a150b96bc75af7d749607392ecac6" @@ -6564,6 +7032,11 @@ run-parallel@^1.1.9: dependencies: queue-microtask "^1.2.2" +rw@1: + version "1.3.3" + resolved "https://registry.yarnpkg.com/rw/-/rw-1.3.3.tgz#3f862dfa91ab766b14885ef4d01124bfda074fb4" + integrity sha512-PdhdWy89SiZogBLaw42zdeqtRJ//zFd2PgQavcICDUgJT5oW10QCRKbJ6bg4r0/UY2M6BWd5tkxuGFRvCkgfHQ== + rxjs@^7.5.4: version "7.8.1" resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543" @@ -6581,7 +7054,7 @@ safe-buffer@5.2.1, safe-buffer@>=5.1.0, safe-buffer@^5.1.0, safe-buffer@~5.2.0: resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== -"safer-buffer@>= 2.1.2 < 3": +"safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== @@ -6881,7 +7354,7 @@ source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.0: resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== -space-separated-tokens@^1.0.0: +space-separated-tokens@^1.0.0, space-separated-tokens@^1.1.0: version "1.1.5" resolved 
"https://registry.yarnpkg.com/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz#85f32c3d10d9682007e917414ddc5c26d1aa6899" integrity sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA== @@ -7029,6 +7502,11 @@ stylehacks@^5.1.1: browserslist "^4.21.4" postcss-selector-parser "^6.0.4" +stylis@^4.1.2: + version "4.3.0" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.3.0.tgz#abe305a669fc3d8777e10eefcfc73ad861c5588c" + integrity sha512-E87pIogpwUsUwXw7dNyU4QDjdgVMy52m+XEOPEKUn161cCzWjjhPSQhByfd1CcNvrOLnXQ6OnnZDwnJrz/Z4YQ== + supports-color@^5.3.0: version "5.5.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" @@ -7171,6 +7649,11 @@ trough@^1.0.0: resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406" integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA== +ts-dedent@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/ts-dedent/-/ts-dedent-2.2.0.tgz#39e4bd297cd036292ae2394eb3412be63f563bb5" + integrity sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ== + tslib@^2.0.3, tslib@^2.1.0, tslib@^2.4.0: version "2.6.2" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" @@ -7451,6 +7934,11 @@ uuid@^8.3.2: resolved "https://registry.yarnpkg.com/uuid/-/uuid-8.3.2.tgz#80d5b5ced271bb9af6c445f21a1a04c606cefbe2" integrity sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg== +uuid@^9.0.0: + version "9.0.1" + resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30" + integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA== + value-equal@^1.0.1: version "1.0.1" resolved 
"https://registry.yarnpkg.com/value-equal/-/value-equal-1.0.1.tgz#1e0b794c734c5c0cade179c437d356d931a34d6c" @@ -7515,6 +8003,11 @@ web-namespaces@^1.0.0: resolved "https://registry.yarnpkg.com/web-namespaces/-/web-namespaces-1.1.4.tgz#bc98a3de60dadd7faefc403d1076d529f5e030ec" integrity sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw== +web-worker@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/web-worker/-/web-worker-1.2.0.tgz#5d85a04a7fbc1e7db58f66595d7a3ac7c9c180da" + integrity sha512-PgF341avzqyx60neE9DD+XS26MMNMoUQRz9NOZwW32nPQrF6p77f1htcnjBSEV8BGMKZ16choqUG4hyI0Hx7mA== + webidl-conversions@^3.0.0: version "3.0.1" resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871"