diff --git a/.aztec-sync-commit b/.aztec-sync-commit index 540b447693c..1cb130e7c58 100644 --- a/.aztec-sync-commit +++ b/.aztec-sync-commit @@ -1 +1 @@ -bb719200034e3bc6db09fb56538dadca4203abf4 +beab8c93857536e07fa37994213fc664a5864013 diff --git a/.github/workflows/gates_report.yml b/.github/workflows/gates_report.yml index ebf17f7374c..be55236c40f 100644 --- a/.github/workflows/gates_report.yml +++ b/.github/workflows/gates_report.yml @@ -74,7 +74,7 @@ jobs: - name: Compare gates reports id: gates_diff - uses: TomAFrench/noir-gates-diff@df05f34e2ab275ddc4f2cac065df1c88f8a05e5d + uses: vezenovm/noir-gates-diff@f80ea702d579873ff80f0261c62e2bae5203748e with: report: gates_report.json summaryQuantile: 0.9 # only display the 10% most significant circuit size diffs in the summary (defaults to 20%) diff --git a/.github/workflows/test-rust-workspace-msrv.yml b/.github/workflows/test-rust-workspace-msrv.yml index 0b2855fa834..cdd7a064a8d 100644 --- a/.github/workflows/test-rust-workspace-msrv.yml +++ b/.github/workflows/test-rust-workspace-msrv.yml @@ -112,6 +112,10 @@ jobs: # We treat any cancelled, skipped or failing jobs as a failure for the workflow as a whole. FAIL: ${{ contains(needs.*.result, 'failure') || contains(needs.*.result, 'cancelled') || contains(needs.*.result, 'skipped') }} + - name: Checkout + if: ${{ failure() }} + uses: actions/checkout@v4 + # Raise an issue if the tests failed - name: Alert on failed publish uses: JasonEtco/create-an-issue@v2 @@ -122,4 +126,4 @@ jobs: WORKFLOW_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} with: update_existing: true - filename: .github/JS_PUBLISH_FAILED.md \ No newline at end of file + filename: .github/ACVM_NOT_PUBLISHABLE.md diff --git a/.gitignore b/.gitignore index 9a829afab8b..2c877a4d02c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ examples/**/target/ examples/9 node_modules pkg/ +.idea # Yarn .pnp.* diff --git a/.release-please-manifest.json b/.release-please-manifest.json index d4cc095c484..7579928c999 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,4 +1,4 @@ { - ".": "0.26.0", - "acvm-repo": "0.42.0" + ".": "0.27.0", + "acvm-repo": "0.43.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c4bcad5840..0ab84df44e7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,43 @@ # Changelog +## [0.27.0](https://github.com/noir-lang/noir/compare/v0.26.0...v0.27.0) (2024-04-10) + + +### ⚠ BREAKING CHANGES + +* Brillig typed memory (https://github.com/AztecProtocol/aztec-packages/pull/5395) + +### Features + +* **acir_gen:** Fold attribute at compile-time and initial non inlined ACIR (https://github.com/AztecProtocol/aztec-packages/pull/5341) ([a0f7474](https://github.com/noir-lang/noir/commit/a0f7474ae6bd74132efdb945d2eb2383f3913cce)) +* **acvm_js:** Execute program ([#4694](https://github.com/noir-lang/noir/issues/4694)) ([386f6d0](https://github.com/noir-lang/noir/commit/386f6d0a5822912db878285cb001032a7c0ff622)) +* **acvm:** Execute multiple circuits (https://github.com/AztecProtocol/aztec-packages/pull/5380) ([a0f7474](https://github.com/noir-lang/noir/commit/a0f7474ae6bd74132efdb945d2eb2383f3913cce)) +* Add `remove_enable_side_effects` SSA pass ([#4224](https://github.com/noir-lang/noir/issues/4224)) ([94952db](https://github.com/noir-lang/noir/commit/94952db604b70a1ec18115b291de3c52565a641e)) +* Allow slices to brillig entry points ([#4713](https://github.com/noir-lang/noir/issues/4713)) 
([62423d5](https://github.com/noir-lang/noir/commit/62423d552beca749b6f86b1330555aab18db58d0)) +* Brillig typed memory (https://github.com/AztecProtocol/aztec-packages/pull/5395) ([0bc18c4](https://github.com/noir-lang/noir/commit/0bc18c4f78171590dd58bded959f68f53a44cc8c)) +* **docs:** Documenting noir codegen ([#4454](https://github.com/noir-lang/noir/issues/4454)) ([24f6d85](https://github.com/noir-lang/noir/commit/24f6d85f2467a109399d21729f8bb0f97c5ba6db)) +* Improve nargo check cli with --override flag and feedback for existing files ([#4575](https://github.com/noir-lang/noir/issues/4575)) ([5e7fbd4](https://github.com/noir-lang/noir/commit/5e7fbd4e706b1691ba2dd960469cfa3b31dfb753)) +* Improve optimisations on range constraints ([#4690](https://github.com/noir-lang/noir/issues/4690)) ([96b8110](https://github.com/noir-lang/noir/commit/96b811079b0e7c0345210cfc705c00345b0b3334)) +* Improve SSA type-awareness in EQ and MUL instructions ([#4691](https://github.com/noir-lang/noir/issues/4691)) ([669f1a0](https://github.com/noir-lang/noir/commit/669f1a0fa47ad9093888a8ce8e525cb02bcf19b5)) +* **nargo:** Multiple circuits info for binary programs ([#4719](https://github.com/noir-lang/noir/issues/4719)) ([50d2735](https://github.com/noir-lang/noir/commit/50d2735825454a8638a308156d4ea23b3c4420d8)) + + +### Bug Fixes + +* "Types in a binary operation should match, but found T and T" ([#4648](https://github.com/noir-lang/noir/issues/4648)) ([30c9f31](https://github.com/noir-lang/noir/commit/30c9f3151d447de8c7467ccbee82e32b8c46a396)) +* **acvm:** Mark outputs of Opcode::Call solvable ([#4708](https://github.com/noir-lang/noir/issues/4708)) ([8fea405](https://github.com/noir-lang/noir/commit/8fea40576f262bd5bb588923c0660d8967404e56)) +* Correct ICE panic messages in brillig `convert_black_box_call` ([#4761](https://github.com/noir-lang/noir/issues/4761)) ([f3eee6c](https://github.com/noir-lang/noir/commit/f3eee6c00a9b1ea939c5757d91faac693e909301)) +* Error when a type variable is unbound during monomorphization instead of defaulting to Field ([#4674](https://github.com/noir-lang/noir/issues/4674)) ([03cdba4](https://github.com/noir-lang/noir/commit/03cdba45ac073fd6fdd91549736f36f1abaef15a)) +* Field comparisons ([#4704](https://github.com/noir-lang/noir/issues/4704)) ([079cb2a](https://github.com/noir-lang/noir/commit/079cb2a99d2d50b50688bfb56fa014acde3e3d71)) +* Impl search no longer selects an impl if multiple are applicable ([#4662](https://github.com/noir-lang/noir/issues/4662)) ([0150600](https://github.com/noir-lang/noir/commit/0150600922ee8b3e67c9b592338e8832f446685b)) +* Last use analysis & make it an SSA pass ([#4686](https://github.com/noir-lang/noir/issues/4686)) ([0d3d5fd](https://github.com/noir-lang/noir/commit/0d3d5fda9659a563ba9c2014b7c1af9e1d332ab0)) +* Slice coercions ([#4640](https://github.com/noir-lang/noir/issues/4640)) ([c0bae17](https://github.com/noir-lang/noir/commit/c0bae17e70f55ebf4b1639e0dfb075d8c5c97892)) +* **ssa:** Accurate constant type for slice dummy data in flattening ([#4661](https://github.com/noir-lang/noir/issues/4661)) ([b87654e](https://github.com/noir-lang/noir/commit/b87654e2b4761dfacc916dac70d43c1b572ec636)) +* **ssa:** Do not use get_value_max_num_bits when we want pure type information ([#4700](https://github.com/noir-lang/noir/issues/4700)) ([b55a580](https://github.com/noir-lang/noir/commit/b55a580388abc95bab6c6ef8c50eae3c5497eb3f)) +* **ssa:** Fix slice intrinsic handling in the capacity tracker ([#4643](https://github.com/noir-lang/noir/issues/4643)) 
([1b50ce1](https://github.com/noir-lang/noir/commit/1b50ce155cf95193937729c2a23f34b0ade42ea0)) +* Unknown slice lengths coming from as_slice ([#4725](https://github.com/noir-lang/noir/issues/4725)) ([f21129e](https://github.com/noir-lang/noir/commit/f21129ef05efb76c5df6ee15a134f1ea535d8e90)) +* Update commit for noir-gates-diff ([#4773](https://github.com/noir-lang/noir/issues/4773)) ([a9766c5](https://github.com/noir-lang/noir/commit/a9766c5e9650160bcafc693f2617e441ed47721a)) + ## [0.26.0](https://github.com/noir-lang/noir/compare/v0.25.0...v0.26.0) (2024-03-25) diff --git a/Cargo.lock b/Cargo.lock index 170b23be189..b01c22ed75b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,15 +4,18 @@ version = 3 [[package]] name = "acir" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir_field", "base64 0.21.2", "bincode", "brillig", + "criterion", "flate2", "fxhash", + "pprof 0.13.0", "serde", + "serde-big-array", "serde-generate", "serde-reflection", "serde_json", @@ -23,7 +26,7 @@ dependencies = [ [[package]] name = "acir_field" -version = "0.42.0" +version = "0.43.0" dependencies = [ "ark-bls12-381", "ark-bn254", @@ -37,7 +40,7 @@ dependencies = [ [[package]] name = "acvm" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -53,13 +56,14 @@ dependencies = [ [[package]] name = "acvm_blackbox_solver" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir", "blake2", "blake3", "k256", "keccak", + "num-bigint", "p256", "sha2", "sha3", @@ -88,7 +92,7 @@ dependencies = [ [[package]] name = "acvm_js" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acvm", "bn254_blackbox_solver", @@ -231,7 +235,7 @@ checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arena" -version = "0.26.0" +version = "0.27.0" [[package]] name = "ark-bls12-381" @@ -268,7 +272,7 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", - "itertools", + "itertools 0.10.5", "num-traits", "zeroize", ] @@ -285,7 +289,7 @@ dependencies = [ "ark-std", "derivative", "digest", - "itertools", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", @@ -374,6 +378,15 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + [[package]] name = "assert_cmd" version = "2.0.12" @@ -432,7 +445,7 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "aztec_macros" -version = "0.26.0" +version = "0.27.0" dependencies = [ "convert_case 0.6.0", "iter-extended", @@ -541,9 +554,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitmaps" @@ -599,18 +612,21 @@ dependencies = [ [[package]] name = "bn254_blackbox_solver" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir", "acvm_blackbox_solver", "ark-ec", "ark-ff", "cfg-if 1.0.0", + "criterion", "getrandom 0.2.10", + "hex", "js-sys", + "lazy_static", 
"noir_grumpkin", "num-bigint", - "num-traits", + "pprof 0.12.1", "thiserror", "wasm-bindgen-futures", "wasmer", @@ -618,7 +634,7 @@ dependencies = [ [[package]] name = "brillig" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir_field", "serde", @@ -626,7 +642,7 @@ dependencies = [ [[package]] name = "brillig_vm" -version = "0.42.0" +version = "0.43.0" dependencies = [ "acir", "acvm_blackbox_solver", @@ -1178,7 +1194,7 @@ dependencies = [ "clap", "criterion-plot", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -1199,7 +1215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] @@ -1255,6 +1271,12 @@ dependencies = [ "cfg-if 1.0.0", ] +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-bigint" version = "0.4.9" @@ -1526,6 +1548,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ena" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" +dependencies = [ + "log", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -1602,12 +1633,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "ac3e13f66a2f95e32a39eaa81f6b95d42878ca0e1db0c7543723dfe12557e860" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -1732,7 +1763,7 @@ dependencies = [ [[package]] name = "fm" -version = "0.26.0" +version = "0.27.0" dependencies = [ "codespan-reporting", "iter-extended", @@ -2353,7 +2384,7 @@ dependencies = [ [[package]] name = "iter-extended" -version = "0.26.0" +version = "0.27.0" [[package]] name = "itertools" @@ -2364,6 +2395,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.9" @@ -2534,6 +2574,37 @@ dependencies = [ "libc", ] +[[package]] +name = "lalrpop" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" +dependencies = [ + "ascii-canvas", + "bit-set", + "ena", + "itertools 0.11.0", + "lalrpop-util", + "petgraph", + "pico-args", + "regex", + "regex-syntax 0.8.2", + "string_cache", + "term", + "tiny-keccak", + "unicode-xid", + "walkdir", +] + +[[package]] +name = "lalrpop-util" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" +dependencies = [ + "regex-automata 0.4.5", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -2564,16 +2635,16 @@ version = "0.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3af92c55d7d839293953fcd0fda5ecfe93297cfde6ffbdec13b41d99c0ba6607" dependencies = [ - 
"bitflags 2.4.2", + "bitflags 2.5.0", "libc", "redox_syscall 0.4.1", ] [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0" [[package]] name = "lock_api" @@ -2738,7 +2809,7 @@ checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] name = "nargo" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "codespan-reporting", @@ -2765,7 +2836,7 @@ dependencies = [ [[package]] name = "nargo_cli" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "assert_cmd", @@ -2797,7 +2868,7 @@ dependencies = [ "notify", "notify-debouncer-full", "paste", - "pprof", + "pprof 0.13.0", "predicates 2.1.5", "prettytable-rs", "rayon", @@ -2820,7 +2891,7 @@ dependencies = [ [[package]] name = "nargo_fmt" -version = "0.26.0" +version = "0.27.0" dependencies = [ "bytecount", "noirc_frontend", @@ -2832,7 +2903,7 @@ dependencies = [ [[package]] name = "nargo_toml" -version = "0.26.0" +version = "0.27.0" dependencies = [ "dirs", "fm", @@ -2856,6 +2927,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "nibble_vec" version = "0.1.0" @@ -2905,7 +2982,7 @@ dependencies = [ [[package]] name = "noir_debugger" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "assert_cmd", @@ -2940,7 +3017,7 @@ dependencies = [ [[package]] name = "noir_lsp" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "async-lsp", @@ -2966,7 +3043,7 @@ dependencies = [ [[package]] name = "noir_wasm" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "build-data", @@ -2989,7 +3066,7 @@ dependencies = [ [[package]] name = "noirc_abi" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "iter-extended", @@ -3006,7 +3083,7 @@ dependencies = [ [[package]] name = "noirc_abi_wasm" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "build-data", @@ -3023,7 +3100,7 @@ dependencies = [ [[package]] name = "noirc_driver" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "aztec_macros", @@ -3044,7 +3121,7 @@ dependencies = [ [[package]] name = "noirc_errors" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "base64 0.21.2", @@ -3062,7 +3139,7 @@ dependencies = [ [[package]] name = "noirc_evaluator" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "chrono", @@ -3079,14 +3156,17 @@ dependencies = [ [[package]] name = "noirc_frontend" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "arena", "base64 0.21.2", "chumsky", "fm", + "im", "iter-extended", + "lalrpop", + "lalrpop-util", "noirc_errors", "noirc_printable_type", "petgraph", @@ -3105,7 +3185,7 @@ dependencies = [ [[package]] name = "noirc_printable_type" -version = "0.26.0" +version = "0.27.0" dependencies = [ "acvm", "iter-extended", @@ -3128,7 +3208,7 @@ version = "6.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "crossbeam-channel", "filetime", "fsevent-sys", @@ -3386,6 +3466,12 @@ dependencies = [ "siphasher", ] 
+[[package]] +name = "pico-args" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" + [[package]] name = "pin-project-lite" version = "0.2.13" @@ -3464,12 +3550,40 @@ dependencies = [ "thiserror", ] +[[package]] +name = "pprof" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef5c97c51bd34c7e742402e216abdeb44d415fbe6ae41d56b114723e953711cb" +dependencies = [ + "backtrace", + "cfg-if 1.0.0", + "criterion", + "findshlibs", + "inferno", + "libc", + "log", + "nix 0.26.4", + "once_cell", + "parking_lot 0.12.1", + "smallvec", + "symbolic-demangle", + "tempfile", + "thiserror", +] + [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +[[package]] +name = "precomputed-hash" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" + [[package]] name = "predicates" version = "2.1.5" @@ -3478,7 +3592,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -3492,7 +3606,7 @@ checksum = "09963355b9f467184c04017ced4a2ba2d75cbcb4e7462690d388233253d4b1a9" dependencies = [ "anstyle", "difflib", - "itertools", + "itertools 0.10.5", "predicates-core", ] @@ -3582,7 +3696,7 @@ checksum = "7c003ac8c77cb07bb74f5f198bce836a689bcd5a42574612bf14d17bfd08c20e" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "lazy_static", "num-traits", "rand 0.8.5", @@ -4069,15 +4183,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.28" +version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" +checksum = "0a962918ea88d644592894bc6dc55acc6c0956488adcebbfb6e273506b7fd6e5" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -4297,6 +4411,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde-generate" version = "0.25.1" @@ -4603,6 +4726,19 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9091b6114800a5f2141aee1d1b9d6ca3592ac062dc5decb3764ec5895a47b4eb" +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot 0.12.1", + "phf_shared", + "precomputed-hash", +] + [[package]] name = "strsim" version = "0.10.0" @@ -4845,6 +4981,15 @@ dependencies = [ "time-core", ] +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = 
"tinytemplate" version = "1.2.1" @@ -5266,9 +5411,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.3.3" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -5534,7 +5679,7 @@ version = "0.121.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9dbe55c8f9d0dbd25d9447a5a889ff90c0cc3feaa7395310d3d826b2c703eaab" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "indexmap 2.0.0", "semver", ] diff --git a/Cargo.toml b/Cargo.toml index 46ccb401fbd..cdbb40f630a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -41,7 +41,7 @@ resolver = "2" [workspace.package] # x-release-please-start-version -version = "0.26.0" +version = "0.27.0" # x-release-please-end authors = ["The Noir Team "] edition = "2021" @@ -52,13 +52,13 @@ repository = "https://github.com/noir-lang/noir/" [workspace.dependencies] # ACVM workspace dependencies -acir_field = { version = "0.42.0", path = "acvm-repo/acir_field", default-features = false } -acir = { version = "0.42.0", path = "acvm-repo/acir", default-features = false } -acvm = { version = "0.42.0", path = "acvm-repo/acvm" } -brillig = { version = "0.42.0", path = "acvm-repo/brillig", default-features = false } -brillig_vm = { version = "0.42.0", path = "acvm-repo/brillig_vm", default-features = false } -acvm_blackbox_solver = { version = "0.42.0", path = "acvm-repo/blackbox_solver", default-features = false } -bn254_blackbox_solver = { version = "0.42.0", path = "acvm-repo/bn254_blackbox_solver", default-features = false } +acir_field = { version = "0.43.0", path = "acvm-repo/acir_field", default-features = false } +acir = { version = "0.43.0", path = "acvm-repo/acir", default-features = false } +acvm = { version = "0.43.0", path = "acvm-repo/acvm" } +brillig = { version = "0.43.0", path = "acvm-repo/brillig", default-features = false } +brillig_vm = { version = "0.43.0", path = "acvm-repo/brillig_vm", default-features = false } +acvm_blackbox_solver = { version = "0.43.0", path = "acvm-repo/blackbox_solver", default-features = false } +bn254_blackbox_solver = { version = "0.43.0", path = "acvm-repo/bn254_blackbox_solver", default-features = false } # Noir compiler workspace dependencies arena = { path = "compiler/utils/arena" } @@ -104,6 +104,14 @@ chumsky = { git = "https://github.com/jfecher/chumsky", rev = "ad9d312", default "ahash", "std", ] } + +# Benchmarking +criterion = "0.5.0" +# Note that using the "frame-pointer" feature breaks framegraphs on linux +# https://github.com/tikv/pprof-rs/pull/172 +pprof = { version = "0.13", features = ["flamegraph","criterion"] } + + dirs = "4" serde = { version = "1.0.136", features = ["derive"] } serde_json = "1.0" @@ -124,6 +132,7 @@ tempfile = "3.6.0" jsonrpc = { version = "0.16.0", features = ["minreq_http"] } flate2 = "1.0.24" +im = { version = "15.1", features = ["serde"] } tracing = "0.1.40" tracing-web = "0.1.3" tracing-subscriber = { version = "0.3.18", features = ["env-filter"] } diff --git a/acvm-repo/CHANGELOG.md b/acvm-repo/CHANGELOG.md index 33cc83d7dd9..9d9ff539559 100644 --- a/acvm-repo/CHANGELOG.md +++ b/acvm-repo/CHANGELOG.md @@ -5,6 +5,88 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.43.0](https://github.com/noir-lang/noir/compare/v0.42.0...v0.43.0) (2024-04-10) + + +### ⚠ BREAKING CHANGES + +* Brillig typed memory (https://github.com/AztecProtocol/aztec-packages/pull/5395) +* **acir:** Program and witness stack structure (https://github.com/AztecProtocol/aztec-packages/pull/5149) +* automatic NoteInterface and NoteGetterOptions auto select (https://github.com/AztecProtocol/aztec-packages/pull/4508) +* Acir call opcode (https://github.com/AztecProtocol/aztec-packages/pull/4773) +* Support contracts with no constructor (https://github.com/AztecProtocol/aztec-packages/pull/5175) +* Internal as a macro (https://github.com/AztecProtocol/aztec-packages/pull/4898) +* move noir out of yarn-project (https://github.com/AztecProtocol/aztec-packages/pull/4479) +* note type ids (https://github.com/AztecProtocol/aztec-packages/pull/4500) +* rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) +* init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) +* **acir:** Move `is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) +* Breaking changes from aztec-packages ([#3955](https://github.com/noir-lang/noir/issues/3955)) +* Rename Arithmetic opcode to AssertZero ([#3840](https://github.com/noir-lang/noir/issues/3840)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) + +### Features + +* Acir call opcode (https://github.com/AztecProtocol/aztec-packages/pull/4773) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* **acir_gen:** Fold attribute at compile-time and initial non inlined ACIR (https://github.com/AztecProtocol/aztec-packages/pull/5341) ([a0f7474](https://github.com/noir-lang/noir/commit/a0f7474ae6bd74132efdb945d2eb2383f3913cce)) +* **acir:** Program and witness stack structure (https://github.com/AztecProtocol/aztec-packages/pull/5149) ([13eb71b](https://github.com/noir-lang/noir/commit/13eb71b8de44eb6aad9c37943ad06fc73db589f5)) +* **acvm_js:** Execute program ([#4694](https://github.com/noir-lang/noir/issues/4694)) ([386f6d0](https://github.com/noir-lang/noir/commit/386f6d0a5822912db878285cb001032a7c0ff622)) +* **acvm:** Execute multiple circuits (https://github.com/AztecProtocol/aztec-packages/pull/5380) ([a0f7474](https://github.com/noir-lang/noir/commit/a0f7474ae6bd74132efdb945d2eb2383f3913cce)) +* Add bit size to const opcode (https://github.com/AztecProtocol/aztec-packages/pull/4385) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add CMOV instruction to brillig and brillig gen (https://github.com/AztecProtocol/aztec-packages/pull/5308) ([13eb71b](https://github.com/noir-lang/noir/commit/13eb71b8de44eb6aad9c37943ad06fc73db589f5)) +* Add expression width into acir (https://github.com/AztecProtocol/aztec-packages/pull/4014) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Add instrumentation for tracking variables in debugging ([#4122](https://github.com/noir-lang/noir/issues/4122)) 
([c58d691](https://github.com/noir-lang/noir/commit/c58d69141b54a918cd1675400c00bfd48720f896)) +* Add poseidon2 opcode implementation for acvm/brillig, and Noir ([#4398](https://github.com/noir-lang/noir/issues/4398)) ([10e8292](https://github.com/noir-lang/noir/commit/10e82920798380f50046e52db4a20ca205191ab7)) +* Add support for overriding expression width ([#4117](https://github.com/noir-lang/noir/issues/4117)) ([c8026d5](https://github.com/noir-lang/noir/commit/c8026d557d535b10fe455165d6445076df7a03de)) +* Added cast opcode and cast calldata (https://github.com/AztecProtocol/aztec-packages/pull/4423) ([78ef013](https://github.com/noir-lang/noir/commit/78ef0134b82e76a73dadb6c7975def22290e3a1a)) +* Allow brillig to read arrays directly from memory (https://github.com/AztecProtocol/aztec-packages/pull/4460) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow nested arrays and vectors in Brillig foreign calls (https://github.com/AztecProtocol/aztec-packages/pull/4478) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Allow variables and stack trace inspection in the debugger ([#4184](https://github.com/noir-lang/noir/issues/4184)) ([bf263fc](https://github.com/noir-lang/noir/commit/bf263fc8d843940f328a90f6366edd2671fb2682)) +* Automatic NoteInterface and NoteGetterOptions auto select (https://github.com/AztecProtocol/aztec-packages/pull/4508) ([13eb71b](https://github.com/noir-lang/noir/commit/13eb71b8de44eb6aad9c37943ad06fc73db589f5)) +* **avm:** Back in avm context with macro - refactor context (https://github.com/AztecProtocol/aztec-packages/pull/4438) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* **avm:** Brillig CONST of size > u128 (https://github.com/AztecProtocol/aztec-packages/pull/5217) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* **aztec-nr:** Initial work for aztec public vm macro (https://github.com/AztecProtocol/aztec-packages/pull/4400) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Backpropagate constants in ACIR during optimization ([#3926](https://github.com/noir-lang/noir/issues/3926)) ([aad0da0](https://github.com/noir-lang/noir/commit/aad0da024c69663f42e6913e674682d5864b26ae)) +* Breaking changes from aztec-packages ([#3955](https://github.com/noir-lang/noir/issues/3955)) ([5be049e](https://github.com/noir-lang/noir/commit/5be049eee6c342649462282ee04f6411e6ea392c)) +* Brillig IR refactor (https://github.com/AztecProtocol/aztec-packages/pull/5233) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Brillig typed memory (https://github.com/AztecProtocol/aztec-packages/pull/5395) ([0bc18c4](https://github.com/noir-lang/noir/commit/0bc18c4f78171590dd58bded959f68f53a44cc8c)) +* Check initializer msg.sender matches deployer from address preimage (https://github.com/AztecProtocol/aztec-packages/pull/5222) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Evaluation of dynamic assert messages ([#4101](https://github.com/noir-lang/noir/issues/4101)) ([c284e01](https://github.com/noir-lang/noir/commit/c284e01bfe20ceae4414dc123624b5cbb8b66d09)) +* Init storage macro (https://github.com/AztecProtocol/aztec-packages/pull/4200) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Initial Earthly 
CI (https://github.com/AztecProtocol/aztec-packages/pull/5069) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Internal as a macro (https://github.com/AztecProtocol/aztec-packages/pull/4898) ([5f57ebb](https://github.com/noir-lang/noir/commit/5f57ebb7ff4b810802f90699a10f4325ef904f2e)) +* New brillig field operations and refactor of binary operations (https://github.com/AztecProtocol/aztec-packages/pull/5208) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Note type ids (https://github.com/AztecProtocol/aztec-packages/pull/4500) ([78ef013](https://github.com/noir-lang/noir/commit/78ef0134b82e76a73dadb6c7975def22290e3a1a)) +* Remove range constraints from witnesses which are constrained to be constants ([#3928](https://github.com/noir-lang/noir/issues/3928)) ([afe9c7a](https://github.com/noir-lang/noir/commit/afe9c7a38bb9d4245205d3aa46d4ce23d70a5671)) +* Remove replacement of boolean range opcodes with `AssertZero` opcodes ([#4107](https://github.com/noir-lang/noir/issues/4107)) ([dac0e87](https://github.com/noir-lang/noir/commit/dac0e87ee3be3446b92bbb12ef4832fd493fcee3)) +* Signed integer division and modulus in brillig gen (https://github.com/AztecProtocol/aztec-packages/pull/5279) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Support contracts with no constructor (https://github.com/AztecProtocol/aztec-packages/pull/5175) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Sync `aztec-packages` ([#4011](https://github.com/noir-lang/noir/issues/4011)) ([fee2452](https://github.com/noir-lang/noir/commit/fee24523c427c27f0bdaf98ea09a852a2da3e94c)) +* Sync commits from `aztec-packages` ([#4068](https://github.com/noir-lang/noir/issues/4068)) ([7a8f3a3](https://github.com/noir-lang/noir/commit/7a8f3a33b57875e681e3d81e667e3570a1cdbdcc)) +* Sync commits from `aztec-packages` ([#4144](https://github.com/noir-lang/noir/issues/4144)) ([0205d3b](https://github.com/noir-lang/noir/commit/0205d3b4ad0cf5ffd775a43eb5af273a772cf138)) +* Sync from aztec-packages ([#4483](https://github.com/noir-lang/noir/issues/4483)) ([fe8f277](https://github.com/noir-lang/noir/commit/fe8f2776ccfde29209a2c3fc162311c99e4f59be)) +* Sync from noir (https://github.com/AztecProtocol/aztec-packages/pull/5234) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) +* Sync from noir (https://github.com/AztecProtocol/aztec-packages/pull/5286) ([c3c9e19](https://github.com/noir-lang/noir/commit/c3c9e19a20d61272a04b95fd6c7d34cc4cb96e45)) + + +### Bug Fixes + +* **acvm:** Mark outputs of Opcode::Call solvable ([#4708](https://github.com/noir-lang/noir/issues/4708)) ([8fea405](https://github.com/noir-lang/noir/commit/8fea40576f262bd5bb588923c0660d8967404e56)) +* Noir test incorrect reporting (https://github.com/AztecProtocol/aztec-packages/pull/4925) ([5f57ebb](https://github.com/noir-lang/noir/commit/5f57ebb7ff4b810802f90699a10f4325ef904f2e)) +* Remove panic from `init_log_level` in `acvm_js` ([#4195](https://github.com/noir-lang/noir/issues/4195)) ([2e26530](https://github.com/noir-lang/noir/commit/2e26530bf53006c1ed4fee310bcaa905c95dd95b)) +* Return error rather instead of panicking on invalid circuit ([#3976](https://github.com/noir-lang/noir/issues/3976)) ([67201bf](https://github.com/noir-lang/noir/commit/67201bfc21a9c8858aa86be9cd47d463fb78d925)) + + +### Miscellaneous Chores + +* **acir:** Move 
`is_recursive` flag to be part of the circuit definition (https://github.com/AztecProtocol/aztec-packages/pull/4221) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) +* Move noir out of yarn-project (https://github.com/AztecProtocol/aztec-packages/pull/4479) ([78ef013](https://github.com/noir-lang/noir/commit/78ef0134b82e76a73dadb6c7975def22290e3a1a)) +* Remove unused methods on ACIR opcodes ([#3841](https://github.com/noir-lang/noir/issues/3841)) ([9e5d0e8](https://github.com/noir-lang/noir/commit/9e5d0e813d61a0bfb5ee68174ed287c5a20f1579)) +* Rename Arithmetic opcode to AssertZero ([#3840](https://github.com/noir-lang/noir/issues/3840)) ([836f171](https://github.com/noir-lang/noir/commit/836f17145c2901060706294461c2d282dd121b3e)) +* Rename bigint_neg into bigint_sub (https://github.com/AztecProtocol/aztec-packages/pull/4420) ([158c8ce](https://github.com/noir-lang/noir/commit/158c8cec7f0dc698042e9512001dd2c9d6b40bcc)) + ## [0.42.0](https://github.com/noir-lang/noir/compare/v0.41.0...v0.42.0) (2024-03-25) diff --git a/acvm-repo/acir/Cargo.toml b/acvm-repo/acir/Cargo.toml index 368f49258f9..d6990f83281 100644 --- a/acvm-repo/acir/Cargo.toml +++ b/acvm-repo/acir/Cargo.toml @@ -2,7 +2,7 @@ name = "acir" description = "ACIR is the IR that the VM processes, it is analogous to LLVM IR" # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true @@ -20,6 +20,7 @@ thiserror.workspace = true flate2.workspace = true bincode.workspace = true base64.workspace = true +serde-big-array = "0.5.1" [dev-dependencies] serde_json = "1.0" @@ -28,8 +29,14 @@ strum_macros = "0.24" serde-reflection = "0.3.6" serde-generate = "0.25.1" fxhash.workspace = true +criterion.workspace = true +pprof.workspace = true [features] default = ["bn254"] bn254 = ["acir_field/bn254", "brillig/bn254"] bls12_381 = ["acir_field/bls12_381", "brillig/bls12_381"] + +[[bench]] +name = "serialization" +harness = false diff --git a/acvm-repo/acir/benches/serialization.rs b/acvm-repo/acir/benches/serialization.rs new file mode 100644 index 00000000000..73e3916a73b --- /dev/null +++ b/acvm-repo/acir/benches/serialization.rs @@ -0,0 +1,123 @@ +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; +use std::{collections::BTreeSet, time::Duration}; + +use acir::{ + circuit::{Circuit, ExpressionWidth, Opcode, Program, PublicInputs}, + native_types::{Expression, Witness}, + FieldElement, +}; + +use pprof::criterion::{Output, PProfProfiler}; + +const SIZES: [usize; 9] = [10, 50, 100, 500, 1000, 5000, 10000, 50000, 100000]; + +fn sample_program(num_opcodes: usize) -> Program { + let assert_zero_opcodes: Vec = (0..num_opcodes) + .map(|i| { + Opcode::AssertZero(Expression { + mul_terms: vec![( + FieldElement::from(2 * i), + Witness(i as u32), + Witness(i as u32 + 10), + )], + linear_combinations: vec![ + (FieldElement::from(2 * i), Witness(i as u32)), + (FieldElement::from(3 * i), Witness(i as u32 + 1)), + ], + q_c: FieldElement::from(i), + }) + }) + .collect(); + + Program { + functions: vec![Circuit { + current_witness_index: 4000, + opcodes: assert_zero_opcodes.to_vec(), + expression_width: ExpressionWidth::Bounded { width: 3 }, + private_parameters: BTreeSet::from([Witness(1), Witness(2), Witness(3), Witness(4)]), + public_parameters: PublicInputs(BTreeSet::from([Witness(5)])), + return_values: PublicInputs(BTreeSet::from([Witness(6)])), + assert_messages: Vec::new(), + recursive: 
false, + }], + } +} + +fn bench_serialization(c: &mut Criterion) { + let mut group = c.benchmark_group("serialize_program"); + for size in SIZES.iter() { + let program = sample_program(*size); + + group.throughput(Throughput::Elements(*size as u64)); + group.bench_with_input(BenchmarkId::from_parameter(size), &program, |b, program| { + b.iter(|| Program::serialize_program(program)); + }); + } + group.finish(); + + let mut group = c.benchmark_group("serialize_program_json"); + for size in SIZES.iter() { + let program = sample_program(*size); + + group.throughput(Throughput::Elements(*size as u64)); + group.bench_with_input(BenchmarkId::from_parameter(size), &program, |b, program| { + b.iter(|| { + let mut bytes = Vec::new(); + let mut serializer = serde_json::Serializer::new(&mut bytes); + Program::serialize_program_base64(program, &mut serializer) + }); + }); + } + group.finish(); +} + +fn bench_deserialization(c: &mut Criterion) { + let mut group = c.benchmark_group("deserialize_program"); + for size in SIZES.iter() { + let program = sample_program(*size); + let serialized_program = Program::serialize_program(&program); + + group.throughput(Throughput::Elements(*size as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(size), + &serialized_program, + |b, program| { + b.iter(|| Program::deserialize_program(program)); + }, + ); + } + group.finish(); + + let mut group = c.benchmark_group("deserialize_program_json"); + for size in SIZES.iter() { + let program = sample_program(*size); + + let serialized_program = { + let mut bytes = Vec::new(); + let mut serializer = serde_json::Serializer::new(&mut bytes); + Program::serialize_program_base64(&program, &mut serializer).expect("should succeed"); + bytes + }; + + group.throughput(Throughput::Elements(*size as u64)); + group.bench_with_input( + BenchmarkId::from_parameter(size), + &serialized_program, + |b, program| { + b.iter(|| { + let mut deserializer = serde_json::Deserializer::from_slice(program); + Program::deserialize_program_base64(&mut deserializer) + }); + }, + ); + } + group.finish(); +} + +criterion_group!( + name = benches; + config = Criterion::default().sample_size(40).measurement_time(Duration::from_secs(20)).with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_serialization, bench_deserialization +); + +criterion_main!(benches); diff --git a/acvm-repo/acir/codegen/acir.cpp b/acvm-repo/acir/codegen/acir.cpp index d7ef849ab75..6c7bd347e5d 100644 --- a/acvm-repo/acir/codegen/acir.cpp +++ b/acvm-repo/acir/codegen/acir.cpp @@ -54,7 +54,7 @@ namespace Program { struct SHA256 { std::vector inputs; - std::vector outputs; + std::array outputs; friend bool operator==(const SHA256&, const SHA256&); std::vector bincodeSerialize() const; @@ -63,7 +63,7 @@ namespace Program { struct Blake2s { std::vector inputs; - std::vector outputs; + std::array outputs; friend bool operator==(const Blake2s&, const Blake2s&); std::vector bincodeSerialize() const; @@ -72,7 +72,7 @@ namespace Program { struct Blake3 { std::vector inputs; - std::vector outputs; + std::array outputs; friend bool operator==(const Blake3&, const Blake3&); std::vector bincodeSerialize() const; @@ -82,7 +82,7 @@ namespace Program { struct SchnorrVerify { Program::FunctionInput public_key_x; Program::FunctionInput public_key_y; - std::vector signature; + std::array signature; std::vector message; Program::Witness output; @@ -112,10 +112,10 @@ namespace Program { }; struct EcdsaSecp256k1 { - std::vector public_key_x; - std::vector 
public_key_y; - std::vector signature; - std::vector hashed_message; + std::array public_key_x; + std::array public_key_y; + std::array signature; + std::array hashed_message; Program::Witness output; friend bool operator==(const EcdsaSecp256k1&, const EcdsaSecp256k1&); @@ -124,10 +124,10 @@ namespace Program { }; struct EcdsaSecp256r1 { - std::vector public_key_x; - std::vector public_key_y; - std::vector signature; - std::vector hashed_message; + std::array public_key_x; + std::array public_key_y; + std::array signature; + std::array hashed_message; Program::Witness output; friend bool operator==(const EcdsaSecp256r1&, const EcdsaSecp256r1&); @@ -159,26 +159,17 @@ namespace Program { struct Keccak256 { std::vector inputs; - std::vector outputs; + Program::FunctionInput var_message_size; + std::array outputs; friend bool operator==(const Keccak256&, const Keccak256&); std::vector bincodeSerialize() const; static Keccak256 bincodeDeserialize(std::vector); }; - struct Keccak256VariableLength { - std::vector inputs; - Program::FunctionInput var_message_size; - std::vector outputs; - - friend bool operator==(const Keccak256VariableLength&, const Keccak256VariableLength&); - std::vector bincodeSerialize() const; - static Keccak256VariableLength bincodeDeserialize(std::vector); - }; - struct Keccakf1600 { - std::vector inputs; - std::vector outputs; + std::array inputs; + std::array outputs; friend bool operator==(const Keccakf1600&, const Keccakf1600&); std::vector bincodeSerialize() const; @@ -266,16 +257,16 @@ namespace Program { }; struct Sha256Compression { - std::vector inputs; - std::vector hash_values; - std::vector outputs; + std::array inputs; + std::array hash_values; + std::array outputs; friend bool operator==(const Sha256Compression&, const Sha256Compression&); std::vector bincodeSerialize() const; static Sha256Compression bincodeDeserialize(std::vector); }; - std::variant value; + std::variant value; friend bool operator==(const BlackBoxFuncCall&, const BlackBoxFuncCall&); std::vector bincodeSerialize() const; @@ -931,6 +922,9 @@ namespace Program { }; struct Trap { + uint64_t revert_data_offset; + uint64_t revert_data_size; + friend bool operator==(const Trap&, const Trap&); std::vector bincodeSerialize() const; static Trap bincodeDeserialize(std::vector); @@ -1070,17 +1064,29 @@ namespace Program { static MemoryInit bincodeDeserialize(std::vector); }; + struct BrilligCall { + uint32_t id; + std::vector inputs; + std::vector outputs; + std::optional predicate; + + friend bool operator==(const BrilligCall&, const BrilligCall&); + std::vector bincodeSerialize() const; + static BrilligCall bincodeDeserialize(std::vector); + }; + struct Call { uint32_t id; std::vector inputs; std::vector outputs; + std::optional predicate; friend bool operator==(const Call&, const Call&); std::vector bincodeSerialize() const; static Call bincodeDeserialize(std::vector); }; - std::variant value; + std::variant value; friend bool operator==(const Opcode&, const Opcode&); std::vector bincodeSerialize() const; @@ -1159,8 +1165,17 @@ namespace Program { static Circuit bincodeDeserialize(std::vector); }; + struct BrilligBytecode { + std::vector bytecode; + + friend bool operator==(const BrilligBytecode&, const BrilligBytecode&); + std::vector bincodeSerialize() const; + static BrilligBytecode bincodeDeserialize(std::vector); + }; + struct Program { std::vector functions; + std::vector unconstrained_functions; friend bool operator==(const Program&, const Program&); std::vector bincodeSerialize() const; @@ 
-2581,6 +2596,7 @@ namespace Program { inline bool operator==(const BlackBoxFuncCall::Keccak256 &lhs, const BlackBoxFuncCall::Keccak256 &rhs) { if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.var_message_size == rhs.var_message_size)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } return true; } @@ -2606,6 +2622,7 @@ template <> template void serde::Serializable::serialize(const Program::BlackBoxFuncCall::Keccak256 &obj, Serializer &serializer) { serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.var_message_size, serializer); serde::Serializable::serialize(obj.outputs, serializer); } @@ -2614,49 +2631,6 @@ template Program::BlackBoxFuncCall::Keccak256 serde::Deserializable::deserialize(Deserializer &deserializer) { Program::BlackBoxFuncCall::Keccak256 obj; obj.inputs = serde::Deserializable::deserialize(deserializer); - obj.outputs = serde::Deserializable::deserialize(deserializer); - return obj; -} - -namespace Program { - - inline bool operator==(const BlackBoxFuncCall::Keccak256VariableLength &lhs, const BlackBoxFuncCall::Keccak256VariableLength &rhs) { - if (!(lhs.inputs == rhs.inputs)) { return false; } - if (!(lhs.var_message_size == rhs.var_message_size)) { return false; } - if (!(lhs.outputs == rhs.outputs)) { return false; } - return true; - } - - inline std::vector BlackBoxFuncCall::Keccak256VariableLength::bincodeSerialize() const { - auto serializer = serde::BincodeSerializer(); - serde::Serializable::serialize(*this, serializer); - return std::move(serializer).bytes(); - } - - inline BlackBoxFuncCall::Keccak256VariableLength BlackBoxFuncCall::Keccak256VariableLength::bincodeDeserialize(std::vector input) { - auto deserializer = serde::BincodeDeserializer(input); - auto value = serde::Deserializable::deserialize(deserializer); - if (deserializer.get_buffer_offset() < input.size()) { - throw serde::deserialization_error("Some input bytes were not read"); - } - return value; - } - -} // end of namespace Program - -template <> -template -void serde::Serializable::serialize(const Program::BlackBoxFuncCall::Keccak256VariableLength &obj, Serializer &serializer) { - serde::Serializable::serialize(obj.inputs, serializer); - serde::Serializable::serialize(obj.var_message_size, serializer); - serde::Serializable::serialize(obj.outputs, serializer); -} - -template <> -template -Program::BlackBoxFuncCall::Keccak256VariableLength serde::Deserializable::deserialize(Deserializer &deserializer) { - Program::BlackBoxFuncCall::Keccak256VariableLength obj; - obj.inputs = serde::Deserializable::deserialize(deserializer); obj.var_message_size = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); return obj; @@ -4120,6 +4094,48 @@ Program::Brillig serde::Deserializable::deserialize(Deserializ return obj; } +namespace Program { + + inline bool operator==(const BrilligBytecode &lhs, const BrilligBytecode &rhs) { + if (!(lhs.bytecode == rhs.bytecode)) { return false; } + return true; + } + + inline std::vector BrilligBytecode::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline BrilligBytecode BrilligBytecode::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw 
serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Program + +template <> +template +void serde::Serializable::serialize(const Program::BrilligBytecode &obj, Serializer &serializer) { + serializer.increase_container_depth(); + serde::Serializable::serialize(obj.bytecode, serializer); + serializer.decrease_container_depth(); +} + +template <> +template +Program::BrilligBytecode serde::Deserializable::deserialize(Deserializer &deserializer) { + deserializer.increase_container_depth(); + Program::BrilligBytecode obj; + obj.bytecode = serde::Deserializable::deserialize(deserializer); + deserializer.decrease_container_depth(); + return obj; +} + namespace Program { inline bool operator==(const BrilligInputs &lhs, const BrilligInputs &rhs) { @@ -5001,6 +5017,8 @@ Program::BrilligOpcode::BlackBox serde::Deserializable template void serde::Serializable::serialize(const Program::BrilligOpcode::Trap &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.revert_data_offset, serializer); + serde::Serializable::serialize(obj.revert_data_size, serializer); } template <> template Program::BrilligOpcode::Trap serde::Deserializable::deserialize(Deserializer &deserializer) { Program::BrilligOpcode::Trap obj; + obj.revert_data_offset = serde::Deserializable::deserialize(deserializer); + obj.revert_data_size = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6167,12 +6189,60 @@ Program::Opcode::MemoryInit serde::Deserializable:: return obj; } +namespace Program { + + inline bool operator==(const Opcode::BrilligCall &lhs, const Opcode::BrilligCall &rhs) { + if (!(lhs.id == rhs.id)) { return false; } + if (!(lhs.inputs == rhs.inputs)) { return false; } + if (!(lhs.outputs == rhs.outputs)) { return false; } + if (!(lhs.predicate == rhs.predicate)) { return false; } + return true; + } + + inline std::vector Opcode::BrilligCall::bincodeSerialize() const { + auto serializer = serde::BincodeSerializer(); + serde::Serializable::serialize(*this, serializer); + return std::move(serializer).bytes(); + } + + inline Opcode::BrilligCall Opcode::BrilligCall::bincodeDeserialize(std::vector input) { + auto deserializer = serde::BincodeDeserializer(input); + auto value = serde::Deserializable::deserialize(deserializer); + if (deserializer.get_buffer_offset() < input.size()) { + throw serde::deserialization_error("Some input bytes were not read"); + } + return value; + } + +} // end of namespace Program + +template <> +template +void serde::Serializable::serialize(const Program::Opcode::BrilligCall &obj, Serializer &serializer) { + serde::Serializable::serialize(obj.id, serializer); + serde::Serializable::serialize(obj.inputs, serializer); + serde::Serializable::serialize(obj.outputs, serializer); + serde::Serializable::serialize(obj.predicate, serializer); +} + +template <> +template +Program::Opcode::BrilligCall serde::Deserializable::deserialize(Deserializer &deserializer) { + Program::Opcode::BrilligCall obj; + obj.id = serde::Deserializable::deserialize(deserializer); + obj.inputs = serde::Deserializable::deserialize(deserializer); + obj.outputs = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); + return obj; +} + namespace Program { inline bool operator==(const Opcode::Call &lhs, const Opcode::Call &rhs) { if (!(lhs.id == rhs.id)) { return false; } if (!(lhs.inputs == rhs.inputs)) { return false; } if (!(lhs.outputs == rhs.outputs)) { return false; } + if 
(!(lhs.predicate == rhs.predicate)) { return false; } return true; } @@ -6199,6 +6269,7 @@ void serde::Serializable::serialize(const Program::Opcode serde::Serializable::serialize(obj.id, serializer); serde::Serializable::serialize(obj.inputs, serializer); serde::Serializable::serialize(obj.outputs, serializer); + serde::Serializable::serialize(obj.predicate, serializer); } template <> @@ -6208,6 +6279,7 @@ Program::Opcode::Call serde::Deserializable::deserialize( obj.id = serde::Deserializable::deserialize(deserializer); obj.inputs = serde::Deserializable::deserialize(deserializer); obj.outputs = serde::Deserializable::deserialize(deserializer); + obj.predicate = serde::Deserializable::deserialize(deserializer); return obj; } @@ -6336,6 +6408,7 @@ namespace Program { inline bool operator==(const Program &lhs, const Program &rhs) { if (!(lhs.functions == rhs.functions)) { return false; } + if (!(lhs.unconstrained_functions == rhs.unconstrained_functions)) { return false; } return true; } @@ -6361,6 +6434,7 @@ template void serde::Serializable::serialize(const Program::Program &obj, Serializer &serializer) { serializer.increase_container_depth(); serde::Serializable::serialize(obj.functions, serializer); + serde::Serializable::serialize(obj.unconstrained_functions, serializer); serializer.decrease_container_depth(); } @@ -6370,6 +6444,7 @@ Program::Program serde::Deserializable::deserialize(Deserializ deserializer.increase_container_depth(); Program::Program obj; obj.functions = serde::Deserializable::deserialize(deserializer); + obj.unconstrained_functions = serde::Deserializable::deserialize(deserializer); deserializer.decrease_container_depth(); return obj; } diff --git a/acvm-repo/acir/src/circuit/brillig.rs b/acvm-repo/acir/src/circuit/brillig.rs index f394a46ff82..e75d335d52b 100644 --- a/acvm-repo/acir/src/circuit/brillig.rs +++ b/acvm-repo/acir/src/circuit/brillig.rs @@ -29,3 +29,11 @@ pub struct Brillig { /// Predicate of the Brillig execution - indicates if it should be skipped pub predicate: Option, } + +/// This is purely a wrapper struct around a list of Brillig opcode's which represents +/// a full Brillig function to be executed by the Brillig VM. +/// This is stored separately on a program and accessed through a [BrilligPointer]. +#[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub struct BrilligBytecode { + pub bytecode: Vec, +} diff --git a/acvm-repo/acir/src/circuit/mod.rs b/acvm-repo/acir/src/circuit/mod.rs index cb846bdaffa..d655d136bc8 100644 --- a/acvm-repo/acir/src/circuit/mod.rs +++ b/acvm-repo/acir/src/circuit/mod.rs @@ -15,6 +15,8 @@ use serde::{de::Error as DeserializationError, Deserialize, Deserializer, Serial use std::collections::BTreeSet; +use self::brillig::BrilligBytecode; + /// Specifies the maximum width of the expressions which will be constrained. /// /// Unbounded Expressions are useful if you are eventually going to pass the ACIR @@ -37,6 +39,7 @@ pub enum ExpressionWidth { #[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Default)] pub struct Program { pub functions: Vec, + pub unconstrained_functions: Vec, } #[derive(Clone, PartialEq, Eq, Serialize, Deserialize, Default)] @@ -86,6 +89,16 @@ impl Circuit { } } +#[derive(Debug, Copy, Clone)] +/// The opcode location for a call to a separate ACIR circuit +/// This includes the function index of the caller within a [program][Program] +/// and the index in the callers ACIR to the specific call opcode. +/// This is only resolved and set during circuit execution. 
+pub struct ResolvedOpcodeLocation { + pub acir_function_index: usize, + pub opcode_location: OpcodeLocation, +} + #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] /// Opcodes are locatable so that callers can /// map opcodes to debug information related to their context. @@ -263,6 +276,10 @@ impl std::fmt::Display for Program { writeln!(f, "func {}", func_index)?; writeln!(f, "{}", function)?; } + for (func_index, function) in self.unconstrained_functions.iter().enumerate() { + writeln!(f, "unconstrained func {}", func_index)?; + writeln!(f, "{:?}", function.bytecode)?; + } Ok(()) } } @@ -314,61 +331,31 @@ mod tests { }) } fn keccakf1600_opcode() -> Opcode { - Opcode::BlackBoxFuncCall(BlackBoxFuncCall::Keccakf1600 { - inputs: vec![ - FunctionInput { witness: Witness(1), num_bits: 64 }, - FunctionInput { witness: Witness(2), num_bits: 64 }, - FunctionInput { witness: Witness(3), num_bits: 64 }, - FunctionInput { witness: Witness(4), num_bits: 64 }, - FunctionInput { witness: Witness(5), num_bits: 64 }, - FunctionInput { witness: Witness(6), num_bits: 64 }, - FunctionInput { witness: Witness(7), num_bits: 64 }, - FunctionInput { witness: Witness(8), num_bits: 64 }, - FunctionInput { witness: Witness(9), num_bits: 64 }, - FunctionInput { witness: Witness(10), num_bits: 64 }, - FunctionInput { witness: Witness(11), num_bits: 64 }, - FunctionInput { witness: Witness(12), num_bits: 64 }, - FunctionInput { witness: Witness(13), num_bits: 64 }, - FunctionInput { witness: Witness(14), num_bits: 64 }, - FunctionInput { witness: Witness(15), num_bits: 64 }, - FunctionInput { witness: Witness(16), num_bits: 64 }, - FunctionInput { witness: Witness(17), num_bits: 64 }, - FunctionInput { witness: Witness(18), num_bits: 64 }, - FunctionInput { witness: Witness(19), num_bits: 64 }, - FunctionInput { witness: Witness(20), num_bits: 64 }, - FunctionInput { witness: Witness(21), num_bits: 64 }, - FunctionInput { witness: Witness(22), num_bits: 64 }, - FunctionInput { witness: Witness(23), num_bits: 64 }, - FunctionInput { witness: Witness(24), num_bits: 64 }, - FunctionInput { witness: Witness(25), num_bits: 64 }, - ], - outputs: vec![ - Witness(26), - Witness(27), - Witness(28), - Witness(29), - Witness(30), - Witness(31), - Witness(32), - Witness(33), - Witness(34), - Witness(35), - Witness(36), - Witness(37), - Witness(38), - Witness(39), - Witness(40), - Witness(41), - Witness(42), - Witness(43), - Witness(44), - Witness(45), - Witness(46), - Witness(47), - Witness(48), - Witness(49), - Witness(50), - ], + let inputs: Box<[FunctionInput; 25]> = Box::new(std::array::from_fn(|i| FunctionInput { + witness: Witness(i as u32 + 1), + num_bits: 8, + })); + let outputs: Box<[Witness; 25]> = Box::new(std::array::from_fn(|i| Witness(i as u32 + 26))); + + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::Keccakf1600 { inputs, outputs }) + } + fn schnorr_verify_opcode() -> Opcode { + let public_key_x = + FunctionInput { witness: Witness(1), num_bits: FieldElement::max_num_bits() }; + let public_key_y = + FunctionInput { witness: Witness(2), num_bits: FieldElement::max_num_bits() }; + let signature: Box<[FunctionInput; 64]> = Box::new(std::array::from_fn(|i| { + FunctionInput { witness: Witness(i as u32 + 3), num_bits: 8 } + })); + let message: Vec = vec![FunctionInput { witness: Witness(67), num_bits: 8 }]; + let output = Witness(68); + + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::SchnorrVerify { + public_key_x, + public_key_y, + signature, + message, + output, }) } @@ -377,14 
+364,14 @@ mod tests { let circuit = Circuit { current_witness_index: 5, expression_width: ExpressionWidth::Unbounded, - opcodes: vec![and_opcode(), range_opcode()], + opcodes: vec![and_opcode(), range_opcode(), schnorr_verify_opcode()], private_parameters: BTreeSet::new(), public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2), Witness(12)])), return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(4), Witness(12)])), assert_messages: Default::default(), recursive: false, }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: Vec::new() }; fn read_write(program: Program) -> (Program, Program) { let bytes = Program::serialize_program(&program); @@ -410,6 +397,7 @@ mod tests { range_opcode(), and_opcode(), keccakf1600_opcode(), + schnorr_verify_opcode(), ], private_parameters: BTreeSet::new(), public_parameters: PublicInputs(BTreeSet::from_iter(vec![Witness(2)])), @@ -417,7 +405,7 @@ mod tests { assert_messages: Default::default(), recursive: false, }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: Vec::new() }; let json = serde_json::to_string_pretty(&program).unwrap(); diff --git a/acvm-repo/acir/src/circuit/opcodes.rs b/acvm-repo/acir/src/circuit/opcodes.rs index 68d28b287e6..b0b8e286e0c 100644 --- a/acvm-repo/acir/src/circuit/opcodes.rs +++ b/acvm-repo/acir/src/circuit/opcodes.rs @@ -1,4 +1,7 @@ -use super::{brillig::Brillig, directives::Directive}; +use super::{ + brillig::{Brillig, BrilligInputs, BrilligOutputs}, + directives::Directive, +}; use crate::native_types::{Expression, Witness}; use serde::{Deserialize, Serialize}; @@ -29,6 +32,18 @@ pub enum Opcode { block_id: BlockId, init: Vec, }, + /// Calls to unconstrained functions + BrilligCall { + /// Id for the function being called. It is the responsibility of the executor + /// to fetch the appropriate Brillig bytecode from this id. + id: u32, + /// Inputs to the function call + inputs: Vec, + /// Outputs to the function call + outputs: Vec, + /// Predicate of the Brillig execution - indicates if it should be skipped + predicate: Option, + }, /// Calls to functions represented as a separate circuit. A call opcode allows us /// to build a call stack when executing the outer-most circuit. Call { @@ -39,6 +54,8 @@ pub enum Opcode { inputs: Vec, /// Outputs of the function call outputs: Vec, + /// Predicate of the circuit execution - indicates if it should be skipped + predicate: Option, }, } @@ -97,8 +114,21 @@ impl std::fmt::Display for Opcode { write!(f, "INIT ")?; write!(f, "(id: {}, len: {}) ", block_id.0, init.len()) } - Opcode::Call { id, inputs, outputs } => { + // We keep the display for a BrilligCall and circuit Call separate as they + // are distinct in their functionality and we should maintain this separation for debugging. 
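The `BrilligCall` variant added above can be built as sketched here; module paths follow the imports in this diff, and the default (zero) input expression is purely a placeholder.

use acir::circuit::brillig::{BrilligInputs, BrilligOutputs};
use acir::circuit::Opcode;
use acir::native_types::{Expression, Witness};

fn example_brillig_call() -> Opcode {
    Opcode::BrilligCall {
        // Index into Program::unconstrained_functions.
        id: 0,
        // One scalar input; a default (zero) expression stands in for real wiring.
        inputs: vec![BrilligInputs::Single(Expression::default())],
        // One scalar output written back into witness 1.
        outputs: vec![BrilligOutputs::Simple(Witness(1))],
        // No predicate, so the call is always executed.
        predicate: None,
    }
}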
+ Opcode::BrilligCall { id, inputs, outputs, predicate } => { + write!(f, "BRILLIG CALL func {}: ", id)?; + if let Some(pred) = predicate { + writeln!(f, "PREDICATE = {pred}")?; + } + write!(f, "inputs: {:?}, ", inputs)?; + write!(f, "outputs: {:?}", outputs) + } + Opcode::Call { id, inputs, outputs, predicate } => { write!(f, "CALL func {}: ", id)?; + if let Some(pred) = predicate { + writeln!(f, "PREDICATE = {pred}")?; + } write!(f, "inputs: {:?}, ", inputs)?; write!(f, "outputs: {:?}", outputs) } diff --git a/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs b/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs index c955e435b37..405cd0cef00 100644 --- a/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs +++ b/acvm-repo/acir/src/circuit/opcodes/black_box_function_call.rs @@ -1,6 +1,6 @@ use crate::native_types::Witness; use crate::BlackBoxFunc; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; // Note: Some functions will not use all of the witness // So we need to supply how many bits of the witness is needed @@ -27,20 +27,24 @@ pub enum BlackBoxFuncCall { }, SHA256 { inputs: Vec, - outputs: Vec, + outputs: Box<[Witness; 32]>, }, Blake2s { inputs: Vec, - outputs: Vec, + outputs: Box<[Witness; 32]>, }, Blake3 { inputs: Vec, - outputs: Vec, + outputs: Box<[Witness; 32]>, }, SchnorrVerify { public_key_x: FunctionInput, public_key_y: FunctionInput, - signature: Vec, + #[serde( + serialize_with = "serialize_big_array", + deserialize_with = "deserialize_big_array_into_box" + )] + signature: Box<[FunctionInput; 64]>, message: Vec, output: Witness, }, @@ -55,17 +59,25 @@ pub enum BlackBoxFuncCall { output: Witness, }, EcdsaSecp256k1 { - public_key_x: Vec, - public_key_y: Vec, - signature: Vec, - hashed_message: Vec, + public_key_x: Box<[FunctionInput; 32]>, + public_key_y: Box<[FunctionInput; 32]>, + #[serde( + serialize_with = "serialize_big_array", + deserialize_with = "deserialize_big_array_into_box" + )] + signature: Box<[FunctionInput; 64]>, + hashed_message: Box<[FunctionInput; 32]>, output: Witness, }, EcdsaSecp256r1 { - public_key_x: Vec, - public_key_y: Vec, - signature: Vec, - hashed_message: Vec, + public_key_x: Box<[FunctionInput; 32]>, + public_key_y: Box<[FunctionInput; 32]>, + #[serde( + serialize_with = "serialize_big_array", + deserialize_with = "deserialize_big_array_into_box" + )] + signature: Box<[FunctionInput; 64]>, + hashed_message: Box<[FunctionInput; 32]>, output: Witness, }, FixedBaseScalarMul { @@ -81,21 +93,17 @@ pub enum BlackBoxFuncCall { outputs: (Witness, Witness), }, Keccak256 { - inputs: Vec, - outputs: Vec, - }, - Keccak256VariableLength { inputs: Vec, /// This is the number of bytes to take /// from the input. Note: if `var_message_size` /// is more than the number of bytes in the input, /// then an error is returned. 
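The hash variants above now carry boxed fixed-size arrays instead of `Vec`s. A minimal sketch of constructing one under the new shape, assuming the usual `acir::circuit::opcodes` re-exports; the remaining `Keccak256` fields continue after the sketch.

use acir::circuit::opcodes::{BlackBoxFuncCall, FunctionInput};
use acir::native_types::Witness;

fn example_sha256_call() -> BlackBoxFuncCall {
    let inputs = vec![FunctionInput { witness: Witness(1), num_bits: 8 }];
    // The 32-byte digest length is now enforced by the type instead of a
    // runtime length check.
    let outputs: Box<[Witness; 32]> = Box::new(std::array::from_fn(|i| Witness(i as u32 + 2)));
    BlackBoxFuncCall::SHA256 { inputs, outputs }
}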
var_message_size: FunctionInput, - outputs: Vec, + outputs: Box<[Witness; 32]>, }, Keccakf1600 { - inputs: Vec, - outputs: Vec, + inputs: Box<[FunctionInput; 25]>, + outputs: Box<[Witness; 25]>, }, RecursiveAggregation { verification_key: Vec, @@ -158,11 +166,11 @@ pub enum BlackBoxFuncCall { /// * `outputs` - result of the input compressed into 256 bits Sha256Compression { /// 512 bits of the input message, represented by 16 u32s - inputs: Vec, + inputs: Box<[FunctionInput; 16]>, /// Vector of 8 u32s used to compress the input - hash_values: Vec, + hash_values: Box<[FunctionInput; 8]>, /// Output of the compression, represented by 8 u32s - outputs: Vec, + outputs: Box<[Witness; 8]>, }, } @@ -183,7 +191,6 @@ impl BlackBoxFuncCall { BlackBoxFuncCall::FixedBaseScalarMul { .. } => BlackBoxFunc::FixedBaseScalarMul, BlackBoxFuncCall::EmbeddedCurveAdd { .. } => BlackBoxFunc::EmbeddedCurveAdd, BlackBoxFuncCall::Keccak256 { .. } => BlackBoxFunc::Keccak256, - BlackBoxFuncCall::Keccak256VariableLength { .. } => BlackBoxFunc::Keccak256, BlackBoxFuncCall::Keccakf1600 { .. } => BlackBoxFunc::Keccakf1600, BlackBoxFuncCall::RecursiveAggregation { .. } => BlackBoxFunc::RecursiveAggregation, BlackBoxFuncCall::BigIntAdd { .. } => BlackBoxFunc::BigIntAdd, @@ -206,14 +213,15 @@ impl BlackBoxFuncCall { BlackBoxFuncCall::SHA256 { inputs, .. } | BlackBoxFuncCall::Blake2s { inputs, .. } | BlackBoxFuncCall::Blake3 { inputs, .. } - | BlackBoxFuncCall::Keccak256 { inputs, .. } - | BlackBoxFuncCall::Keccakf1600 { inputs, .. } | BlackBoxFuncCall::PedersenCommitment { inputs, .. } | BlackBoxFuncCall::PedersenHash { inputs, .. } | BlackBoxFuncCall::BigIntFromLeBytes { inputs, .. } | BlackBoxFuncCall::Poseidon2Permutation { inputs, .. } => inputs.to_vec(), + + BlackBoxFuncCall::Keccakf1600 { inputs, .. } => inputs.to_vec(), + BlackBoxFuncCall::Sha256Compression { inputs, hash_values, .. } => { - inputs.iter().chain(hash_values).copied().collect() + inputs.iter().chain(hash_values.as_ref()).copied().collect() } BlackBoxFuncCall::AND { lhs, rhs, .. } | BlackBoxFuncCall::XOR { lhs, rhs, .. } => { vec![*lhs, *rhs] @@ -280,7 +288,7 @@ impl BlackBoxFuncCall { inputs.extend(hashed_message.iter().copied()); inputs } - BlackBoxFuncCall::Keccak256VariableLength { inputs, var_message_size, .. } => { + BlackBoxFuncCall::Keccak256 { inputs, var_message_size, .. } => { let mut inputs = inputs.clone(); inputs.push(*var_message_size); inputs @@ -306,11 +314,14 @@ impl BlackBoxFuncCall { BlackBoxFuncCall::SHA256 { outputs, .. } | BlackBoxFuncCall::Blake2s { outputs, .. } | BlackBoxFuncCall::Blake3 { outputs, .. } - | BlackBoxFuncCall::Keccak256 { outputs, .. } - | BlackBoxFuncCall::Keccakf1600 { outputs, .. } - | BlackBoxFuncCall::Keccak256VariableLength { outputs, .. } - | BlackBoxFuncCall::Poseidon2Permutation { outputs, .. } - | BlackBoxFuncCall::Sha256Compression { outputs, .. } => outputs.to_vec(), + | BlackBoxFuncCall::Keccak256 { outputs, .. } => outputs.to_vec(), + + BlackBoxFuncCall::Keccakf1600 { outputs, .. } => outputs.to_vec(), + + BlackBoxFuncCall::Sha256Compression { outputs, .. } => outputs.to_vec(), + + BlackBoxFuncCall::Poseidon2Permutation { outputs, .. } => outputs.to_vec(), + BlackBoxFuncCall::AND { output, .. } | BlackBoxFuncCall::XOR { output, .. } | BlackBoxFuncCall::SchnorrVerify { output, .. 
} @@ -429,3 +440,78 @@ impl std::fmt::Debug for BlackBoxFuncCall { std::fmt::Display::fmt(self, f) } } + +fn serialize_big_array(big_array: &[FunctionInput; 64], s: S) -> Result +where + S: Serializer, +{ + use serde_big_array::BigArray; + + (*big_array).serialize(s) +} + +fn deserialize_big_array_into_box<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + use serde_big_array::BigArray; + + let big_array: [FunctionInput; 64] = BigArray::deserialize(deserializer)?; + Ok(Box::new(big_array)) +} + +#[cfg(test)] +mod tests { + + use crate::{circuit::Opcode, native_types::Witness}; + use acir_field::FieldElement; + + use super::{BlackBoxFuncCall, FunctionInput}; + + fn keccakf1600_opcode() -> Opcode { + let inputs: Box<[FunctionInput; 25]> = Box::new(std::array::from_fn(|i| FunctionInput { + witness: Witness(i as u32 + 1), + num_bits: 8, + })); + let outputs: Box<[Witness; 25]> = Box::new(std::array::from_fn(|i| Witness(i as u32 + 26))); + + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::Keccakf1600 { inputs, outputs }) + } + fn schnorr_verify_opcode() -> Opcode { + let public_key_x = + FunctionInput { witness: Witness(1), num_bits: FieldElement::max_num_bits() }; + let public_key_y = + FunctionInput { witness: Witness(2), num_bits: FieldElement::max_num_bits() }; + let signature: Box<[FunctionInput; 64]> = Box::new(std::array::from_fn(|i| { + FunctionInput { witness: Witness(i as u32 + 3), num_bits: 8 } + })); + let message: Vec = vec![FunctionInput { witness: Witness(67), num_bits: 8 }]; + let output = Witness(68); + + Opcode::BlackBoxFuncCall(BlackBoxFuncCall::SchnorrVerify { + public_key_x, + public_key_y, + signature, + message, + output, + }) + } + + #[test] + fn keccakf1600_serialization_roundtrip() { + let opcode = keccakf1600_opcode(); + let buf = bincode::serialize(&opcode).unwrap(); + let recovered_opcode = bincode::deserialize(&buf).unwrap(); + assert_eq!(opcode, recovered_opcode); + } + + #[test] + fn schnorr_serialization_roundtrip() { + let opcode = schnorr_verify_opcode(); + let buf = bincode::serialize(&opcode).unwrap(); + let recovered_opcode = bincode::deserialize(&buf).unwrap(); + assert_eq!(opcode, recovered_opcode); + } +} diff --git a/acvm-repo/acir/tests/test_program_serialization.rs b/acvm-repo/acir/tests/test_program_serialization.rs index a5b683c15e1..fb924a7437d 100644 --- a/acvm-repo/acir/tests/test_program_serialization.rs +++ b/acvm-repo/acir/tests/test_program_serialization.rs @@ -41,17 +41,17 @@ fn addition_circuit() { return_values: PublicInputs([Witness(3)].into()), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 75, 14, 128, 32, 12, 68, 249, 120, 160, 150, - 182, 208, 238, 188, 138, 68, 184, 255, 17, 212, 200, 130, 196, 165, 188, 164, 153, 174, 94, - 38, 227, 221, 203, 118, 159, 119, 95, 226, 200, 125, 36, 252, 3, 253, 66, 87, 152, 92, 4, - 153, 185, 149, 212, 144, 240, 128, 100, 85, 5, 88, 106, 86, 84, 20, 149, 51, 41, 81, 83, - 214, 98, 213, 10, 24, 50, 53, 236, 98, 212, 135, 44, 174, 235, 5, 143, 35, 12, 151, 159, - 126, 55, 109, 28, 231, 145, 47, 245, 105, 191, 143, 133, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 65, 14, 128, 32, 12, 4, 65, 124, 80, 75, 91, + 104, 111, 126, 69, 34, 252, 255, 9, 106, 228, 64, 162, 55, 153, 164, 217, 158, 38, 155, + 245, 238, 97, 
189, 206, 187, 55, 161, 231, 214, 19, 254, 129, 126, 162, 107, 25, 92, 4, + 137, 185, 230, 88, 145, 112, 135, 104, 69, 5, 88, 74, 82, 84, 20, 149, 35, 42, 81, 85, 214, + 108, 197, 50, 24, 50, 85, 108, 98, 212, 186, 44, 204, 235, 5, 183, 99, 233, 46, 63, 252, + 110, 216, 56, 184, 15, 78, 146, 74, 173, 20, 141, 1, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -72,14 +72,14 @@ fn fixed_base_scalar_mul_circuit() { return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(3), Witness(4)])), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 81, 10, 0, 48, 8, 66, 87, 219, 190, 118, 233, - 29, 61, 43, 3, 5, 121, 34, 207, 86, 231, 162, 198, 157, 124, 228, 71, 157, 220, 232, 161, - 227, 226, 206, 214, 95, 221, 74, 0, 116, 58, 13, 182, 105, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 138, 81, 10, 0, 48, 8, 66, 87, 219, 190, 118, 233, + 29, 61, 35, 3, 19, 228, 137, 60, 91, 149, 139, 26, 119, 242, 145, 31, 117, 114, 163, 135, + 142, 139, 219, 91, 127, 117, 71, 2, 117, 84, 50, 98, 113, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -100,14 +100,14 @@ fn pedersen_circuit() { return_values: PublicInputs(BTreeSet::from_iter(vec![Witness(2), Witness(3)])), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 7, 6, 0, 0, 8, 108, 209, 255, 63, 156, 54, 233, - 56, 55, 17, 26, 18, 196, 241, 169, 250, 178, 141, 167, 32, 159, 254, 234, 238, 255, 87, - 112, 52, 63, 63, 101, 105, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 9, 10, 0, 0, 4, 115, 149, 255, 127, 88, 8, 133, + 213, 218, 137, 80, 144, 32, 182, 79, 213, 151, 173, 61, 5, 121, 245, 91, 103, 255, 191, 3, + 7, 16, 26, 112, 158, 113, 0, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -119,8 +119,11 @@ fn schnorr_verify_circuit() { FunctionInput { witness: Witness(1), num_bits: FieldElement::max_num_bits() }; let public_key_y = FunctionInput { witness: Witness(2), num_bits: FieldElement::max_num_bits() }; - let signature = - (3..(3 + 64)).map(|i| FunctionInput { witness: Witness(i), num_bits: 8 }).collect(); + let signature: [FunctionInput; 64] = (3..(3 + 64)) + .map(|i| FunctionInput { witness: Witness(i), num_bits: 8 }) + .collect::>() + .try_into() + .unwrap(); let message = ((3 + 64)..(3 + 64 + 10)) .map(|i| FunctionInput { witness: Witness(i), num_bits: 8 }) .collect(); @@ -130,7 +133,7 @@ fn schnorr_verify_circuit() { let schnorr = Opcode::BlackBoxFuncCall(BlackBoxFuncCall::SchnorrVerify { public_key_x, public_key_y, - signature, + signature: Box::new(signature), message, output, }); @@ -142,27 +145,27 @@ fn schnorr_verify_circuit() { return_values: PublicInputs(BTreeSet::from([output])), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 7, 78, 2, 1, 20, 69, 81, 236, 189, 247, 222, - 123, 239, 93, 177, 33, 34, 238, 194, 253, 47, 193, 200, 147, 67, 194, 36, 147, 163, 33, 33, - 
228, 191, 219, 82, 168, 63, 63, 181, 183, 197, 223, 177, 147, 191, 181, 183, 149, 69, 159, - 183, 213, 222, 238, 218, 219, 206, 14, 118, 178, 139, 141, 183, 135, 189, 236, 99, 63, 7, - 56, 200, 33, 14, 115, 132, 163, 28, 227, 56, 39, 56, 201, 41, 78, 115, 134, 179, 156, 227, - 60, 23, 184, 200, 37, 46, 115, 133, 171, 92, 227, 58, 55, 184, 201, 45, 110, 115, 135, 187, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 210, 85, 78, 67, 81, 24, 133, 209, 226, 238, 238, + 238, 238, 238, 165, 148, 82, 102, 193, 252, 135, 64, 232, 78, 87, 147, 114, 147, 147, 5, + 47, 132, 252, 251, 107, 41, 212, 191, 159, 218, 107, 241, 115, 236, 228, 111, 237, 181, + 178, 173, 246, 186, 107, 175, 157, 29, 236, 100, 23, 27, 175, 135, 189, 236, 99, 63, 7, 56, + 200, 33, 14, 115, 132, 163, 28, 227, 56, 39, 56, 201, 41, 78, 115, 134, 179, 156, 227, 60, + 23, 184, 200, 37, 46, 115, 133, 171, 92, 227, 58, 55, 184, 201, 45, 110, 115, 135, 187, 220, 227, 62, 15, 120, 200, 35, 30, 243, 132, 167, 60, 227, 57, 47, 120, 201, 43, 94, 243, - 134, 183, 188, 227, 61, 31, 248, 200, 39, 22, 249, 204, 151, 166, 29, 243, 188, 250, 255, - 141, 239, 44, 241, 131, 101, 126, 178, 194, 47, 86, 249, 237, 123, 171, 76, 127, 105, 47, - 189, 165, 181, 116, 150, 198, 26, 125, 245, 248, 45, 233, 41, 45, 165, 163, 52, 148, 126, - 210, 78, 186, 73, 51, 233, 37, 173, 164, 147, 52, 146, 62, 210, 70, 186, 72, 19, 233, 33, - 45, 164, 131, 52, 144, 253, 151, 11, 245, 221, 179, 121, 246, 206, 214, 217, 57, 27, 103, - 223, 109, 187, 238, 218, 115, 223, 142, 135, 246, 59, 182, 219, 169, 189, 206, 237, 116, - 105, 159, 107, 187, 220, 218, 227, 222, 14, 143, 238, 95, 116, 247, 23, 119, 126, 115, 223, - 146, 187, 150, 221, 179, 226, 142, 141, 155, 53, 238, 86, 104, 186, 231, 255, 243, 7, 100, - 141, 232, 192, 233, 3, 0, 0, + 134, 183, 188, 227, 61, 31, 248, 200, 39, 62, 243, 133, 175, 77, 59, 230, 123, 243, 123, + 145, 239, 44, 241, 131, 101, 126, 178, 194, 47, 86, 249, 237, 239, 86, 153, 238, 210, 92, + 122, 75, 107, 233, 44, 141, 53, 250, 234, 241, 191, 164, 167, 180, 148, 142, 210, 80, 250, + 73, 59, 233, 38, 205, 164, 151, 180, 146, 78, 210, 72, 250, 72, 27, 233, 34, 77, 164, 135, + 180, 144, 14, 210, 64, 246, 95, 46, 212, 119, 207, 230, 217, 59, 91, 103, 231, 108, 156, + 125, 183, 237, 186, 107, 207, 125, 59, 30, 218, 239, 216, 110, 167, 246, 58, 183, 211, 165, + 125, 174, 237, 114, 107, 143, 123, 59, 60, 186, 255, 179, 187, 191, 186, 115, 209, 125, 75, + 238, 90, 118, 207, 138, 59, 54, 110, 214, 184, 91, 161, 233, 158, 255, 190, 63, 165, 188, + 93, 151, 233, 3, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -206,16 +209,17 @@ fn simple_brillig_foreign_call() { private_parameters: BTreeSet::from([Witness(1), Witness(2)]), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 61, 10, 192, 48, 8, 133, 53, 133, 82, 186, - 245, 38, 233, 13, 122, 153, 14, 93, 58, 132, 144, 227, 135, 252, 41, 56, 36, 46, 201, 7, - 162, 168, 200, 123, 34, 52, 142, 28, 72, 245, 38, 106, 9, 247, 30, 202, 118, 142, 27, 215, - 221, 178, 82, 175, 33, 15, 133, 189, 163, 159, 57, 197, 252, 251, 195, 235, 188, 230, 186, - 16, 65, 255, 12, 239, 92, 131, 89, 149, 198, 77, 3, 10, 9, 119, 8, 198, 242, 152, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 61, 10, 192, 32, 12, 133, 19, 11, 165, 116, + 235, 77, 236, 13, 122, 153, 14, 93, 
58, 136, 120, 124, 241, 47, 129, 12, 42, 130, 126, 16, + 18, 146, 16, 222, 11, 66, 225, 136, 129, 84, 111, 162, 150, 112, 239, 161, 172, 231, 184, + 113, 221, 45, 45, 245, 42, 242, 144, 216, 43, 250, 153, 83, 204, 191, 223, 189, 198, 246, + 92, 39, 60, 244, 63, 195, 59, 87, 99, 150, 165, 113, 83, 193, 0, 1, 19, 247, 29, 5, 160, 1, + 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -306,20 +310,20 @@ fn complex_brillig_foreign_call() { private_parameters: BTreeSet::from([Witness(1), Witness(2), Witness(3)]), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 93, 10, 131, 48, 12, 78, 218, 233, 100, 111, - 187, 193, 96, 59, 64, 231, 9, 188, 139, 248, 166, 232, 163, 167, 23, 11, 126, 197, 24, 250, - 34, 86, 208, 64, 72, 218, 252, 125, 36, 105, 153, 22, 42, 60, 51, 116, 235, 217, 64, 103, - 156, 37, 5, 191, 10, 210, 29, 163, 63, 167, 203, 229, 206, 194, 104, 110, 128, 209, 158, - 128, 49, 236, 195, 69, 231, 157, 114, 46, 73, 251, 103, 35, 239, 231, 225, 57, 243, 156, - 227, 252, 132, 44, 112, 79, 176, 125, 84, 223, 73, 248, 145, 152, 69, 149, 4, 107, 233, - 114, 90, 119, 145, 85, 237, 151, 192, 89, 247, 221, 208, 54, 163, 85, 174, 26, 234, 87, - 232, 63, 101, 103, 21, 55, 169, 216, 73, 72, 249, 5, 197, 234, 132, 123, 179, 35, 247, 155, - 214, 246, 102, 20, 73, 204, 72, 168, 123, 191, 161, 25, 66, 136, 159, 187, 53, 5, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 75, 10, 132, 48, 12, 77, 218, 209, 145, 217, + 205, 13, 6, 198, 3, 84, 79, 224, 93, 196, 157, 162, 75, 79, 47, 22, 124, 197, 16, 186, 17, + 43, 104, 32, 36, 109, 126, 143, 36, 45, 211, 70, 133, 103, 134, 110, 61, 27, 232, 140, 179, + 164, 224, 215, 64, 186, 115, 84, 113, 186, 92, 238, 42, 140, 230, 1, 24, 237, 5, 24, 195, + 62, 220, 116, 222, 41, 231, 146, 180, 127, 54, 242, 126, 94, 158, 51, 207, 57, 206, 111, + 200, 2, 247, 4, 219, 79, 245, 157, 132, 31, 137, 89, 52, 73, 176, 214, 46, 167, 125, 23, + 89, 213, 254, 8, 156, 237, 56, 76, 125, 55, 91, 229, 170, 161, 254, 133, 94, 42, 59, 171, + 184, 69, 197, 46, 66, 202, 47, 40, 86, 39, 220, 155, 3, 185, 191, 180, 183, 55, 163, 72, + 98, 70, 66, 221, 251, 40, 173, 255, 35, 68, 62, 61, 5, 0, 0, ]; assert_eq!(bytes, expected_serialization) @@ -348,16 +352,16 @@ fn memory_op_circuit() { return_values: PublicInputs([Witness(4)].into()), ..Circuit::default() }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 81, 201, 13, 0, 32, 8, 147, 195, 125, 112, 3, 247, - 159, 74, 141, 60, 106, 226, 79, 120, 216, 132, 180, 124, 154, 82, 168, 108, 212, 57, 2, - 122, 129, 157, 201, 181, 150, 59, 186, 179, 189, 161, 101, 251, 82, 176, 175, 196, 121, 89, - 118, 185, 246, 91, 185, 26, 125, 187, 64, 80, 134, 29, 195, 31, 79, 24, 2, 250, 167, 252, - 27, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 81, 57, 14, 0, 32, 8, 147, 195, 255, 224, 15, 252, + 255, 171, 212, 200, 208, 129, 77, 24, 108, 66, 90, 150, 166, 20, 106, 23, 125, 143, 128, + 62, 96, 103, 114, 173, 45, 198, 116, 182, 55, 140, 106, 95, 74, 246, 149, 60, 47, 171, 46, + 215, 126, 43, 87, 179, 111, 23, 8, 202, 176, 99, 248, 240, 9, 11, 137, 33, 212, 110, 35, 3, + 0, 0, 
]; assert_eq!(bytes, expected_serialization) @@ -380,10 +384,18 @@ fn nested_acir_call_circuit() { // assert(x == y); // x // } - let nested_call = - Opcode::Call { id: 1, inputs: vec![Witness(0), Witness(1)], outputs: vec![Witness(2)] }; - let nested_call_two = - Opcode::Call { id: 1, inputs: vec![Witness(0), Witness(1)], outputs: vec![Witness(3)] }; + let nested_call = Opcode::Call { + id: 1, + inputs: vec![Witness(0), Witness(1)], + outputs: vec![Witness(2)], + predicate: None, + }; + let nested_call_two = Opcode::Call { + id: 1, + inputs: vec![Witness(0), Witness(1)], + outputs: vec![Witness(3)], + predicate: None, + }; let assert_nested_call_results = Opcode::AssertZero(Expression { mul_terms: Vec::new(), @@ -410,8 +422,12 @@ fn nested_acir_call_circuit() { ], q_c: FieldElement::one() + FieldElement::one(), }); - let call = - Opcode::Call { id: 2, inputs: vec![Witness(2), Witness(1)], outputs: vec![Witness(3)] }; + let call = Opcode::Call { + id: 2, + inputs: vec![Witness(2), Witness(1)], + outputs: vec![Witness(3)], + predicate: None, + }; let nested_call = Circuit { current_witness_index: 3, @@ -438,20 +454,21 @@ fn nested_acir_call_circuit() { ..Circuit::default() }; - let program = Program { functions: vec![main, nested_call, inner_call] }; + let program = + Program { functions: vec![main, nested_call, inner_call], unconstrained_functions: vec![] }; let bytes = Program::serialize_program(&program); let expected_serialization: Vec = vec![ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 97, 10, 195, 32, 12, 133, 163, 66, 207, 147, - 24, 109, 227, 191, 93, 101, 50, 123, 255, 35, 172, 99, 25, 83, 17, 250, 99, 14, 250, 224, - 97, 144, 16, 146, 143, 231, 224, 45, 167, 126, 105, 57, 108, 14, 91, 248, 202, 168, 65, - 255, 207, 122, 28, 180, 250, 244, 221, 244, 197, 223, 68, 182, 154, 197, 184, 134, 80, 54, - 95, 136, 233, 142, 62, 101, 137, 24, 98, 94, 133, 132, 162, 196, 135, 23, 230, 34, 65, 182, - 148, 211, 134, 137, 2, 23, 218, 99, 226, 93, 135, 185, 121, 123, 33, 84, 12, 234, 218, 192, - 64, 174, 3, 248, 47, 88, 48, 17, 150, 157, 183, 151, 95, 244, 86, 91, 221, 61, 10, 81, 31, - 178, 190, 110, 194, 102, 96, 76, 251, 202, 80, 13, 204, 77, 224, 25, 176, 70, 79, 197, 128, - 18, 64, 3, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 65, 10, 3, 33, 12, 69, 163, 46, 230, 58, 137, + 209, 49, 238, 122, 149, 74, 157, 251, 31, 161, 83, 154, 161, 86, 132, 89, 212, 194, 124, + 248, 24, 36, 132, 228, 241, 29, 188, 229, 212, 47, 45, 187, 205, 110, 11, 31, 25, 53, 28, + 255, 103, 77, 14, 58, 29, 141, 55, 125, 241, 55, 145, 109, 102, 49, 174, 33, 212, 228, 43, + 49, 221, 209, 231, 34, 17, 67, 44, 171, 144, 80, 148, 248, 240, 194, 92, 37, 72, 202, 37, + 39, 204, 20, 184, 210, 22, 51, 111, 58, 204, 205, 219, 11, 161, 129, 208, 214, 6, 6, 114, + 29, 193, 127, 193, 130, 137, 176, 236, 188, 189, 252, 162, 183, 218, 230, 238, 97, 138, + 250, 152, 245, 245, 87, 220, 12, 140, 113, 95, 153, 170, 129, 185, 17, 60, 3, 54, 212, 19, + 104, 145, 195, 151, 14, 4, 0, 0, ]; assert_eq!(bytes, expected_serialization); } diff --git a/acvm-repo/acir_field/Cargo.toml b/acvm-repo/acir_field/Cargo.toml index d63a885bfd8..7a260ea1fa2 100644 --- a/acvm-repo/acir_field/Cargo.toml +++ b/acvm-repo/acir_field/Cargo.toml @@ -2,7 +2,7 @@ name = "acir_field" description = "The field implementation being used by ACIR." 
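The expected-serialization vectors above change because both the `predicate` field on `Call` and the program-level `unconstrained_functions` list are now part of the encoded stream. A hedged round-trip sketch, assuming `deserialize_program` is the inverse used by the `read_write` helper in these tests:

use acir::circuit::{Circuit, Program};

fn example_roundtrip() {
    let program =
        Program { functions: vec![Circuit::default()], unconstrained_functions: vec![] };
    let bytes = Program::serialize_program(&program);
    // deserialize_program is assumed here to be the counterpart of serialize_program.
    let recovered = Program::deserialize_program(&bytes).expect("valid serialization");
    assert!(program == recovered);
}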
# x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm/Cargo.toml b/acvm-repo/acvm/Cargo.toml index d0ea52e859d..e6554d3f773 100644 --- a/acvm-repo/acvm/Cargo.toml +++ b/acvm-repo/acvm/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm" description = "The virtual machine that processes ACIR given a backend/proof system." # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm/src/compiler/transformers/mod.rs b/acvm-repo/acvm/src/compiler/transformers/mod.rs index 003cd4279a1..d13fac1672a 100644 --- a/acvm-repo/acvm/src/compiler/transformers/mod.rs +++ b/acvm-repo/acvm/src/compiler/transformers/mod.rs @@ -142,6 +142,21 @@ pub(super) fn transform_internal( new_acir_opcode_positions.push(acir_opcode_positions[index]); transformed_opcodes.push(opcode); } + Opcode::BrilligCall { ref outputs, .. } => { + for output in outputs { + match output { + BrilligOutputs::Simple(w) => transformer.mark_solvable(*w), + BrilligOutputs::Array(v) => { + for witness in v { + transformer.mark_solvable(*witness); + } + } + } + } + + new_acir_opcode_positions.push(acir_opcode_positions[index]); + transformed_opcodes.push(opcode); + } Opcode::Call { ref outputs, .. } => { for witness in outputs { transformer.mark_solvable(*witness); diff --git a/acvm-repo/acvm/src/pwg/blackbox/bigint.rs b/acvm-repo/acvm/src/pwg/blackbox/bigint.rs index f094bb1ba20..3c05fb2761d 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/bigint.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/bigint.rs @@ -1,53 +1,23 @@ -use std::collections::HashMap; - use acir::{ circuit::opcodes::FunctionInput, native_types::{Witness, WitnessMap}, BlackBoxFunc, FieldElement, }; -use num_bigint::BigUint; +use acvm_blackbox_solver::BigIntSolver; use crate::pwg::OpcodeResolutionError; -/// Resolve BigInt opcodes by storing BigInt values (and their moduli) by their ID in a HashMap: +/// Resolve BigInt opcodes by storing BigInt values (and their moduli) by their ID in the BigIntSolver /// - When it encounters a bigint operation opcode, it performs the operation on the stored values /// and store the result using the provided ID. /// - When it gets a to_bytes opcode, it simply looks up the value and resolves the output witness accordingly. 
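Returning to the transformer change above: `BrilligCall` outputs come in two shapes, and marking them solvable amounts to flattening them into individual witnesses, roughly as sketched here with types from this diff.

use acir::circuit::brillig::BrilligOutputs;
use acir::native_types::Witness;

// Collects every witness covered by a BrilligCall's outputs, mirroring the
// Simple/Array match used when marking outputs solvable.
fn output_witnesses(outputs: &[BrilligOutputs]) -> Vec<Witness> {
    outputs
        .iter()
        .flat_map(|output| match output {
            BrilligOutputs::Simple(witness) => vec![*witness],
            BrilligOutputs::Array(witnesses) => witnesses.clone(),
        })
        .collect()
}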
#[derive(Default)] -pub(crate) struct BigIntSolver { - bigint_id_to_value: HashMap, - bigint_id_to_modulus: HashMap, +pub(crate) struct AcvmBigIntSolver { + bigint_solver: BigIntSolver, } -impl BigIntSolver { - pub(crate) fn get_bigint( - &self, - id: u32, - func: BlackBoxFunc, - ) -> Result { - self.bigint_id_to_value - .get(&id) - .ok_or(OpcodeResolutionError::BlackBoxFunctionFailed( - func, - format!("could not find bigint of id {id}"), - )) - .cloned() - } - - pub(crate) fn get_modulus( - &self, - id: u32, - func: BlackBoxFunc, - ) -> Result { - self.bigint_id_to_modulus - .get(&id) - .ok_or(OpcodeResolutionError::BlackBoxFunctionFailed( - func, - format!("could not find bigint of id {id}"), - )) - .cloned() - } +impl AcvmBigIntSolver { pub(crate) fn bigint_from_bytes( &mut self, inputs: &[FunctionInput], @@ -59,10 +29,7 @@ impl BigIntSolver { .iter() .map(|input| initial_witness.get(&input.witness).unwrap().to_u128() as u8) .collect::>(); - let bigint = BigUint::from_bytes_le(&bytes); - self.bigint_id_to_value.insert(output, bigint); - let modulus = BigUint::from_bytes_le(modulus); - self.bigint_id_to_modulus.insert(output, modulus); + self.bigint_solver.bigint_from_bytes(&bytes, modulus, output)?; Ok(()) } @@ -72,9 +39,7 @@ impl BigIntSolver { outputs: &[Witness], initial_witness: &mut WitnessMap, ) -> Result<(), OpcodeResolutionError> { - let bigint = self.get_bigint(input, BlackBoxFunc::BigIntToLeBytes)?; - - let mut bytes = bigint.to_bytes_le(); + let mut bytes = self.bigint_solver.bigint_to_bytes(input)?; while bytes.len() < outputs.len() { bytes.push(0); } @@ -91,30 +56,7 @@ impl BigIntSolver { output: u32, func: BlackBoxFunc, ) -> Result<(), OpcodeResolutionError> { - let modulus = self.get_modulus(lhs, func)?; - let lhs = self.get_bigint(lhs, func)?; - let rhs = self.get_bigint(rhs, func)?; - let mut result = match func { - BlackBoxFunc::BigIntAdd => lhs + rhs, - BlackBoxFunc::BigIntSub => { - if lhs >= rhs { - &lhs - &rhs - } else { - &lhs + &modulus - &rhs - } - } - BlackBoxFunc::BigIntMul => lhs * rhs, - BlackBoxFunc::BigIntDiv => { - lhs * rhs.modpow(&(&modulus - BigUint::from(1_u32)), &modulus) - } //TODO ensure that modulus is prime - _ => unreachable!("ICE - bigint_op must be called for an operation"), - }; - if result > modulus { - let q = &result / &modulus; - result -= q * &modulus; - } - self.bigint_id_to_value.insert(output, result); - self.bigint_id_to_modulus.insert(output, modulus); + self.bigint_solver.bigint_op(lhs, rhs, output, func)?; Ok(()) } } diff --git a/acvm-repo/acvm/src/pwg/blackbox/hash.rs b/acvm-repo/acvm/src/pwg/blackbox/hash.rs index 24c835a636a..caa09ea8973 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/hash.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/hash.rs @@ -1,7 +1,7 @@ use acir::{ circuit::opcodes::FunctionInput, native_types::{Witness, WitnessMap}, - BlackBoxFunc, FieldElement, + FieldElement, }; use acvm_blackbox_solver::{sha256compression, BlackBoxFunctionSolver, BlackBoxResolutionError}; @@ -14,22 +14,13 @@ pub(super) fn solve_generic_256_hash_opcode( initial_witness: &mut WitnessMap, inputs: &[FunctionInput], var_message_size: Option<&FunctionInput>, - outputs: &[Witness], + outputs: &[Witness; 32], hash_function: fn(data: &[u8]) -> Result<[u8; 32], BlackBoxResolutionError>, - black_box_func: BlackBoxFunc, ) -> Result<(), OpcodeResolutionError> { let message_input = get_hash_input(initial_witness, inputs, var_message_size)?; let digest: [u8; 32] = hash_function(&message_input)?; - let outputs: [Witness; 32] = outputs.try_into().map_err(|_| 
{ - OpcodeResolutionError::BlackBoxFunctionFailed( - black_box_func, - format!("Expected 32 outputs but encountered {}", outputs.len()), - ) - })?; - write_digest_to_outputs(initial_witness, outputs, digest)?; - - Ok(()) + write_digest_to_outputs(initial_witness, outputs, digest) } /// Reads the hash function input from a [`WitnessMap`]. @@ -73,7 +64,7 @@ fn get_hash_input( /// Writes a `digest` to the [`WitnessMap`] at witness indices `outputs`. fn write_digest_to_outputs( initial_witness: &mut WitnessMap, - outputs: [Witness; 32], + outputs: &[Witness; 32], digest: [u8; 32], ) -> Result<(), OpcodeResolutionError> { for (output_witness, value) in outputs.iter().zip(digest.into_iter()) { @@ -87,44 +78,29 @@ fn write_digest_to_outputs( Ok(()) } +fn to_u32_array( + initial_witness: &WitnessMap, + inputs: &[FunctionInput; N], +) -> Result<[u32; N], OpcodeResolutionError> { + let mut result = [0; N]; + for (it, input) in result.iter_mut().zip(inputs) { + let witness_value = witness_to_value(initial_witness, input.witness)?; + *it = witness_value.to_u128() as u32; + } + Ok(result) +} + pub(crate) fn solve_sha_256_permutation_opcode( initial_witness: &mut WitnessMap, - inputs: &[FunctionInput], - hash_values: &[FunctionInput], - outputs: &[Witness], - black_box_func: BlackBoxFunc, + inputs: &[FunctionInput; 16], + hash_values: &[FunctionInput; 8], + outputs: &[Witness; 8], ) -> Result<(), OpcodeResolutionError> { - let mut message = [0; 16]; - if inputs.len() != 16 { - return Err(OpcodeResolutionError::BlackBoxFunctionFailed( - black_box_func, - format!("Expected 16 inputs but encountered {}", &message.len()), - )); - } - for (i, input) in inputs.iter().enumerate() { - let value = witness_to_value(initial_witness, input.witness)?; - message[i] = value.to_u128() as u32; - } - - if hash_values.len() != 8 { - return Err(OpcodeResolutionError::BlackBoxFunctionFailed( - black_box_func, - format!("Expected 8 values but encountered {}", hash_values.len()), - )); - } - let mut state = [0; 8]; - for (i, hash) in hash_values.iter().enumerate() { - let value = witness_to_value(initial_witness, hash.witness)?; - state[i] = value.to_u128() as u32; - } + let message = to_u32_array(initial_witness, inputs)?; + let mut state = to_u32_array(initial_witness, hash_values)?; sha256compression(&mut state, &message); - let outputs: [Witness; 8] = outputs.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - black_box_func, - format!("Expected 8 outputs but encountered {}", outputs.len()), - ) - })?; + for (output_witness, value) in outputs.iter().zip(state.into_iter()) { insert_value(output_witness, FieldElement::from(value as u128), initial_witness)?; } diff --git a/acvm-repo/acvm/src/pwg/blackbox/mod.rs b/acvm-repo/acvm/src/pwg/blackbox/mod.rs index 6ee926043cd..2753c7baaaa 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/mod.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/mod.rs @@ -6,7 +6,7 @@ use acir::{ use acvm_blackbox_solver::{blake2s, blake3, keccak256, keccakf1600, sha256}; use self::{ - bigint::BigIntSolver, hash::solve_poseidon2_permutation_opcode, pedersen::pedersen_hash, + bigint::AcvmBigIntSolver, hash::solve_poseidon2_permutation_opcode, pedersen::pedersen_hash, }; use super::{insert_value, OpcodeNotSolvable, OpcodeResolutionError}; @@ -56,7 +56,7 @@ pub(crate) fn solve( backend: &impl BlackBoxFunctionSolver, initial_witness: &mut WitnessMap, bb_func: &BlackBoxFuncCall, - bigint_solver: &mut BigIntSolver, + bigint_solver: &mut AcvmBigIntSolver, ) -> Result<(), OpcodeResolutionError> { let 
inputs = bb_func.get_inputs_vec(); if !contains_all_inputs(initial_witness, &inputs) { @@ -71,57 +71,34 @@ pub(crate) fn solve( BlackBoxFuncCall::AND { lhs, rhs, output } => and(initial_witness, lhs, rhs, output), BlackBoxFuncCall::XOR { lhs, rhs, output } => xor(initial_witness, lhs, rhs, output), BlackBoxFuncCall::RANGE { input } => solve_range_opcode(initial_witness, input), - BlackBoxFuncCall::SHA256 { inputs, outputs } => solve_generic_256_hash_opcode( - initial_witness, - inputs, - None, - outputs, - sha256, - bb_func.get_black_box_func(), - ), - BlackBoxFuncCall::Blake2s { inputs, outputs } => solve_generic_256_hash_opcode( - initial_witness, - inputs, - None, - outputs, - blake2s, - bb_func.get_black_box_func(), - ), - BlackBoxFuncCall::Blake3 { inputs, outputs } => solve_generic_256_hash_opcode( - initial_witness, - inputs, - None, - outputs, - blake3, - bb_func.get_black_box_func(), - ), - BlackBoxFuncCall::Keccak256 { inputs, outputs } => solve_generic_256_hash_opcode( - initial_witness, - inputs, - None, - outputs, - keccak256, - bb_func.get_black_box_func(), - ), - BlackBoxFuncCall::Keccak256VariableLength { inputs, var_message_size, outputs } => { + BlackBoxFuncCall::SHA256 { inputs, outputs } => { + solve_generic_256_hash_opcode(initial_witness, inputs, None, outputs, sha256) + } + BlackBoxFuncCall::Blake2s { inputs, outputs } => { + solve_generic_256_hash_opcode(initial_witness, inputs, None, outputs, blake2s) + } + BlackBoxFuncCall::Blake3 { inputs, outputs } => { + solve_generic_256_hash_opcode(initial_witness, inputs, None, outputs, blake3) + } + + BlackBoxFuncCall::Keccak256 { inputs, var_message_size, outputs } => { solve_generic_256_hash_opcode( initial_witness, inputs, Some(var_message_size), outputs, keccak256, - bb_func.get_black_box_func(), ) } BlackBoxFuncCall::Keccakf1600 { inputs, outputs } => { let mut state = [0; 25]; - for (i, input) in inputs.iter().enumerate() { + for (it, input) in state.iter_mut().zip(inputs.as_ref()) { let witness = input.witness; let num_bits = input.num_bits as usize; assert_eq!(num_bits, 64); let witness_assignment = witness_to_value(initial_witness, witness)?; let lane = witness_assignment.try_to_u64(); - state[i] = lane.unwrap(); + *it = lane.unwrap(); } let output_state = keccakf1600(state)?; for (output_witness, value) in outputs.iter().zip(output_state.into_iter()) { @@ -140,7 +117,7 @@ pub(crate) fn solve( initial_witness, *public_key_x, *public_key_y, - signature, + signature.as_ref(), message, *output, ), @@ -161,7 +138,7 @@ pub(crate) fn solve( public_key_x, public_key_y, signature, - message, + message.as_ref(), *output, ), BlackBoxFuncCall::EcdsaSecp256r1 { @@ -175,7 +152,7 @@ pub(crate) fn solve( public_key_x, public_key_y, signature, - message, + message.as_ref(), *output, ), BlackBoxFuncCall::FixedBaseScalarMul { low, high, outputs } => { @@ -207,13 +184,7 @@ pub(crate) fn solve( bigint_solver.bigint_to_bytes(*input, outputs, initial_witness) } BlackBoxFuncCall::Sha256Compression { inputs, hash_values, outputs } => { - solve_sha_256_permutation_opcode( - initial_witness, - inputs, - hash_values, - outputs, - bb_func.get_black_box_func(), - ) + solve_sha_256_permutation_opcode(initial_witness, inputs, hash_values, outputs) } BlackBoxFuncCall::Poseidon2Permutation { inputs, outputs, len } => { solve_poseidon2_permutation_opcode(backend, initial_witness, inputs, outputs, *len) diff --git a/acvm-repo/acvm/src/pwg/blackbox/signature/ecdsa.rs b/acvm-repo/acvm/src/pwg/blackbox/signature/ecdsa.rs index 8f0df8378ad..b113c801251 
100644 --- a/acvm-repo/acvm/src/pwg/blackbox/signature/ecdsa.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/signature/ecdsa.rs @@ -7,85 +7,42 @@ use acvm_blackbox_solver::{ecdsa_secp256k1_verify, ecdsa_secp256r1_verify}; use crate::{pwg::insert_value, OpcodeResolutionError}; -use super::to_u8_vec; +use super::{to_u8_array, to_u8_vec}; pub(crate) fn secp256k1_prehashed( initial_witness: &mut WitnessMap, - public_key_x_inputs: &[FunctionInput], - public_key_y_inputs: &[FunctionInput], - signature_inputs: &[FunctionInput], + public_key_x_inputs: &[FunctionInput; 32], + public_key_y_inputs: &[FunctionInput; 32], + signature_inputs: &[FunctionInput; 64], hashed_message_inputs: &[FunctionInput], output: Witness, ) -> Result<(), OpcodeResolutionError> { let hashed_message = to_u8_vec(initial_witness, hashed_message_inputs)?; - // These errors should never be emitted in practice as they would imply malformed ACIR generation. - let pub_key_x: [u8; 32] = - to_u8_vec(initial_witness, public_key_x_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256k1, - format!("expected pubkey_x size 32 but received {}", public_key_x_inputs.len()), - ) - })?; - - let pub_key_y: [u8; 32] = - to_u8_vec(initial_witness, public_key_y_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256k1, - format!("expected pubkey_y size 32 but received {}", public_key_y_inputs.len()), - ) - })?; - - let signature: [u8; 64] = - to_u8_vec(initial_witness, signature_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256k1, - format!("expected signature size 64 but received {}", signature_inputs.len()), - ) - })?; + let pub_key_x: [u8; 32] = to_u8_array(initial_witness, public_key_x_inputs)?; + let pub_key_y: [u8; 32] = to_u8_array(initial_witness, public_key_y_inputs)?; + let signature: [u8; 64] = to_u8_array(initial_witness, signature_inputs)?; let is_valid = ecdsa_secp256k1_verify(&hashed_message, &pub_key_x, &pub_key_y, &signature)?; - insert_value(&output, FieldElement::from(is_valid), initial_witness)?; - Ok(()) + insert_value(&output, FieldElement::from(is_valid), initial_witness) } pub(crate) fn secp256r1_prehashed( initial_witness: &mut WitnessMap, - public_key_x_inputs: &[FunctionInput], - public_key_y_inputs: &[FunctionInput], - signature_inputs: &[FunctionInput], + public_key_x_inputs: &[FunctionInput; 32], + public_key_y_inputs: &[FunctionInput; 32], + signature_inputs: &[FunctionInput; 64], hashed_message_inputs: &[FunctionInput], output: Witness, ) -> Result<(), OpcodeResolutionError> { let hashed_message = to_u8_vec(initial_witness, hashed_message_inputs)?; - let pub_key_x: [u8; 32] = - to_u8_vec(initial_witness, public_key_x_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256r1, - format!("expected pubkey_x size 32 but received {}", public_key_x_inputs.len()), - ) - })?; - - let pub_key_y: [u8; 32] = - to_u8_vec(initial_witness, public_key_y_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256r1, - format!("expected pubkey_y size 32 but received {}", public_key_y_inputs.len()), - ) - })?; - - let signature: [u8; 64] = - to_u8_vec(initial_witness, signature_inputs)?.try_into().map_err(|_| { - OpcodeResolutionError::BlackBoxFunctionFailed( - acir::BlackBoxFunc::EcdsaSecp256r1, - format!("expected signature size 
64 but received {}", signature_inputs.len()), - ) - })?; + let pub_key_x: [u8; 32] = to_u8_array(initial_witness, public_key_x_inputs)?; + let pub_key_y: [u8; 32] = to_u8_array(initial_witness, public_key_y_inputs)?; + let signature: [u8; 64] = to_u8_array(initial_witness, signature_inputs)?; let is_valid = ecdsa_secp256r1_verify(&hashed_message, &pub_key_x, &pub_key_y, &signature)?; - insert_value(&output, FieldElement::from(is_valid), initial_witness)?; - Ok(()) + insert_value(&output, FieldElement::from(is_valid), initial_witness) } diff --git a/acvm-repo/acvm/src/pwg/blackbox/signature/mod.rs b/acvm-repo/acvm/src/pwg/blackbox/signature/mod.rs index 0e28a63ff68..bd223ecd0c9 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/signature/mod.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/signature/mod.rs @@ -2,6 +2,21 @@ use acir::{circuit::opcodes::FunctionInput, native_types::WitnessMap}; use crate::pwg::{witness_to_value, OpcodeResolutionError}; +fn to_u8_array( + initial_witness: &WitnessMap, + inputs: &[FunctionInput; N], +) -> Result<[u8; N], OpcodeResolutionError> { + let mut result = [0; N]; + for (it, input) in result.iter_mut().zip(inputs) { + let witness_value_bytes = witness_to_value(initial_witness, input.witness)?.to_be_bytes(); + let byte = witness_value_bytes + .last() + .expect("Field element must be represented by non-zero amount of bytes"); + *it = *byte; + } + Ok(result) +} + fn to_u8_vec( initial_witness: &WitnessMap, inputs: &[FunctionInput], diff --git a/acvm-repo/acvm/src/pwg/blackbox/signature/schnorr.rs b/acvm-repo/acvm/src/pwg/blackbox/signature/schnorr.rs index 7f5381cee91..3d0216fa217 100644 --- a/acvm-repo/acvm/src/pwg/blackbox/signature/schnorr.rs +++ b/acvm-repo/acvm/src/pwg/blackbox/signature/schnorr.rs @@ -1,4 +1,4 @@ -use super::to_u8_vec; +use super::{to_u8_array, to_u8_vec}; use crate::{ pwg::{insert_value, witness_to_value, OpcodeResolutionError}, BlackBoxFunctionSolver, @@ -15,15 +15,14 @@ pub(crate) fn schnorr_verify( initial_witness: &mut WitnessMap, public_key_x: FunctionInput, public_key_y: FunctionInput, - signature: &[FunctionInput], + signature: &[FunctionInput; 64], message: &[FunctionInput], output: Witness, ) -> Result<(), OpcodeResolutionError> { let public_key_x: &FieldElement = witness_to_value(initial_witness, public_key_x.witness)?; let public_key_y: &FieldElement = witness_to_value(initial_witness, public_key_y.witness)?; - let signature = to_u8_vec(initial_witness, signature)?; - + let signature = to_u8_array(initial_witness, signature)?; let message = to_u8_vec(initial_witness, message)?; let valid_signature = diff --git a/acvm-repo/acvm/src/pwg/brillig.rs b/acvm-repo/acvm/src/pwg/brillig.rs index bcf736cd926..67faf7f5007 100644 --- a/acvm-repo/acvm/src/pwg/brillig.rs +++ b/acvm-repo/acvm/src/pwg/brillig.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use acir::{ - brillig::{ForeignCallParam, ForeignCallResult}, + brillig::{ForeignCallParam, ForeignCallResult, Opcode as BrilligOpcode}, circuit::{ brillig::{Brillig, BrilligInputs, BrilligOutputs}, opcodes::BlockId, @@ -11,7 +11,7 @@ use acir::{ FieldElement, }; use acvm_blackbox_solver::BlackBoxFunctionSolver; -use brillig_vm::{MemoryValue, VMStatus, VM}; +use brillig_vm::{FailureReason, MemoryValue, VMStatus, VM}; use crate::{pwg::OpcodeNotSolvable, OpcodeResolutionError}; @@ -46,9 +46,9 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { /// Assigns the zero value to all outputs of the given [`Brillig`] bytecode. 
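The `try_into` error paths deleted from the ECDSA and Schnorr solvers are made unnecessary by const-generic helpers such as `to_u8_array`. The same pattern, shown standalone on plain integers rather than witness lookups:

// The output length N is fixed at the type level, so "expected 64 but
// received ..." style runtime checks are no longer needed.
fn low_bytes<const N: usize>(values: &[u128; N]) -> [u8; N] {
    let mut result = [0u8; N];
    for (out, value) in result.iter_mut().zip(values) {
        *out = (*value & 0xff) as u8;
    }
    result
}

fn demo_low_bytes() {
    let signature: [u8; 64] = low_bytes(&[0x1234u128; 64]);
    assert_eq!(signature[0], 0x34);
}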
pub(super) fn zero_out_brillig_outputs( initial_witness: &mut WitnessMap, - brillig: &Brillig, + outputs: &[BrilligOutputs], ) -> Result<(), OpcodeResolutionError> { - for output in &brillig.outputs { + for output in outputs { match output { BrilligOutputs::Simple(witness) => { insert_value(witness, FieldElement::zero(), initial_witness)?; @@ -63,6 +63,7 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { Ok(()) } + // TODO: Delete this old method once `Brillig` is deleted /// Constructs a solver for a Brillig block given the bytecode and initial /// witness. pub(crate) fn new( @@ -72,13 +73,45 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { bb_solver: &'b B, acir_index: usize, ) -> Result { + let vm = Self::setup_brillig_vm( + initial_witness, + memory, + &brillig.inputs, + &brillig.bytecode, + bb_solver, + )?; + Ok(Self { vm, acir_index }) + } + + /// Constructs a solver for a Brillig block given the bytecode and initial + /// witness. + pub(crate) fn new_call( + initial_witness: &WitnessMap, + memory: &HashMap, + inputs: &'b [BrilligInputs], + brillig_bytecode: &'b [BrilligOpcode], + bb_solver: &'b B, + acir_index: usize, + ) -> Result { + let vm = + Self::setup_brillig_vm(initial_witness, memory, inputs, brillig_bytecode, bb_solver)?; + Ok(Self { vm, acir_index }) + } + + fn setup_brillig_vm( + initial_witness: &WitnessMap, + memory: &HashMap, + inputs: &[BrilligInputs], + brillig_bytecode: &'b [BrilligOpcode], + bb_solver: &'b B, + ) -> Result, OpcodeResolutionError> { // Set input values let mut calldata: Vec = Vec::new(); // Each input represents an expression or array of expressions to evaluate. // Iterate over each input and evaluate the expression(s) associated with it. // Push the results into memory. // If a certain expression is not solvable, we stall the ACVM and do not proceed with Brillig VM execution. - for input in &brillig.inputs { + for input in inputs { match input { BrilligInputs::Single(expr) => match get_value(expr, initial_witness) { Ok(value) => calldata.push(value), @@ -118,8 +151,8 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { // Instantiate a Brillig VM given the solved calldata // along with the Brillig bytecode. - let vm = VM::new(calldata, &brillig.bytecode, vec![], bb_solver); - Ok(Self { vm, acir_index }) + let vm = VM::new(calldata, brillig_bytecode, vec![], bb_solver); + Ok(vm) } pub fn get_memory(&self) -> &[MemoryValue] { @@ -159,7 +192,31 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { match vm_status { VMStatus::Finished { .. 
} => Ok(BrilligSolverStatus::Finished), VMStatus::InProgress => Ok(BrilligSolverStatus::InProgress), - VMStatus::Failure { message, call_stack } => { + VMStatus::Failure { reason, call_stack } => { + let message = match reason { + FailureReason::RuntimeError { message } => Some(message), + FailureReason::Trap { revert_data_offset, revert_data_size } => { + // Since noir can only revert with strings currently, we can parse return data as a string + if revert_data_size == 0 { + None + } else { + let memory = self.vm.get_memory(); + let bytes = memory + [revert_data_offset..(revert_data_offset + revert_data_size)] + .iter() + .map(|memory_value| { + memory_value + .try_into() + .expect("Assert message character is not a byte") + }) + .collect(); + Some( + String::from_utf8(bytes) + .expect("Assert message is not valid UTF-8"), + ) + } + } + }; Err(OpcodeResolutionError::BrilligFunctionFailed { message, call_stack: call_stack @@ -180,13 +237,13 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { pub(crate) fn finalize( self, witness: &mut WitnessMap, - brillig: &Brillig, + outputs: &[BrilligOutputs], ) -> Result<(), OpcodeResolutionError> { // Finish the Brillig execution by writing the outputs to the witness map let vm_status = self.vm.get_status(); match vm_status { VMStatus::Finished { return_data_offset, return_data_size } => { - self.write_brillig_outputs(witness, return_data_offset, return_data_size, brillig)?; + self.write_brillig_outputs(witness, return_data_offset, return_data_size, outputs)?; Ok(()) } _ => panic!("Brillig VM has not completed execution"), @@ -198,26 +255,27 @@ impl<'b, B: BlackBoxFunctionSolver> BrilligSolver<'b, B> { witness_map: &mut WitnessMap, return_data_offset: usize, return_data_size: usize, - brillig: &Brillig, + outputs: &[BrilligOutputs], ) -> Result<(), OpcodeResolutionError> { // Write VM execution results into the witness map let memory = self.vm.get_memory(); let mut current_ret_data_idx = return_data_offset; - for output in brillig.outputs.iter() { + for output in outputs.iter() { match output { BrilligOutputs::Simple(witness) => { - insert_value(witness, memory[current_ret_data_idx].value, witness_map)?; + insert_value(witness, memory[current_ret_data_idx].to_field(), witness_map)?; current_ret_data_idx += 1; } BrilligOutputs::Array(witness_arr) => { for witness in witness_arr.iter() { - let value = memory[current_ret_data_idx]; - insert_value(witness, value.value, witness_map)?; + let value = &memory[current_ret_data_idx]; + insert_value(witness, value.to_field(), witness_map)?; current_ret_data_idx += 1; } } } } + assert!( current_ret_data_idx == return_data_offset + return_data_size, "Brillig VM did not write the expected number of return values" diff --git a/acvm-repo/acvm/src/pwg/memory_op.rs b/acvm-repo/acvm/src/pwg/memory_op.rs index e51797707a7..672c13e11c2 100644 --- a/acvm-repo/acvm/src/pwg/memory_op.rs +++ b/acvm-repo/acvm/src/pwg/memory_op.rs @@ -6,7 +6,9 @@ use acir::{ FieldElement, }; -use super::{arithmetic::ExpressionSolver, get_value, insert_value, witness_to_value}; +use super::{ + arithmetic::ExpressionSolver, get_value, insert_value, is_predicate_false, witness_to_value, +}; use super::{ErrorLocation, OpcodeResolutionError}; type MemoryIndex = u32; @@ -80,11 +82,8 @@ impl MemoryOpSolver { // `operation == 0` implies a read operation. (`operation == 1` implies write operation). 
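The `FailureReason::Trap` branch above condenses into a standalone helper like the sketch below; the real code first copies the revert region out of VM memory.

// Noir currently reverts with string payloads only, so a non-empty revert
// region is decoded as UTF-8 and an empty one means "no message".
fn decode_revert_data(revert_data: &[u8]) -> Option<String> {
    if revert_data.is_empty() {
        None
    } else {
        Some(String::from_utf8(revert_data.to_vec()).expect("Assert message is not valid UTF-8"))
    }
}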
let is_read_operation = operation.is_zero(); - // If the predicate is `None`, then we simply return the value 1 - let pred_value = match predicate { - Some(pred) => get_value(pred, initial_witness), - None => Ok(FieldElement::one()), - }?; + // Fetch whether or not the predicate is false (e.g. equal to zero) + let skip_operation = is_predicate_false(initial_witness, predicate)?; if is_read_operation { // `value_read = arr[memory_index]` @@ -97,7 +96,7 @@ impl MemoryOpSolver { // A zero predicate indicates that we should skip the read operation // and zero out the operation's output. - let value_in_array = if pred_value.is_zero() { + let value_in_array = if skip_operation { FieldElement::zero() } else { self.read_memory_index(memory_index)? @@ -111,7 +110,7 @@ impl MemoryOpSolver { let value_write = value; // A zero predicate indicates that we should skip the write operation. - if pred_value.is_zero() { + if skip_operation { // We only want to write to already initialized memory. // Do nothing if the predicate is zero. Ok(()) diff --git a/acvm-repo/acvm/src/pwg/mod.rs b/acvm-repo/acvm/src/pwg/mod.rs index 3cedcfc0399..652e173867a 100644 --- a/acvm-repo/acvm/src/pwg/mod.rs +++ b/acvm-repo/acvm/src/pwg/mod.rs @@ -4,14 +4,14 @@ use std::collections::HashMap; use acir::{ brillig::ForeignCallResult, - circuit::{opcodes::BlockId, Opcode, OpcodeLocation}, + circuit::{brillig::BrilligBytecode, opcodes::BlockId, Opcode, OpcodeLocation}, native_types::{Expression, Witness, WitnessMap}, BlackBoxFunc, FieldElement, }; use acvm_blackbox_solver::BlackBoxResolutionError; use self::{ - arithmetic::ExpressionSolver, blackbox::bigint::BigIntSolver, directives::solve_directives, + arithmetic::ExpressionSolver, blackbox::bigint::AcvmBigIntSolver, directives::solve_directives, memory_op::MemoryOpSolver, }; use crate::BlackBoxFunctionSolver; @@ -122,8 +122,8 @@ pub enum OpcodeResolutionError { IndexOutOfBounds { opcode_location: ErrorLocation, index: u32, array_size: u32 }, #[error("Failed to solve blackbox function: {0}, reason: {1}")] BlackBoxFunctionFailed(BlackBoxFunc, String), - #[error("Failed to solve brillig function, reason: {message}")] - BrilligFunctionFailed { message: String, call_stack: Vec }, + #[error("Failed to solve brillig function{}", .message.as_ref().map(|m| format!(", reason: {}", m)).unwrap_or_default())] + BrilligFunctionFailed { message: Option, call_stack: Vec }, #[error("Attempted to call `main` with a `Call` opcode")] AcirMainCallAttempted { opcode_location: ErrorLocation }, #[error("{results_size:?} result values were provided for {outputs_size:?} call output witnesses, most likely due to bad ACIR codegen")] @@ -148,7 +148,7 @@ pub struct ACVM<'a, B: BlackBoxFunctionSolver> { /// Stores the solver for memory operations acting on blocks of memory disambiguated by [block][`BlockId`]. block_solvers: HashMap, - bigint_solver: BigIntSolver, + bigint_solver: AcvmBigIntSolver, /// A list of opcodes which are to be executed by the ACVM. 
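`BrilligFunctionFailed` now carries an `Option<String>`, and the display attribute above reduces to the following; the two assertions show both renderings.

fn render_brillig_failure(message: Option<&str>) -> String {
    format!(
        "Failed to solve brillig function{}",
        message.map(|m| format!(", reason: {m}")).unwrap_or_default()
    )
}

fn demo_render() {
    // A trap with no revert data produces the bare message.
    assert_eq!(render_brillig_failure(None), "Failed to solve brillig function");
    // A runtime error or string revert includes the reason.
    assert_eq!(
        render_brillig_failure(Some("explicit trap hit")),
        "Failed to solve brillig function, reason: explicit trap hit"
    );
}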
opcodes: &'a [Opcode], @@ -165,22 +165,31 @@ pub struct ACVM<'a, B: BlackBoxFunctionSolver> { /// Represents the outputs of all ACIR calls during an ACVM process /// List is appended onto by the caller upon reaching a [ACVMStatus::RequiresAcirCall] acir_call_results: Vec>, + + // Each unconstrained function referenced in the program + unconstrained_functions: &'a [BrilligBytecode], } impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { - pub fn new(backend: &'a B, opcodes: &'a [Opcode], initial_witness: WitnessMap) -> Self { + pub fn new( + backend: &'a B, + opcodes: &'a [Opcode], + initial_witness: WitnessMap, + unconstrained_functions: &'a [BrilligBytecode], + ) -> Self { let status = if opcodes.is_empty() { ACVMStatus::Solved } else { ACVMStatus::InProgress }; ACVM { status, backend, block_solvers: HashMap::default(), - bigint_solver: BigIntSolver::default(), + bigint_solver: AcvmBigIntSolver::default(), opcodes, instruction_pointer: 0, witness_map: initial_witness, brillig_solver: None, acir_call_counter: 0, acir_call_results: Vec::default(), + unconstrained_functions, } } @@ -324,6 +333,10 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { Ok(Some(foreign_call)) => return self.wait_for_foreign_call(foreign_call), res => res.map(|_| ()), }, + Opcode::BrilligCall { .. } => match self.solve_brillig_call_opcode() { + Ok(Some(foreign_call)) => return self.wait_for_foreign_call(foreign_call), + res => res.map(|_| ()), + }, Opcode::Call { .. } => match self.solve_call_opcode() { Ok(Some(input_values)) => return self.wait_for_acir_call(input_values), res => res.map(|_| ()), @@ -377,8 +390,9 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { }; let witness = &mut self.witness_map; - if BrilligSolver::::should_skip(witness, brillig)? { - return BrilligSolver::::zero_out_brillig_outputs(witness, brillig).map(|_| None); + if is_predicate_false(witness, &brillig.predicate)? { + return BrilligSolver::::zero_out_brillig_outputs(witness, &brillig.outputs) + .map(|_| None); } // If we're resuming execution after resolving a foreign call then @@ -404,7 +418,51 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { } BrilligSolverStatus::Finished => { // Write execution outputs - solver.finalize(witness, brillig)?; + solver.finalize(witness, &brillig.outputs)?; + Ok(None) + } + } + } + + fn solve_brillig_call_opcode( + &mut self, + ) -> Result, OpcodeResolutionError> { + let Opcode::BrilligCall { id, inputs, outputs, predicate } = + &self.opcodes[self.instruction_pointer] + else { + unreachable!("Not executing a Brillig opcode"); + }; + + let witness = &mut self.witness_map; + if is_predicate_false(witness, predicate)? { + return BrilligSolver::::zero_out_brillig_outputs(witness, outputs).map(|_| None); + } + + // If we're resuming execution after resolving a foreign call then + // there will be a cached `BrilligSolver` to avoid recomputation. + let mut solver: BrilligSolver<'_, B> = match self.brillig_solver.take() { + Some(solver) => solver, + None => BrilligSolver::new_call( + witness, + &self.block_solvers, + inputs, + &self.unconstrained_functions[*id as usize].bytecode, + self.backend, + self.instruction_pointer, + )?, + }; + match solver.solve()? 
{ + BrilligSolverStatus::ForeignCallWait(foreign_call) => { + // Cache the current state of the solver + self.brillig_solver = Some(solver); + Ok(Some(foreign_call)) + } + BrilligSolverStatus::InProgress => { + unreachable!("Brillig solver still in progress") + } + BrilligSolverStatus::Finished => { + // Write execution outputs + solver.finalize(witness, outputs)?; Ok(None) } } @@ -422,7 +480,8 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { }; if should_skip { - let resolution = BrilligSolver::::zero_out_brillig_outputs(witness, brillig); + let resolution = + BrilligSolver::::zero_out_brillig_outputs(witness, &brillig.outputs); return StepResult::Status(self.handle_opcode_resolution(resolution)); } @@ -448,7 +507,9 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { } pub fn solve_call_opcode(&mut self) -> Result, OpcodeResolutionError> { - let Opcode::Call { id, inputs, outputs } = &self.opcodes[self.instruction_pointer] else { + let Opcode::Call { id, inputs, outputs, predicate } = + &self.opcodes[self.instruction_pointer] + else { unreachable!("Not executing a Call opcode"); }; if *id == 0 { @@ -459,6 +520,14 @@ impl<'a, B: BlackBoxFunctionSolver> ACVM<'a, B> { }); } + if is_predicate_false(&self.witness_map, predicate)? { + // Zero out the outputs if we have a false predicate + for output in outputs { + insert_value(output, FieldElement::zero(), &mut self.witness_map)?; + } + return Ok(None); + } + if self.acir_call_counter >= self.acir_call_results.len() { let mut initial_witness = WitnessMap::default(); for (i, input_witness) in inputs.iter().enumerate() { @@ -556,6 +625,20 @@ fn any_witness_from_expression(expr: &Expression) -> Option { } } +/// Returns `true` if the predicate is zero +/// A predicate is used to indicate whether we should skip a certain operation. +/// If we have a zero predicate it means the operation should be skipped. 
+pub(crate) fn is_predicate_false( + witness: &WitnessMap, + predicate: &Option<Expression>, +) -> Result<bool, OpcodeResolutionError> { + match predicate { + Some(pred) => get_value(pred, witness).map(|pred_value| pred_value.is_zero()), + // If the predicate is `None`, then we treat it as an unconditional `true` + None => Ok(false), + } +} + #[derive(Debug, Clone, PartialEq)] pub struct AcirCallWaitInfo { /// Index in the list of ACIR function's that should be called diff --git a/acvm-repo/acvm/tests/solver.rs b/acvm-repo/acvm/tests/solver.rs index a708db5b030..f009e2c05b8 100644 --- a/acvm-repo/acvm/tests/solver.rs +++ b/acvm-repo/acvm/tests/solver.rs @@ -104,8 +104,9 @@ fn inversion_brillig_oracle_equivalence() { (Witness(2), FieldElement::from(3u128)), ]) .into(); - - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments); + let unconstrained_functions = vec![]; + let mut acvm = + ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments, &unconstrained_functions); // use the partial witness generation solver with our acir program let solver_status = acvm.solve(); @@ -241,8 +242,9 @@ fn double_inversion_brillig_oracle() { (Witness(9), FieldElement::from(10u128)), ]) .into(); - - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments); + let unconstrained_functions = vec![]; + let mut acvm = + ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments, &unconstrained_functions); // use the partial witness generation solver with our acir program let solver_status = acvm.solve(); @@ -370,8 +372,9 @@ fn oracle_dependent_execution() { let witness_assignments = BTreeMap::from([(w_x, FieldElement::from(2u128)), (w_y, FieldElement::from(2u128))]).into(); - - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments); + let unconstrained_functions = vec![]; + let mut acvm = + ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments, &unconstrained_functions); // use the partial witness generation solver with our acir program let solver_status = acvm.solve(); @@ -474,8 +477,9 @@ fn brillig_oracle_predicate() { (Witness(2), FieldElement::from(3u128)), ]) .into(); - - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments); + let unconstrained_functions = vec![]; + let mut acvm = + ACVM::new(&StubbedBlackBoxSolver, &opcodes, witness_assignments, &unconstrained_functions); let solver_status = acvm.solve(); assert_eq!(solver_status, ACVMStatus::Solved, "should be fully solved"); @@ -509,7 +513,8 @@ fn unsatisfied_opcode_resolved() { values.insert(d, FieldElement::from(2_i128)); let opcodes = vec![Opcode::AssertZero(opcode_a)]; - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, values); + let unconstrained_functions = vec![]; + let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, values, &unconstrained_functions); let solver_status = acvm.solve(); assert_eq!( solver_status, @@ -549,7 +554,7 @@ fn unsatisfied_opcode_resolved_brillig() { let jmp_if_opcode = BrilligOpcode::JumpIf { condition: MemoryAddress::from(2), location: location_of_stop }; - let trap_opcode = BrilligOpcode::Trap; + let trap_opcode = BrilligOpcode::Trap { revert_data_offset: 0, revert_data_size: 0 }; let stop_opcode = BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 0 }; let brillig_opcode = Opcode::Brillig(Brillig { @@ -591,13 +596,13 @@ fn unsatisfied_opcode_resolved_brillig() { values.insert(w_result, FieldElement::from(0_i128)); let opcodes = vec![brillig_opcode, Opcode::AssertZero(opcode_a)]; - - let mut acvm = 
ACVM::new(&StubbedBlackBoxSolver, &opcodes, values); + let unconstrained_functions = vec![]; + let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, values, &unconstrained_functions); let solver_status = acvm.solve(); assert_eq!( solver_status, ACVMStatus::Failure(OpcodeResolutionError::BrilligFunctionFailed { - message: "explicit trap hit in brillig".to_string(), + message: None, call_stack: vec![OpcodeLocation::Brillig { acir_index: 0, brillig_index: 3 }] }), "The first opcode is not satisfiable, expected an error indicating this" @@ -635,8 +640,9 @@ fn memory_operations() { }); let opcodes = vec![init, read_op, expression]; - - let mut acvm = ACVM::new(&StubbedBlackBoxSolver, &opcodes, initial_witness); + let unconstrained_functions = vec![]; + let mut acvm = + ACVM::new(&StubbedBlackBoxSolver, &opcodes, initial_witness, &unconstrained_functions); let solver_status = acvm.solve(); assert_eq!(solver_status, ACVMStatus::Solved); let witness_map = acvm.finalize(); diff --git a/acvm-repo/acvm_js/Cargo.toml b/acvm-repo/acvm_js/Cargo.toml index 8319c38aee2..4635dc8663e 100644 --- a/acvm-repo/acvm_js/Cargo.toml +++ b/acvm-repo/acvm_js/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_js" description = "Typescript wrapper around the ACVM allowing execution of ACIR code" # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/acvm_js/build.sh b/acvm-repo/acvm_js/build.sh index fe0b4dcbfff..4486a214c9c 100755 --- a/acvm-repo/acvm_js/build.sh +++ b/acvm-repo/acvm_js/build.sh @@ -14,6 +14,13 @@ function run_or_fail { exit $status fi } +function run_if_available { + if command -v "$1" >/dev/null 2>&1; then + "$@" + else + echo "$1 is not installed. Please install it to use this feature." >&2 + fi +} require_command jq require_command cargo diff --git a/acvm-repo/acvm_js/package.json b/acvm-repo/acvm_js/package.json index 44d99f13c31..63f12942018 100644 --- a/acvm-repo/acvm_js/package.json +++ b/acvm-repo/acvm_js/package.json @@ -1,6 +1,6 @@ { "name": "@noir-lang/acvm_js", - "version": "0.42.0", + "version": "0.43.0", "publishConfig": { "access": "public" }, diff --git a/acvm-repo/acvm_js/src/execute.rs b/acvm-repo/acvm_js/src/execute.rs index 0e58ccf039c..2fab684467e 100644 --- a/acvm-repo/acvm_js/src/execute.rs +++ b/acvm-repo/acvm_js/src/execute.rs @@ -1,5 +1,6 @@ use std::{future::Future, pin::Pin}; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::BlackBoxFunctionSolver; use acvm::{ acir::circuit::{Circuit, Program}, @@ -13,7 +14,8 @@ use wasm_bindgen::prelude::wasm_bindgen; use crate::{ foreign_call::{resolve_brillig, ForeignCallHandler}, - JsExecutionError, JsWitnessMap, JsWitnessStack, + public_witness::extract_indices, + JsExecutionError, JsSolvedAndReturnWitness, JsWitnessMap, JsWitnessStack, }; #[wasm_bindgen] @@ -58,6 +60,44 @@ pub async fn execute_circuit( Ok(witness_map.into()) } +/// Executes an ACIR circuit to generate the solved witness from the initial witness. +/// This method also extracts the public return values from the solved witness into its own return witness. +/// +/// @param {&WasmBlackBoxFunctionSolver} solver - A black box solver. +/// @param {Uint8Array} circuit - A serialized representation of an ACIR circuit +/// @param {WitnessMap} initial_witness - The initial witness map defining all of the inputs to `circuit`.. +/// @param {ForeignCallHandler} foreign_call_handler - A callback to process any foreign calls from the circuit. 
+/// @returns {SolvedAndReturnWitness} The solved witness calculated by executing the circuit on the provided inputs, as well as the return witness indices as specified by the circuit. +#[wasm_bindgen(js_name = executeCircuitWithReturnWitness, skip_jsdoc)] +pub async fn execute_circuit_with_return_witness( + solver: &WasmBlackBoxFunctionSolver, + program: Vec, + initial_witness: JsWitnessMap, + foreign_call_handler: ForeignCallHandler, +) -> Result { + console_error_panic_hook::set_once(); + + let program: Program = Program::deserialize_program(&program) + .map_err(|_| JsExecutionError::new("Failed to deserialize circuit. This is likely due to differing serialization formats between ACVM_JS and your compiler".to_string(), None))?; + + let mut witness_stack = execute_program_with_native_program_and_return( + solver, + &program, + initial_witness, + &foreign_call_handler, + ) + .await?; + let solved_witness = + witness_stack.pop().expect("Should have at least one witness on the stack").witness; + + let main_circuit = &program.functions[0]; + let return_witness = + extract_indices(&solved_witness, main_circuit.return_values.0.iter().copied().collect()) + .map_err(|err| JsExecutionError::new(err, None))?; + + Ok((solved_witness, return_witness).into()) +} + /// Executes an ACIR circuit to generate the solved witness from the initial witness. /// /// @param {&WasmBlackBoxFunctionSolver} solver - A black box solver. @@ -127,7 +167,27 @@ async fn execute_program_with_native_type_return( let program: Program = Program::deserialize_program(&program) .map_err(|_| JsExecutionError::new("Failed to deserialize circuit. This is likely due to differing serialization formats between ACVM_JS and your compiler".to_string(), None))?; - let executor = ProgramExecutor::new(&program.functions, &solver.0, foreign_call_executor); + execute_program_with_native_program_and_return( + solver, + &program, + initial_witness, + foreign_call_executor, + ) + .await +} + +async fn execute_program_with_native_program_and_return( + solver: &WasmBlackBoxFunctionSolver, + program: &Program, + initial_witness: JsWitnessMap, + foreign_call_executor: &ForeignCallHandler, +) -> Result { + let executor = ProgramExecutor::new( + &program.functions, + &program.unconstrained_functions, + &solver.0, + foreign_call_executor, + ); let witness_stack = executor.execute(initial_witness.into()).await?; Ok(witness_stack) @@ -136,6 +196,8 @@ async fn execute_program_with_native_type_return( struct ProgramExecutor<'a, B: BlackBoxFunctionSolver> { functions: &'a [Circuit], + unconstrained_functions: &'a [BrilligBytecode], + blackbox_solver: &'a B, foreign_call_handler: &'a ForeignCallHandler, @@ -144,10 +206,16 @@ struct ProgramExecutor<'a, B: BlackBoxFunctionSolver> { impl<'a, B: BlackBoxFunctionSolver> ProgramExecutor<'a, B> { fn new( functions: &'a [Circuit], + unconstrained_functions: &'a [BrilligBytecode], blackbox_solver: &'a B, foreign_call_handler: &'a ForeignCallHandler, ) -> Self { - ProgramExecutor { functions, blackbox_solver, foreign_call_handler } + ProgramExecutor { + functions, + unconstrained_functions, + blackbox_solver, + foreign_call_handler, + } } async fn execute(&self, initial_witness: WitnessMap) -> Result { @@ -166,7 +234,12 @@ impl<'a, B: BlackBoxFunctionSolver> ProgramExecutor<'a, B> { witness_stack: &'a mut WitnessStack, ) -> Pin> + 'a>> { Box::pin(async { - let mut acvm = ACVM::new(self.blackbox_solver, &circuit.opcodes, initial_witness); + let mut acvm = ACVM::new( + self.blackbox_solver, + &circuit.opcodes, + 
initial_witness, + self.unconstrained_functions, + ); loop { let solver_status = acvm.solve(); @@ -177,7 +250,7 @@ impl<'a, B: BlackBoxFunctionSolver> ProgramExecutor<'a, B> { unreachable!("Execution should not stop while in `InProgress` state.") } ACVMStatus::Failure(error) => { - let (assert_message, call_stack) = match &error { + let (assert_message, call_stack): (Option<&str>, _) = match &error { OpcodeResolutionError::UnsatisfiedConstrain { opcode_location: ErrorLocation::Resolved(opcode_location), } @@ -188,12 +261,16 @@ impl<'a, B: BlackBoxFunctionSolver> ProgramExecutor<'a, B> { circuit.get_assert_message(*opcode_location), Some(vec![*opcode_location]), ), - OpcodeResolutionError::BrilligFunctionFailed { call_stack, .. } => { + OpcodeResolutionError::BrilligFunctionFailed { + call_stack, + message, + } => { + let revert_message = message.as_ref().map(String::as_str); let failing_opcode = call_stack .last() .expect("Brillig error call stacks cannot be empty"); ( - circuit.get_assert_message(*failing_opcode), + revert_message.or(circuit.get_assert_message(*failing_opcode)), Some(call_stack.clone()), ) } diff --git a/acvm-repo/acvm_js/src/js_witness_map.rs b/acvm-repo/acvm_js/src/js_witness_map.rs index 481b8caaa2d..c4482c4a234 100644 --- a/acvm-repo/acvm_js/src/js_witness_map.rs +++ b/acvm-repo/acvm_js/src/js_witness_map.rs @@ -2,13 +2,23 @@ use acvm::{ acir::native_types::{Witness, WitnessMap}, FieldElement, }; -use js_sys::{JsString, Map}; +use js_sys::{JsString, Map, Object}; use wasm_bindgen::prelude::{wasm_bindgen, JsValue}; #[wasm_bindgen(typescript_custom_section)] const WITNESS_MAP: &'static str = r#" // Map from witness index to hex string value of witness. export type WitnessMap = Map; + +/** + * An execution result containing two witnesses. + * 1. The full solved witness of the execution. + * 2. The return witness which contains the given public return values within the full witness. 
+ */ +export type SolvedAndReturnWitness = { + solvedWitness: WitnessMap; + returnWitness: WitnessMap; +} "#; // WitnessMap @@ -21,6 +31,12 @@ extern "C" { #[wasm_bindgen(constructor, js_class = "Map")] pub fn new() -> JsWitnessMap; + #[wasm_bindgen(extends = Object, js_name = "SolvedAndReturnWitness", typescript_type = "SolvedAndReturnWitness")] + #[derive(Clone, Debug, PartialEq, Eq)] + pub type JsSolvedAndReturnWitness; + + #[wasm_bindgen(constructor, js_class = "Object")] + pub fn new() -> JsSolvedAndReturnWitness; } impl Default for JsWitnessMap { @@ -29,6 +45,12 @@ impl Default for JsWitnessMap { } } +impl Default for JsSolvedAndReturnWitness { + fn default() -> Self { + Self::new() + } +} + impl From for JsWitnessMap { fn from(witness_map: WitnessMap) -> Self { let js_map = JsWitnessMap::new(); @@ -54,6 +76,20 @@ impl From for WitnessMap { } } +impl From<(WitnessMap, WitnessMap)> for JsSolvedAndReturnWitness { + fn from(witness_maps: (WitnessMap, WitnessMap)) -> Self { + let js_solved_witness = JsWitnessMap::from(witness_maps.0); + let js_return_witness = JsWitnessMap::from(witness_maps.1); + + let entry_map = Map::new(); + entry_map.set(&JsValue::from_str("solvedWitness"), &js_solved_witness); + entry_map.set(&JsValue::from_str("returnWitness"), &js_return_witness); + + let solved_and_return_witness = Object::from_entries(&entry_map).unwrap(); + JsSolvedAndReturnWitness { obj: solved_and_return_witness } + } +} + pub(crate) fn js_value_to_field_element(js_value: JsValue) -> Result { let hex_str = js_value.as_string().ok_or("failed to parse field element from non-string")?; diff --git a/acvm-repo/acvm_js/src/lib.rs b/acvm-repo/acvm_js/src/lib.rs index d7ecc0ae192..66a4388b132 100644 --- a/acvm-repo/acvm_js/src/lib.rs +++ b/acvm-repo/acvm_js/src/lib.rs @@ -22,9 +22,10 @@ pub use compression::{ }; pub use execute::{ create_black_box_solver, execute_circuit, execute_circuit_with_black_box_solver, - execute_program, execute_program_with_black_box_solver, + execute_circuit_with_return_witness, execute_program, execute_program_with_black_box_solver, }; pub use js_execution_error::JsExecutionError; +pub use js_witness_map::JsSolvedAndReturnWitness; pub use js_witness_map::JsWitnessMap; pub use js_witness_stack::JsWitnessStack; pub use logging::init_log_level; diff --git a/acvm-repo/acvm_js/src/public_witness.rs b/acvm-repo/acvm_js/src/public_witness.rs index a0d5b5f8be2..4ba054732d4 100644 --- a/acvm-repo/acvm_js/src/public_witness.rs +++ b/acvm-repo/acvm_js/src/public_witness.rs @@ -7,7 +7,10 @@ use wasm_bindgen::prelude::wasm_bindgen; use crate::JsWitnessMap; -fn extract_indices(witness_map: &WitnessMap, indices: Vec) -> Result { +pub(crate) fn extract_indices( + witness_map: &WitnessMap, + indices: Vec, +) -> Result { let mut extracted_witness_map = WitnessMap::new(); for witness in indices { let witness_value = witness_map.get(&witness).ok_or(format!( @@ -44,7 +47,7 @@ pub fn get_return_witness( let witness_map = WitnessMap::from(witness_map); let return_witness = - extract_indices(&witness_map, circuit.return_values.0.clone().into_iter().collect())?; + extract_indices(&witness_map, circuit.return_values.0.iter().copied().collect())?; Ok(JsWitnessMap::from(return_witness)) } @@ -71,7 +74,7 @@ pub fn get_public_parameters_witness( let witness_map = WitnessMap::from(solved_witness); let public_params_witness = - extract_indices(&witness_map, circuit.public_parameters.0.clone().into_iter().collect())?; + extract_indices(&witness_map, 
circuit.public_parameters.0.iter().copied().collect())?; Ok(JsWitnessMap::from(public_params_witness)) } diff --git a/acvm-repo/acvm_js/test/shared/addition.ts b/acvm-repo/acvm_js/test/shared/addition.ts index b56a4286878..820a415acf3 100644 --- a/acvm-repo/acvm_js/test/shared/addition.ts +++ b/acvm-repo/acvm_js/test/shared/addition.ts @@ -2,11 +2,11 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `addition_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 75, 14, 128, 32, 12, 68, 249, 120, 160, 150, 182, 208, 238, 188, 138, 68, - 184, 255, 17, 212, 200, 130, 196, 165, 188, 164, 153, 174, 94, 38, 227, 221, 203, 118, 159, 119, 95, 226, 200, 125, - 36, 252, 3, 253, 66, 87, 152, 92, 4, 153, 185, 149, 212, 144, 240, 128, 100, 85, 5, 88, 106, 86, 84, 20, 149, 51, 41, - 81, 83, 214, 98, 213, 10, 24, 50, 53, 236, 98, 212, 135, 44, 174, 235, 5, 143, 35, 12, 151, 159, 126, 55, 109, 28, - 231, 145, 47, 245, 105, 191, 143, 133, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 65, 14, 128, 32, 12, 4, 65, 124, 80, 75, 91, 104, 111, 126, 69, 34, 252, + 255, 9, 106, 228, 64, 162, 55, 153, 164, 217, 158, 38, 155, 245, 238, 97, 189, 206, 187, 55, 161, 231, 214, 19, 254, + 129, 126, 162, 107, 25, 92, 4, 137, 185, 230, 88, 145, 112, 135, 104, 69, 5, 88, 74, 82, 84, 20, 149, 35, 42, 81, 85, + 214, 108, 197, 50, 24, 50, 85, 108, 98, 212, 186, 44, 204, 235, 5, 183, 99, 233, 46, 63, 252, 110, 216, 56, 184, 15, + 78, 146, 74, 173, 20, 141, 1, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts index e074cf1ad38..722bae8e015 100644 --- a/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/complex_foreign_call.ts @@ -2,13 +2,13 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `complex_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 93, 10, 131, 48, 12, 78, 218, 233, 100, 111, 187, 193, 96, 59, 64, 231, 9, - 188, 139, 248, 166, 232, 163, 167, 23, 11, 126, 197, 24, 250, 34, 86, 208, 64, 72, 218, 252, 125, 36, 105, 153, 22, - 42, 60, 51, 116, 235, 217, 64, 103, 156, 37, 5, 191, 10, 210, 29, 163, 63, 167, 203, 229, 206, 194, 104, 110, 128, - 209, 158, 128, 49, 236, 195, 69, 231, 157, 114, 46, 73, 251, 103, 35, 239, 231, 225, 57, 243, 156, 227, 252, 132, 44, - 112, 79, 176, 125, 84, 223, 73, 248, 145, 152, 69, 149, 4, 107, 233, 114, 90, 119, 145, 85, 237, 151, 192, 89, 247, - 221, 208, 54, 163, 85, 174, 26, 234, 87, 232, 63, 101, 103, 21, 55, 169, 216, 73, 72, 249, 5, 197, 234, 132, 123, 179, - 35, 247, 155, 214, 246, 102, 20, 73, 204, 72, 168, 123, 191, 161, 25, 66, 136, 159, 187, 53, 5, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 84, 75, 10, 132, 48, 12, 77, 218, 209, 145, 217, 205, 13, 6, 198, 3, 84, 79, + 224, 93, 196, 157, 162, 75, 79, 47, 22, 124, 197, 16, 186, 17, 43, 104, 32, 36, 109, 126, 143, 36, 45, 211, 70, 133, + 103, 134, 110, 61, 27, 232, 140, 179, 164, 224, 215, 64, 186, 115, 84, 113, 186, 92, 238, 42, 140, 230, 1, 24, 237, 5, + 24, 195, 62, 220, 116, 222, 41, 231, 146, 180, 127, 54, 242, 126, 94, 158, 51, 207, 57, 206, 111, 200, 2, 247, 4, 219, + 79, 245, 157, 132, 31, 137, 89, 52, 73, 176, 214, 46, 167, 125, 23, 89, 213, 254, 8, 156, 237, 56, 76, 125, 55, 91, + 229, 170, 161, 254, 133, 94, 42, 59, 171, 184, 69, 197, 46, 66, 202, 47, 40, 86, 39, 220, 155, 3, 185, 191, 180, 183, + 55, 163, 72, 98, 70, 66, 221, 251, 40, 173, 255, 35, 68, 62, 61, 5, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts b/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts index 5aef521f231..97b5041121a 100644 --- a/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts +++ b/acvm-repo/acvm_js/test/shared/fixed_base_scalar_mul.ts @@ -1,8 +1,8 @@ // See `fixed_base_scalar_mul_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 138, 81, 10, 0, 48, 8, 66, 87, 219, 190, 118, 233, 29, 61, 43, 3, 5, 121, 34, - 207, 86, 231, 162, 198, 157, 124, 228, 71, 157, 220, 232, 161, 227, 226, 206, 214, 95, 221, 74, 0, 116, 58, 13, 182, - 105, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 138, 81, 10, 0, 48, 8, 66, 87, 219, 190, 118, 233, 29, 61, 35, 3, 19, 228, 137, + 60, 91, 149, 139, 26, 119, 242, 145, 31, 117, 114, 163, 135, 142, 139, 219, 91, 127, 117, 71, 2, 117, 84, 50, 98, 113, + 0, 0, 0, ]); export const initialWitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000001'], diff --git a/acvm-repo/acvm_js/test/shared/foreign_call.ts b/acvm-repo/acvm_js/test/shared/foreign_call.ts index eb14cb2e9f1..0e3d77f62a9 100644 --- a/acvm-repo/acvm_js/test/shared/foreign_call.ts +++ b/acvm-repo/acvm_js/test/shared/foreign_call.ts @@ -2,10 +2,10 @@ import { WitnessMap } from '@noir-lang/acvm_js'; // See `simple_brillig_foreign_call` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 61, 10, 192, 48, 8, 133, 53, 133, 82, 186, 245, 38, 233, 13, 122, 153, - 14, 93, 58, 132, 144, 227, 135, 252, 41, 56, 36, 46, 201, 7, 162, 168, 200, 123, 34, 52, 142, 28, 72, 245, 38, 106, 9, - 247, 30, 202, 118, 142, 27, 215, 221, 178, 82, 175, 33, 15, 133, 189, 163, 159, 57, 197, 252, 251, 195, 235, 188, 230, - 186, 16, 65, 255, 12, 239, 92, 131, 89, 149, 198, 77, 3, 10, 9, 119, 8, 198, 242, 152, 1, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 173, 144, 61, 10, 192, 32, 12, 133, 19, 11, 165, 116, 235, 77, 236, 13, 122, 153, + 14, 93, 58, 136, 120, 124, 241, 47, 129, 12, 42, 130, 126, 16, 18, 146, 16, 222, 11, 66, 225, 136, 129, 84, 111, 162, + 150, 112, 239, 161, 172, 231, 184, 113, 221, 45, 45, 245, 42, 242, 144, 216, 43, 250, 153, 83, 204, 191, 223, 189, + 198, 246, 92, 39, 60, 244, 63, 195, 59, 87, 99, 150, 165, 113, 83, 193, 0, 1, 19, 247, 29, 5, 160, 1, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ [1, '0x0000000000000000000000000000000000000000000000000000000000000005'], diff --git a/acvm-repo/acvm_js/test/shared/memory_op.ts b/acvm-repo/acvm_js/test/shared/memory_op.ts index 1d0e06b3c8a..a69ae443259 100644 --- a/acvm-repo/acvm_js/test/shared/memory_op.ts +++ b/acvm-repo/acvm_js/test/shared/memory_op.ts @@ -1,9 +1,9 @@ // See `memory_op_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 81, 201, 13, 0, 32, 8, 147, 195, 125, 112, 3, 247, 159, 74, 141, 60, 106, 226, - 79, 120, 216, 132, 180, 124, 154, 82, 168, 108, 212, 57, 2, 122, 129, 157, 201, 181, 150, 59, 186, 179, 189, 161, 101, - 251, 82, 176, 175, 196, 121, 89, 118, 185, 246, 91, 185, 26, 125, 187, 64, 80, 134, 29, 195, 31, 79, 24, 2, 250, 167, - 252, 27, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 213, 81, 57, 14, 0, 32, 8, 147, 195, 255, 224, 15, 252, 255, 171, 212, 200, 208, + 129, 77, 24, 108, 66, 90, 150, 166, 20, 106, 23, 125, 143, 128, 62, 96, 103, 114, 173, 45, 198, 116, 182, 55, 140, + 106, 95, 74, 246, 149, 60, 47, 171, 46, 215, 126, 43, 87, 179, 111, 23, 8, 202, 176, 99, 248, 240, 9, 11, 137, 33, + 212, 110, 35, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/nested_acir_call.ts b/acvm-repo/acvm_js/test/shared/nested_acir_call.ts index ce91282a681..4b73d01bb01 100644 --- a/acvm-repo/acvm_js/test/shared/nested_acir_call.ts +++ b/acvm-repo/acvm_js/test/shared/nested_acir_call.ts @@ -2,13 +2,13 @@ import { WitnessMap, StackItem, WitnessStack } from '@noir-lang/acvm_js'; // See `nested_acir_call_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 97, 10, 195, 32, 12, 133, 163, 66, 207, 147, 24, 109, 227, 191, 93, 101, - 50, 123, 255, 35, 172, 99, 25, 83, 17, 250, 99, 14, 250, 224, 97, 144, 16, 146, 143, 231, 224, 45, 167, 126, 105, 57, - 108, 14, 91, 248, 202, 168, 65, 255, 207, 122, 28, 180, 250, 244, 221, 244, 197, 223, 68, 182, 154, 197, 184, 134, 80, - 54, 95, 136, 233, 142, 62, 101, 137, 24, 98, 94, 133, 132, 162, 196, 135, 23, 230, 34, 65, 182, 148, 211, 134, 137, 2, - 23, 218, 99, 226, 93, 135, 185, 121, 123, 33, 84, 12, 234, 218, 192, 64, 174, 3, 248, 47, 88, 48, 17, 150, 157, 183, - 151, 95, 244, 86, 91, 221, 61, 10, 81, 31, 178, 190, 110, 194, 102, 96, 76, 251, 202, 80, 13, 204, 77, 224, 25, 176, - 70, 79, 197, 128, 18, 64, 3, 4, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 205, 146, 65, 10, 3, 33, 12, 69, 163, 46, 230, 58, 137, 209, 49, 238, 122, 149, 74, + 157, 251, 31, 161, 83, 154, 161, 86, 132, 89, 212, 194, 124, 248, 24, 36, 132, 228, 241, 29, 188, 229, 212, 47, 45, + 187, 205, 110, 11, 31, 25, 53, 28, 255, 103, 77, 14, 58, 29, 141, 55, 125, 241, 55, 145, 109, 102, 49, 174, 33, 212, + 228, 43, 49, 221, 209, 231, 34, 17, 67, 44, 171, 144, 80, 148, 248, 240, 194, 92, 37, 72, 202, 37, 39, 204, 20, 184, + 210, 22, 51, 111, 58, 204, 205, 219, 11, 161, 129, 208, 214, 6, 6, 114, 29, 193, 127, 193, 130, 137, 176, 236, 188, + 189, 252, 162, 183, 218, 230, 238, 97, 138, 250, 152, 245, 245, 87, 220, 12, 140, 113, 95, 153, 170, 129, 185, 17, 60, + 3, 54, 212, 19, 104, 145, 195, 151, 14, 4, 0, 0, ]); export const initialWitnessMap: WitnessMap = new Map([ diff --git a/acvm-repo/acvm_js/test/shared/pedersen.ts b/acvm-repo/acvm_js/test/shared/pedersen.ts index 00d207053d8..e8ddc893d87 100644 --- a/acvm-repo/acvm_js/test/shared/pedersen.ts +++ b/acvm-repo/acvm_js/test/shared/pedersen.ts @@ -1,7 +1,7 @@ // See `pedersen_circuit` integration test in `acir/tests/test_program_serialization.rs`. export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 7, 6, 0, 0, 8, 108, 209, 255, 63, 156, 54, 233, 56, 55, 17, 26, 18, 196, - 241, 169, 250, 178, 141, 167, 32, 159, 254, 234, 238, 255, 87, 112, 52, 63, 63, 101, 105, 0, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 93, 74, 9, 10, 0, 0, 4, 115, 149, 255, 127, 88, 8, 133, 213, 218, 137, 80, 144, 32, + 182, 79, 213, 151, 173, 61, 5, 121, 245, 91, 103, 255, 191, 3, 7, 16, 26, 112, 158, 113, 0, 0, 0, ]); export const initialWitnessMap = new Map([[1, '0x0000000000000000000000000000000000000000000000000000000000000001']]); diff --git a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts index 14c32c615c8..a207aa12b2c 100644 --- a/acvm-repo/acvm_js/test/shared/schnorr_verify.ts +++ b/acvm-repo/acvm_js/test/shared/schnorr_verify.ts @@ -1,17 +1,17 @@ // See `schnorr_verify_circuit` integration test in `acir/tests/test_program_serialization.rs`. 
export const bytecode = Uint8Array.from([ - 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 77, 210, 7, 78, 2, 1, 20, 69, 81, 236, 189, 247, 222, 123, 239, 93, 177, 33, 34, - 238, 194, 253, 47, 193, 200, 147, 67, 194, 36, 147, 163, 33, 33, 228, 191, 219, 82, 168, 63, 63, 181, 183, 197, 223, - 177, 147, 191, 181, 183, 149, 69, 159, 183, 213, 222, 238, 218, 219, 206, 14, 118, 178, 139, 141, 183, 135, 189, 236, - 99, 63, 7, 56, 200, 33, 14, 115, 132, 163, 28, 227, 56, 39, 56, 201, 41, 78, 115, 134, 179, 156, 227, 60, 23, 184, - 200, 37, 46, 115, 133, 171, 92, 227, 58, 55, 184, 201, 45, 110, 115, 135, 187, 220, 227, 62, 15, 120, 200, 35, 30, - 243, 132, 167, 60, 227, 57, 47, 120, 201, 43, 94, 243, 134, 183, 188, 227, 61, 31, 248, 200, 39, 22, 249, 204, 151, - 166, 29, 243, 188, 250, 255, 141, 239, 44, 241, 131, 101, 126, 178, 194, 47, 86, 249, 237, 123, 171, 76, 127, 105, 47, - 189, 165, 181, 116, 150, 198, 26, 125, 245, 248, 45, 233, 41, 45, 165, 163, 52, 148, 126, 210, 78, 186, 73, 51, 233, - 37, 173, 164, 147, 52, 146, 62, 210, 70, 186, 72, 19, 233, 33, 45, 164, 131, 52, 144, 253, 151, 11, 245, 221, 179, - 121, 246, 206, 214, 217, 57, 27, 103, 223, 109, 187, 238, 218, 115, 223, 142, 135, 246, 59, 182, 219, 169, 189, 206, - 237, 116, 105, 159, 107, 187, 220, 218, 227, 222, 14, 143, 238, 95, 116, 247, 23, 119, 126, 115, 223, 146, 187, 150, - 221, 179, 226, 142, 141, 155, 53, 238, 86, 104, 186, 231, 255, 243, 7, 100, 141, 232, 192, 233, 3, 0, 0, + 31, 139, 8, 0, 0, 0, 0, 0, 0, 255, 85, 210, 85, 78, 67, 81, 24, 133, 209, 226, 238, 238, 238, 238, 238, 165, 148, 82, + 102, 193, 252, 135, 64, 232, 78, 87, 147, 114, 147, 147, 5, 47, 132, 252, 251, 107, 41, 212, 191, 159, 218, 107, 241, + 115, 236, 228, 111, 237, 181, 178, 173, 246, 186, 107, 175, 157, 29, 236, 100, 23, 27, 175, 135, 189, 236, 99, 63, 7, + 56, 200, 33, 14, 115, 132, 163, 28, 227, 56, 39, 56, 201, 41, 78, 115, 134, 179, 156, 227, 60, 23, 184, 200, 37, 46, + 115, 133, 171, 92, 227, 58, 55, 184, 201, 45, 110, 115, 135, 187, 220, 227, 62, 15, 120, 200, 35, 30, 243, 132, 167, + 60, 227, 57, 47, 120, 201, 43, 94, 243, 134, 183, 188, 227, 61, 31, 248, 200, 39, 62, 243, 133, 175, 77, 59, 230, 123, + 243, 123, 145, 239, 44, 241, 131, 101, 126, 178, 194, 47, 86, 249, 237, 239, 86, 153, 238, 210, 92, 122, 75, 107, 233, + 44, 141, 53, 250, 234, 241, 191, 164, 167, 180, 148, 142, 210, 80, 250, 73, 59, 233, 38, 205, 164, 151, 180, 146, 78, + 210, 72, 250, 72, 27, 233, 34, 77, 164, 135, 180, 144, 14, 210, 64, 246, 95, 46, 212, 119, 207, 230, 217, 59, 91, 103, + 231, 108, 156, 125, 183, 237, 186, 107, 207, 125, 59, 30, 218, 239, 216, 110, 167, 246, 58, 183, 211, 165, 125, 174, + 237, 114, 107, 143, 123, 59, 60, 186, 255, 179, 187, 191, 186, 115, 209, 125, 75, 238, 90, 118, 207, 138, 59, 54, 110, + 214, 184, 91, 161, 233, 158, 255, 190, 63, 165, 188, 93, 151, 233, 3, 0, 0, ]); export const initialWitnessMap = new Map([ diff --git a/acvm-repo/blackbox_solver/Cargo.toml b/acvm-repo/blackbox_solver/Cargo.toml index 8f5ff862360..1d6629c8223 100644 --- a/acvm-repo/blackbox_solver/Cargo.toml +++ b/acvm-repo/blackbox_solver/Cargo.toml @@ -2,7 +2,7 @@ name = "acvm_blackbox_solver" description = "A solver for the blackbox functions found in ACIR and Brillig" # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true @@ -15,6 +15,7 @@ repository.workspace = true [dependencies] acir.workspace = true thiserror.workspace = true +num-bigint = "0.4" blake2 = "0.10.6" blake3 = "1.5.0" diff 
--git a/acvm-repo/blackbox_solver/src/bigint.rs b/acvm-repo/blackbox_solver/src/bigint.rs new file mode 100644 index 00000000000..5b19f03a238 --- /dev/null +++ b/acvm-repo/blackbox_solver/src/bigint.rs @@ -0,0 +1,99 @@ +use std::collections::HashMap; + +use acir::BlackBoxFunc; + +use num_bigint::BigUint; + +use crate::BlackBoxResolutionError; + +/// Resolve BigInt opcodes by storing BigInt values (and their moduli) by their ID in a HashMap: +/// - When it encounters a bigint operation opcode, it performs the operation on the stored values +/// and store the result using the provided ID. +/// - When it gets a to_bytes opcode, it simply looks up the value and resolves the output witness accordingly. +#[derive(Default, Debug, Clone, PartialEq, Eq)] + +pub struct BigIntSolver { + bigint_id_to_value: HashMap, + bigint_id_to_modulus: HashMap, +} + +impl BigIntSolver { + pub(crate) fn get_bigint( + &self, + id: u32, + func: BlackBoxFunc, + ) -> Result { + self.bigint_id_to_value + .get(&id) + .ok_or(BlackBoxResolutionError::Failed( + func, + format!("could not find bigint of id {id}"), + )) + .cloned() + } + + pub(crate) fn get_modulus( + &self, + id: u32, + func: BlackBoxFunc, + ) -> Result { + self.bigint_id_to_modulus + .get(&id) + .ok_or(BlackBoxResolutionError::Failed( + func, + format!("could not find bigint of id {id}"), + )) + .cloned() + } + pub fn bigint_from_bytes( + &mut self, + inputs: &[u8], + modulus: &[u8], + output: u32, + ) -> Result<(), BlackBoxResolutionError> { + let bigint = BigUint::from_bytes_le(inputs); + self.bigint_id_to_value.insert(output, bigint); + let modulus = BigUint::from_bytes_le(modulus); + self.bigint_id_to_modulus.insert(output, modulus); + Ok(()) + } + + pub fn bigint_to_bytes(&self, input: u32) -> Result, BlackBoxResolutionError> { + let bigint = self.get_bigint(input, BlackBoxFunc::BigIntToLeBytes)?; + Ok(bigint.to_bytes_le()) + } + + pub fn bigint_op( + &mut self, + lhs: u32, + rhs: u32, + output: u32, + func: BlackBoxFunc, + ) -> Result<(), BlackBoxResolutionError> { + let modulus = self.get_modulus(lhs, func)?; + let lhs = self.get_bigint(lhs, func)?; + let rhs = self.get_bigint(rhs, func)?; + let mut result = match func { + BlackBoxFunc::BigIntAdd => lhs + rhs, + BlackBoxFunc::BigIntSub => { + if lhs >= rhs { + &lhs - &rhs + } else { + &lhs + &modulus - &rhs + } + } + BlackBoxFunc::BigIntMul => lhs * rhs, + BlackBoxFunc::BigIntDiv => { + lhs * rhs.modpow(&(&modulus - BigUint::from(2_u32)), &modulus) + } //TODO ensure that modulus is prime + _ => unreachable!("ICE - bigint_op must be called for an operation"), + }; + if result > modulus { + let q = &result / &modulus; + result -= q * &modulus; + } + self.bigint_id_to_value.insert(output, result); + self.bigint_id_to_modulus.insert(output, modulus); + Ok(()) + } +} diff --git a/acvm-repo/blackbox_solver/src/curve_specific_solver.rs b/acvm-repo/blackbox_solver/src/curve_specific_solver.rs index f0ab4561229..fab67467d9a 100644 --- a/acvm-repo/blackbox_solver/src/curve_specific_solver.rs +++ b/acvm-repo/blackbox_solver/src/curve_specific_solver.rs @@ -11,7 +11,7 @@ pub trait BlackBoxFunctionSolver { &self, public_key_x: &FieldElement, public_key_y: &FieldElement, - signature: &[u8], + signature: &[u8; 64], message: &[u8], ) -> Result; fn pedersen_commitment( @@ -59,7 +59,7 @@ impl BlackBoxFunctionSolver for StubbedBlackBoxSolver { &self, _public_key_x: &FieldElement, _public_key_y: &FieldElement, - _signature: &[u8], + _signature: &[u8; 64], _message: &[u8], ) -> Result { 
Err(Self::fail(BlackBoxFunc::SchnorrVerify)) diff --git a/acvm-repo/blackbox_solver/src/lib.rs b/acvm-repo/blackbox_solver/src/lib.rs index dc798bdab32..0f57f2ce7da 100644 --- a/acvm-repo/blackbox_solver/src/lib.rs +++ b/acvm-repo/blackbox_solver/src/lib.rs @@ -10,10 +10,12 @@ use acir::BlackBoxFunc; use thiserror::Error; +mod bigint; mod curve_specific_solver; mod ecdsa; mod hash; +pub use bigint::BigIntSolver; pub use curve_specific_solver::{BlackBoxFunctionSolver, StubbedBlackBoxSolver}; pub use ecdsa::{ecdsa_secp256k1_verify, ecdsa_secp256r1_verify}; pub use hash::{blake2s, blake3, keccak256, keccakf1600, sha256, sha256compression}; diff --git a/acvm-repo/bn254_blackbox_solver/Cargo.toml b/acvm-repo/bn254_blackbox_solver/Cargo.toml index 1ad5103d2cb..448642e1a9e 100644 --- a/acvm-repo/bn254_blackbox_solver/Cargo.toml +++ b/acvm-repo/bn254_blackbox_solver/Cargo.toml @@ -2,7 +2,7 @@ name = "bn254_blackbox_solver" description = "Solvers for black box functions which are specific for the bn254 curve" # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true @@ -16,8 +16,9 @@ repository.workspace = true acir.workspace = true acvm_blackbox_solver.workspace = true thiserror.workspace = true -num-traits.workspace = true cfg-if = "1.0.0" +hex.workspace = true +lazy_static = "1.4" # BN254 fixed base scalar multiplication solver grumpkin = { version = "0.1.0", package = "noir_grumpkin", features = ["std"] } @@ -38,6 +39,18 @@ js-sys.workspace = true getrandom.workspace = true wasmer = "4.2.6" +[dev-dependencies] +criterion = "0.5.0" +pprof = { version = "0.12", features = [ + "flamegraph", + "frame-pointer", + "criterion", +] } + +[[bench]] +name = "criterion" +harness = false + [features] default = ["bn254"] bn254 = ["acir/bn254"] diff --git a/acvm-repo/bn254_blackbox_solver/benches/criterion.rs b/acvm-repo/bn254_blackbox_solver/benches/criterion.rs new file mode 100644 index 00000000000..eb529ed2c11 --- /dev/null +++ b/acvm-repo/bn254_blackbox_solver/benches/criterion.rs @@ -0,0 +1,21 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use std::{hint::black_box, time::Duration}; + +use acir::FieldElement; +use bn254_blackbox_solver::poseidon2_permutation; + +use pprof::criterion::{Output, PProfProfiler}; + +fn bench_poseidon2(c: &mut Criterion) { + let inputs = [FieldElement::zero(); 4]; + + c.bench_function("poseidon2", |b| b.iter(|| poseidon2_permutation(black_box(&inputs), 4))); +} + +criterion_group!( + name = benches; + config = Criterion::default().sample_size(40).measurement_time(Duration::from_secs(20)).with_profiler(PProfProfiler::new(100, Output::Flamegraph(None))); + targets = bench_poseidon2 +); + +criterion_main!(benches); diff --git a/acvm-repo/bn254_blackbox_solver/src/fixed_base_scalar_mul.rs b/acvm-repo/bn254_blackbox_solver/src/fixed_base_scalar_mul.rs index 5e68c7d4030..cd91c290f49 100644 --- a/acvm-repo/bn254_blackbox_solver/src/fixed_base_scalar_mul.rs +++ b/acvm-repo/bn254_blackbox_solver/src/fixed_base_scalar_mul.rs @@ -47,17 +47,29 @@ pub fn fixed_base_scalar_mul( } } +fn create_point(x: FieldElement, y: FieldElement) -> Result { + let point = grumpkin::SWAffine::new_unchecked(x.into_repr(), y.into_repr()); + if !point.is_on_curve() { + return Err(format!("Point ({}, {}) is not on curve", x.to_hex(), y.to_hex())); + }; + if !point.is_in_correct_subgroup_assuming_on_curve() { + return Err(format!("Point ({}, {}) is not in correct subgroup", x.to_hex(), y.to_hex())); + }; 
+ Ok(point) +} + pub fn embedded_curve_add( input1_x: FieldElement, input1_y: FieldElement, input2_x: FieldElement, input2_y: FieldElement, ) -> Result<(FieldElement, FieldElement), BlackBoxResolutionError> { - let mut point1 = grumpkin::SWAffine::new(input1_x.into_repr(), input1_y.into_repr()); - let point2 = grumpkin::SWAffine::new(input2_x.into_repr(), input2_y.into_repr()); - let res = point1 + point2; - point1 = res.into(); - if let Some((res_x, res_y)) = point1.xy() { + let point1 = create_point(input1_x, input1_y) + .map_err(|e| BlackBoxResolutionError::Failed(BlackBoxFunc::EmbeddedCurveAdd, e))?; + let point2 = create_point(input2_x, input2_y) + .map_err(|e| BlackBoxResolutionError::Failed(BlackBoxFunc::EmbeddedCurveAdd, e))?; + let res = grumpkin::SWAffine::from(point1 + point2); + if let Some((res_x, res_y)) = res.xy() { Ok((FieldElement::from_repr(*res_x), FieldElement::from_repr(*res_y))) } else { Err(BlackBoxResolutionError::Failed( @@ -72,6 +84,7 @@ mod grumpkin_fixed_base_scalar_mul { use ark_ff::BigInteger; use super::*; + #[test] fn smoke_test() -> Result<(), BlackBoxResolutionError> { let input = FieldElement::one(); @@ -84,6 +97,7 @@ mod grumpkin_fixed_base_scalar_mul { assert_eq!(y, res.1.to_hex()); Ok(()) } + #[test] fn low_high_smoke_test() -> Result<(), BlackBoxResolutionError> { let low = FieldElement::one(); @@ -103,9 +117,9 @@ mod grumpkin_fixed_base_scalar_mul { let max_limb = FieldElement::from(u128::MAX); let invalid_limb = max_limb + FieldElement::one(); - let expected_error = Err(BlackBoxResolutionError::Failed( + let expected_error = Err(BlackBoxResolutionError::Failed( BlackBoxFunc::FixedBaseScalarMul, - "Limb 0000000000000000000000000000000100000000000000000000000000000000 is not less than 2^128".into() + "Limb 0000000000000000000000000000000100000000000000000000000000000000 is not less than 2^128".into(), )); let res = fixed_base_scalar_mul(&invalid_limb, &FieldElement::zero()); @@ -128,7 +142,23 @@ mod grumpkin_fixed_base_scalar_mul { res, Err(BlackBoxResolutionError::Failed( BlackBoxFunc::FixedBaseScalarMul, - "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 is not a valid grumpkin scalar".into() + "30644e72e131a029b85045b68181585d97816a916871ca8d3c208c16d87cfd47 is not a valid grumpkin scalar".into(), + )) + ); + } + + #[test] + fn rejects_addition_of_points_not_in_curve() { + let x = FieldElement::from(1u128); + let y = FieldElement::from(2u128); + + let res = embedded_curve_add(x, y, x, y); + + assert_eq!( + res, + Err(BlackBoxResolutionError::Failed( + BlackBoxFunc::EmbeddedCurveAdd, + "Point (0000000000000000000000000000000000000000000000000000000000000001, 0000000000000000000000000000000000000000000000000000000000000002) is not on curve".into(), )) ); } diff --git a/acvm-repo/bn254_blackbox_solver/src/lib.rs b/acvm-repo/bn254_blackbox_solver/src/lib.rs index 231594170e3..25b10252a78 100644 --- a/acvm-repo/bn254_blackbox_solver/src/lib.rs +++ b/acvm-repo/bn254_blackbox_solver/src/lib.rs @@ -10,7 +10,7 @@ mod poseidon2; mod wasm; pub use fixed_base_scalar_mul::{embedded_curve_add, fixed_base_scalar_mul}; -use poseidon2::Poseidon2; +pub use poseidon2::poseidon2_permutation; use wasm::Barretenberg; use self::wasm::{Pedersen, SchnorrSig}; @@ -52,7 +52,7 @@ impl BlackBoxFunctionSolver for Bn254BlackBoxSolver { &self, public_key_x: &FieldElement, public_key_y: &FieldElement, - signature: &[u8], + signature: &[u8; 64], message: &[u8], ) -> Result { let pub_key_bytes: Vec = @@ -112,7 +112,6 @@ impl BlackBoxFunctionSolver for 
Bn254BlackBoxSolver { inputs: &[FieldElement], len: u32, ) -> Result, BlackBoxResolutionError> { - let poseidon = Poseidon2::new(); - poseidon.permutation(inputs, len) + poseidon2_permutation(inputs, len) } } diff --git a/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs b/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs index e0ed5bcd053..65058e15099 100644 --- a/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs +++ b/acvm-repo/bn254_blackbox_solver/src/poseidon2.rs @@ -1,9 +1,20 @@ use acir::FieldElement; use acvm_blackbox_solver::BlackBoxResolutionError; -use num_bigint::BigUint; -use num_traits::Num; +use lazy_static::lazy_static; -pub(crate) struct Poseidon2 { +pub fn poseidon2_permutation( + inputs: &[FieldElement], + len: u32, +) -> Result, BlackBoxResolutionError> { + let poseidon = Poseidon2::new(); + poseidon.permutation(inputs, len) +} + +pub(crate) struct Poseidon2<'a> { + config: &'a Poseidon2Config, +} + +struct Poseidon2Config { t: u32, rounds_f: u32, rounds_p: u32, @@ -11,929 +22,415 @@ pub(crate) struct Poseidon2 { round_constant: [[FieldElement; 4]; 64], } -impl Poseidon2 { +fn field_from_hex(hex: &str) -> FieldElement { + FieldElement::from_be_bytes_reduce(&hex::decode(hex).expect("Should be passed only valid hex")) +} + +lazy_static! { + static ref INTERNAL_MATRIX_DIAGONAL: [FieldElement; 4] = [ + field_from_hex("10dc6e9c006ea38b04b1e03b4bd9490c0d03f98929ca1d7fb56821fd19d3b6e7"), + field_from_hex("0c28145b6a44df3e0149b3d0a30b3bb599df9756d4dd9b84a86b38cfb45a740b"), + field_from_hex("00544b8338791518b2c7645a50392798b21f75bb60e3596170067d00141cac15"), + field_from_hex("222c01175718386f2e2e82eb122789e352e105a3b8fa852613bc534433ee428b"), + ]; + static ref ROUND_CONSTANT: [[FieldElement; 4]; 64] = [ + [ + field_from_hex("19b849f69450b06848da1d39bd5e4a4302bb86744edc26238b0878e269ed23e5"), + field_from_hex("265ddfe127dd51bd7239347b758f0a1320eb2cc7450acc1dad47f80c8dcf34d6"), + field_from_hex("199750ec472f1809e0f66a545e1e51624108ac845015c2aa3dfc36bab497d8aa"), + field_from_hex("157ff3fe65ac7208110f06a5f74302b14d743ea25067f0ffd032f787c7f1cdf8"), + ], + [ + field_from_hex("2e49c43c4569dd9c5fd35ac45fca33f10b15c590692f8beefe18f4896ac94902"), + field_from_hex("0e35fb89981890520d4aef2b6d6506c3cb2f0b6973c24fa82731345ffa2d1f1e"), + field_from_hex("251ad47cb15c4f1105f109ae5e944f1ba9d9e7806d667ffec6fe723002e0b996"), + field_from_hex("13da07dc64d428369873e97160234641f8beb56fdd05e5f3563fa39d9c22df4e"), + ], + [ + field_from_hex("0c009b84e650e6d23dc00c7dccef7483a553939689d350cd46e7b89055fd4738"), + field_from_hex("011f16b1c63a854f01992e3956f42d8b04eb650c6d535eb0203dec74befdca06"), + field_from_hex("0ed69e5e383a688f209d9a561daa79612f3f78d0467ad45485df07093f367549"), + field_from_hex("04dba94a7b0ce9e221acad41472b6bbe3aec507f5eb3d33f463672264c9f789b"), + ], + [ + field_from_hex("0a3f2637d840f3a16eb094271c9d237b6036757d4bb50bf7ce732ff1d4fa28e8"), + field_from_hex("259a666f129eea198f8a1c502fdb38fa39b1f075569564b6e54a485d1182323f"), + field_from_hex("28bf7459c9b2f4c6d8e7d06a4ee3a47f7745d4271038e5157a32fdf7ede0d6a1"), + field_from_hex("0a1ca941f057037526ea200f489be8d4c37c85bbcce6a2aeec91bd6941432447"), + ], + [ + field_from_hex("0c6f8f958be0e93053d7fd4fc54512855535ed1539f051dcb43a26fd926361cf"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("123106a93cd17578d426e8128ac9d90aa9e8a00708e296e084dd57e69caaf811"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("26e1ba52ad9285d97dd3ab52f8e840085e8fa83ff1e8f1877b074867cd2dee75"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1cb55cad7bd133de18a64c5c47b9c97cbe4d8b7bf9e095864471537e6a4ae2c5"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1dcd73e46acd8f8e0e2c7ce04bde7f6d2a53043d5060a41c7143f08e6e9055d0"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("011003e32f6d9c66f5852f05474a4def0cda294a0eb4e9b9b12b9bb4512e5574"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2b1e809ac1d10ab29ad5f20d03a57dfebadfe5903f58bafed7c508dd2287ae8c"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2539de1785b735999fb4dac35ee17ed0ef995d05ab2fc5faeaa69ae87bcec0a5"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0c246c5a2ef8ee0126497f222b3e0a0ef4e1c3d41c86d46e43982cb11d77951d"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("192089c4974f68e95408148f7c0632edbb09e6a6ad1a1c2f3f0305f5d03b527b"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1eae0ad8ab68b2f06a0ee36eeb0d0c058529097d91096b756d8fdc2fb5a60d85"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("179190e5d0e22179e46f8282872abc88db6e2fdc0dee99e69768bd98c5d06bfb"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("29bb9e2c9076732576e9a81c7ac4b83214528f7db00f31bf6cafe794a9b3cd1c"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("225d394e42207599403efd0c2464a90d52652645882aac35b10e590e6e691e08"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("064760623c25c8cf753d238055b444532be13557451c087de09efd454b23fd59"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("10ba3a0e01df92e87f301c4b716d8a394d67f4bf42a75c10922910a78f6b5b87"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0e070bf53f8451b24f9c6e96b0c2a801cb511bc0c242eb9d361b77693f21471c"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1b94cd61b051b04dd39755ff93821a73ccd6cb11d2491d8aa7f921014de252fb"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1d7cb39bafb8c744e148787a2e70230f9d4e917d5713bb050487b5aa7d74070b"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2ec93189bd1ab4f69117d0fe980c80ff8785c2961829f701bb74ac1f303b17db"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2db366bfdd36d277a692bb825b86275beac404a19ae07a9082ea46bd83517926"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("062100eb485db06269655cf186a68532985275428450359adc99cec6960711b8"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0761d33c66614aaa570e7f1e8244ca1120243f92fa59e4f900c567bf41f5a59b"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("20fc411a114d13992c2705aa034e3f315d78608a0f7de4ccf7a72e494855ad0d"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("25b5c004a4bdfcb5add9ec4e9ab219ba102c67e8b3effb5fc3a30f317250bc5a"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("23b1822d278ed632a494e58f6df6f5ed038b186d8474155ad87e7dff62b37f4b"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("22734b4c5c3f9493606c4ba9012499bf0f14d13bfcfcccaa16102a29cc2f69e0"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("26c0c8fe09eb30b7e27a74dc33492347e5bdff409aa3610254413d3fad795ce5"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("070dd0ccb6bd7bbae88eac03fa1fbb26196be3083a809829bbd626df348ccad9"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("12b6595bdb329b6fb043ba78bb28c3bec2c0a6de46d8c5ad6067c4ebfd4250da"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("248d97d7f76283d63bec30e7a5876c11c06fca9b275c671c5e33d95bb7e8d729"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("1a306d439d463b0816fc6fd64cc939318b45eb759ddde4aa106d15d9bd9baaaa"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("28a8f8372e3c38daced7c00421cb4621f4f1b54ddc27821b0d62d3d6ec7c56cf"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0094975717f9a8a8bb35152f24d43294071ce320c829f388bc852183e1e2ce7e"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("04d5ee4c3aa78f7d80fde60d716480d3593f74d4f653ae83f4103246db2e8d65"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2a6cf5e9aa03d4336349ad6fb8ed2269c7bef54b8822cc76d08495c12efde187"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2304d31eaab960ba9274da43e19ddeb7f792180808fd6e43baae48d7efcba3f3"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("03fd9ac865a4b2a6d5e7009785817249bff08a7e0726fcb4e1c11d39d199f0b0"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("00b7258ded52bbda2248404d55ee5044798afc3a209193073f7954d4d63b0b64"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("159f81ada0771799ec38fca2d4bf65ebb13d3a74f3298db36272c5ca65e92d9a"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1ef90e67437fbc8550237a75bc28e3bb9000130ea25f0c5471e144cf4264431f"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("1e65f838515e5ff0196b49aa41a2d2568df739bc176b08ec95a79ed82932e30d"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2b1b045def3a166cec6ce768d079ba74b18c844e570e1f826575c1068c94c33f"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0832e5753ceb0ff6402543b1109229c165dc2d73bef715e3f1c6e07c168bb173"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("02f614e9cedfb3dc6b762ae0a37d41bab1b841c2e8b6451bc5a8e3c390b6ad16"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0e2427d38bd46a60dd640b8e362cad967370ebb777bedff40f6a0be27e7ed705"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0493630b7c670b6deb7c84d414e7ce79049f0ec098c3c7c50768bbe29214a53a"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("22ead100e8e482674decdab17066c5a26bb1515355d5461a3dc06cc85327cea9"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("25b3e56e655b42cdaae2626ed2554d48583f1ae35626d04de5084e0b6d2a6f16"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1e32752ada8836ef5837a6cde8ff13dbb599c336349e4c584b4fdc0a0cf6f9d0"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("2fa2a871c15a387cc50f68f6f3c3455b23c00995f05078f672a9864074d412e5"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + 
field_from_hex("2f569b8a9a4424c9278e1db7311e889f54ccbf10661bab7fcd18e7c7a7d83505"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("044cb455110a8fdd531ade530234c518a7df93f7332ffd2144165374b246b43d"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("227808de93906d5d420246157f2e42b191fe8c90adfe118178ddc723a5319025"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("02fcca2934e046bc623adead873579865d03781ae090ad4a8579d2e7a6800355"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("0ef915f0ac120b876abccceb344a1d36bad3f3c5ab91a8ddcbec2e060d8befac"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + field_from_hex("0000000000000000000000000000000000000000000000000000000000000000"), + ], + [ + field_from_hex("1797130f4b7a3e1777eb757bc6f287f6ab0fb85f6be63b09f3b16ef2b1405d38"), + field_from_hex("0a76225dc04170ae3306c85abab59e608c7f497c20156d4d36c668555decc6e5"), + field_from_hex("1fffb9ec1992d66ba1e77a7b93209af6f8fa76d48acb664796174b5326a31a5c"), + field_from_hex("25721c4fc15a3f2853b57c338fa538d85f8fbba6c6b9c6090611889b797b9c5f"), + ], + [ + field_from_hex("0c817fd42d5f7a41215e3d07ba197216adb4c3790705da95eb63b982bfcaf75a"), + field_from_hex("13abe3f5239915d39f7e13c2c24970b6df8cf86ce00a22002bc15866e52b5a96"), + field_from_hex("2106feea546224ea12ef7f39987a46c85c1bc3dc29bdbd7a92cd60acb4d391ce"), + field_from_hex("21ca859468a746b6aaa79474a37dab49f1ca5a28c748bc7157e1b3345bb0f959"), + ], + [ + field_from_hex("05ccd6255c1e6f0c5cf1f0df934194c62911d14d0321662a8f1a48999e34185b"), + field_from_hex("0f0e34a64b70a626e464d846674c4c8816c4fb267fe44fe6ea28678cb09490a4"), + field_from_hex("0558531a4e25470c6157794ca36d0e9647dbfcfe350d64838f5b1a8a2de0d4bf"), + field_from_hex("09d3dca9173ed2faceea125157683d18924cadad3f655a60b72f5864961f1455"), + ], + [ + field_from_hex("0328cbd54e8c0913493f866ed03d218bf23f92d68aaec48617d4c722e5bd4335"), + field_from_hex("2bf07216e2aff0a223a487b1a7094e07e79e7bcc9798c648ee3347dd5329d34b"), + field_from_hex("1daf345a58006b736499c583cb76c316d6f78ed6a6dffc82111e11a63fe412df"), + field_from_hex("176563472456aaa746b694c60e1823611ef39039b2edc7ff391e6f2293d2c404"), + ], + ]; + static ref POSEIDON2_CONFIG: Poseidon2Config = Poseidon2Config { + t: 4, + rounds_f: 8, + rounds_p: 56, + internal_matrix_diagonal: *INTERNAL_MATRIX_DIAGONAL, + round_constant: *ROUND_CONSTANT, + }; +} + +impl<'a> Poseidon2<'a> { pub(crate) fn new() -> Self { - Poseidon2 { - t: 4, - rounds_f: 8, - rounds_p: 56, - internal_matrix_diagonal: [ - Poseidon2::field_from_hex( - 
"0x10dc6e9c006ea38b04b1e03b4bd9490c0d03f98929ca1d7fb56821fd19d3b6e7", - ), - Poseidon2::field_from_hex( - "0x0c28145b6a44df3e0149b3d0a30b3bb599df9756d4dd9b84a86b38cfb45a740b", - ), - Poseidon2::field_from_hex( - "0x00544b8338791518b2c7645a50392798b21f75bb60e3596170067d00141cac15", - ), - Poseidon2::field_from_hex( - "0x222c01175718386f2e2e82eb122789e352e105a3b8fa852613bc534433ee428b", - ), - ], - round_constant: [ - [ - Poseidon2::field_from_hex( - "0x19b849f69450b06848da1d39bd5e4a4302bb86744edc26238b0878e269ed23e5", - ), - Poseidon2::field_from_hex( - "0x265ddfe127dd51bd7239347b758f0a1320eb2cc7450acc1dad47f80c8dcf34d6", - ), - Poseidon2::field_from_hex( - "0x199750ec472f1809e0f66a545e1e51624108ac845015c2aa3dfc36bab497d8aa", - ), - Poseidon2::field_from_hex( - "0x157ff3fe65ac7208110f06a5f74302b14d743ea25067f0ffd032f787c7f1cdf8", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2e49c43c4569dd9c5fd35ac45fca33f10b15c590692f8beefe18f4896ac94902", - ), - Poseidon2::field_from_hex( - "0x0e35fb89981890520d4aef2b6d6506c3cb2f0b6973c24fa82731345ffa2d1f1e", - ), - Poseidon2::field_from_hex( - "0x251ad47cb15c4f1105f109ae5e944f1ba9d9e7806d667ffec6fe723002e0b996", - ), - Poseidon2::field_from_hex( - "0x13da07dc64d428369873e97160234641f8beb56fdd05e5f3563fa39d9c22df4e", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0c009b84e650e6d23dc00c7dccef7483a553939689d350cd46e7b89055fd4738", - ), - Poseidon2::field_from_hex( - "0x011f16b1c63a854f01992e3956f42d8b04eb650c6d535eb0203dec74befdca06", - ), - Poseidon2::field_from_hex( - "0x0ed69e5e383a688f209d9a561daa79612f3f78d0467ad45485df07093f367549", - ), - Poseidon2::field_from_hex( - "0x04dba94a7b0ce9e221acad41472b6bbe3aec507f5eb3d33f463672264c9f789b", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0a3f2637d840f3a16eb094271c9d237b6036757d4bb50bf7ce732ff1d4fa28e8", - ), - Poseidon2::field_from_hex( - "0x259a666f129eea198f8a1c502fdb38fa39b1f075569564b6e54a485d1182323f", - ), - Poseidon2::field_from_hex( - "0x28bf7459c9b2f4c6d8e7d06a4ee3a47f7745d4271038e5157a32fdf7ede0d6a1", - ), - Poseidon2::field_from_hex( - "0x0a1ca941f057037526ea200f489be8d4c37c85bbcce6a2aeec91bd6941432447", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0c6f8f958be0e93053d7fd4fc54512855535ed1539f051dcb43a26fd926361cf", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x123106a93cd17578d426e8128ac9d90aa9e8a00708e296e084dd57e69caaf811", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x26e1ba52ad9285d97dd3ab52f8e840085e8fa83ff1e8f1877b074867cd2dee75", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1cb55cad7bd133de18a64c5c47b9c97cbe4d8b7bf9e095864471537e6a4ae2c5", 
- ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1dcd73e46acd8f8e0e2c7ce04bde7f6d2a53043d5060a41c7143f08e6e9055d0", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x011003e32f6d9c66f5852f05474a4def0cda294a0eb4e9b9b12b9bb4512e5574", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2b1e809ac1d10ab29ad5f20d03a57dfebadfe5903f58bafed7c508dd2287ae8c", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2539de1785b735999fb4dac35ee17ed0ef995d05ab2fc5faeaa69ae87bcec0a5", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0c246c5a2ef8ee0126497f222b3e0a0ef4e1c3d41c86d46e43982cb11d77951d", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x192089c4974f68e95408148f7c0632edbb09e6a6ad1a1c2f3f0305f5d03b527b", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1eae0ad8ab68b2f06a0ee36eeb0d0c058529097d91096b756d8fdc2fb5a60d85", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x179190e5d0e22179e46f8282872abc88db6e2fdc0dee99e69768bd98c5d06bfb", - ), - Poseidon2::field_from_hex( - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x29bb9e2c9076732576e9a81c7ac4b83214528f7db00f31bf6cafe794a9b3cd1c", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x225d394e42207599403efd0c2464a90d52652645882aac35b10e590e6e691e08", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x064760623c25c8cf753d238055b444532be13557451c087de09efd454b23fd59", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x10ba3a0e01df92e87f301c4b716d8a394d67f4bf42a75c10922910a78f6b5b87", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0e070bf53f8451b24f9c6e96b0c2a801cb511bc0c242eb9d361b77693f21471c", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1b94cd61b051b04dd39755ff93821a73ccd6cb11d2491d8aa7f921014de252fb", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1d7cb39bafb8c744e148787a2e70230f9d4e917d5713bb050487b5aa7d74070b", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2ec93189bd1ab4f69117d0fe980c80ff8785c2961829f701bb74ac1f303b17db", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - 
Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2db366bfdd36d277a692bb825b86275beac404a19ae07a9082ea46bd83517926", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x062100eb485db06269655cf186a68532985275428450359adc99cec6960711b8", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0761d33c66614aaa570e7f1e8244ca1120243f92fa59e4f900c567bf41f5a59b", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x20fc411a114d13992c2705aa034e3f315d78608a0f7de4ccf7a72e494855ad0d", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x25b5c004a4bdfcb5add9ec4e9ab219ba102c67e8b3effb5fc3a30f317250bc5a", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x23b1822d278ed632a494e58f6df6f5ed038b186d8474155ad87e7dff62b37f4b", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x22734b4c5c3f9493606c4ba9012499bf0f14d13bfcfcccaa16102a29cc2f69e0", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x26c0c8fe09eb30b7e27a74dc33492347e5bdff409aa3610254413d3fad795ce5", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x070dd0ccb6bd7bbae88eac03fa1fbb26196be3083a809829bbd626df348ccad9", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x12b6595bdb329b6fb043ba78bb28c3bec2c0a6de46d8c5ad6067c4ebfd4250da", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x248d97d7f76283d63bec30e7a5876c11c06fca9b275c671c5e33d95bb7e8d729", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1a306d439d463b0816fc6fd64cc939318b45eb759ddde4aa106d15d9bd9baaaa", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x28a8f8372e3c38daced7c00421cb4621f4f1b54ddc27821b0d62d3d6ec7c56cf", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0094975717f9a8a8bb35152f24d43294071ce320c829f388bc852183e1e2ce7e", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x04d5ee4c3aa78f7d80fde60d716480d3593f74d4f653ae83f4103246db2e8d65", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2a6cf5e9aa03d4336349ad6fb8ed2269c7bef54b8822cc76d08495c12efde187", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - 
Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2304d31eaab960ba9274da43e19ddeb7f792180808fd6e43baae48d7efcba3f3", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x03fd9ac865a4b2a6d5e7009785817249bff08a7e0726fcb4e1c11d39d199f0b0", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x00b7258ded52bbda2248404d55ee5044798afc3a209193073f7954d4d63b0b64", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x159f81ada0771799ec38fca2d4bf65ebb13d3a74f3298db36272c5ca65e92d9a", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1ef90e67437fbc8550237a75bc28e3bb9000130ea25f0c5471e144cf4264431f", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1e65f838515e5ff0196b49aa41a2d2568df739bc176b08ec95a79ed82932e30d", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2b1b045def3a166cec6ce768d079ba74b18c844e570e1f826575c1068c94c33f", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0832e5753ceb0ff6402543b1109229c165dc2d73bef715e3f1c6e07c168bb173", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x02f614e9cedfb3dc6b762ae0a37d41bab1b841c2e8b6451bc5a8e3c390b6ad16", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0e2427d38bd46a60dd640b8e362cad967370ebb777bedff40f6a0be27e7ed705", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0493630b7c670b6deb7c84d414e7ce79049f0ec098c3c7c50768bbe29214a53a", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x22ead100e8e482674decdab17066c5a26bb1515355d5461a3dc06cc85327cea9", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x25b3e56e655b42cdaae2626ed2554d48583f1ae35626d04de5084e0b6d2a6f16", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1e32752ada8836ef5837a6cde8ff13dbb599c336349e4c584b4fdc0a0cf6f9d0", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2fa2a871c15a387cc50f68f6f3c3455b23c00995f05078f672a9864074d412e5", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x2f569b8a9a4424c9278e1db7311e889f54ccbf10661bab7fcd18e7c7a7d83505", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - 
Poseidon2::field_from_hex( - "0x044cb455110a8fdd531ade530234c518a7df93f7332ffd2144165374b246b43d", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x227808de93906d5d420246157f2e42b191fe8c90adfe118178ddc723a5319025", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x02fcca2934e046bc623adead873579865d03781ae090ad4a8579d2e7a6800355", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0ef915f0ac120b876abccceb344a1d36bad3f3c5ab91a8ddcbec2e060d8befac", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - Poseidon2::field_from_hex( - "0x0000000000000000000000000000000000000000000000000000000000000000", - ), - ], - [ - Poseidon2::field_from_hex( - "0x1797130f4b7a3e1777eb757bc6f287f6ab0fb85f6be63b09f3b16ef2b1405d38", - ), - Poseidon2::field_from_hex( - "0x0a76225dc04170ae3306c85abab59e608c7f497c20156d4d36c668555decc6e5", - ), - Poseidon2::field_from_hex( - "0x1fffb9ec1992d66ba1e77a7b93209af6f8fa76d48acb664796174b5326a31a5c", - ), - Poseidon2::field_from_hex( - "0x25721c4fc15a3f2853b57c338fa538d85f8fbba6c6b9c6090611889b797b9c5f", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0c817fd42d5f7a41215e3d07ba197216adb4c3790705da95eb63b982bfcaf75a", - ), - Poseidon2::field_from_hex( - "0x13abe3f5239915d39f7e13c2c24970b6df8cf86ce00a22002bc15866e52b5a96", - ), - Poseidon2::field_from_hex( - "0x2106feea546224ea12ef7f39987a46c85c1bc3dc29bdbd7a92cd60acb4d391ce", - ), - Poseidon2::field_from_hex( - "0x21ca859468a746b6aaa79474a37dab49f1ca5a28c748bc7157e1b3345bb0f959", - ), - ], - [ - Poseidon2::field_from_hex( - "0x05ccd6255c1e6f0c5cf1f0df934194c62911d14d0321662a8f1a48999e34185b", - ), - Poseidon2::field_from_hex( - "0x0f0e34a64b70a626e464d846674c4c8816c4fb267fe44fe6ea28678cb09490a4", - ), - Poseidon2::field_from_hex( - "0x0558531a4e25470c6157794ca36d0e9647dbfcfe350d64838f5b1a8a2de0d4bf", - ), - Poseidon2::field_from_hex( - "0x09d3dca9173ed2faceea125157683d18924cadad3f655a60b72f5864961f1455", - ), - ], - [ - Poseidon2::field_from_hex( - "0x0328cbd54e8c0913493f866ed03d218bf23f92d68aaec48617d4c722e5bd4335", - ), - Poseidon2::field_from_hex( - "0x2bf07216e2aff0a223a487b1a7094e07e79e7bcc9798c648ee3347dd5329d34b", - ), - Poseidon2::field_from_hex( - "0x1daf345a58006b736499c583cb76c316d6f78ed6a6dffc82111e11a63fe412df", - ), - Poseidon2::field_from_hex( - "0x176563472456aaa746b694c60e1823611ef39039b2edc7ff391e6f2293d2c404", - ), - ], - ], - } - } - fn field_from_hex(hex: &str) -> FieldElement { - let bigint = 
BigUint::from_str_radix(hex.strip_prefix("0x").unwrap(), 16).unwrap(); - FieldElement::from_be_bytes_reduce(&bigint.to_bytes_be()) + Poseidon2 { config: &POSEIDON2_CONFIG } } fn single_box(x: FieldElement) -> FieldElement { @@ -948,7 +445,9 @@ impl Poseidon2 { } fn add_round_constants(&self, state: &mut [FieldElement], round: usize) { - for (state_element, constant_element) in state.iter_mut().zip(self.round_constant[round]) { + for (state_element, constant_element) in + state.iter_mut().zip(self.config.round_constant[round]) + { *state_element += constant_element; } } @@ -982,7 +481,7 @@ impl Poseidon2 { sum += *i; } for (index, i) in input.iter_mut().enumerate() { - *i = *i * self.internal_matrix_diagonal[index]; + *i = *i * self.config.internal_matrix_diagonal[index]; *i += sum; } } @@ -1002,10 +501,10 @@ impl Poseidon2 { ), )); } - if len != self.t { + if len != self.config.t { return Err(BlackBoxResolutionError::Failed( acir::BlackBoxFunc::Poseidon2Permutation, - format!("Expected {} values but encountered {}", self.t, len), + format!("Expected {} values but encountered {}", self.config.t, len), )); } // Read witness assignments @@ -1017,22 +516,22 @@ impl Poseidon2 { Self::matrix_multiplication_4x4(&mut state); // First set of external rounds - let rf_first = self.rounds_f / 2; + let rf_first = self.config.rounds_f / 2; for r in 0..rf_first { self.add_round_constants(&mut state, r as usize); Self::s_box(&mut state); Self::matrix_multiplication_4x4(&mut state); } // Internal rounds - let p_end = rf_first + self.rounds_p; + let p_end = rf_first + self.config.rounds_p; for r in rf_first..p_end { - state[0] += self.round_constant[r as usize][0]; + state[0] += self.config.round_constant[r as usize][0]; state[0] = Self::single_box(state[0]); self.internal_m_multiplication(&mut state); } // Remaining external rounds - let num_rounds = self.rounds_f + self.rounds_p; + let num_rounds = self.config.rounds_f + self.config.rounds_p; for i in p_end..num_rounds { self.add_round_constants(&mut state, i as usize); Self::s_box(&mut state); @@ -1041,3 +540,24 @@ impl Poseidon2 { Ok(state.into()) } } + +#[cfg(test)] +mod test { + use acir::FieldElement; + + use super::{field_from_hex, poseidon2_permutation}; + + #[test] + fn smoke_test() { + let inputs = [FieldElement::zero(); 4]; + let result = poseidon2_permutation(&inputs, 4).expect("should successfully permute"); + + let expected_result = [ + field_from_hex("18DFB8DC9B82229CFF974EFEFC8DF78B1CE96D9D844236B496785C698BC6732E"), + field_from_hex("095C230D1D37A246E8D2D5A63B165FE0FADE040D442F61E25F0590E5FB76F839"), + field_from_hex("0BB9545846E1AFA4FA3C97414A60A20FC4949F537A68CCECA34C5CE71E28AA59"), + field_from_hex("18A4F34C9C6F99335FF7638B82AEED9018026618358873C982BBDDE265B2ED6D"), + ]; + assert_eq!(result, expected_result); + } +} diff --git a/acvm-repo/brillig/Cargo.toml b/acvm-repo/brillig/Cargo.toml index d3f082fda86..463f6286d6b 100644 --- a/acvm-repo/brillig/Cargo.toml +++ b/acvm-repo/brillig/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig" description = "Brillig is the bytecode ACIR uses for non-determinism." 
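The Poseidon2 hunks above swap per-instance constants for one lazily initialised configuration that every sponge instance borrows, plus a free field_from_hex helper shared by the constant tables and the new smoke test. As a rough sketch of that pattern (not part of this commit; the field types, array dimensions and lazy_static choice are inferred from the constants and `static ref` syntax shown above):

use acir::FieldElement;
use lazy_static::lazy_static;
use num_bigint::BigUint;
use num_traits::Num;

/// Shared Poseidon2 parameters: state width `t`, external/internal round
/// counts, and constants parsed from hex once at first use.
struct Poseidon2Config {
    t: u32,
    rounds_f: u32,
    rounds_p: u32,
    internal_matrix_diagonal: [FieldElement; 4],
    round_constant: [[FieldElement; 4]; 64], // rounds_f + rounds_p rows of t entries
}

/// Parse a (possibly `0x`-prefixed) hex string into a field element.
fn field_from_hex(hex: &str) -> FieldElement {
    let digits = hex.strip_prefix("0x").unwrap_or(hex);
    let bigint = BigUint::from_str_radix(digits, 16).expect("valid hex constant");
    FieldElement::from_be_bytes_reduce(&bigint.to_bytes_be())
}

lazy_static! {
    static ref POSEIDON2_CONFIG: Poseidon2Config = Poseidon2Config {
        t: 4,
        rounds_f: 8,
        rounds_p: 56,
        // The real tables above hold the BN254 constants; zeroed here for brevity.
        internal_matrix_diagonal: [FieldElement::zero(); 4],
        round_constant: [[FieldElement::zero(); 4]; 64],
    };
}

/// The permutation state machine now only borrows the static config.
struct Poseidon2<'a> {
    config: &'a Poseidon2Config,
}

impl<'a> Poseidon2<'a> {
    fn new() -> Self {
        Poseidon2 { config: &POSEIDON2_CONFIG }
    }
}

The payoff is that constructing a Poseidon2 instance no longer re-parses the 64x4 constant table; the round functions read everything through `self.config`, as the permutation hunks above show.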
# x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/brillig/src/opcodes.rs b/acvm-repo/brillig/src/opcodes.rs index d1345351986..468fd88db45 100644 --- a/acvm-repo/brillig/src/opcodes.rs +++ b/acvm-repo/brillig/src/opcodes.rs @@ -177,8 +177,11 @@ pub enum BrilligOpcode { source: MemoryAddress, }, BlackBox(BlackBoxOp), - /// Used to denote execution failure - Trap, + /// Used to denote execution failure, returning data after the offset + Trap { + revert_data_offset: usize, + revert_data_size: usize, + }, /// Stop execution, returning data after the offset Stop { return_data_offset: usize, diff --git a/acvm-repo/brillig_vm/Cargo.toml b/acvm-repo/brillig_vm/Cargo.toml index 95675469479..67e16c21d8b 100644 --- a/acvm-repo/brillig_vm/Cargo.toml +++ b/acvm-repo/brillig_vm/Cargo.toml @@ -2,7 +2,7 @@ name = "brillig_vm" description = "The virtual machine that processes Brillig bytecode, used to introduce non-determinism to the ACVM" # x-release-please-start-version -version = "0.42.0" +version = "0.43.0" # x-release-please-end authors.workspace = true edition.workspace = true diff --git a/acvm-repo/brillig_vm/src/arithmetic.rs b/acvm-repo/brillig_vm/src/arithmetic.rs index 3d77982ffb1..2107d10c093 100644 --- a/acvm-repo/brillig_vm/src/arithmetic.rs +++ b/acvm-repo/brillig_vm/src/arithmetic.rs @@ -1,9 +1,10 @@ use acir::brillig::{BinaryFieldOp, BinaryIntOp}; use acir::FieldElement; use num_bigint::BigUint; -use num_traits::{One, ToPrimitive, Zero}; +use num_traits::ToPrimitive; +use num_traits::{One, Zero}; -use crate::memory::MemoryValue; +use crate::memory::{MemoryTypeError, MemoryValue}; #[derive(Debug, thiserror::Error)] pub(crate) enum BrilligArithmeticError { @@ -11,6 +12,8 @@ pub(crate) enum BrilligArithmeticError { MismatchedLhsBitSize { lhs_bit_size: u32, op_bit_size: u32 }, #[error("Bit size for rhs {rhs_bit_size} does not match op bit size {op_bit_size}")] MismatchedRhsBitSize { rhs_bit_size: u32, op_bit_size: u32 }, + #[error("Integer operation BinaryIntOp::{op:?} is not supported on FieldElement")] + IntegerOperationOnField { op: BinaryIntOp }, #[error("Shift with bit size {op_bit_size} is invalid")] InvalidShift { op_bit_size: u32 }, } @@ -21,21 +24,19 @@ pub(crate) fn evaluate_binary_field_op( lhs: MemoryValue, rhs: MemoryValue, ) -> Result { - if lhs.bit_size != FieldElement::max_num_bits() { + let MemoryValue::Field(a) = lhs else { return Err(BrilligArithmeticError::MismatchedLhsBitSize { - lhs_bit_size: lhs.bit_size, + lhs_bit_size: lhs.bit_size(), op_bit_size: FieldElement::max_num_bits(), }); - } - if rhs.bit_size != FieldElement::max_num_bits() { - return Err(BrilligArithmeticError::MismatchedRhsBitSize { - rhs_bit_size: rhs.bit_size, + }; + let MemoryValue::Field(b) = rhs else { + return Err(BrilligArithmeticError::MismatchedLhsBitSize { + lhs_bit_size: rhs.bit_size(), op_bit_size: FieldElement::max_num_bits(), }); - } + }; - let a = lhs.value; - let b = rhs.value; Ok(match op { // Perform addition, subtraction, multiplication, and division based on the BinaryOp variant. 
BinaryFieldOp::Add => (a + b).into(), @@ -62,21 +63,26 @@ pub(crate) fn evaluate_binary_int_op( rhs: MemoryValue, bit_size: u32, ) -> Result { - if lhs.bit_size != bit_size { - return Err(BrilligArithmeticError::MismatchedLhsBitSize { - lhs_bit_size: lhs.bit_size, - op_bit_size: bit_size, - }); - } - if rhs.bit_size != bit_size { - return Err(BrilligArithmeticError::MismatchedRhsBitSize { - rhs_bit_size: rhs.bit_size, - op_bit_size: bit_size, - }); - } + let lhs = lhs.expect_integer_with_bit_size(bit_size).map_err(|err| match err { + MemoryTypeError::MismatchedBitSize { value_bit_size, expected_bit_size } => { + BrilligArithmeticError::MismatchedLhsBitSize { + lhs_bit_size: value_bit_size, + op_bit_size: expected_bit_size, + } + } + })?; + let rhs = rhs.expect_integer_with_bit_size(bit_size).map_err(|err| match err { + MemoryTypeError::MismatchedBitSize { value_bit_size, expected_bit_size } => { + BrilligArithmeticError::MismatchedRhsBitSize { + rhs_bit_size: value_bit_size, + op_bit_size: expected_bit_size, + } + } + })?; - let lhs = BigUint::from_bytes_be(&lhs.value.to_be_bytes()); - let rhs = BigUint::from_bytes_be(&rhs.value.to_be_bytes()); + if bit_size == FieldElement::max_num_bits() { + return Err(BrilligArithmeticError::IntegerOperationOnField { op: *op }); + } let bit_modulo = &(BigUint::one() << bit_size); let result = match op { @@ -136,13 +142,11 @@ pub(crate) fn evaluate_binary_int_op( } }; - let result_as_field = FieldElement::from_be_bytes_reduce(&result.to_bytes_be()); - Ok(match op { BinaryIntOp::Equals | BinaryIntOp::LessThan | BinaryIntOp::LessThanEquals => { - MemoryValue::new(result_as_field, 1) + MemoryValue::new_integer(result, 1) } - _ => MemoryValue::new(result_as_field, bit_size), + _ => MemoryValue::new_integer(result, bit_size), }) } @@ -159,13 +163,13 @@ mod tests { fn evaluate_u128(op: &BinaryIntOp, a: u128, b: u128, bit_size: u32) -> u128 { let result_value = evaluate_binary_int_op( op, - MemoryValue::new(a.into(), bit_size), - MemoryValue::new(b.into(), bit_size), + MemoryValue::new_integer(a.into(), bit_size), + MemoryValue::new_integer(b.into(), bit_size), bit_size, ) .unwrap(); // Convert back to u128 - result_value.value.to_u128() + result_value.to_field().to_u128() } fn to_negative(a: u128, bit_size: u32) -> u128 { diff --git a/acvm-repo/brillig_vm/src/black_box.rs b/acvm-repo/brillig_vm/src/black_box.rs index bd33b5ee8fc..19407da52db 100644 --- a/acvm-repo/brillig_vm/src/black_box.rs +++ b/acvm-repo/brillig_vm/src/black_box.rs @@ -1,5 +1,6 @@ use acir::brillig::{BlackBoxOp, HeapArray, HeapVector}; use acir::{BlackBoxFunc, FieldElement}; +use acvm_blackbox_solver::BigIntSolver; use acvm_blackbox_solver::{ blake2s, blake3, ecdsa_secp256k1_verify, ecdsa_secp256r1_verify, keccak256, keccakf1600, sha256, sha256compression, BlackBoxFunctionSolver, BlackBoxResolutionError, @@ -20,7 +21,7 @@ fn read_heap_array<'a>(memory: &'a Memory, array: &HeapArray) -> &'a [MemoryValu /// Extracts the last byte of every value fn to_u8_vec(inputs: &[MemoryValue]) -> Vec { let mut result = Vec::with_capacity(inputs.len()); - for &input in inputs { + for input in inputs { result.push(input.try_into().unwrap()); } result @@ -34,6 +35,7 @@ pub(crate) fn evaluate_black_box( op: &BlackBoxOp, solver: &Solver, memory: &mut Memory, + bigint_solver: &mut BigIntSolver, ) -> Result<(), BlackBoxResolutionError> { match op { BlackBoxOp::Sha256 { message, output } => { @@ -63,7 +65,7 @@ pub(crate) fn evaluate_black_box( BlackBoxOp::Keccakf1600 { message, output } => { let state_vec: Vec = 
read_heap_vector(memory, message) .iter() - .map(|&memory_value| memory_value.try_into().unwrap()) + .map(|memory_value| memory_value.try_into().unwrap()) .collect(); let state: [u64; 25] = state_vec.try_into().unwrap(); @@ -127,7 +129,8 @@ pub(crate) fn evaluate_black_box( let public_key_x = memory.read(*public_key_x).try_into().unwrap(); let public_key_y = memory.read(*public_key_y).try_into().unwrap(); let message: Vec = to_u8_vec(read_heap_vector(memory, message)); - let signature: Vec = to_u8_vec(read_heap_vector(memory, signature)); + let signature: [u8; 64] = + to_u8_vec(read_heap_vector(memory, signature)).try_into().unwrap(); let verified = solver.schnorr_verify(&public_key_x, &public_key_y, &signature, &message)?; memory.write(*result, verified.into()); @@ -151,7 +154,7 @@ pub(crate) fn evaluate_black_box( } BlackBoxOp::PedersenCommitment { inputs, domain_separator, output } => { let inputs: Vec = - read_heap_vector(memory, inputs).iter().map(|&x| x.try_into().unwrap()).collect(); + read_heap_vector(memory, inputs).iter().map(|x| x.try_into().unwrap()).collect(); let domain_separator: u32 = memory.read(*domain_separator).try_into().map_err(|_| { BlackBoxResolutionError::Failed( @@ -165,7 +168,7 @@ pub(crate) fn evaluate_black_box( } BlackBoxOp::PedersenHash { inputs, domain_separator, output } => { let inputs: Vec = - read_heap_vector(memory, inputs).iter().map(|&x| x.try_into().unwrap()).collect(); + read_heap_vector(memory, inputs).iter().map(|x| x.try_into().unwrap()).collect(); let domain_separator: u32 = memory.read(*domain_separator).try_into().map_err(|_| { BlackBoxResolutionError::Failed( @@ -177,15 +180,60 @@ pub(crate) fn evaluate_black_box( memory.write(*output, hash.into()); Ok(()) } - BlackBoxOp::BigIntAdd { .. } => todo!(), - BlackBoxOp::BigIntSub { .. } => todo!(), - BlackBoxOp::BigIntMul { .. } => todo!(), - BlackBoxOp::BigIntDiv { .. } => todo!(), - BlackBoxOp::BigIntFromLeBytes { .. } => todo!(), - BlackBoxOp::BigIntToLeBytes { .. 
} => todo!(), + BlackBoxOp::BigIntAdd { lhs, rhs, output } => { + let lhs = memory.read(*lhs).try_into().unwrap(); + let rhs = memory.read(*rhs).try_into().unwrap(); + let output = memory.read(*output).try_into().unwrap(); + bigint_solver.bigint_op(lhs, rhs, output, BlackBoxFunc::BigIntAdd)?; + Ok(()) + } + BlackBoxOp::BigIntSub { lhs, rhs, output } => { + let lhs = memory.read(*lhs).try_into().unwrap(); + let rhs = memory.read(*rhs).try_into().unwrap(); + let output = memory.read(*output).try_into().unwrap(); + bigint_solver.bigint_op(lhs, rhs, output, BlackBoxFunc::BigIntSub)?; + Ok(()) + } + BlackBoxOp::BigIntMul { lhs, rhs, output } => { + let lhs = memory.read(*lhs).try_into().unwrap(); + let rhs = memory.read(*rhs).try_into().unwrap(); + let output = memory.read(*output).try_into().unwrap(); + bigint_solver.bigint_op(lhs, rhs, output, BlackBoxFunc::BigIntMul)?; + Ok(()) + } + BlackBoxOp::BigIntDiv { lhs, rhs, output } => { + let lhs = memory.read(*lhs).try_into().unwrap(); + let rhs = memory.read(*rhs).try_into().unwrap(); + let output = memory.read(*output).try_into().unwrap(); + bigint_solver.bigint_op(lhs, rhs, output, BlackBoxFunc::BigIntDiv)?; + Ok(()) + } + BlackBoxOp::BigIntFromLeBytes { inputs, modulus, output } => { + let input = read_heap_vector(memory, inputs); + let input: Vec = input.iter().map(|x| x.try_into().unwrap()).collect(); + let modulus = read_heap_vector(memory, modulus); + let modulus: Vec = modulus.iter().map(|x| x.try_into().unwrap()).collect(); + let output = memory.read(*output).try_into().unwrap(); + bigint_solver.bigint_from_bytes(&input, &modulus, output)?; + Ok(()) + } + BlackBoxOp::BigIntToLeBytes { input, output } => { + let input: u32 = memory.read(*input).try_into().unwrap(); + let bytes = bigint_solver.bigint_to_bytes(input)?; + let mut values = Vec::new(); + for i in 0..32 { + if i < bytes.len() { + values.push(bytes[i].into()); + } else { + values.push(0_u8.into()); + } + } + memory.write_slice(memory.read_ref(output.pointer), &values); + Ok(()) + } BlackBoxOp::Poseidon2Permutation { message, output, len } => { let input = read_heap_vector(memory, message); - let input: Vec = input.iter().map(|&x| x.try_into().unwrap()).collect(); + let input: Vec = input.iter().map(|x| x.try_into().unwrap()).collect(); let len = memory.read(*len).try_into().unwrap(); let result = solver.poseidon2_permutation(&input, len)?; let mut values = Vec::new(); @@ -204,7 +252,7 @@ pub(crate) fn evaluate_black_box( format!("Expected 16 inputs but encountered {}", &inputs.len()), )); } - for (i, &input) in inputs.iter().enumerate() { + for (i, input) in inputs.iter().enumerate() { message[i] = input.try_into().unwrap(); } let mut state = [0; 8]; @@ -215,7 +263,7 @@ pub(crate) fn evaluate_black_box( format!("Expected 8 values but encountered {}", &values.len()), )); } - for (i, &value) in values.iter().enumerate() { + for (i, value) in values.iter().enumerate() { state[i] = value.try_into().unwrap(); } @@ -256,7 +304,7 @@ fn black_box_function_from_op(op: &BlackBoxOp) -> BlackBoxFunc { #[cfg(test)] mod test { use acir::brillig::{BlackBoxOp, MemoryAddress}; - use acvm_blackbox_solver::StubbedBlackBoxSolver; + use acvm_blackbox_solver::{BigIntSolver, StubbedBlackBoxSolver}; use crate::{ black_box::{evaluate_black_box, to_u8_vec, to_value_vec}, @@ -281,7 +329,8 @@ mod test { output: HeapArray { pointer: 2.into(), size: 32 }, }; - evaluate_black_box(&op, &StubbedBlackBoxSolver, &mut memory).unwrap(); + evaluate_black_box(&op, &StubbedBlackBoxSolver, &mut memory, &mut 
BigIntSolver::default()) + .unwrap(); let result = memory.read_slice(MemoryAddress(result_pointer), 32); diff --git a/acvm-repo/brillig_vm/src/lib.rs b/acvm-repo/brillig_vm/src/lib.rs index 65654e24720..75299670f94 100644 --- a/acvm-repo/brillig_vm/src/lib.rs +++ b/acvm-repo/brillig_vm/src/lib.rs @@ -16,7 +16,7 @@ use acir::brillig::{ HeapVector, MemoryAddress, Opcode, ValueOrArray, }; use acir::FieldElement; -use acvm_blackbox_solver::BlackBoxFunctionSolver; +use acvm_blackbox_solver::{BigIntSolver, BlackBoxFunctionSolver}; use arithmetic::{evaluate_binary_field_op, evaluate_binary_int_op, BrilligArithmeticError}; use black_box::evaluate_black_box; use num_bigint::BigUint; @@ -32,6 +32,12 @@ mod memory; /// The error call stack contains the opcode indexes of the call stack at the time of failure, plus the index of the opcode that failed. pub type ErrorCallStack = Vec; +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum FailureReason { + Trap { revert_data_offset: usize, revert_data_size: usize }, + RuntimeError { message: String }, +} + #[derive(Debug, PartialEq, Eq, Clone)] pub enum VMStatus { Finished { @@ -40,7 +46,7 @@ pub enum VMStatus { }, InProgress, Failure { - message: String, + reason: FailureReason, call_stack: ErrorCallStack, }, /// The VM process is not solvable as a [foreign call][Opcode::ForeignCall] has been @@ -81,6 +87,8 @@ pub struct VM<'a, B: BlackBoxFunctionSolver> { call_stack: Vec, /// The solver for blackbox functions black_box_solver: &'a B, + // The solver for big integers + bigint_solver: BigIntSolver, } impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { @@ -101,6 +109,7 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { memory: Memory::default(), call_stack: Vec::new(), black_box_solver, + bigint_solver: Default::default(), } } @@ -138,13 +147,28 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { self.status(VMStatus::InProgress); } + fn get_error_stack(&self) -> Vec { + let mut error_stack: Vec<_> = self.call_stack.clone(); + error_stack.push(self.program_counter); + error_stack + } + /// Sets the current status of the VM to `fail`. /// Indicating that the VM encountered a `Trap` Opcode /// or an invalid state. 
+ fn trap(&mut self, revert_data_offset: usize, revert_data_size: usize) -> VMStatus { + self.status(VMStatus::Failure { + call_stack: self.get_error_stack(), + reason: FailureReason::Trap { revert_data_offset, revert_data_size }, + }); + self.status.clone() + } + fn fail(&mut self, message: String) -> VMStatus { - let mut error_stack: Vec<_> = self.call_stack.clone(); - error_stack.push(self.program_counter); - self.status(VMStatus::Failure { call_stack: error_stack, message }); + self.status(VMStatus::Failure { + call_stack: self.get_error_stack(), + reason: FailureReason::RuntimeError { message }, + }); self.status.clone() } @@ -281,7 +305,9 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { } self.increment_program_counter() } - Opcode::Trap => self.fail("explicit trap hit in brillig".to_string()), + Opcode::Trap { revert_data_offset, revert_data_size } => { + self.trap(*revert_data_offset, *revert_data_size) + } Opcode::Stop { return_data_offset, return_data_size } => { self.finish(*return_data_offset, *return_data_size) } @@ -289,8 +315,8 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { // Convert our source_pointer to an address let source = self.memory.read_ref(*source_pointer); // Use our usize source index to lookup the value in memory - let value = &self.memory.read(source); - self.memory.write(*destination_address, *value); + let value = self.memory.read(source); + self.memory.write(*destination_address, value); self.increment_program_counter() } Opcode::Store { destination_pointer, source: source_address } => { @@ -307,11 +333,16 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { } Opcode::Const { destination, value, bit_size } => { // Consts are not checked in runtime to fit in the bit size, since they can safely be checked statically. - self.memory.write(*destination, MemoryValue::new(*value, *bit_size)); + self.memory.write(*destination, MemoryValue::new_from_field(*value, *bit_size)); self.increment_program_counter() } Opcode::BlackBox(black_box_op) => { - match evaluate_black_box(black_box_op, self.black_box_solver, &mut self.memory) { + match evaluate_black_box( + black_box_op, + self.black_box_solver, + &mut self.memory, + &mut self.bigint_solver, + ) { Ok(()) => self.increment_program_counter(), Err(e) => self.fail(e.to_string()), } @@ -348,7 +379,7 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { ) -> ForeignCallParam { match (input, value_type) { (ValueOrArray::MemoryAddress(value_index), HeapValueType::Simple(_)) => { - self.memory.read(value_index).value.into() + self.memory.read(value_index).to_field().into() } ( ValueOrArray::HeapArray(HeapArray { pointer: pointer_index, size }), @@ -357,7 +388,7 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { let start = self.memory.read_ref(pointer_index); self.read_slice_of_values_from_memory(start, size, value_types) .into_iter() - .map(|mem_value| mem_value.value) + .map(|mem_value| mem_value.to_field()) .collect::>() .into() } @@ -369,7 +400,7 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { let size = self.memory.read(size_index).to_usize(); self.read_slice_of_values_from_memory(start, size, value_types) .into_iter() - .map(|mem_value| mem_value.value) + .map(|mem_value| mem_value.to_field()) .collect::>() .into() } @@ -584,12 +615,9 @@ impl<'a, B: BlackBoxFunctionSolver> VM<'a, B> { /// Casts a value to a different bit size. 
fn cast(&self, bit_size: u32, source_value: MemoryValue) -> MemoryValue { - let lhs_big = BigUint::from_bytes_be(&source_value.value.to_be_bytes()); + let lhs_big = source_value.to_integer(); let mask = BigUint::from(2_u32).pow(bit_size) - 1_u32; - MemoryValue { - value: FieldElement::from_be_bytes_reduce(&(lhs_big & mask).to_bytes_be()), - bit_size, - } + MemoryValue::new_from_integer(lhs_big & mask, bit_size) } } @@ -627,7 +655,7 @@ mod tests { let VM { memory, .. } = vm; let output_value = memory.read(MemoryAddress::from(0)); - assert_eq!(output_value.value, FieldElement::from(27u128)); + assert_eq!(output_value.to_field(), FieldElement::from(27u128)); } #[test] @@ -666,7 +694,7 @@ mod tests { assert_eq!(status, VMStatus::InProgress); let output_cmp_value = vm.memory.read(destination); - assert_eq!(output_cmp_value.value, true.into()); + assert_eq!(output_cmp_value.to_field(), true.into()); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); @@ -687,7 +715,7 @@ mod tests { let jump_opcode = Opcode::Jump { location: 3 }; - let trap_opcode = Opcode::Trap; + let trap_opcode = Opcode::Trap { revert_data_offset: 0, revert_data_size: 0 }; let not_equal_cmp_opcode = Opcode::BinaryFieldOp { op: BinaryFieldOp::Equals, @@ -725,7 +753,7 @@ mod tests { assert_eq!(status, VMStatus::InProgress); let output_cmp_value = vm.memory.read(MemoryAddress::from(2)); - assert_eq!(output_cmp_value.value, false.into()); + assert_eq!(output_cmp_value.to_field(), false.into()); let status = vm.process_opcode(); assert_eq!(status, VMStatus::InProgress); @@ -734,7 +762,7 @@ mod tests { assert_eq!( status, VMStatus::Failure { - message: "explicit trap hit in brillig".to_string(), + reason: FailureReason::Trap { revert_data_offset: 0, revert_data_size: 0 }, call_stack: vec![2] } ); @@ -742,7 +770,7 @@ mod tests { // The address at index `2` should have not changed as we jumped over the add opcode let VM { memory, .. } = vm; let output_value = memory.read(MemoryAddress::from(2)); - assert_eq!(output_value.value, false.into()); + assert_eq!(output_value.to_field(), false.into()); } #[test] @@ -776,7 +804,7 @@ mod tests { let VM { memory, .. } = vm; let casted_value = memory.read(MemoryAddress::from(1)); - assert_eq!(casted_value.value, (2_u128.pow(8) - 1).into()); + assert_eq!(casted_value.to_field(), (2_u128.pow(8) - 1).into()); } #[test] @@ -804,10 +832,10 @@ mod tests { let VM { memory, .. } = vm; let destination_value = memory.read(MemoryAddress::from(2)); - assert_eq!(destination_value.value, (1u128).into()); + assert_eq!(destination_value.to_field(), (1u128).into()); let source_value = memory.read(MemoryAddress::from(0)); - assert_eq!(source_value.value, (1u128).into()); + assert_eq!(source_value.to_field(), (1u128).into()); } #[test] @@ -869,10 +897,10 @@ mod tests { let VM { memory, .. 
} = vm; let destination_value = memory.read(MemoryAddress::from(4)); - assert_eq!(destination_value.value, (3_u128).into()); + assert_eq!(destination_value.to_field(), (3_u128).into()); let source_value = memory.read(MemoryAddress::from(5)); - assert_eq!(source_value.value, (2_u128).into()); + assert_eq!(source_value.to_field(), (2_u128).into()); } #[test] @@ -1120,7 +1148,7 @@ mod tests { let opcodes = [&start[..], &loop_body[..]].concat(); let vm = brillig_execute_and_get_vm(memory, &opcodes); - vm.memory.read(r_sum).value + vm.memory.read(r_sum).to_field() } assert_eq!( @@ -1359,7 +1387,7 @@ mod tests { // Check result in memory let result_values = vm.memory.read_slice(MemoryAddress(2), 4).to_vec(); assert_eq!( - result_values.into_iter().map(|mem_value| mem_value.value).collect::>(), + result_values.into_iter().map(|mem_value| mem_value.to_field()).collect::>(), expected_result ); @@ -1459,7 +1487,7 @@ mod tests { .memory .read_slice(MemoryAddress(4 + input_string.len()), output_string.len()) .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.clone().to_field()) .collect(); assert_eq!(result_values, output_string); @@ -1532,13 +1560,21 @@ mod tests { assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check initial memory still in place - let initial_values: Vec<_> = - vm.memory.read_slice(MemoryAddress(2), 4).iter().map(|mem_val| mem_val.value).collect(); + let initial_values: Vec<_> = vm + .memory + .read_slice(MemoryAddress(2), 4) + .iter() + .map(|mem_val| mem_val.clone().to_field()) + .collect(); assert_eq!(initial_values, initial_matrix); // Check result in memory - let result_values: Vec<_> = - vm.memory.read_slice(MemoryAddress(6), 4).iter().map(|mem_val| mem_val.value).collect(); + let result_values: Vec<_> = vm + .memory + .read_slice(MemoryAddress(6), 4) + .iter() + .map(|mem_val| mem_val.clone().to_field()) + .collect(); assert_eq!(result_values, expected_result); // Ensure the foreign call counter has been incremented @@ -1622,8 +1658,12 @@ mod tests { assert_eq!(vm.status, VMStatus::Finished { return_data_offset: 0, return_data_size: 0 }); // Check result in memory - let result_values: Vec<_> = - vm.memory.read_slice(MemoryAddress(0), 4).iter().map(|mem_val| mem_val.value).collect(); + let result_values: Vec<_> = vm + .memory + .read_slice(MemoryAddress(0), 4) + .iter() + .map(|mem_val| mem_val.clone().to_field()) + .collect(); assert_eq!(result_values, expected_result); // Ensure the foreign call counter has been incremented @@ -1698,7 +1738,7 @@ mod tests { .chain(memory.iter().enumerate().map(|(index, mem_value)| Opcode::Cast { destination: MemoryAddress(index), source: MemoryAddress(index), - bit_size: mem_value.bit_size, + bit_size: mem_value.bit_size(), })) .chain(vec![ // input = 0 @@ -1721,7 +1761,7 @@ mod tests { .collect(); let mut vm = brillig_execute_and_get_vm( - memory.into_iter().map(|mem_value| mem_value.value).collect(), + memory.into_iter().map(|mem_value| mem_value.to_field()).collect(), &program, ); diff --git a/acvm-repo/brillig_vm/src/memory.rs b/acvm-repo/brillig_vm/src/memory.rs index d563e13be2e..feeb3706bde 100644 --- a/acvm-repo/brillig_vm/src/memory.rs +++ b/acvm-repo/brillig_vm/src/memory.rs @@ -1,11 +1,13 @@ use acir::{brillig::MemoryAddress, FieldElement}; +use num_bigint::BigUint; +use num_traits::{One, Zero}; pub const MEMORY_ADDRESSING_BIT_SIZE: u32 = 64; -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct MemoryValue { - pub value: FieldElement, - pub bit_size: 
u32, +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum MemoryValue { + Field(FieldElement), + Integer(BigUint, u32), } #[derive(Debug, thiserror::Error)] @@ -15,53 +17,147 @@ pub enum MemoryTypeError { } impl MemoryValue { - pub fn new(value: FieldElement, bit_size: u32) -> Self { - MemoryValue { value, bit_size } + /// Builds a memory value from a field element. + pub fn new_from_field(value: FieldElement, bit_size: u32) -> Self { + if bit_size == FieldElement::max_num_bits() { + MemoryValue::new_field(value) + } else { + MemoryValue::new_integer(BigUint::from_bytes_be(&value.to_be_bytes()), bit_size) + } + } + + /// Builds a memory value from an integer + pub fn new_from_integer(value: BigUint, bit_size: u32) -> Self { + if bit_size == FieldElement::max_num_bits() { + MemoryValue::new_field(FieldElement::from_be_bytes_reduce(&value.to_bytes_be())) + } else { + MemoryValue::new_integer(value, bit_size) + } } + /// Builds a memory value from a field element, checking that the value is within the bit size. pub fn new_checked(value: FieldElement, bit_size: u32) -> Option { - if value.num_bits() > bit_size { + if bit_size < FieldElement::max_num_bits() && value.num_bits() > bit_size { return None; } - Some(MemoryValue::new(value, bit_size)) + Some(MemoryValue::new_from_field(value, bit_size)) } + /// Builds a field-typed memory value. pub fn new_field(value: FieldElement) -> Self { - MemoryValue { value, bit_size: FieldElement::max_num_bits() } + MemoryValue::Field(value) + } + + /// Builds an integer-typed memory value. + pub fn new_integer(value: BigUint, bit_size: u32) -> Self { + assert!( + bit_size != FieldElement::max_num_bits(), + "Tried to build a field memory value via new_integer" + ); + MemoryValue::Integer(value, bit_size) + } + + /// Extracts the field element from the memory value, if it is typed as field element. + pub fn extract_field(&self) -> Option<&FieldElement> { + match self { + MemoryValue::Field(value) => Some(value), + _ => None, + } + } + + /// Extracts the integer from the memory value, if it is typed as integer. + pub fn extract_integer(&self) -> Option<(&BigUint, u32)> { + match self { + MemoryValue::Integer(value, bit_size) => Some((value, *bit_size)), + _ => None, + } + } + + /// Converts the memory value to a field element, independent of its type. + pub fn to_field(&self) -> FieldElement { + match self { + MemoryValue::Field(value) => *value, + MemoryValue::Integer(value, _) => { + FieldElement::from_be_bytes_reduce(&value.to_bytes_be()) + } + } + } + + /// Converts the memory value to an integer, independent of its type. 
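// A hedged usage sketch (not part of this diff) of the constructors above, assuming the
// `MemoryValue` API in this file: a bit size equal to `FieldElement::max_num_bits()` yields a
// `Field`-typed value, while any other bit size yields an `Integer` of that width.
#[test]
fn memory_value_typing_example() {
    let as_integer = MemoryValue::new_from_field(FieldElement::from(5u128), 32);
    assert_eq!(as_integer, MemoryValue::new_integer(BigUint::from(5u32), 32));

    let as_field =
        MemoryValue::new_from_field(FieldElement::from(5u128), FieldElement::max_num_bits());
    assert!(as_field.extract_field().is_some());

    // Both representations agree once converted back to a field element.
    assert_eq!(as_integer.to_field(), as_field.to_field());
}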
+ pub fn to_integer(self) -> BigUint { + match self { + MemoryValue::Field(value) => BigUint::from_bytes_be(&value.to_be_bytes()), + MemoryValue::Integer(value, _) => value, + } + } + + pub fn bit_size(&self) -> u32 { + match self { + MemoryValue::Field(_) => FieldElement::max_num_bits(), + MemoryValue::Integer(_, bit_size) => *bit_size, + } } pub fn to_usize(&self) -> usize { - assert!(self.bit_size == MEMORY_ADDRESSING_BIT_SIZE, "value is not typed as brillig usize"); - self.value.to_u128() as usize + assert!( + self.bit_size() == MEMORY_ADDRESSING_BIT_SIZE, + "value is not typed as brillig usize" + ); + self.extract_integer().unwrap().0.try_into().unwrap() } - pub fn expect_bit_size(&self, expected_bit_size: u32) -> Result<(), MemoryTypeError> { - if self.bit_size != expected_bit_size { - return Err(MemoryTypeError::MismatchedBitSize { - value_bit_size: self.bit_size, + pub fn expect_field(&self) -> Result<&FieldElement, MemoryTypeError> { + match self { + MemoryValue::Integer(_, bit_size) => Err(MemoryTypeError::MismatchedBitSize { + value_bit_size: *bit_size, + expected_bit_size: FieldElement::max_num_bits(), + }), + MemoryValue::Field(field) => Ok(field), + } + } + + pub fn expect_integer_with_bit_size( + &self, + expected_bit_size: u32, + ) -> Result<&BigUint, MemoryTypeError> { + match self { + MemoryValue::Integer(value, bit_size) => { + if *bit_size != expected_bit_size { + return Err(MemoryTypeError::MismatchedBitSize { + value_bit_size: *bit_size, + expected_bit_size, + }); + } + Ok(value) + } + MemoryValue::Field(_) => Err(MemoryTypeError::MismatchedBitSize { + value_bit_size: FieldElement::max_num_bits(), expected_bit_size, - }); + }), } - Ok(()) } } impl std::fmt::Display for MemoryValue { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> { - let typ = match self.bit_size { - 0 => "null".to_string(), - 1 => "bool".to_string(), - _ if self.bit_size == FieldElement::max_num_bits() => "field".to_string(), - _ => format!("u{}", self.bit_size), - }; - f.write_str(format!("{}: {}", self.value, typ).as_str()) + match self { + MemoryValue::Field(value) => write!(f, "{}: field", value), + MemoryValue::Integer(value, bit_size) => { + let typ = match bit_size { + 0 => "null".to_string(), + 1 => "bool".to_string(), + _ => format!("u{}", bit_size), + }; + write!(f, "{}: {}", value, typ) + } + } } } impl Default for MemoryValue { fn default() -> Self { - MemoryValue::new(FieldElement::zero(), 0) + MemoryValue::new_integer(BigUint::zero(), 0) } } @@ -73,31 +169,32 @@ impl From<FieldElement> for MemoryValue { impl From<usize> for MemoryValue { fn from(value: usize) -> Self { - MemoryValue::new(value.into(), MEMORY_ADDRESSING_BIT_SIZE) + MemoryValue::new_integer(value.into(), MEMORY_ADDRESSING_BIT_SIZE) } } impl From<u64> for MemoryValue { fn from(value: u64) -> Self { - MemoryValue::new((value as u128).into(), 64) + MemoryValue::new_integer(value.into(), 64) } } impl From<u32> for MemoryValue { fn from(value: u32) -> Self { - MemoryValue::new((value as u128).into(), 32) + MemoryValue::new_integer(value.into(), 32) } } impl From<u8> for MemoryValue { fn from(value: u8) -> Self { - MemoryValue::new((value as u128).into(), 8) + MemoryValue::new_integer(value.into(), 8) } } impl From<bool> for MemoryValue { fn from(value: bool) -> Self { - MemoryValue::new(value.into(), 1) + let value = if value { BigUint::one() } else { BigUint::zero() }; + MemoryValue::new_integer(value, 1) } } @@ -105,8 +202,7 @@ impl TryFrom<MemoryValue> for FieldElement { type Error = MemoryTypeError; fn try_from(memory_value: MemoryValue) ->
Result<Self, Self::Error> { - memory_value.expect_bit_size(FieldElement::max_num_bits())?; - Ok(memory_value.value) + memory_value.expect_field().copied() } } @@ -114,8 +210,7 @@ impl TryFrom<MemoryValue> for u64 { type Error = MemoryTypeError; fn try_from(memory_value: MemoryValue) -> Result<Self, Self::Error> { - memory_value.expect_bit_size(64)?; - Ok(memory_value.value.to_u128() as u64) + memory_value.expect_integer_with_bit_size(64).map(|value| value.try_into().unwrap()) } } @@ -123,8 +218,7 @@ impl TryFrom<MemoryValue> for u32 { type Error = MemoryTypeError; fn try_from(memory_value: MemoryValue) -> Result<Self, Self::Error> { - memory_value.expect_bit_size(32)?; - Ok(memory_value.value.to_u128() as u32) + memory_value.expect_integer_with_bit_size(32).map(|value| value.try_into().unwrap()) } } @@ -132,9 +226,7 @@ impl TryFrom<MemoryValue> for u8 { type Error = MemoryTypeError; fn try_from(memory_value: MemoryValue) -> Result<Self, Self::Error> { - memory_value.expect_bit_size(8)?; - - Ok(memory_value.value.to_u128() as u8) + memory_value.expect_integer_with_bit_size(8).map(|value| value.try_into().unwrap()) } } @@ -142,11 +234,65 @@ impl TryFrom<MemoryValue> for bool { type Error = MemoryTypeError; fn try_from(memory_value: MemoryValue) -> Result<Self, Self::Error> { - memory_value.expect_bit_size(1)?; + let as_integer = memory_value.expect_integer_with_bit_size(1)?; + + if as_integer.is_zero() { + Ok(false) + } else if as_integer.is_one() { + Ok(true) + } else { + unreachable!("value typed as bool is greater than one") + } + } +} + +impl TryFrom<&MemoryValue> for FieldElement { + type Error = MemoryTypeError; + + fn try_from(memory_value: &MemoryValue) -> Result<Self, Self::Error> { + memory_value.expect_field().copied() + } +} + +impl TryFrom<&MemoryValue> for u64 { + type Error = MemoryTypeError; + + fn try_from(memory_value: &MemoryValue) -> Result<Self, Self::Error> { + memory_value.expect_integer_with_bit_size(64).map(|value| { + value.try_into().expect("memory_value has been asserted to contain a 64 bit integer") + }) + } +} + +impl TryFrom<&MemoryValue> for u32 { + type Error = MemoryTypeError; + + fn try_from(memory_value: &MemoryValue) -> Result<Self, Self::Error> { + memory_value.expect_integer_with_bit_size(32).map(|value| { + value.try_into().expect("memory_value has been asserted to contain a 32 bit integer") + }) + } +} + +impl TryFrom<&MemoryValue> for u8 { + type Error = MemoryTypeError; + + fn try_from(memory_value: &MemoryValue) -> Result<Self, Self::Error> { + memory_value.expect_integer_with_bit_size(8).map(|value| { + value.try_into().expect("memory_value has been asserted to contain an 8 bit integer") + }) + } +} + +impl TryFrom<&MemoryValue> for bool { + type Error = MemoryTypeError; + + fn try_from(memory_value: &MemoryValue) -> Result<Self, Self::Error> { + let as_integer = memory_value.expect_integer_with_bit_size(1)?; - if memory_value.value == FieldElement::zero() { + if as_integer.is_zero() { Ok(false) - } else if memory_value.value == FieldElement::one() { + } else if as_integer.is_one() { Ok(true) } else { unreachable!("value typed as bool is greater than one") @@ -164,7 +310,7 @@ pub struct Memory { impl Memory { /// Gets the value at pointer pub fn read(&self, ptr: MemoryAddress) -> MemoryValue { - self.inner.get(ptr.to_usize()).copied().unwrap_or_default() + self.inner.get(ptr.to_usize()).cloned().unwrap_or_default() } pub fn read_ref(&self, ptr: MemoryAddress) -> MemoryAddress { @@ -191,7 +337,7 @@ impl Memory { /// Sets the values after pointer `ptr` to `values` pub fn write_slice(&mut self, ptr: MemoryAddress, values: &[MemoryValue]) { self.resize_to_fit(ptr.to_usize() + values.len()); - self.inner[ptr.to_usize()..(ptr.to_usize() + values.len())].copy_from_slice(values); +
self.inner[ptr.to_usize()..(ptr.to_usize() + values.len())].clone_from_slice(values); } /// Returns the values of the memory diff --git a/aztec_macros/src/lib.rs b/aztec_macros/src/lib.rs index 3ee6f9c21b9..dff3193a327 100644 --- a/aztec_macros/src/lib.rs +++ b/aztec_macros/src/lib.rs @@ -3,21 +3,20 @@ mod utils; use transforms::{ compute_note_hash_and_nullifier::inject_compute_note_hash_and_nullifier, + contract_interface::{ + generate_contract_interface, stub_function, update_fn_signatures_in_contract_interface, + }, events::{generate_selector_impl, transform_events}, - functions::{transform_function, transform_unconstrained}, - note_interface::generate_note_interface_impl, + functions::{export_fn_abi, transform_function, transform_unconstrained}, + note_interface::{generate_note_interface_impl, inject_note_exports}, storage::{ assign_storage_slots, check_for_storage_definition, check_for_storage_implementation, - generate_storage_implementation, + generate_storage_implementation, generate_storage_layout, }, }; -use noirc_frontend::{ - hir::def_collector::dc_crate::{UnresolvedFunctions, UnresolvedTraitImpl}, - macros_api::{ - CrateId, FileId, HirContext, MacroError, MacroProcessor, SecondaryAttribute, SortedModule, - Span, - }, +use noirc_frontend::macros_api::{ + CrateId, FileId, HirContext, MacroError, MacroProcessor, SortedModule, Span, }; use utils::{ @@ -39,16 +38,6 @@ impl MacroProcessor for AztecMacro { transform(ast, crate_id, file_id, context) } - fn process_collected_defs( - &self, - crate_id: &CrateId, - context: &mut HirContext, - collected_trait_impls: &[UnresolvedTraitImpl], - collected_functions: &mut [UnresolvedFunctions], - ) -> Result<(), (MacroError, FileId)> { - transform_collected_defs(crate_id, context, collected_trait_impls, collected_functions) - } - fn process_typed_ast( &self, crate_id: &CrateId, @@ -73,7 +62,14 @@ fn transform( // Usage -> mut ast -> aztec_library::transform(&mut ast) // Covers all functions in the ast for submodule in ast.submodules.iter_mut().filter(|submodule| submodule.is_contract) { - if transform_module(&mut submodule.contents).map_err(|err| (err.into(), file_id))? { + if transform_module( + crate_id, + context, + &mut submodule.contents, + submodule.name.0.contents.as_str(), + ) + .map_err(|err| (err.into(), file_id))? + { check_for_aztec_dependency(crate_id, context)?; } } @@ -86,19 +82,33 @@ fn transform( /// Determines if ast nodes are annotated with aztec attributes. /// For annotated functions it calls the `transform` function which will perform the required transformations. 
/// Returns true if an annotated node is found, false otherwise -fn transform_module(module: &mut SortedModule) -> Result { +fn transform_module( + crate_id: &CrateId, + context: &HirContext, + module: &mut SortedModule, + module_name: &str, +) -> Result { let mut has_transformed_module = false; // Check for a user defined storage struct - let storage_defined = check_for_storage_definition(module); - let storage_implemented = check_for_storage_implementation(module); - if storage_defined && !storage_implemented { - generate_storage_implementation(module)?; + let maybe_storage_struct_name = check_for_storage_definition(module)?; + let storage_defined = maybe_storage_struct_name.is_some(); + + if let Some(storage_struct_name) = maybe_storage_struct_name { + if !check_for_storage_implementation(module, &storage_struct_name) { + generate_storage_implementation(module, &storage_struct_name)?; + } + // Make sure we're only generating the storage layout for the root crate + // In case we got a contract importing other contracts for their interface, we + // don't want to generate the storage layout for them + if crate_id == context.root_crate_id() { + generate_storage_layout(module, storage_struct_name)?; + } } - for structure in module.types.iter() { - if structure.attributes.iter().any(|attr| matches!(attr, SecondaryAttribute::Event)) { + for structure in module.types.iter_mut() { + if structure.attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(event)")) { module.impls.push(generate_selector_impl(structure)); has_transformed_module = true; } @@ -112,6 +122,8 @@ fn transform_module(module: &mut SortedModule) -> Result .any(|attr| is_custom_attribute(attr, "aztec(initializer)")) }); + let mut stubs: Vec<_> = vec![]; + for func in module.functions.iter_mut() { let mut is_private = false; let mut is_public = false; @@ -139,14 +151,18 @@ fn transform_module(module: &mut SortedModule) -> Result // Apply transformations to the function based on collected attributes if is_private || is_public || is_public_vm { + let fn_type = if is_private { + "Private" + } else if is_public_vm { + "Avm" + } else { + "Public" + }; + stubs.push(stub_function(fn_type, func)); + + export_fn_abi(&mut module.types, func)?; transform_function( - if is_private { - "Private" - } else if is_public_vm { - "Avm" - } else { - "Public" - }, + fn_type, func, storage_defined, is_initializer, @@ -180,29 +196,13 @@ fn transform_module(module: &mut SortedModule) -> Result span: Span::default(), }); } + + generate_contract_interface(module, module_name, &stubs)?; } Ok(has_transformed_module) } -fn transform_collected_defs( - crate_id: &CrateId, - context: &mut HirContext, - collected_trait_impls: &[UnresolvedTraitImpl], - collected_functions: &mut [UnresolvedFunctions], -) -> Result<(), (MacroError, FileId)> { - if has_aztec_dependency(crate_id, context) { - inject_compute_note_hash_and_nullifier( - crate_id, - context, - collected_trait_impls, - collected_functions, - ) - } else { - Ok(()) - } -} - // // Transform Hir Nodes for Aztec // @@ -212,6 +212,13 @@ fn transform_hir( crate_id: &CrateId, context: &mut HirContext, ) -> Result<(), (AztecMacroError, FileId)> { - transform_events(crate_id, context)?; - assign_storage_slots(crate_id, context) + if has_aztec_dependency(crate_id, context) { + transform_events(crate_id, context)?; + inject_compute_note_hash_and_nullifier(crate_id, context)?; + assign_storage_slots(crate_id, context)?; + inject_note_exports(crate_id, context)?; + 
update_fn_signatures_in_contract_interface(crate_id, context) + } else { + Ok(()) + } } diff --git a/aztec_macros/src/transforms/compute_note_hash_and_nullifier.rs b/aztec_macros/src/transforms/compute_note_hash_and_nullifier.rs index 1f5681ed470..4ff97a5dcae 100644 --- a/aztec_macros/src/transforms/compute_note_hash_and_nullifier.rs +++ b/aztec_macros/src/transforms/compute_note_hash_and_nullifier.rs @@ -1,48 +1,43 @@ use noirc_errors::{Location, Span}; use noirc_frontend::{ graph::CrateId, - hir::{ - def_collector::dc_crate::{UnresolvedFunctions, UnresolvedTraitImpl}, - def_map::{LocalModuleId, ModuleId}, - }, - macros_api::{FileId, HirContext, MacroError}, - node_interner::FuncId, - parse_program, FunctionReturnType, ItemVisibility, NoirFunction, UnresolvedTypeData, + macros_api::{FileId, HirContext}, + parse_program, FunctionReturnType, NoirFunction, Type, UnresolvedTypeData, }; -use crate::utils::hir_utils::fetch_struct_trait_impls; +use crate::utils::{ + errors::AztecMacroError, + hir_utils::{collect_crate_functions, fetch_notes, get_contract_module_data, inject_fn}, +}; // Check if "compute_note_hash_and_nullifier(AztecAddress,Field,Field,Field,[Field; N]) -> [Field; 4]" is defined fn check_for_compute_note_hash_and_nullifier_definition( - functions_data: &[(LocalModuleId, FuncId, NoirFunction)], - module_id: LocalModuleId, + crate_id: &CrateId, + context: &HirContext, ) -> bool { - functions_data.iter().filter(|func_data| func_data.0 == module_id).any(|func_data| { - func_data.2.def.name.0.contents == "compute_note_hash_and_nullifier" - && func_data.2.def.parameters.len() == 5 - && match &func_data.2.def.parameters[0].typ.typ { - UnresolvedTypeData::Named(path, _, _) => path.segments.last().unwrap().0.contents == "AztecAddress", - _ => false, - } - && func_data.2.def.parameters[1].typ.typ == UnresolvedTypeData::FieldElement - && func_data.2.def.parameters[2].typ.typ == UnresolvedTypeData::FieldElement - && func_data.2.def.parameters[3].typ.typ == UnresolvedTypeData::FieldElement - // checks if the 5th parameter is an array and the Box in - // Array(Option, Box) contains only fields - && match &func_data.2.def.parameters[4].typ.typ { - UnresolvedTypeData::Array(_, inner_type) => { - matches!(inner_type.typ, UnresolvedTypeData::FieldElement) - }, - _ => false, - } + collect_crate_functions(crate_id, context).iter().any(|funct_id| { + let func_data = context.def_interner.function_meta(funct_id); + let func_name = context.def_interner.function_name(funct_id); + func_name == "compute_note_hash_and_nullifier" + && func_data.parameters.len() == 5 + && func_data.parameters.0.first().is_some_and(| (_, typ, _) | match typ { + Type::Struct(struct_typ, _) => struct_typ.borrow().name.0.contents == "AztecAddress", + _ => false + }) + && func_data.parameters.0.get(1).is_some_and(|(_, typ, _)| typ.is_field()) + && func_data.parameters.0.get(2).is_some_and(|(_, typ, _)| typ.is_field()) + && func_data.parameters.0.get(3).is_some_and(|(_, typ, _)| typ.is_field()) + // checks if the 5th parameter is an array and contains only fields + && func_data.parameters.0.get(4).is_some_and(|(_, typ, _)| match typ { + Type::Array(_, inner_type) => inner_type.to_owned().is_field(), + _ => false + }) // We check the return type the same way as we did the 5th parameter - && match &func_data.2.def.return_type { + && match &func_data.return_type { FunctionReturnType::Default(_) => false, FunctionReturnType::Ty(unresolved_type) => { match &unresolved_type.typ { - UnresolvedTypeData::Array(_, inner_type) => { - 
matches!(inner_type.typ, UnresolvedTypeData::FieldElement) - }, + UnresolvedTypeData::Array(_, inner_type) => matches!(inner_type.typ, UnresolvedTypeData::FieldElement), _ => false, } } @@ -53,77 +48,40 @@ fn check_for_compute_note_hash_and_nullifier_definition( pub fn inject_compute_note_hash_and_nullifier( crate_id: &CrateId, context: &mut HirContext, - unresolved_traits_impls: &[UnresolvedTraitImpl], - collected_functions: &mut [UnresolvedFunctions], -) -> Result<(), (MacroError, FileId)> { - // We first fetch modules in this crate which correspond to contracts, along with their file id. - let contract_module_file_ids: Vec<(LocalModuleId, FileId)> = context - .def_map(crate_id) - .expect("ICE: Missing crate in def_map") - .modules() - .iter() - .filter(|(_, module)| module.is_contract) - .map(|(idx, module)| (LocalModuleId(idx), module.location.file)) - .collect(); - - // If the current crate does not contain a contract module we simply skip it. - if contract_module_file_ids.is_empty() { - return Ok(()); - } else if contract_module_file_ids.len() != 1 { - panic!("Found multiple contracts in the same crate"); +) -> Result<(), (AztecMacroError, FileId)> { + if let Some((_, module_id, file_id)) = get_contract_module_data(context, crate_id) { + // If compute_note_hash_and_nullifier is already defined by the user, we skip auto-generation in order to provide an + // escape hatch for this mechanism. + // TODO(#4647): improve this diagnosis and error messaging. + if context.crate_graph.root_crate_id() != crate_id + || check_for_compute_note_hash_and_nullifier_definition(crate_id, context) + { + return Ok(()); + } + + // In order to implement compute_note_hash_and_nullifier, we need to know all of the different note types the + // contract might use. These are the types that are marked as #[aztec(note)]. + let note_types = + fetch_notes(context).iter().map(|(path, _)| path.to_string()).collect::>(); + + // We can now generate a version of compute_note_hash_and_nullifier tailored for the contract in this crate. + let func = generate_compute_note_hash_and_nullifier(¬e_types); + + // And inject the newly created function into the contract. + + // TODO(#4373): We don't have a reasonable location for the source code of this autogenerated function, so we simply + // pass an empty span. This function should not produce errors anyway so this should not matter. + let location = Location::new(Span::empty(0), file_id); + + inject_fn(crate_id, context, func, location, module_id, file_id).map_err(|err| { + ( + AztecMacroError::CouldNotImplementComputeNoteHashAndNullifier { + secondary_message: err.secondary_message, + }, + file_id, + ) + })?; } - - let (module_id, file_id) = contract_module_file_ids[0]; - - // If compute_note_hash_and_nullifier is already defined by the user, we skip auto-generation in order to provide an - // escape hatch for this mechanism. - // TODO(#4647): improve this diagnosis and error messaging. - if collected_functions.iter().any(|coll_funcs_data| { - check_for_compute_note_hash_and_nullifier_definition(&coll_funcs_data.functions, module_id) - }) { - return Ok(()); - } - - // In order to implement compute_note_hash_and_nullifier, we need to know all of the different note types the - // contract might use. These are the types that implement the NoteInterface trait, which provides the - // get_note_type_id function. 
- let note_types = fetch_struct_trait_impls(context, unresolved_traits_impls, "NoteInterface"); - - // We can now generate a version of compute_note_hash_and_nullifier tailored for the contract in this crate. - let func = generate_compute_note_hash_and_nullifier(¬e_types); - - // And inject the newly created function into the contract. - - // TODO(#4373): We don't have a reasonable location for the source code of this autogenerated function, so we simply - // pass an empty span. This function should not produce errors anyway so this should not matter. - let location = Location::new(Span::empty(0), file_id); - - // These are the same things the ModCollector does when collecting functions: we push the function to the - // NodeInterner, declare it in the module (which checks for duplicate definitions), and finally add it to the list - // on collected but unresolved functions. - - let func_id = context.def_interner.push_empty_fn(); - context.def_interner.push_function( - func_id, - &func.def, - ModuleId { krate: *crate_id, local_id: module_id }, - location, - ); - - context.def_map_mut(crate_id).unwrap() - .modules_mut()[module_id.0] - .declare_function( - func.name_ident().clone(), ItemVisibility::Public, func_id - ).expect( - "Failed to declare the autogenerated compute_note_hash_and_nullifier function, likely due to a duplicate definition. See https://github.com/AztecProtocol/aztec-packages/issues/4647." - ); - - collected_functions - .iter_mut() - .find(|fns| fns.file_id == file_id) - .expect("ICE: no functions found in contract file") - .push_fn(module_id, func_id, func.clone()); - Ok(()) } @@ -149,7 +107,7 @@ fn generate_compute_note_hash_and_nullifier_source(note_types: &[String]) -> Str // so we include a dummy version. " unconstrained fn compute_note_hash_and_nullifier( - contract_address: AztecAddress, + contract_address: dep::aztec::protocol_types::address::AztecAddress, nonce: Field, storage_slot: Field, note_type_id: Field, @@ -179,7 +137,7 @@ fn generate_compute_note_hash_and_nullifier_source(note_types: &[String]) -> Str format!( " unconstrained fn compute_note_hash_and_nullifier( - contract_address: AztecAddress, + contract_address: dep::aztec::protocol_types::address::AztecAddress, nonce: Field, storage_slot: Field, note_type_id: Field, diff --git a/aztec_macros/src/transforms/contract_interface.rs b/aztec_macros/src/transforms/contract_interface.rs new file mode 100644 index 00000000000..4401c867df9 --- /dev/null +++ b/aztec_macros/src/transforms/contract_interface.rs @@ -0,0 +1,326 @@ +use noirc_frontend::{ + graph::CrateId, + macros_api::{FileId, HirContext, HirExpression, HirLiteral, HirStatement}, + parse_program, + parser::SortedModule, + NoirFunction, Type, UnresolvedTypeData, +}; + +use crate::utils::{ + constants::SELECTOR_PLACEHOLDER, + errors::AztecMacroError, + hir_utils::{collect_crate_structs, get_contract_module_data, signature_of_type}, +}; + +// Generates the stubs for contract functions as low level calls using CallInterface, turning +// #[aztec(public)] // also private +// fn a_function(first_arg: Field, second_arg: Struct, third_arg: [Field; 4]) -> Field { +// ... 
+// } +// +// into +// +// pub fn a_function(self, first_arg: Field, second_arg: Struct, third_arg: [Field; 4]) -> PublicCallInterface { +// let mut args_acc: [Field] = &[]; +// args_acc = args_acc.append(first_arg.serialize().as_slice()); +// args_acc = args_acc.append(second_arg.serialize().as_slice()); +// let hash_third_arg = third_arg.map(|x: Field| x.serialize()); +// for i in 0..third_arg.len() { +// args_acc = args_acc.append(third_arg[i].serialize().as_slice()); +// } +// let args_hash = dep::aztec::hash::hash_args(args_acc); +// assert(args_hash == dep::aztec::oracle::arguments::pack_arguments(args_acc)); +// PublicCallInterface { +// target_contract: self.target_contract, +// selector: FunctionSelector::from_signature("SELECTOR_PLACEHOLDER"), +// args_hash +// } +// } +// +// The selector placeholder has to be replaced with the actual function signature after type checking in the next macro pass +pub fn stub_function(aztec_visibility: &str, func: &NoirFunction) -> String { + let fn_name = func.name().to_string(); + let fn_parameters = func + .parameters() + .iter() + .map(|param| { + format!( + "{}: {}", + param.pattern.name_ident().0.contents, + param.typ.to_string().replace("plain::", "") + ) + }) + .collect::>() + .join(", "); + let fn_return_type: noirc_frontend::UnresolvedType = func.return_type(); + + let fn_selector = format!("dep::aztec::protocol_types::abis::function_selector::FunctionSelector::from_signature(\"{}\")", SELECTOR_PLACEHOLDER); + + let parameters = func.parameters(); + let is_void = if matches!(fn_return_type.typ, UnresolvedTypeData::Unit) { "Void" } else { "" }; + let return_type_hint = if is_void == "Void" { + "".to_string() + } else { + format!("<{}>", fn_return_type.typ.to_string().replace("plain::", "")) + }; + let call_args = parameters + .iter() + .map(|arg| { + let param_name = arg.pattern.name_ident().0.contents.clone(); + match &arg.typ.typ { + UnresolvedTypeData::Array(_, typ) => { + format!( + "let hash_{0} = {0}.map(|x: {1}| x.serialize()); + for i in 0..{0}.len() {{ + args_acc = args_acc.append(hash_{0}[i].as_slice()); + }}\n", + param_name, typ.typ + ) + } + _ => { + format!("args_acc = args_acc.append({}.serialize().as_slice());\n", param_name) + } + } + }) + .collect::>() + .join(""); + if aztec_visibility != "Avm" { + let args_hash = if !parameters.is_empty() { + format!( + "let mut args_acc: [Field] = &[]; + {} + let args_hash = dep::aztec::hash::hash_args(args_acc); + assert(args_hash == dep::aztec::oracle::arguments::pack_arguments(args_acc));", + call_args + ) + } else { + "let args_hash = 0;".to_string() + }; + + let fn_body = format!( + "{} + dep::aztec::context::{}{}CallInterface {{ + target_contract: self.target_contract, + selector: {}, + args_hash, + }}", + args_hash, aztec_visibility, is_void, fn_selector, + ); + format!( + "pub fn {}(self, {}) -> dep::aztec::context::{}{}CallInterface{} {{ + {} + }}", + fn_name, fn_parameters, aztec_visibility, is_void, return_type_hint, fn_body + ) + } else { + let args = format!( + "let mut args_acc: [Field] = &[]; + {} + ", + call_args + ); + let fn_body = format!( + "{} + dep::aztec::context::Avm{}CallInterface {{ + target_contract: self.target_contract, + selector: {}, + args: args_acc, + }}", + args, is_void, fn_selector, + ); + format!( + "pub fn {}(self, {}) -> dep::aztec::context::Avm{}CallInterface{} {{ + {} + }}", + fn_name, fn_parameters, is_void, return_type_hint, fn_body + ) + } +} + +// Generates the contract interface as a struct with an `at` function that holds the stubbed 
functions and provides +// them with a target contract address. The struct has the same name as the contract (which is technically a module) +// so imports look nice. The `at` function is also exposed as a contract library method for external use. +pub fn generate_contract_interface( + module: &mut SortedModule, + module_name: &str, + stubs: &[String], +) -> Result<(), AztecMacroError> { + let contract_interface = format!( + " + struct {0} {{ + target_contract: dep::aztec::protocol_types::address::AztecAddress + }} + + impl {0} {{ + {1} + + pub fn at( + target_contract: dep::aztec::protocol_types::address::AztecAddress + ) -> Self {{ + Self {{ target_contract }} + }} + }} + + #[contract_library_method] + pub fn at( + target_contract: dep::aztec::protocol_types::address::AztecAddress + ) -> {0} {{ + {0} {{ target_contract }} + }} + ", + module_name, + stubs.join("\n"), + ); + + let (contract_interface_ast, errors) = parse_program(&contract_interface); + if !errors.is_empty() { + dbg!(errors); + return Err(AztecMacroError::CouldNotGenerateContractInterface { secondary_message: Some("Failed to parse Noir macro code during contract interface generation. This is either a bug in the compiler or the Noir macro code".to_string()), }); + } + + let mut contract_interface_ast = contract_interface_ast.into_sorted(); + module.types.push(contract_interface_ast.types.pop().unwrap()); + module.impls.push(contract_interface_ast.impls.pop().unwrap()); + module.functions.push(contract_interface_ast.functions.pop().unwrap()); + + Ok(()) +} + +fn compute_fn_signature(fn_name: &str, parameters: &[Type]) -> String { + format!( + "{}({})", + fn_name, + parameters.iter().map(signature_of_type).collect::>().join(",") + ) +} + +// Updates the function signatures in the contract interface with the actual ones, replacing the placeholder. +// This is done by locating the contract interface struct, its functions (stubs) and assuming the last statement of each +// is the constructor for a CallInterface. This constructor has a selector field that holds a +// FunctionSelector::from_signature function that receives the signature as a string literal. 
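// An illustrative sketch (not part of this diff) of the end state this pass produces for a
// hypothetical stub `fn transfer(self, to: AztecAddress, amount: Field)`; the exact signature
// text depends on `signature_of_type` for each parameter type.
fn example_signature() -> String {
    // Mirrors compute_fn_signature above, with hand-written per-parameter signatures.
    let fn_name = "transfer";
    let parameter_signatures = ["(Field)", "Field"];
    format!("{}({})", fn_name, parameter_signatures.join(","))
    // => "transfer((Field),Field)", which this pass swaps into the stub's
    //    FunctionSelector::from_signature("SELECTOR_PLACEHOLDER") literal.
}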
+pub fn update_fn_signatures_in_contract_interface( + crate_id: &CrateId, + context: &mut HirContext, +) -> Result<(), (AztecMacroError, FileId)> { + if let Some((name, _, file_id)) = get_contract_module_data(context, crate_id) { + let maybe_interface_struct = + collect_crate_structs(crate_id, context).iter().find_map(|struct_id| { + let r#struct = context.def_interner.get_struct(*struct_id); + if r#struct.borrow().name.0.contents == name { + Some(r#struct) + } else { + None + } + }); + + if let Some(interface_struct) = maybe_interface_struct { + let methods = context.def_interner.get_struct_methods(interface_struct.borrow().id); + + for func_id in methods.iter().flat_map(|methods| methods.direct.iter()) { + let name = context.def_interner.function_name(func_id); + let fn_parameters = &context.def_interner.function_meta(func_id).parameters.clone(); + + if name == "at" { + continue; + } + + let fn_signature = compute_fn_signature( + name, + &fn_parameters + .iter() + .skip(1) + .map(|(_, typ, _)| typ.clone()) + .collect::>(), + ); + let hir_func = context.def_interner.function(func_id).block(&context.def_interner); + let call_interface_constructor_statement = context.def_interner.statement( + hir_func + .statements() + .last() + .ok_or((AztecMacroError::AztecDepNotFound, file_id))?, + ); + let call_interface_constructor_expression = + match call_interface_constructor_statement { + HirStatement::Expression(expression_id) => { + match context.def_interner.expression(&expression_id) { + HirExpression::Constructor(hir_constructor_expression) => { + Ok(hir_constructor_expression) + } + _ => Err(( + AztecMacroError::CouldNotGenerateContractInterface { + secondary_message: Some( + "CallInterface constructor statement must be a constructor expression" + .to_string(), + ), + }, + file_id, + )), + } + } + _ => Err(( + AztecMacroError::CouldNotGenerateContractInterface { + secondary_message: Some( + "CallInterface constructor statement must be an expression" + .to_string(), + ), + }, + file_id, + )), + }?; + let (_, function_selector_expression_id) = + call_interface_constructor_expression.fields[1]; + let function_selector_expression = + context.def_interner.expression(&function_selector_expression_id); + + let current_fn_signature_expression_id = match function_selector_expression { + HirExpression::Call(call_expr) => Ok(call_expr.arguments[0]), + _ => Err(( + AztecMacroError::CouldNotGenerateContractInterface { + secondary_message: Some( + "Function selector argument expression must be call expression" + .to_string(), + ), + }, + file_id, + )), + }?; + + let current_fn_signature_expression = + context.def_interner.expression(¤t_fn_signature_expression_id); + + match current_fn_signature_expression { + HirExpression::Literal(HirLiteral::Str(signature)) => { + if signature != SELECTOR_PLACEHOLDER { + Err(( + AztecMacroError::CouldNotGenerateContractInterface { + secondary_message: Some(format!( + "Function signature argument must be a placeholder: {}", + SELECTOR_PLACEHOLDER + )), + }, + file_id, + )) + } else { + Ok(()) + } + } + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "Function signature argument must be a literal string".to_string(), + ), + }, + file_id, + )), + }?; + + context + .def_interner + .update_expression(current_fn_signature_expression_id, |expr| { + *expr = HirExpression::Literal(HirLiteral::Str(fn_signature)) + }); + } + } + } + Ok(()) +} diff --git a/aztec_macros/src/transforms/events.rs b/aztec_macros/src/transforms/events.rs index 
e7e39ed29ba..b77a5821b81 100644 --- a/aztec_macros/src/transforms/events.rs +++ b/aztec_macros/src/transforms/events.rs @@ -16,7 +16,8 @@ use crate::{ chained_dep, utils::{ ast_utils::{ - call, expression, ident, ident_path, make_statement, make_type, path, variable_path, + call, expression, ident, ident_path, is_custom_attribute, make_statement, make_type, + path, variable_path, }, constants::SIGNATURE_PLACEHOLDER, errors::AztecMacroError, @@ -38,7 +39,8 @@ use crate::{ /// This allows developers to emit events without having to write the signature of the event every time they emit it. /// The signature cannot be known at this point since types are not resolved yet, so we use a signature placeholder. /// It'll get resolved after by transforming the HIR. -pub fn generate_selector_impl(structure: &NoirStruct) -> TypeImpl { +pub fn generate_selector_impl(structure: &mut NoirStruct) -> TypeImpl { + structure.attributes.push(SecondaryAttribute::Abi("events".to_string())); let struct_type = make_type(UnresolvedTypeData::Named(path(structure.name.clone()), vec![], true)); @@ -174,7 +176,7 @@ pub fn transform_events( ) -> Result<(), (AztecMacroError, FileId)> { for struct_id in collect_crate_structs(crate_id, context) { let attributes = context.def_interner.struct_attributes(&struct_id); - if attributes.iter().any(|attr| matches!(attr, SecondaryAttribute::Event)) { + if attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(event)")) { transform_event(struct_id, &mut context.def_interner)?; } } diff --git a/aztec_macros/src/transforms/functions.rs b/aztec_macros/src/transforms/functions.rs index 9844abc30fe..534d24289b7 100644 --- a/aztec_macros/src/transforms/functions.rs +++ b/aztec_macros/src/transforms/functions.rs @@ -1,20 +1,19 @@ use convert_case::{Case, Casing}; use noirc_errors::Span; use noirc_frontend::{ - macros_api::FieldElement, BlockExpression, ConstrainKind, ConstrainStatement, Distinctness, - Expression, ExpressionKind, ForLoopStatement, ForRange, FunctionReturnType, Ident, Literal, - NoirFunction, Param, PathKind, Pattern, Signedness, Statement, StatementKind, UnresolvedType, - UnresolvedTypeData, Visibility, + macros_api::FieldElement, parse_program, BlockExpression, ConstrainKind, ConstrainStatement, + Distinctness, Expression, ExpressionKind, ForLoopStatement, ForRange, FunctionReturnType, + Ident, Literal, NoirFunction, NoirStruct, Param, PathKind, Pattern, Signedness, Statement, + StatementKind, UnresolvedType, UnresolvedTypeData, Visibility, }; use crate::{ chained_dep, chained_path, utils::{ ast_utils::{ - assignment, call, cast, expression, ident, ident_path, index_array, - index_array_variable, make_eq, make_statement, make_type, member_access, method_call, - mutable_assignment, mutable_reference, path, return_type, variable, variable_ident, - variable_path, + assignment, assignment_with_type, call, cast, expression, ident, ident_path, + index_array, make_eq, make_statement, make_type, method_call, mutable_assignment, + mutable_reference, path, return_type, variable, variable_ident, variable_path, }, errors::AztecMacroError, }, @@ -45,13 +44,13 @@ pub fn transform_function( // Add initialization check if insert_init_check { - let init_check = create_init_check(); + let init_check = create_init_check(ty); func.def.body.statements.insert(0, init_check); } // Add assertion for initialization arguments and sender if is_initializer { - func.def.body.statements.insert(0, create_assert_initializer()); + func.def.body.statements.insert(0, create_assert_initializer(ty)); 
} // Add access to the storage struct @@ -74,18 +73,18 @@ pub fn transform_function( // Abstract return types such that they get added to the kernel's return_values if !is_avm { - if let Some(return_values) = abstract_return_values(func) { + if let Some(return_values_statements) = abstract_return_values(func)? { // In case we are pushing return values to the context, we remove the statement that originated it // This avoids running duplicate code, since blocks like if/else can be value returning statements func.def.body.statements.pop(); // Add the new return statement - func.def.body.statements.push(return_values); + func.def.body.statements.extend(return_values_statements); } } // Before returning mark the contract as initialized if is_initializer { - let mark_initialized = create_mark_as_initialized(); + let mark_initialized = create_mark_as_initialized(ty); func.def.body.statements.push(mark_initialized); } @@ -113,6 +112,92 @@ pub fn transform_function( Ok(()) } +// Generates a global struct containing the original (before transform_function gets executed) function abi that gets exported +// in the contract artifact after compilation. The abi will be later used to decode the function return values in the simulator. +pub fn export_fn_abi( + types: &mut Vec, + func: &NoirFunction, +) -> Result<(), AztecMacroError> { + let mut parameters_struct_source: Option<&str> = None; + + let struct_source = format!( + " + struct {}_parameters {{ + {} + }} + ", + func.name(), + func.parameters() + .iter() + .map(|param| { + let param_name = match param.pattern.clone() { + Pattern::Identifier(ident) => Ok(ident.0.contents), + _ => Err(AztecMacroError::CouldNotExportFunctionAbi { + span: Some(param.span), + secondary_message: Some( + "Only identifier patterns are supported".to_owned(), + ), + }), + }; + + format!( + "{}: {}", + param_name.unwrap(), + param.typ.typ.to_string().replace("plain::", "") + ) + }) + .collect::>() + .join(",\n"), + ); + + if !func.parameters().is_empty() { + parameters_struct_source = Some(&struct_source); + } + + let mut program = String::new(); + + let parameters = if let Some(parameters_struct_source) = parameters_struct_source { + program.push_str(parameters_struct_source); + format!("parameters: {}_parameters,\n", func.name()) + } else { + "".to_string() + }; + + let return_type_str = func.return_type().typ.to_string().replace("plain::", ""); + let return_type = if return_type_str != "()" { + format!("return_type: {},\n", return_type_str) + } else { + "".to_string() + }; + + let export_struct_source = format!( + " + #[abi(functions)] + struct {}_abi {{ + {}{} + }}", + func.name(), + parameters, + return_type + ); + + program.push_str(&export_struct_source); + + let (ast, errors) = parse_program(&program); + if !errors.is_empty() { + return Err(AztecMacroError::CouldNotExportFunctionAbi { + span: None, + secondary_message: Some( + format!("Failed to parse Noir macro code (struct {}_abi). 
This is either a bug in the compiler or the Noir macro code", func.name()) + ) + }); + } + + let sorted_ast = ast.into_sorted(); + types.extend(sorted_ast.types); + Ok(()) +} + /// Transform Unconstrained /// /// Inserts the following code at the beginning of an unconstrained function @@ -159,9 +244,10 @@ fn create_inputs(ty: &str) -> Param { /// ```noir /// assert_is_initialized(&mut context); /// ``` -fn create_init_check() -> Statement { +fn create_init_check(ty: &str) -> Statement { + let fname = format!("assert_is_initialized_{}", ty.to_case(Case::Snake)); make_statement(StatementKind::Expression(call( - variable_path(chained_dep!("aztec", "initializer", "assert_is_initialized")), + variable_path(chained_dep!("aztec", "initializer", &fname)), vec![mutable_reference("context")], ))) } @@ -172,9 +258,10 @@ fn create_init_check() -> Statement { /// ```noir /// mark_as_initialized(&mut context); /// ``` -fn create_mark_as_initialized() -> Statement { +fn create_mark_as_initialized(ty: &str) -> Statement { + let fname = format!("mark_as_initialized_{}", ty.to_case(Case::Snake)); make_statement(StatementKind::Expression(call( - variable_path(chained_dep!("aztec", "initializer", "mark_as_initialized")), + variable_path(chained_dep!("aztec", "initializer", &fname)), vec![mutable_reference("context")], ))) } @@ -205,17 +292,60 @@ fn create_internal_check(fname: &str) -> Statement { /// ```noir /// assert_initialization_matches_address_preimage(context); /// ``` -fn create_assert_initializer() -> Statement { +fn create_assert_initializer(ty: &str) -> Statement { + let fname = + format!("assert_initialization_matches_address_preimage_{}", ty.to_case(Case::Snake)); make_statement(StatementKind::Expression(call( - variable_path(chained_dep!( - "aztec", - "initializer", - "assert_initialization_matches_address_preimage" - )), + variable_path(chained_dep!("aztec", "initializer", &fname)), vec![variable("context")], ))) } +fn serialize_to_hasher( + identifier: &Ident, + typ: &UnresolvedTypeData, + hasher_name: &str, +) -> Option> { + let mut statements = Vec::new(); + + // Match the type to determine the padding to do + match typ { + // `{hasher_name}.extend_from_array({ident}.serialize())` + UnresolvedTypeData::Named(..) => { + statements.push(add_struct_to_hasher(identifier, hasher_name)); + } + UnresolvedTypeData::Array(_, arr_type) => { + statements.push(add_array_to_hasher(identifier, arr_type, hasher_name)); + } + // `{hasher_name}.push({ident})` + UnresolvedTypeData::FieldElement => { + statements.push(add_field_to_hasher(identifier, hasher_name)); + } + // Add the integer to the bounded vec, casted to a field + // `{hasher_name}.push({ident} as Field)` + UnresolvedTypeData::Integer(..) | UnresolvedTypeData::Bool => { + statements.push(add_cast_to_hasher(identifier, hasher_name)); + } + UnresolvedTypeData::String(..) => { + let (var_bytes, id) = str_to_bytes(identifier); + statements.push(var_bytes); + statements.push(add_array_to_hasher( + &id, + &UnresolvedType { + typ: UnresolvedTypeData::Integer( + Signedness::Unsigned, + noirc_frontend::IntegerBitSize::ThirtyTwo, + ), + span: None, + }, + hasher_name, + )) + } + _ => return None, + }; + Some(statements) +} + /// Creates the private context object to be accessed within the function, the parameters need to be extracted to be /// appended into the args hash object. 
/// @@ -223,80 +353,55 @@ fn create_assert_initializer() -> Statement { /// ```noir /// #[aztec(private)] /// fn foo(structInput: SomeStruct, arrayInput: [u8; 10], fieldInput: Field) -> Field { -/// // Create the bounded vec object -/// let mut serialized_args = BoundedVec::new(); +/// // Create the hasher object +/// let mut hasher = Hasher::new(); /// /// // struct inputs call serialize on them to add an array of fields -/// serialized_args.extend_from_array(structInput.serialize()); +/// hasher.add_multiple(structInput.serialize()); /// -/// // Array inputs are iterated over and each element is added to the bounded vec (as a field) +/// // Array inputs are iterated over and each element is added to the hasher (as a field) /// for i in 0..arrayInput.len() { -/// serialized_args.push(arrayInput[i] as Field); +/// hasher.add(arrayInput[i] as Field); /// } -/// // Field inputs are added to the bounded vec -/// serialized_args.push({ident}); +/// // Field inputs are added to the hasher +/// hasher.add({ident}); /// /// // Create the context /// // The inputs (injected by this `create_inputs`) and completed hash object are passed to the context -/// let mut context = PrivateContext::new(inputs, hash_args(serialized_args)); +/// let mut context = PrivateContext::new(inputs, hasher.hash()); /// } /// ``` fn create_context(ty: &str, params: &[Param]) -> Result, AztecMacroError> { - let mut injected_expressions: Vec = vec![]; + let mut injected_statements: Vec = vec![]; + + let hasher_name = "args_hasher"; - // `let mut serialized_args = BoundedVec::new();` - let let_serialized_args = mutable_assignment( - "serialized_args", // Assigned to + // `let mut args_hasher = Hasher::new();` + let let_hasher = mutable_assignment( + hasher_name, // Assigned to call( - variable_path(chained_dep!("std", "collections", "bounded_vec", "BoundedVec", "new")), // Path - vec![], // args + variable_path(chained_dep!("aztec", "hash", "ArgsHasher", "new")), // Path + vec![], // args ), ); - // Completes: `let mut serialized_args = BoundedVec::new();` - injected_expressions.push(let_serialized_args); + // Completes: `let mut args_hasher = Hasher::new();` + injected_statements.push(let_hasher); - // Iterate over each of the function parameters, adding to them to the bounded vec + // Iterate over each of the function parameters, adding to them to the hasher for Param { pattern, typ, span, .. } in params { match pattern { Pattern::Identifier(identifier) => { // Match the type to determine the padding to do let unresolved_type = &typ.typ; - let expression = match unresolved_type { - // `serialized_args.extend_from_array({ident}.serialize())` - UnresolvedTypeData::Named(..) => add_struct_to_serialized_args(identifier), - UnresolvedTypeData::Array(_, arr_type) => { - add_array_to_serialized_args(identifier, arr_type) - } - // `serialized_args.push({ident})` - UnresolvedTypeData::FieldElement => add_field_to_serialized_args(identifier), - // Add the integer to the serialized args, casted to a field - // `serialized_args.push({ident} as Field)` - UnresolvedTypeData::Integer(..) | UnresolvedTypeData::Bool => { - add_cast_to_serialized_args(identifier) - } - UnresolvedTypeData::String(..) 
=> { - let (var_bytes, id) = str_to_bytes(identifier); - injected_expressions.push(var_bytes); - add_array_to_serialized_args( - &id, - &UnresolvedType { - typ: UnresolvedTypeData::Integer( - Signedness::Unsigned, - noirc_frontend::IntegerBitSize::ThirtyTwo, - ), - span: None, - }, - ) - } - _ => { - return Err(AztecMacroError::UnsupportedFunctionArgumentType { + injected_statements.extend( + serialize_to_hasher(identifier, unresolved_type, hasher_name).ok_or_else( + || AztecMacroError::UnsupportedFunctionArgumentType { typ: unresolved_type.clone(), span: *span, - }) - } - }; - injected_expressions.push(expression); + }, + )?, + ); } _ => todo!(), // Maybe unreachable? } @@ -304,10 +409,11 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac // Create the inputs to the context let inputs_expression = variable("inputs"); - // `hash_args(serialized_args)` - let hash_call = call( - variable_path(chained_dep!("aztec", "hash", "hash_args")), // variable - vec![variable("serialized_args")], // args + // `args_hasher.hash()` + let hash_call = method_call( + variable(hasher_name), // variable + "hash", // method name + vec![], // args ); let path_snippet = ty.to_case(Case::Snake); // e.g. private_context @@ -320,10 +426,10 @@ fn create_context(ty: &str, params: &[Param]) -> Result, AztecMac vec![inputs_expression, hash_call], // args ), ); - injected_expressions.push(let_context); + injected_statements.push(let_context); // Return all expressions that will be injected by the hasher - Ok(injected_expressions) + Ok(injected_statements) } /// Creates the private context object to be accessed within the function, the parameters need to be extracted to be @@ -360,53 +466,86 @@ fn create_context_avm() -> Result, AztecMacroError> { /// Abstract Return Type /// -/// This function intercepts the function's current return type and replaces it with pushes -/// To the kernel +/// This function intercepts the function's current return type and replaces it with pushes to a hasher +/// that will be used to generate the returns hash for the kernel. /// /// The replaced code: /// ```noir /// /// Before /// #[aztec(private)] -/// fn foo() -> protocol_types::abis::private_circuit_public_inputs::PrivateCircuitPublicInputs { -/// // ... -/// let my_return_value: Field = 10; -/// context.return_values.push(my_return_value); -/// } -/// -/// /// After -/// #[aztec(private)] /// fn foo() -> Field { /// // ... /// let my_return_value: Field = 10; /// my_return_value /// } +/// +/// /// After +/// #[aztec(private)] +/// fn foo() -> protocol_types::abis::private_circuit_public_inputs::PrivateCircuitPublicInputs { +/// // ... +/// let my_return_value: Field = 10; +/// let macro__returned__values = my_return_value; +/// let mut returns_hasher = ArgsHasher::new(); +/// returns_hasher.add(macro__returned__values); +/// context.set_return_hash(returns_hasher); +/// } /// ``` -/// Similarly; Structs will be pushed to the context, after serialize() is called on them. -/// Arrays will be iterated over and each element will be pushed to the context. -/// Any primitive type that can be cast will be casted to a field and pushed to the context. -fn abstract_return_values(func: &NoirFunction) -> Option { +/// Similarly; Structs will be pushed to the hasher, after serialize() is called on them. +/// Arrays will be iterated over and each element will be pushed to the hasher. +/// Any primitive type that can be cast will be casted to a field and pushed to the hasher. 
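// A hedged illustration (not part of this diff) of the same rewrite for a hypothetical array
// return `fn bar() -> [Field; 2]`, written out as the Noir it is expected to expand to; per the
// doc comment above, array elements are added to the returns hasher one by one, and
// `some_array` stands in for the function's final expression.
const EXAMPLE_ARRAY_RETURN_EXPANSION: &str = r#"
    let macro__returned__values: [Field; 2] = some_array;
    let mut returns_hasher = dep::aztec::hash::ArgsHasher::new();
    for i in 0..macro__returned__values.len() {
        returns_hasher.add(macro__returned__values[i] as Field);
    }
    context.set_return_hash(returns_hasher);
"#;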
+fn abstract_return_values(func: &NoirFunction) -> Result>, AztecMacroError> { let current_return_type = func.return_type().typ; - let last_statement = func.def.body.statements.last()?; - // TODO: (length, type) => We can limit the size of the array returned to be limited by kernel size - // Doesn't need done until we have settled on a kernel size + // Short circuit if the function doesn't return anything + match current_return_type { + UnresolvedTypeData::Unit | UnresolvedTypeData::Unspecified => return Ok(None), + _ => (), + } + + let Some(last_statement) = func.def.body.statements.last() else { + return Ok(None); + }; + // TODO: support tuples here and in inputs -> convert into an issue // Check if the return type is an expression, if it is, we can handle it match last_statement { Statement { kind: StatementKind::Expression(expression), .. } => { - match current_return_type { - // Call serialize on structs, push the whole array, calling push_array - UnresolvedTypeData::Named(..) => Some(make_struct_return_type(expression.clone())), - UnresolvedTypeData::Array(..) => Some(make_array_return_type(expression.clone())), - // Cast these types to a field before pushing - UnresolvedTypeData::Bool | UnresolvedTypeData::Integer(..) => { - Some(make_castable_return_type(expression.clone())) - } - UnresolvedTypeData::FieldElement => Some(make_return_push(expression.clone())), - _ => None, - } + let return_value_name = "macro__returned__values"; + let hasher_name = "returns_hasher"; + + let mut replacement_statements = vec![ + assignment_with_type( + return_value_name, // Assigned to + current_return_type.clone(), + expression.clone(), + ), + mutable_assignment( + hasher_name, // Assigned to + call( + variable_path(chained_dep!("aztec", "hash", "ArgsHasher", "new")), // Path + vec![], // args + ), + ), + ]; + + let serialization_statements = + serialize_to_hasher(&ident(return_value_name), ¤t_return_type, hasher_name) + .ok_or_else(|| AztecMacroError::UnsupportedFunctionReturnType { + typ: current_return_type.clone(), + span: func.return_type().span.unwrap_or_default(), + })?; + + replacement_statements.extend(serialization_statements); + + replacement_statements.push(make_statement(StatementKind::Semi(method_call( + variable("context"), + "set_return_hash", + vec![variable(hasher_name)], + )))); + + Ok(Some(replacement_statements)) } - _ => None, + _ => Ok(None), } } @@ -455,86 +594,6 @@ fn abstract_storage(typ: &str, unconstrained: bool) -> Statement { ) } -/// Context Return Values -/// -/// Creates an instance to the context return values -/// ```noir -/// `context.return_values` -/// ``` -fn context_return_values() -> Expression { - member_access("context", "return_values") -} - -/// Make return Push -/// -/// Translates to: -/// `context.return_values.push({push_value})` -fn make_return_push(push_value: Expression) -> Statement { - make_statement(StatementKind::Semi(method_call( - context_return_values(), - "push", - vec![push_value], - ))) -} - -/// Make Return push array -/// -/// Translates to: -/// `context.return_values.extend_from_array({push_value})` -fn make_return_extend_from_array(push_value: Expression) -> Statement { - make_statement(StatementKind::Semi(method_call( - context_return_values(), - "extend_from_array", - vec![push_value], - ))) -} - -/// Make struct return type -/// -/// Translates to: -/// ```noir -/// `context.return_values.extend_from_array({push_value}.serialize())` -fn make_struct_return_type(expression: Expression) -> Statement { - let serialized_call = 
method_call( - expression, // variable - "serialize", // method name - vec![], // args - ); - make_return_extend_from_array(serialized_call) -} - -/// Make array return type -/// -/// Translates to: -/// ```noir -/// for i in 0..{ident}.len() { -/// context.return_values.push({ident}[i] as Field) -/// } -/// ``` -fn make_array_return_type(expression: Expression) -> Statement { - let inner_cast_expression = - cast(index_array_variable(expression.clone(), "i"), UnresolvedTypeData::FieldElement); - let assignment = make_statement(StatementKind::Semi(method_call( - context_return_values(), // variable - "push", // method name - vec![inner_cast_expression], - ))); - - create_loop_over(expression, vec![assignment]) -} - -/// Castable return type -/// -/// Translates to: -/// ```noir -/// context.return_values.push({ident} as Field) -/// ``` -fn make_castable_return_type(expression: Expression) -> Statement { - // Cast these types to a field before pushing - let cast_expression = cast(expression, UnresolvedTypeData::FieldElement); - make_return_push(cast_expression) -} - /// Create Return Type /// /// Public functions return protocol_types::abis::public_circuit_public_inputs::PublicCircuitPublicInputs while @@ -591,11 +650,11 @@ fn create_context_finish() -> Statement { } // -// Methods to create hash_args inputs +// Methods to create hasher inputs // -fn add_struct_to_serialized_args(identifier: &Ident) -> Statement { - // If this is a struct, we call serialize and add the array to the serialized args +fn add_struct_to_hasher(identifier: &Ident, hasher_name: &str) -> Statement { + // If this is a struct, we call serialize and add the array to the hasher let serialized_call = method_call( variable_path(path(identifier.clone())), // variable "serialize", // method name @@ -603,9 +662,9 @@ fn add_struct_to_serialized_args(identifier: &Ident) -> Statement { ); make_statement(StatementKind::Semi(method_call( - variable("serialized_args"), // variable - "extend_from_array", // method name - vec![serialized_call], // args + variable(hasher_name), // variable + "add_multiple", // method name + vec![serialized_call], // args ))) } @@ -625,7 +684,7 @@ fn str_to_bytes(identifier: &Ident) -> (Statement, Ident) { } fn create_loop_over(var: Expression, loop_body: Vec) -> Statement { - // If this is an array of primitive types (integers / fields) we can add them each to the serialized args + // If this is an array of primitive types (integers / fields) we can add them each to the hasher // casted to a field let span = var.span; @@ -638,7 +697,7 @@ fn create_loop_over(var: Expression, loop_body: Vec) -> Statement { // What will be looped over - // - `serialized_args.push({ident}[i] as Field)` + // - `hasher.add({ident}[i] as Field)` let for_loop_block = expression(ExpressionKind::Block(BlockExpression { statements: loop_body })); @@ -657,66 +716,70 @@ fn create_loop_over(var: Expression, loop_body: Vec) -> Statement { })) } -fn add_array_to_serialized_args(identifier: &Ident, arr_type: &UnresolvedType) -> Statement { - // If this is an array of primitive types (integers / fields) we can add them each to the serialized_args +fn add_array_to_hasher( + identifier: &Ident, + arr_type: &UnresolvedType, + hasher_name: &str, +) -> Statement { + // If this is an array of primitive types (integers / fields) we can add them each to the hasher // casted to a field // Wrap in the semi thing - does that mean ended with semi colon? 
- // `serialized_args.push({ident}[i] as Field)` + // `hasher.add({ident}[i] as Field)` let arr_index = index_array(identifier.clone(), "i"); - let (add_expression, vec_method_name) = match arr_type.typ { + let (add_expression, hasher_method_name) = match arr_type.typ { UnresolvedTypeData::Named(..) => { - let vec_method_name = "extend_from_array".to_owned(); + let hasher_method_name = "add_multiple".to_owned(); let call = method_call( // All serialize on each element arr_index, // variable "serialize", // method name vec![], // args ); - (call, vec_method_name) + (call, hasher_method_name) } _ => { - let vec_method_name = "push".to_owned(); + let hasher_method_name = "add".to_owned(); let call = cast( arr_index, // lhs - `ident[i]` UnresolvedTypeData::FieldElement, // cast to - `as Field` ); - (call, vec_method_name) + (call, hasher_method_name) } }; let block_statement = make_statement(StatementKind::Semi(method_call( - variable("serialized_args"), // variable - &vec_method_name, // method name + variable(hasher_name), // variable + &hasher_method_name, // method name vec![add_expression], ))); create_loop_over(variable_ident(identifier.clone()), vec![block_statement]) } -fn add_field_to_serialized_args(identifier: &Ident) -> Statement { - // `serialized_args.push({ident})` +fn add_field_to_hasher(identifier: &Ident, hasher_name: &str) -> Statement { + // `hasher.add({ident})` let ident = variable_path(path(identifier.clone())); make_statement(StatementKind::Semi(method_call( - variable("serialized_args"), // variable - "push", // method name - vec![ident], // args + variable(hasher_name), // variable + "add", // method name + vec![ident], // args ))) } -fn add_cast_to_serialized_args(identifier: &Ident) -> Statement { - // `serialized_args.push({ident} as Field)` +fn add_cast_to_hasher(identifier: &Ident, hasher_name: &str) -> Statement { + // `hasher.add({ident} as Field)` // `{ident} as Field` let cast_operation = cast( variable_path(path(identifier.clone())), // lhs UnresolvedTypeData::FieldElement, // rhs ); - // `serialized_args.push({ident} as Field)` + // `hasher.add({ident} as Field)` make_statement(StatementKind::Semi(method_call( - variable("serialized_args"), // variable - "push", // method name - vec![cast_operation], // args + variable(hasher_name), // variable + "add", // method name + vec![cast_operation], // args ))) } diff --git a/aztec_macros/src/transforms/mod.rs b/aztec_macros/src/transforms/mod.rs index 5a454c75148..2a6fef7647f 100644 --- a/aztec_macros/src/transforms/mod.rs +++ b/aztec_macros/src/transforms/mod.rs @@ -1,4 +1,5 @@ pub mod compute_note_hash_and_nullifier; +pub mod contract_interface; pub mod events; pub mod functions; pub mod note_interface; diff --git a/aztec_macros/src/transforms/note_interface.rs b/aztec_macros/src/transforms/note_interface.rs index 01d0272088b..4b72759a5db 100644 --- a/aztec_macros/src/transforms/note_interface.rs +++ b/aztec_macros/src/transforms/note_interface.rs @@ -1,7 +1,11 @@ use noirc_errors::Span; use noirc_frontend::{ - parse_program, parser::SortedModule, ItemVisibility, NoirFunction, NoirStruct, PathKind, - TraitImplItem, TypeImpl, UnresolvedTypeData, UnresolvedTypeExpression, + graph::CrateId, + macros_api::{FileId, HirContext, HirExpression, HirLiteral, HirStatement}, + parse_program, + parser::SortedModule, + ItemVisibility, LetStatement, NoirFunction, NoirStruct, PathKind, TraitImplItem, Type, + TypeImpl, UnresolvedTypeData, UnresolvedTypeExpression, }; use regex::Regex; @@ -12,6 +16,7 @@ use crate::{ 
check_trait_method_implemented, ident, ident_path, is_custom_attribute, make_type, }, errors::AztecMacroError, + hir_utils::{fetch_notes, get_contract_module_data, inject_global}, }, }; @@ -24,7 +29,7 @@ pub fn generate_note_interface_impl(module: &mut SortedModule) -> Result<(), Azt .iter_mut() .filter(|typ| typ.attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(note)"))); - let mut note_properties_structs = vec![]; + let mut structs_to_inject = vec![]; for note_struct in annotated_note_structs { // Look for the NoteInterface trait implementation for the note @@ -80,6 +85,7 @@ pub fn generate_note_interface_impl(module: &mut SortedModule) -> Result<(), Azt )), }), }?; + let note_type_id = note_type_id(¬e_type); // Automatically inject the header field if it's not present let (header_field_name, _) = if let Some(existing_header) = @@ -138,7 +144,7 @@ pub fn generate_note_interface_impl(module: &mut SortedModule) -> Result<(), Azt &header_field_name.0.contents, note_interface_impl_span, )?; - note_properties_structs.push(note_properties_struct); + structs_to_inject.push(note_properties_struct); let note_properties_fn = generate_note_properties_fn( ¬e_type, ¬e_fields, @@ -167,7 +173,7 @@ pub fn generate_note_interface_impl(module: &mut SortedModule) -> Result<(), Azt if !check_trait_method_implemented(trait_impl, "get_note_type_id") { let get_note_type_id_fn = - generate_note_get_type_id(¬e_type, note_interface_impl_span)?; + generate_note_get_type_id(¬e_type_id, note_interface_impl_span)?; trait_impl.items.push(TraitImplItem::Function(get_note_type_id_fn)); } @@ -178,7 +184,7 @@ pub fn generate_note_interface_impl(module: &mut SortedModule) -> Result<(), Azt } } - module.types.extend(note_properties_structs); + module.types.extend(structs_to_inject); Ok(()) } @@ -245,19 +251,16 @@ fn generate_note_set_header( // Automatically generate the note type id getter method. The id itself its calculated as the concatenation // of the conversion of the characters in the note's struct name to unsigned integers. fn generate_note_get_type_id( - note_type: &str, + note_type_id: &str, impl_span: Option, ) -> Result { - // TODO(#4519) Improve automatic note id generation and assignment - let note_id = - note_type.chars().map(|c| (c as u32).to_string()).collect::>().join(""); let function_source = format!( " fn get_note_type_id() -> Field {{ {} }} ", - note_id + note_type_id ) .to_string(); @@ -443,6 +446,34 @@ fn generate_compute_note_content_hash( Ok(noir_fn) } +fn generate_note_exports_global( + note_type: &str, + note_type_id: &str, +) -> Result { + let struct_source = format!( + " + #[abi(notes)] + global {0}_EXPORTS: (Field, str<{1}>) = ({2},\"{0}\"); + ", + note_type, + note_type_id.len(), + note_type_id + ) + .to_string(); + + let (global_ast, errors) = parse_program(&struct_source); + if !errors.is_empty() { + dbg!(errors); + return Err(AztecMacroError::CouldNotImplementNoteInterface { + secondary_message: Some(format!("Failed to parse Noir macro code (struct {}Exports). This is either a bug in the compiler or the Noir macro code", note_type)), + span: None + }); + } + + let mut global_ast = global_ast.into_sorted(); + Ok(global_ast.globals.pop().unwrap()) +} + // Source code generator functions. These utility methods produce Noir code as strings, that are then parsed and added to the AST. 
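The note type id used by `generate_note_get_type_id` and embedded in the new `{NoteType}_EXPORTS` global above is simply the concatenation of the decimal code points of the note struct's name; the inline computation removed above is factored into a `note_type_id` helper later in this file's diff and is still flagged for improvement by TODO #4519. A standalone illustration, with `MyNote` as a hypothetical note name:

```rust
// Mirrors the id scheme from this diff; "MyNote" is only an example name.
fn note_type_id(note_type: &str) -> String {
    note_type.chars().map(|c| (c as u32).to_string()).collect::<Vec<String>>().join("")
}

fn main() {
    // 'M' = 77, 'y' = 121, 'N' = 78, 'o' = 111, 't' = 116, 'e' = 101
    assert_eq!(note_type_id("MyNote"), "7712178111116101");
}
```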
fn generate_note_properties_struct_source( @@ -497,12 +528,12 @@ fn generate_note_properties_fn_source( .join(", "); format!( " - pub fn properties() -> {}Properties {{ - {}Properties {{ - {} + pub fn properties() -> {0}Properties {{ + {0}Properties {{ + {1} }} }}", - note_type, note_type, note_property_selectors + note_type, note_property_selectors ) .to_string() } @@ -581,3 +612,85 @@ fn generate_note_deserialize_content_source( ) .to_string() } + +// Utility function to generate the note type id as a Field +fn note_type_id(note_type: &str) -> String { + // TODO(#4519) Improve automatic note id generation and assignment + note_type.chars().map(|c| (c as u32).to_string()).collect::>().join("") +} + +pub fn inject_note_exports( + crate_id: &CrateId, + context: &mut HirContext, +) -> Result<(), (AztecMacroError, FileId)> { + if let Some((_, module_id, file_id)) = get_contract_module_data(context, crate_id) { + let notes = fetch_notes(context); + + for (_, note) in notes { + let func_id = context + .def_interner + .lookup_method( + &Type::Struct(context.def_interner.get_struct(note.borrow().id), vec![]), + note.borrow().id, + "get_note_type_id", + false, + ) + .ok_or(( + AztecMacroError::CouldNotExportStorageLayout { + span: None, + secondary_message: Some(format!( + "Could not retrieve get_note_type_id function for note {}", + note.borrow().name.0.contents + )), + }, + file_id, + ))?; + let init_function = + context.def_interner.function(&func_id).block(&context.def_interner); + let init_function_statement_id = init_function.statements().first().ok_or(( + AztecMacroError::CouldNotExportStorageLayout { + span: None, + secondary_message: Some(format!( + "Could not retrieve note id statement from function for note {}", + note.borrow().name.0.contents + )), + }, + file_id, + ))?; + let note_id_statement = context.def_interner.statement(init_function_statement_id); + + let note_id_value = match note_id_statement { + HirStatement::Expression(expression_id) => { + match context.def_interner.expression(&expression_id) { + HirExpression::Literal(HirLiteral::Integer(value, _)) => Ok(value), + _ => Err(( + AztecMacroError::CouldNotExportStorageLayout { + span: None, + secondary_message: Some( + "note_id statement must be a literal expression".to_string(), + ), + }, + file_id, + )), + } + } + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some( + "note_id statement must be an expression".to_string(), + ), + }, + file_id, + )), + }?; + let global = generate_note_exports_global( + ¬e.borrow().name.0.contents, + ¬e_id_value.to_string(), + ) + .map_err(|err| (err, file_id))?; + + inject_global(crate_id, context, global, module_id, file_id); + } + } + Ok(()) +} diff --git a/aztec_macros/src/transforms/storage.rs b/aztec_macros/src/transforms/storage.rs index 10f44d01bb4..9135be32443 100644 --- a/aztec_macros/src/transforms/storage.rs +++ b/aztec_macros/src/transforms/storage.rs @@ -1,4 +1,4 @@ -use std::borrow::{Borrow, BorrowMut}; +use std::borrow::Borrow; use noirc_errors::Span; use noirc_frontend::{ @@ -7,33 +7,53 @@ use noirc_frontend::{ FieldElement, FileId, HirContext, HirExpression, HirLiteral, HirStatement, NodeInterner, }, node_interner::{TraitId, TraitImplKind}, + parse_program, parser::SortedModule, + token::SecondaryAttribute, BlockExpression, Expression, ExpressionKind, FunctionDefinition, Ident, Literal, NoirFunction, - PathKind, Pattern, StatementKind, Type, TypeImpl, UnresolvedType, UnresolvedTypeData, + NoirStruct, PathKind, Pattern, StatementKind, Type, 
TypeImpl, UnresolvedType, + UnresolvedTypeData, }; use crate::{ chained_dep, chained_path, utils::{ ast_utils::{ - call, expression, ident, ident_path, lambda, make_statement, make_type, pattern, - return_type, variable, variable_path, + call, expression, ident, ident_path, is_custom_attribute, lambda, make_statement, + make_type, pattern, return_type, variable, variable_path, }, errors::AztecMacroError, - hir_utils::{collect_crate_structs, collect_traits}, + hir_utils::{collect_crate_structs, collect_traits, get_contract_module_data}, }, }; // Check to see if the user has defined a storage struct -pub fn check_for_storage_definition(module: &SortedModule) -> bool { - module.types.iter().any(|r#struct| r#struct.name.0.contents == "Storage") +pub fn check_for_storage_definition( + module: &SortedModule, +) -> Result, AztecMacroError> { + let result: Vec<&NoirStruct> = module + .types + .iter() + .filter(|r#struct| { + r#struct.attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(storage)")) + }) + .collect(); + if result.len() > 1 { + return Err(AztecMacroError::MultipleStorageDefinitions { + span: result.first().map(|res| res.name.span()), + }); + } + Ok(result.iter().map(|&r#struct| r#struct.name.0.contents.clone()).next()) } // Check to see if the user has defined a storage struct -pub fn check_for_storage_implementation(module: &SortedModule) -> bool { +pub fn check_for_storage_implementation( + module: &SortedModule, + storage_struct_name: &String, +) -> bool { module.impls.iter().any(|r#impl| match &r#impl.object_type.typ { UnresolvedTypeData::Named(path, _, _) => { - path.segments.last().is_some_and(|segment| segment.0.contents == "Storage") + path.segments.last().is_some_and(|segment| segment.0.contents == *storage_struct_name) } _ => false, }) @@ -117,9 +137,15 @@ pub fn generate_storage_field_constructor( /// /// Storage slots are generated as 0 and will be populated using the information from the HIR /// at a later stage. 
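Before the generalized `generate_storage_implementation` below, a hypothetical sketch of how the attribute-based storage helpers in this file plausibly compose. Only the four helper signatures come from this diff; the `process_storage` wrapper and its call order are assumptions for illustration, since the real call site (the contract transform) is outside this hunk.

```rust
// Hypothetical wrapper, not the committed call site.
fn process_storage(module: &mut SortedModule) -> Result<(), AztecMacroError> {
    // Errs with MultipleStorageDefinitions if more than one struct carries #[aztec(storage)].
    if let Some(storage_struct_name) = check_for_storage_definition(module)? {
        // Only inject `impl <Name> { fn init(...) }` when the contract did not write one itself.
        if !check_for_storage_implementation(module, &storage_struct_name) {
            generate_storage_implementation(module, &storage_struct_name)?;
        }
        // Emits the `#[abi(storage)] global STORAGE_LAYOUT` described further down in this diff.
        generate_storage_layout(module, storage_struct_name)?;
    }
    Ok(())
}
```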
-pub fn generate_storage_implementation(module: &mut SortedModule) -> Result<(), AztecMacroError> { - let definition = - module.types.iter().find(|r#struct| r#struct.name.0.contents == "Storage").unwrap(); +pub fn generate_storage_implementation( + module: &mut SortedModule, + storage_struct_name: &String, +) -> Result<(), AztecMacroError> { + let definition = module + .types + .iter() + .find(|r#struct| r#struct.name.0.contents == *storage_struct_name) + .unwrap(); let slot_zero = expression(ExpressionKind::Literal(Literal::Integer( FieldElement::from(i128::from(0)), @@ -136,7 +162,7 @@ pub fn generate_storage_implementation(module: &mut SortedModule) -> Result<(), .collect(); let storage_constructor_statement = make_statement(StatementKind::Expression(expression( - ExpressionKind::constructor((chained_path!("Storage"), field_constructors)), + ExpressionKind::constructor((chained_path!(storage_struct_name), field_constructors)), ))); let init = NoirFunction::normal(FunctionDefinition::normal( @@ -157,7 +183,7 @@ pub fn generate_storage_implementation(module: &mut SortedModule) -> Result<(), let storage_impl = TypeImpl { object_type: UnresolvedType { - typ: UnresolvedTypeData::Named(chained_path!("Storage"), vec![], true), + typ: UnresolvedTypeData::Named(chained_path!(storage_struct_name), vec![], true), span: Some(Span::default()), }, type_span: Span::default(), @@ -239,16 +265,55 @@ pub fn assign_storage_slots( context: &mut HirContext, ) -> Result<(), (AztecMacroError, FileId)> { let traits: Vec<_> = collect_traits(context); - for struct_id in collect_crate_structs(crate_id, context) { - let interner: &mut NodeInterner = context.def_interner.borrow_mut(); - let r#struct = interner.get_struct(struct_id); - let file_id = r#struct.borrow().location.file; - if r#struct.borrow().name.0.contents == "Storage" && r#struct.borrow().id.krate().is_root() + if let Some((_, _, file_id)) = get_contract_module_data(context, crate_id) { + let maybe_storage_struct = + collect_crate_structs(crate_id, context).iter().find_map(|struct_id| { + let r#struct = context.def_interner.get_struct(*struct_id); + let attributes = context.def_interner.struct_attributes(struct_id); + if attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(storage)")) + && r#struct.borrow().id.krate() == *crate_id + { + Some(r#struct) + } else { + None + } + }); + + let maybe_storage_layout = + context.def_interner.get_all_globals().iter().find_map(|global_info| { + let statement = context.def_interner.get_global_let_statement(global_info.id); + if statement.clone().is_some_and(|stmt| { + stmt.attributes + .iter() + .any(|attr| *attr == SecondaryAttribute::Abi("storage".to_string())) + }) { + let expr = context.def_interner.expression(&statement.unwrap().expression); + match expr { + HirExpression::Constructor(hir_constructor_expression) => { + if hir_constructor_expression.r#type.borrow().id.krate() == *crate_id { + Some(hir_constructor_expression) + } else { + None + } + } + _ => None, + } + } else { + None + } + }); + + if let (Some(storage_struct), Some(storage_layout)) = + (maybe_storage_struct, maybe_storage_layout) { - let init_id = interner + let init_id = context + .def_interner .lookup_method( - &Type::Struct(interner.get_struct(struct_id), vec![]), - struct_id, + &Type::Struct( + context.def_interner.get_struct(storage_struct.borrow().id), + vec![], + ), + storage_struct.borrow().id, "init", false, ) @@ -260,28 +325,33 @@ pub fn assign_storage_slots( }, file_id, ))?; - let init_function = 
interner.function(&init_id).block(interner); + let init_function = + context.def_interner.function(&init_id).block(&context.def_interner); let init_function_statement_id = init_function.statements().first().ok_or(( AztecMacroError::CouldNotAssignStorageSlots { secondary_message: Some("Init storage statement not found".to_string()), }, file_id, ))?; - let storage_constructor_statement = interner.statement(init_function_statement_id); + let storage_constructor_statement = + context.def_interner.statement(init_function_statement_id); let storage_constructor_expression = match storage_constructor_statement { HirStatement::Expression(expression_id) => { - match interner.expression(&expression_id) { - HirExpression::Constructor(hir_constructor_expression) => { - Ok(hir_constructor_expression) - } - _ => Err((AztecMacroError::CouldNotAssignStorageSlots { + match context.def_interner.expression(&expression_id) { + HirExpression::Constructor(hir_constructor_expression) => { + Ok(hir_constructor_expression) + } + _ => Err(( + AztecMacroError::CouldNotAssignStorageSlots { secondary_message: Some( "Storage constructor statement must be a constructor expression" .to_string(), ), - }, file_id)) - } + }, + file_id, + )), + } } _ => Err(( AztecMacroError::CouldNotAssignStorageSlots { @@ -295,9 +365,9 @@ pub fn assign_storage_slots( let mut storage_slot: u64 = 1; for (index, (_, expr_id)) in storage_constructor_expression.fields.iter().enumerate() { - let fields = r#struct.borrow().get_fields(&[]); - let (_, field_type) = fields.get(index).unwrap(); - let new_call_expression = match interner.expression(expr_id) { + let fields = storage_struct.borrow().get_fields(&[]); + let (field_name, field_type) = fields.get(index).unwrap(); + let new_call_expression = match context.def_interner.expression(expr_id) { HirExpression::Call(hir_call_expression) => Ok(hir_call_expression), _ => Err(( AztecMacroError::CouldNotAssignStorageSlots { @@ -310,7 +380,8 @@ pub fn assign_storage_slots( )), }?; - let slot_arg_expression = interner.expression(&new_call_expression.arguments[1]); + let slot_arg_expression = + context.def_interner.expression(&new_call_expression.arguments[1]); let current_storage_slot = match slot_arg_expression { HirExpression::Literal(HirLiteral::Integer(slot, _)) => Ok(slot.to_u128()), @@ -325,22 +396,123 @@ pub fn assign_storage_slots( )), }?; - if current_storage_slot != 0 { - continue; - } + let storage_layout_field = + storage_layout.fields.iter().find(|field| field.0 .0.contents == *field_name); - let type_serialized_len = get_serialized_length(&traits, field_type, interner) - .map_err(|err| (err, file_id))?; - interner.update_expression(new_call_expression.arguments[1], |expr| { + let storage_layout_slot_expr_id = + if let Some((_, expr_id)) = storage_layout_field { + let expr = context.def_interner.expression(expr_id); + if let HirExpression::Constructor(storage_layout_field_storable_expr) = expr + { + storage_layout_field_storable_expr.fields.iter().find_map( + |(field, expr_id)| { + if field.0.contents == "slot" { + Some(*expr_id) + } else { + None + } + }, + ) + } else { + None + } + } else { + None + } + .ok_or(( + AztecMacroError::CouldNotAssignStorageSlots { + secondary_message: Some(format!( + "Storage layout field ({}) not found or has an incorrect type", + field_name + )), + }, + file_id, + ))?; + + let new_storage_slot = if current_storage_slot == 0 { + u128::from(storage_slot) + } else { + current_storage_slot + }; + + let type_serialized_len = + get_serialized_length(&traits, 
field_type, &context.def_interner) + .map_err(|err| (err, file_id))?; + + context.def_interner.update_expression(new_call_expression.arguments[1], |expr| { *expr = HirExpression::Literal(HirLiteral::Integer( - FieldElement::from(u128::from(storage_slot)), + FieldElement::from(new_storage_slot), false, - )); + )) + }); + + context.def_interner.update_expression(storage_layout_slot_expr_id, |expr| { + *expr = HirExpression::Literal(HirLiteral::Integer( + FieldElement::from(new_storage_slot), + false, + )) }); storage_slot += type_serialized_len; } } } + + Ok(()) +} + +pub fn generate_storage_layout( + module: &mut SortedModule, + storage_struct_name: String, +) -> Result<(), AztecMacroError> { + let definition = module + .types + .iter() + .find(|r#struct| r#struct.name.0.contents == *storage_struct_name) + .unwrap(); + + let mut generic_args = vec![]; + let mut storable_fields = vec![]; + let mut storable_fields_impl = vec![]; + + definition.fields.iter().enumerate().for_each(|(index, (field_ident, field_type))| { + storable_fields.push(format!("{}: dep::aztec::prelude::Storable", field_ident, index)); + generic_args.push(format!("N{}", index)); + storable_fields_impl.push(format!( + "{}: dep::aztec::prelude::Storable {{ slot: 0, typ: \"{}\" }}", + field_ident, + field_type.to_string().replace("plain::", "") + )); + }); + + let storage_fields_source = format!( + " + struct StorageLayout<{}> {{ + {} + }} + + #[abi(storage)] + global STORAGE_LAYOUT = StorageLayout {{ + {} + }}; + ", + generic_args.join(", "), + storable_fields.join(",\n"), + storable_fields_impl.join(",\n") + ); + + let (struct_ast, errors) = parse_program(&storage_fields_source); + if !errors.is_empty() { + dbg!(errors); + return Err(AztecMacroError::CouldNotImplementNoteInterface { + secondary_message: Some("Failed to parse Noir macro code (struct StorageLayout). 
This is either a bug in the compiler or the Noir macro code".to_string()), + span: None + }); + } + + let mut struct_ast = struct_ast.into_sorted(); + module.types.push(struct_ast.types.pop().unwrap()); + module.globals.push(struct_ast.globals.pop().unwrap()); + Ok(()) } diff --git a/aztec_macros/src/utils/ast_utils.rs b/aztec_macros/src/utils/ast_utils.rs index bdcbad646c2..1731dfab49c 100644 --- a/aztec_macros/src/utils/ast_utils.rs +++ b/aztec_macros/src/utils/ast_utils.rs @@ -2,9 +2,8 @@ use noirc_errors::{Span, Spanned}; use noirc_frontend::{ token::SecondaryAttribute, BinaryOpKind, CallExpression, CastExpression, Expression, ExpressionKind, FunctionReturnType, Ident, IndexExpression, InfixExpression, Lambda, - LetStatement, MemberAccessExpression, MethodCallExpression, NoirTraitImpl, Path, Pattern, - PrefixExpression, Statement, StatementKind, TraitImplItem, UnaryOp, UnresolvedType, - UnresolvedTypeData, + LetStatement, MethodCallExpression, NoirTraitImpl, Path, Pattern, PrefixExpression, Statement, + StatementKind, TraitImplItem, UnaryOp, UnresolvedType, UnresolvedTypeData, }; // @@ -67,6 +66,7 @@ pub fn mutable_assignment(name: &str, assigned_to: Expression) -> Statement { pattern: mutable(name), r#type: make_type(UnresolvedTypeData::Unspecified), expression: assigned_to, + attributes: vec![], })) } @@ -78,20 +78,22 @@ pub fn mutable_reference(variable_name: &str) -> Expression { } pub fn assignment(name: &str, assigned_to: Expression) -> Statement { + assignment_with_type(name, UnresolvedTypeData::Unspecified, assigned_to) +} + +pub fn assignment_with_type( + name: &str, + typ: UnresolvedTypeData, + assigned_to: Expression, +) -> Statement { make_statement(StatementKind::Let(LetStatement { pattern: pattern(name), - r#type: make_type(UnresolvedTypeData::Unspecified), + r#type: make_type(typ), expression: assigned_to, + attributes: vec![], })) } -pub fn member_access(lhs: &str, rhs: &str) -> Expression { - expression(ExpressionKind::MemberAccess(Box::new(MemberAccessExpression { - lhs: variable(lhs), - rhs: ident(rhs), - }))) -} - pub fn return_type(path: Path) -> FunctionReturnType { let ty = make_type(UnresolvedTypeData::Named(path, vec![], true)); FunctionReturnType::Ty(ty) @@ -167,13 +169,6 @@ pub fn index_array(array: Ident, index: &str) -> Expression { }))) } -pub fn index_array_variable(array: Expression, index: &str) -> Expression { - expression(ExpressionKind::Index(Box::new(IndexExpression { - collection: array, - index: variable(index), - }))) -} - pub fn check_trait_method_implemented(trait_impl: &NoirTraitImpl, method_name: &str) -> bool { trait_impl.items.iter().any(|item| match item { TraitImplItem::Function(func) => func.def.name.0.contents == method_name, diff --git a/aztec_macros/src/utils/constants.rs b/aztec_macros/src/utils/constants.rs index 464cd10e2c7..848cca0477d 100644 --- a/aztec_macros/src/utils/constants.rs +++ b/aztec_macros/src/utils/constants.rs @@ -1,3 +1,4 @@ pub const FUNCTION_TREE_HEIGHT: u32 = 5; pub const MAX_CONTRACT_PRIVATE_FUNCTIONS: usize = 2_usize.pow(FUNCTION_TREE_HEIGHT); pub const SIGNATURE_PLACEHOLDER: &str = "SIGNATURE_PLACEHOLDER"; +pub const SELECTOR_PLACEHOLDER: &str = "SELECTOR_PLACEHOLDER"; diff --git a/aztec_macros/src/utils/errors.rs b/aztec_macros/src/utils/errors.rs index 48186555eff..4c5411dfe0f 100644 --- a/aztec_macros/src/utils/errors.rs +++ b/aztec_macros/src/utils/errors.rs @@ -8,9 +8,15 @@ pub enum AztecMacroError { AztecDepNotFound, ContractHasTooManyPrivateFunctions { span: Span }, UnsupportedFunctionArgumentType { 
span: Span, typ: UnresolvedTypeData }, + UnsupportedFunctionReturnType { span: Span, typ: UnresolvedTypeData }, UnsupportedStorageType { span: Option, typ: UnresolvedTypeData }, CouldNotAssignStorageSlots { secondary_message: Option }, + CouldNotImplementComputeNoteHashAndNullifier { secondary_message: Option }, CouldNotImplementNoteInterface { span: Option, secondary_message: Option }, + MultipleStorageDefinitions { span: Option }, + CouldNotExportStorageLayout { span: Option, secondary_message: Option }, + CouldNotExportFunctionAbi { span: Option, secondary_message: Option }, + CouldNotGenerateContractInterface { secondary_message: Option }, EventError { span: Span, message: String }, UnsupportedAttributes { span: Span, secondary_message: Option }, } @@ -19,7 +25,7 @@ impl From for MacroError { fn from(err: AztecMacroError) -> Self { match err { AztecMacroError::AztecDepNotFound {} => MacroError { - primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Cargo.toml. For more information go to https://docs.aztec.network/developers/debugging/aztecnr-errors#aztec-dependency-not-found-please-add-aztec-as-a-dependency-in-your-nargotoml".to_owned(), + primary_message: "Aztec dependency not found. Please add aztec as a dependency in your Nargo.toml. For more information go to https://docs.aztec.network/developers/debugging/aztecnr-errors#aztec-dependency-not-found-please-add-aztec-as-a-dependency-in-your-nargotoml".to_owned(), secondary_message: None, span: None, }, @@ -33,6 +39,11 @@ impl From for MacroError { secondary_message: None, span: Some(span), }, + AztecMacroError::UnsupportedFunctionReturnType { span, typ } => MacroError { + primary_message: format!("Provided return type `{typ:?}` is not supported in Aztec contract interface"), + secondary_message: None, + span: Some(span), + }, AztecMacroError::UnsupportedStorageType { span, typ } => MacroError { primary_message: format!("Provided storage type `{typ:?}` is not directly supported in Aztec. 
Please provide a custom storage implementation"), secondary_message: None, @@ -43,11 +54,36 @@ impl From for MacroError { secondary_message, span: None, }, + AztecMacroError::CouldNotImplementComputeNoteHashAndNullifier { secondary_message } => MacroError { + primary_message: "Could not implement compute_note_hash_and_nullifier automatically, please provide an implementation".to_string(), + secondary_message, + span: None, + }, AztecMacroError::CouldNotImplementNoteInterface { span, secondary_message } => MacroError { primary_message: "Could not implement automatic methods for note, please provide an implementation of the NoteInterface trait".to_string(), secondary_message, + span + }, + AztecMacroError::MultipleStorageDefinitions { span } => MacroError { + primary_message: "Only one struct can be tagged as #[aztec(storage)]".to_string(), + secondary_message: None, + span, + }, + AztecMacroError::CouldNotExportStorageLayout { secondary_message, span } => MacroError { + primary_message: "Could not generate and export storage layout".to_string(), + secondary_message, + span, + }, + AztecMacroError::CouldNotExportFunctionAbi { secondary_message, span } => MacroError { + primary_message: "Could not generate and export function abi".to_string(), + secondary_message, span, }, + AztecMacroError::CouldNotGenerateContractInterface { secondary_message } => MacroError { + primary_message: "Could not generate contract interface".to_string(), + secondary_message, + span: None + }, AztecMacroError::EventError { span, message } => MacroError { primary_message: message, secondary_message: None, diff --git a/aztec_macros/src/utils/hir_utils.rs b/aztec_macros/src/utils/hir_utils.rs index f31a0584261..ae895d2075c 100644 --- a/aztec_macros/src/utils/hir_utils.rs +++ b/aztec_macros/src/utils/hir_utils.rs @@ -1,27 +1,47 @@ use iter_extended::vecmap; +use noirc_errors::Location; use noirc_frontend::{ graph::CrateId, - hir::def_collector::dc_crate::UnresolvedTraitImpl, - macros_api::{HirContext, ModuleDefId, StructId}, - node_interner::{TraitId, TraitImplId}, - Signedness, Type, UnresolvedTypeData, + hir::{ + def_map::{LocalModuleId, ModuleId}, + resolution::{path_resolver::StandardPathResolver, resolver::Resolver}, + type_check::type_check_func, + }, + macros_api::{FileId, HirContext, MacroError, ModuleDefId, StructId}, + node_interner::{FuncId, TraitId}, + ItemVisibility, LetStatement, NoirFunction, Shared, Signedness, StructType, Type, }; +use super::ast_utils::is_custom_attribute; + pub fn collect_crate_structs(crate_id: &CrateId, context: &HirContext) -> Vec { + context + .def_map(crate_id) + .map(|def_map| { + def_map + .modules() + .iter() + .flat_map(|(_, module)| { + module.type_definitions().filter_map(move |typ| { + if let ModuleDefId::TypeId(struct_id) = typ { + Some(struct_id) + } else { + None + } + }) + }) + .collect() + }) + .unwrap_or_default() +} + +pub fn collect_crate_functions(crate_id: &CrateId, context: &HirContext) -> Vec { context .def_map(crate_id) .expect("ICE: Missing crate in def_map") .modules() .iter() - .flat_map(|(_, module)| { - module.type_definitions().filter_map(|typ| { - if let ModuleDefId::TypeId(struct_id) = typ { - Some(struct_id) - } else { - None - } - }) - }) + .flat_map(|(_, module)| module.value_definitions().filter_map(|id| id.as_function())) .collect() } @@ -32,8 +52,8 @@ pub fn collect_traits(context: &HirContext) -> Vec { .flatten() .flat_map(|module| { module.type_definitions().filter_map(|typ| { - if let ModuleDefId::TraitId(struct_id) = typ { - Some(struct_id) 
+ if let ModuleDefId::TraitId(trait_id) = typ { + Some(trait_id) } else { None } @@ -65,54 +85,227 @@ pub fn signature_of_type(typ: &Type) -> String { let fields = vecmap(types, signature_of_type); format!("({})", fields.join(",")) } + Type::String(len_typ) => { + if let Type::Constant(len) = **len_typ { + format!("str<{len}>") + } else { + unimplemented!( + "Cannot generate signature for string with length type {:?}", + len_typ + ) + } + } + Type::MutableReference(typ) => signature_of_type(typ), _ => unimplemented!("Cannot generate signature for type {:?}", typ), } } -// Fetches the name of all structs that implement trait_name, both in the current crate and all of its dependencies. -pub fn fetch_struct_trait_impls( - context: &mut HirContext, - unresolved_traits_impls: &[UnresolvedTraitImpl], - trait_name: &str, -) -> Vec { - let mut struct_typenames: Vec = Vec::new(); - - // These structs can be declared in either external crates or the current one. External crates that contain - // dependencies have already been processed and resolved, but are available here via the NodeInterner. Note that - // crates on which the current crate does not depend on may not have been processed, and will be ignored. - for trait_impl_id in 0..context.def_interner.next_trait_impl_id().0 { - let trait_impl = &context.def_interner.get_trait_implementation(TraitImplId(trait_impl_id)); - - if trait_impl.borrow().ident.0.contents == *trait_name { - if let Type::Struct(s, _) = &trait_impl.borrow().typ { - struct_typenames.push(s.borrow().name.0.contents.clone()); +// Fetches the name of all structs tagged as #[aztec(note)] in a given crate, avoiding +// contract dependencies that are just there for their interfaces. +pub fn fetch_crate_notes( + context: &HirContext, + crate_id: &CrateId, +) -> Vec<(String, Shared)> { + collect_crate_structs(crate_id, context) + .iter() + .filter_map(|struct_id| { + let r#struct = context.def_interner.get_struct(*struct_id); + let attributes = context.def_interner.struct_attributes(struct_id); + if attributes.iter().any(|attr| is_custom_attribute(attr, "aztec(note)")) { + let module_id = struct_id.module_id(); + + fully_qualified_note_path(context, *struct_id).map(|path| { + let path = if path.contains("::") { + let prefix = if &module_id.krate == context.root_crate_id() { + "crate" + } else { + "dep" + }; + format!("{}::{}", prefix, path) + } else { + path + }; + (path.clone(), r#struct) + }) } else { - panic!("Found impl for {} on non-Struct", trait_name); + None } - } + }) + .collect() +} + +// Fetches the name of all structs tagged as #[aztec(note)], both in the current crate and all of its dependencies. +pub fn fetch_notes(context: &HirContext) -> Vec<(String, Shared)> { + context.crates().flat_map(|crate_id| fetch_crate_notes(context, &crate_id)).collect() +} + +pub fn get_contract_module_data( + context: &mut HirContext, + crate_id: &CrateId, +) -> Option<(String, LocalModuleId, FileId)> { + let def_map = context.def_map(crate_id).expect("ICE: Missing crate in def_map"); + // We first fetch modules in this crate which correspond to contracts, along with their file id. + let contract_module_file_ids: Vec<(String, LocalModuleId, FileId)> = def_map + .modules() + .iter() + .filter(|(_, module)| module.is_contract) + .map(|(idx, module)| { + (def_map.get_module_path(idx, module.parent), LocalModuleId(idx), module.location.file) + }) + .collect(); + + // If the current crate does not contain a contract module we simply skip it. 
+ if contract_module_file_ids.is_empty() { + return None; } - // This crate's traits and impls have not yet been resolved, so we look for impls in unresolved_trait_impls. - struct_typenames.extend( - unresolved_traits_impls - .iter() - .filter(|trait_impl| { - trait_impl - .trait_path - .segments - .last() - .expect("ICE: empty trait_impl path") - .0 - .contents - == *trait_name - }) - .filter_map(|trait_impl| match &trait_impl.object_type.typ { - UnresolvedTypeData::Named(path, _, _) => { - Some(path.segments.last().unwrap().0.contents.clone()) - } - _ => None, - }), + Some(contract_module_file_ids[0].clone()) +} + +pub fn inject_fn( + crate_id: &CrateId, + context: &mut HirContext, + func: NoirFunction, + location: Location, + module_id: LocalModuleId, + file_id: FileId, +) -> Result<(), MacroError> { + let func_id = context.def_interner.push_empty_fn(); + context.def_interner.push_function( + func_id, + &func.def, + ModuleId { krate: *crate_id, local_id: module_id }, + location, ); - struct_typenames + context.def_map_mut(crate_id).unwrap().modules_mut()[module_id.0] + .declare_function(func.name_ident().clone(), ItemVisibility::Public, func_id) + .map_err(|err| MacroError { + primary_message: format!("Failed to declare autogenerated {} function", func.name()), + secondary_message: Some(format!("Duplicate definition found {}", err.0)), + span: None, + })?; + + let def_maps = &mut context.def_maps; + + let path_resolver = + StandardPathResolver::new(ModuleId { local_id: module_id, krate: *crate_id }); + + let resolver = Resolver::new(&mut context.def_interner, &path_resolver, def_maps, file_id); + + let (hir_func, meta, _) = resolver.resolve_function(func, func_id); + + context.def_interner.push_fn_meta(meta, func_id); + context.def_interner.update_fn(func_id, hir_func); + + let errors = type_check_func(&mut context.def_interner, func_id); + + if !errors.is_empty() { + return Err(MacroError { + primary_message: "Failed to type check autogenerated function".to_owned(), + secondary_message: Some(errors.iter().map(|err| err.to_string()).collect::()), + span: None, + }); + } + + Ok(()) +} + +pub fn inject_global( + crate_id: &CrateId, + context: &mut HirContext, + global: LetStatement, + module_id: LocalModuleId, + file_id: FileId, +) { + let name = global.pattern.name_ident().clone(); + + let global_id = context.def_interner.push_empty_global( + name.clone(), + module_id, + file_id, + global.attributes.clone(), + ); + + // Add the statement to the scope so its path can be looked up later + context.def_map_mut(crate_id).unwrap().modules_mut()[module_id.0] + .declare_global(name, global_id) + .unwrap_or_else(|(name, _)| { + panic!( + "Failed to declare autogenerated {} global, likely due to a duplicate definition", + name + ) + }); + + let def_maps = &mut context.def_maps; + + let path_resolver = + StandardPathResolver::new(ModuleId { local_id: module_id, krate: *crate_id }); + + let mut resolver = Resolver::new(&mut context.def_interner, &path_resolver, def_maps, file_id); + + let hir_stmt = resolver.resolve_global_let(global, global_id); + + let statement_id = context.def_interner.get_global(global_id).let_statement; + context.def_interner.replace_statement(statement_id, hir_stmt); +} + +pub fn fully_qualified_note_path(context: &HirContext, note_id: StructId) -> Option { + let module_id = note_id.module_id(); + let child_id = module_id.local_id.0; + let def_map = + context.def_map(&module_id.krate).expect("The local crate should be analyzed already"); + + let module = 
context.module(module_id); + + let module_path = def_map.get_module_path_with_separator(child_id, module.parent, "::"); + + if &module_id.krate == context.root_crate_id() { + Some(module_path) + } else { + find_non_contract_dependencies_bfs(context, context.root_crate_id(), &module_id.krate) + .map(|crates| crates.join("::") + "::" + &module_path) + } +} + +fn filter_contract_modules(context: &HirContext, crate_id: &CrateId) -> bool { + if let Some(def_map) = context.def_map(crate_id) { + !def_map.modules().iter().any(|(_, module)| module.is_contract) + } else { + true + } +} + +fn find_non_contract_dependencies_bfs( + context: &HirContext, + crate_id: &CrateId, + target_crate_id: &CrateId, +) -> Option> { + context.crate_graph[crate_id] + .dependencies + .iter() + .filter(|dep| filter_contract_modules(context, &dep.crate_id)) + .find_map(|dep| { + if &dep.crate_id == target_crate_id { + Some(vec![dep.name.to_string()]) + } else { + None + } + }) + .or_else(|| { + context.crate_graph[crate_id] + .dependencies + .iter() + .filter(|dep| filter_contract_modules(context, &dep.crate_id)) + .find_map(|dep| { + if let Some(mut path) = + find_non_contract_dependencies_bfs(context, &dep.crate_id, target_crate_id) + { + path.insert(0, dep.name.to_string()); + Some(path) + } else { + None + } + }) + }) } diff --git a/compiler/noirc_driver/src/abi_gen.rs b/compiler/noirc_driver/src/abi_gen.rs index 7fafa719186..86f10818dbc 100644 --- a/compiler/noirc_driver/src/abi_gen.rs +++ b/compiler/noirc_driver/src/abi_gen.rs @@ -2,10 +2,11 @@ use std::collections::BTreeMap; use acvm::acir::native_types::Witness; use iter_extended::{btree_map, vecmap}; -use noirc_abi::{Abi, AbiParameter, AbiReturnType, AbiType}; +use noirc_abi::{Abi, AbiParameter, AbiReturnType, AbiType, AbiValue}; use noirc_frontend::{ hir::Context, - hir_def::{function::Param, stmt::HirPattern}, + hir_def::{expr::HirArrayLiteral, function::Param, stmt::HirPattern}, + macros_api::{HirExpression, HirLiteral}, node_interner::{FuncId, NodeInterner}, Visibility, }; @@ -109,6 +110,60 @@ fn collapse_ranges(witnesses: &[Witness]) -> Vec> { wit } +pub(super) fn value_from_hir_expression(context: &Context, expression: HirExpression) -> AbiValue { + match expression { + HirExpression::Tuple(expr_ids) => { + let fields = expr_ids + .iter() + .map(|expr_id| { + value_from_hir_expression(context, context.def_interner.expression(expr_id)) + }) + .collect(); + AbiValue::Tuple { fields } + } + HirExpression::Constructor(constructor) => { + let fields = constructor + .fields + .iter() + .map(|(ident, expr_id)| { + ( + ident.0.contents.to_string(), + value_from_hir_expression( + context, + context.def_interner.expression(expr_id), + ), + ) + }) + .collect(); + AbiValue::Struct { fields } + } + HirExpression::Literal(literal) => match literal { + HirLiteral::Array(hir_array) => match hir_array { + HirArrayLiteral::Standard(expr_ids) => { + let value = expr_ids + .iter() + .map(|expr_id| { + value_from_hir_expression( + context, + context.def_interner.expression(expr_id), + ) + }) + .collect(); + AbiValue::Array { value } + } + _ => unreachable!("Repeated arrays cannot be used in the abi"), + }, + HirLiteral::Bool(value) => AbiValue::Boolean { value }, + HirLiteral::Str(value) => AbiValue::String { value }, + HirLiteral::Integer(field, sign) => { + AbiValue::Integer { value: field.to_string(), sign } + } + _ => unreachable!("Literal cannot be used in the abi"), + }, + _ => unreachable!("Type cannot be used in the abi {:?}", expression), + } +} + #[cfg(test)] mod 
test { use std::ops::Range; diff --git a/compiler/noirc_driver/src/contract.rs b/compiler/noirc_driver/src/contract.rs index bd78cb23cdc..d6c3dc6205d 100644 --- a/compiler/noirc_driver/src/contract.rs +++ b/compiler/noirc_driver/src/contract.rs @@ -1,14 +1,20 @@ use serde::{Deserialize, Serialize}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use acvm::acir::circuit::Program; use fm::FileId; -use noirc_abi::{Abi, ContractEvent}; +use noirc_abi::{Abi, AbiType, AbiValue}; use noirc_errors::debug_info::DebugInfo; use noirc_evaluator::errors::SsaReport; use super::debug::DebugFile; +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct CompiledContractOutputs { + pub structs: HashMap>, + pub globals: HashMap>, +} + #[derive(Clone, Debug, Serialize, Deserialize)] pub struct CompiledContract { pub noir_version: String, @@ -19,10 +25,7 @@ pub struct CompiledContract { /// stored in this `Vector`. pub functions: Vec, - /// All the events defined inside the contract scope. - /// An event is a struct value that can be emitted via oracles - /// by any contract function during execution. - pub events: Vec, + pub outputs: CompiledContractOutputs, pub file_map: BTreeMap, pub warnings: Vec, @@ -50,7 +53,7 @@ pub struct ContractFunction { )] pub bytecode: Program, - pub debug: DebugInfo, + pub debug: Vec, /// Names of the functions in the program. These are used for more informative debugging and benchmarking. pub names: Vec, diff --git a/compiler/noirc_driver/src/lib.rs b/compiler/noirc_driver/src/lib.rs index 8fa2d9680c8..8a554879e9f 100644 --- a/compiler/noirc_driver/src/lib.rs +++ b/compiler/noirc_driver/src/lib.rs @@ -3,11 +3,12 @@ #![warn(unreachable_pub)] #![warn(clippy::semicolon_if_nothing_returned)] +use abi_gen::value_from_hir_expression; use acvm::acir::circuit::ExpressionWidth; use clap::Args; use fm::{FileId, FileManager}; use iter_extended::vecmap; -use noirc_abi::{AbiParameter, AbiType, ContractEvent}; +use noirc_abi::{AbiParameter, AbiType, AbiValue}; use noirc_errors::{CustomDiagnostic, FileDiagnostic}; use noirc_evaluator::create_program; use noirc_evaluator::errors::RuntimeError; @@ -34,7 +35,7 @@ mod stdlib; use debug::filter_relevant_files; -pub use contract::{CompiledContract, ContractFunction}; +pub use contract::{CompiledContract, CompiledContractOutputs, ContractFunction}; pub use debug::DebugFile; pub use program::CompiledProgram; @@ -429,21 +430,55 @@ fn compile_contract_inner( } if errors.is_empty() { - let debug_infos: Vec<_> = functions.iter().map(|function| function.debug.clone()).collect(); + let debug_infos: Vec<_> = + functions.iter().flat_map(|function| function.debug.clone()).collect(); let file_map = filter_relevant_files(&debug_infos, &context.file_manager); + let out_structs = contract + .outputs + .structs + .into_iter() + .map(|(tag, structs)| { + let structs = structs + .into_iter() + .map(|struct_id| { + let typ = context.def_interner.get_struct(struct_id); + let typ = typ.borrow(); + let fields = vecmap(typ.get_fields(&[]), |(name, typ)| { + (name, AbiType::from_type(context, &typ)) + }); + let path = + context.fully_qualified_struct_path(context.root_crate_id(), typ.id); + AbiType::Struct { path, fields } + }) + .collect(); + (tag.to_string(), structs) + }) + .collect(); + + let out_globals = contract + .outputs + .globals + .iter() + .map(|(tag, globals)| { + let globals: Vec = globals + .iter() + .map(|global_id| { + let let_statement = + context.def_interner.get_global_let_statement(*global_id).unwrap(); + let 
hir_expression = + context.def_interner.expression(&let_statement.expression); + value_from_hir_expression(context, hir_expression) + }) + .collect(); + (tag.to_string(), globals) + }) + .collect(); + Ok(CompiledContract { name: contract.name, - events: contract - .events - .iter() - .map(|event_id| { - let typ = context.def_interner.get_struct(*event_id); - let typ = typ.borrow(); - ContractEvent::from_struct_type(context, &typ) - }) - .collect(), functions, + outputs: CompiledContractOutputs { structs: out_structs, globals: out_globals }, file_map, noir_version: NOIR_ARTIFACT_VERSION_STRING.to_string(), warnings, @@ -513,13 +548,8 @@ pub fn compile_no_check( Ok(CompiledProgram { hash, - // TODO(https://github.com/noir-lang/noir/issues/4428) program, - // TODO(https://github.com/noir-lang/noir/issues/4428) - // Debug info is only relevant for errors at execution time which is not yet supported - // The CompileProgram `debug` field is used in multiple places and is better - // left to be updated once execution of multiple ACIR functions is enabled - debug: debug[0].clone(), + debug, abi, file_map, noir_version: NOIR_ARTIFACT_VERSION_STRING.to_string(), diff --git a/compiler/noirc_driver/src/program.rs b/compiler/noirc_driver/src/program.rs index 9ffd2d70dda..ed7ddb29f59 100644 --- a/compiler/noirc_driver/src/program.rs +++ b/compiler/noirc_driver/src/program.rs @@ -24,7 +24,7 @@ pub struct CompiledProgram { )] pub program: Program, pub abi: noirc_abi::Abi, - pub debug: DebugInfo, + pub debug: Vec, pub file_map: BTreeMap, pub warnings: Vec, /// Names of the functions in the program. These are used for more informative debugging and benchmarking. diff --git a/compiler/noirc_errors/src/debug_info.rs b/compiler/noirc_errors/src/debug_info.rs index 09117bdc3b7..54e2521e413 100644 --- a/compiler/noirc_errors/src/debug_info.rs +++ b/compiler/noirc_errors/src/debug_info.rs @@ -46,6 +46,49 @@ pub type DebugVariables = BTreeMap; pub type DebugFunctions = BTreeMap; pub type DebugTypes = BTreeMap; +#[derive(Default, Debug, Clone, Deserialize, Serialize)] +pub struct ProgramDebugInfo { + pub debug_infos: Vec, +} + +impl ProgramDebugInfo { + pub fn serialize_compressed_base64_json( + debug_info: &ProgramDebugInfo, + s: S, + ) -> Result + where + S: Serializer, + { + let json_str = serde_json::to_string(debug_info).map_err(S::Error::custom)?; + + let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(json_str.as_bytes()).map_err(S::Error::custom)?; + let compressed_data = encoder.finish().map_err(S::Error::custom)?; + + let encoded_b64 = base64::prelude::BASE64_STANDARD.encode(compressed_data); + s.serialize_str(&encoded_b64) + } + + pub fn deserialize_compressed_base64_json<'de, D>( + deserializer: D, + ) -> Result + where + D: Deserializer<'de>, + { + let encoded_b64: String = Deserialize::deserialize(deserializer)?; + + let compressed_data = + base64::prelude::BASE64_STANDARD.decode(encoded_b64).map_err(D::Error::custom)?; + + let mut decoder = DeflateDecoder::new(&compressed_data[..]); + let mut decompressed_data = Vec::new(); + decoder.read_to_end(&mut decompressed_data).map_err(D::Error::custom)?; + + let json_str = String::from_utf8(decompressed_data).map_err(D::Error::custom)?; + serde_json::from_str(&json_str).map_err(D::Error::custom) + } +} + #[serde_as] #[derive(Default, Debug, Clone, Deserialize, Serialize)] pub struct DebugInfo { @@ -130,40 +173,4 @@ impl DebugInfo { counted_opcodes } - - pub fn serialize_compressed_base64_json( - debug_info: 
&DebugInfo, - s: S, - ) -> Result - where - S: Serializer, - { - let json_str = serde_json::to_string(debug_info).map_err(S::Error::custom)?; - - let mut encoder = DeflateEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(json_str.as_bytes()).map_err(S::Error::custom)?; - let compressed_data = encoder.finish().map_err(S::Error::custom)?; - - let encoded_b64 = base64::prelude::BASE64_STANDARD.encode(compressed_data); - s.serialize_str(&encoded_b64) - } - - pub fn deserialize_compressed_base64_json<'de, D>( - deserializer: D, - ) -> Result - where - D: Deserializer<'de>, - { - let encoded_b64: String = Deserialize::deserialize(deserializer)?; - - let compressed_data = - base64::prelude::BASE64_STANDARD.decode(encoded_b64).map_err(D::Error::custom)?; - - let mut decoder = DeflateDecoder::new(&compressed_data[..]); - let mut decompressed_data = Vec::new(); - decoder.read_to_end(&mut decompressed_data).map_err(D::Error::custom)?; - - let json_str = String::from_utf8(decompressed_data).map_err(D::Error::custom)?; - serde_json::from_str(&json_str).map_err(D::Error::custom) - } } diff --git a/compiler/noirc_evaluator/Cargo.toml b/compiler/noirc_evaluator/Cargo.toml index fad7c3c309e..fb2f003aa56 100644 --- a/compiler/noirc_evaluator/Cargo.toml +++ b/compiler/noirc_evaluator/Cargo.toml @@ -15,7 +15,7 @@ fxhash.workspace = true iter-extended.workspace = true thiserror.workspace = true num-bigint = "0.4" -im = { version = "15.1", features = ["serde"] } +im.workspace = true serde.workspace = true tracing.workspace = true -chrono = "0.4.37" \ No newline at end of file +chrono = "0.4.37" diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs index 36b5f8793cb..ee047903743 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_black_box.rs @@ -1,8 +1,11 @@ -use acvm::acir::{brillig::BlackBoxOp, BlackBoxFunc}; +use acvm::{ + acir::{brillig::BlackBoxOp, BlackBoxFunc}, + FieldElement, +}; use crate::brillig::brillig_ir::{ brillig_variable::{BrilligVariable, BrilligVector, SingleAddrVariable}, - BrilligContext, + BrilligBinaryOp, BrilligContext, }; /// Transforms SSA's black box function calls into the corresponding brillig instructions @@ -235,10 +238,17 @@ pub(crate) fn convert_black_box_call( ), BlackBoxFunc::BigIntAdd => { if let ( - [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], - [BrilligVariable::SingleAddr(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(lhs_modulus), BrilligVariable::SingleAddr(rhs), BrilligVariable::SingleAddr(rhs_modulus)], + [BrilligVariable::SingleAddr(output), BrilligVariable::SingleAddr(modulus_id)], ) = (function_arguments, function_results) { + prepare_bigint_output( + brillig_context, + lhs_modulus, + rhs_modulus, + output, + modulus_id, + ); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntAdd { lhs: lhs.address, rhs: rhs.address, @@ -246,16 +256,23 @@ pub(crate) fn convert_black_box_call( }); } else { unreachable!( - "ICE: BigIntAdd expects two register arguments and one result register" + "ICE: BigIntAdd expects four register arguments and two result registers" ) } } BlackBoxFunc::BigIntSub => { if let ( - [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], - [BrilligVariable::SingleAddr(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(lhs_modulus), 
BrilligVariable::SingleAddr(rhs), BrilligVariable::SingleAddr(rhs_modulus)], + [BrilligVariable::SingleAddr(output), BrilligVariable::SingleAddr(modulus_id)], ) = (function_arguments, function_results) { + prepare_bigint_output( + brillig_context, + lhs_modulus, + rhs_modulus, + output, + modulus_id, + ); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntSub { lhs: lhs.address, rhs: rhs.address, @@ -263,16 +280,23 @@ pub(crate) fn convert_black_box_call( }); } else { unreachable!( - "ICE: BigIntSub expects two register arguments and one result register" + "ICE: BigIntSub expects four register arguments and two result registers" ) } } BlackBoxFunc::BigIntMul => { if let ( - [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], - [BrilligVariable::SingleAddr(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(lhs_modulus), BrilligVariable::SingleAddr(rhs), BrilligVariable::SingleAddr(rhs_modulus)], + [BrilligVariable::SingleAddr(output), BrilligVariable::SingleAddr(modulus_id)], ) = (function_arguments, function_results) { + prepare_bigint_output( + brillig_context, + lhs_modulus, + rhs_modulus, + output, + modulus_id, + ); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntMul { lhs: lhs.address, rhs: rhs.address, @@ -280,16 +304,23 @@ pub(crate) fn convert_black_box_call( }); } else { unreachable!( - "ICE: BigIntMul expects two register arguments and one result register" + "ICE: BigIntMul expects four register arguments and two result registers" ) } } BlackBoxFunc::BigIntDiv => { if let ( - [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(rhs)], - [BrilligVariable::SingleAddr(output)], + [BrilligVariable::SingleAddr(lhs), BrilligVariable::SingleAddr(lhs_modulus), BrilligVariable::SingleAddr(rhs), BrilligVariable::SingleAddr(rhs_modulus)], + [BrilligVariable::SingleAddr(output), BrilligVariable::SingleAddr(modulus_id)], ) = (function_arguments, function_results) { + prepare_bigint_output( + brillig_context, + lhs_modulus, + rhs_modulus, + output, + modulus_id, + ); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntDiv { lhs: lhs.address, rhs: rhs.address, @@ -297,16 +328,20 @@ pub(crate) fn convert_black_box_call( }); } else { unreachable!( - "ICE: BigIntDiv expects two register arguments and one result register" + "ICE: BigIntDiv expects four register arguments and two result registers" ) } } BlackBoxFunc::BigIntFromLeBytes => { - if let ([inputs, modulus], [BrilligVariable::SingleAddr(output)]) = - (function_arguments, function_results) + if let ( + [inputs, modulus], + [BrilligVariable::SingleAddr(output), BrilligVariable::SingleAddr(_modulus_id)], + ) = (function_arguments, function_results) { let inputs_vector = convert_array_or_vector(brillig_context, inputs, bb_func); let modulus_vector = convert_array_or_vector(brillig_context, modulus, bb_func); + let output_id = brillig_context.get_new_bigint_id(); + brillig_context.const_instruction(*output, FieldElement::from(output_id as u128)); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntFromLeBytes { inputs: inputs_vector.to_heap_vector(), modulus: modulus_vector.to_heap_vector(), @@ -314,23 +349,24 @@ pub(crate) fn convert_black_box_call( }); } else { unreachable!( - "ICE: BigIntFromLeBytes expects two register arguments and one result register" + "ICE: BigIntFromLeBytes expects a register and an array as arguments and two result registers" ) } } BlackBoxFunc::BigIntToLeBytes => { if let ( - [BrilligVariable::SingleAddr(input)], - 
[BrilligVariable::BrilligVector(result_vector)], + [BrilligVariable::SingleAddr(input), BrilligVariable::SingleAddr(_modulus)], + [result_array], ) = (function_arguments, function_results) { + let output = convert_array_or_vector(brillig_context, result_array, bb_func); brillig_context.black_box_op_instruction(BlackBoxOp::BigIntToLeBytes { input: input.address, - output: result_vector.to_heap_vector(), + output: output.to_heap_vector(), }); } else { unreachable!( - "ICE: BigIntToLeBytes expects one register argument and one array result" + "ICE: BigIntToLeBytes expects two register arguments and one array result" ) } } @@ -383,3 +419,30 @@ fn convert_array_or_vector( ), } } + +fn prepare_bigint_output( + brillig_context: &mut BrilligContext, + lhs_modulus: &SingleAddrVariable, + rhs_modulus: &SingleAddrVariable, + output: &SingleAddrVariable, + modulus_id: &SingleAddrVariable, +) { + // Check moduli + let condition = brillig_context.allocate_register(); + let condition_adr = SingleAddrVariable { address: condition, bit_size: 1 }; + brillig_context.binary_instruction( + *lhs_modulus, + *rhs_modulus, + condition_adr, + BrilligBinaryOp::Equals, + ); + brillig_context.codegen_constrain( + condition_adr, + Some("moduli should be identical in BigInt operation".to_string()), + ); + brillig_context.deallocate_register(condition); + // Set output id + let output_id = brillig_context.get_new_bigint_id(); + brillig_context.const_instruction(*output, FieldElement::from(output_id as u128)); + brillig_context.mov_instruction(modulus_id.address, lhs_modulus.address); +} diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs index e3d8b33837c..9a408f23517 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_block.rs @@ -5,7 +5,7 @@ use crate::brillig::brillig_ir::{ BrilligBinaryOp, BrilligContext, BRILLIG_MEMORY_ADDRESSING_BIT_SIZE, }; use crate::ssa::ir::dfg::CallStack; -use crate::ssa::ir::instruction::ConstrainError; +use crate::ssa::ir::instruction::{ConstrainError, UserDefinedConstrainError}; use crate::ssa::ir::{ basic_block::{BasicBlock, BasicBlockId}, dfg::DataFlowGraph, @@ -248,10 +248,15 @@ impl<'block> BrilligBlock<'block> { self.convert_ssa_binary(binary, dfg, result_var); } Instruction::Constrain(lhs, rhs, assert_message) => { - let assert_message = if let Some(error) = assert_message { + let (has_revert_data, static_assert_message) = if let Some(error) = assert_message { match error.as_ref() { - ConstrainError::Static(string) => Some(string.clone()), - ConstrainError::Dynamic(call_instruction) => { + ConstrainError::Intrinsic(string) => (false, Some(string.clone())), + ConstrainError::UserDefined(UserDefinedConstrainError::Static(string)) => { + (true, Some(string.clone())) + } + ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic( + call_instruction, + )) => { let Instruction::Call { func, arguments } = call_instruction else { unreachable!("expected a call instruction") }; @@ -264,11 +269,11 @@ impl<'block> BrilligBlock<'block> { // Dynamic assert messages are handled in the generated function call. // We then don't need to attach one to the constrain instruction. 
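A simplified model of what `prepare_bigint_output` enforces for the new four-input / two-output BigInt convention: the two modulus registers must agree (constrained with the message shown above), the result gets a fresh id from the per-context counter, and the operands' modulus is carried through to the output. `BigIntId`, the function name, and treating the register contents as plain `u32`s are assumptions made for illustration only.

```rust
type BigIntId = u32;

/// Returns (output bigint id, output modulus id) or the constraint failure message.
fn prepare_bigint_output_model(
    lhs_modulus: BigIntId,
    rhs_modulus: BigIntId,
    next_bigint_id: &mut BigIntId,
) -> Result<(BigIntId, BigIntId), String> {
    // The emitted Brillig constrains the two modulus registers to be equal...
    if lhs_modulus != rhs_modulus {
        return Err("moduli should be identical in BigInt operation".to_string());
    }
    // ...then hands the result a fresh id from the per-context counter
    // (`BrilligContext::get_new_bigint_id`) and reuses the operands' modulus.
    let output_id = *next_bigint_id;
    *next_bigint_id += 1;
    Ok((output_id, lhs_modulus))
}

fn main() {
    let mut counter = 0;
    assert_eq!(prepare_bigint_output_model(7, 7, &mut counter), Ok((0, 7)));
    assert_eq!(prepare_bigint_output_model(7, 7, &mut counter), Ok((1, 7)));
    assert!(prepare_bigint_output_model(7, 8, &mut counter).is_err());
}
```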
- None + (false, None) } } } else { - None + (false, None) }; let condition = SingleAddrVariable { @@ -281,8 +286,12 @@ impl<'block> BrilligBlock<'block> { dfg, condition, ); - - self.brillig_context.constrain_instruction(condition, assert_message); + if has_revert_data { + self.brillig_context + .codegen_constrain_with_revert_data(condition, static_assert_message); + } else { + self.brillig_context.codegen_constrain(condition, static_assert_message); + } self.brillig_context.deallocate_single_addr(condition); } Instruction::Allocate => { @@ -670,7 +679,7 @@ impl<'block> BrilligBlock<'block> { BrilligBinaryOp::LessThanEquals, ); - self.brillig_context.constrain_instruction(condition, assert_message.clone()); + self.brillig_context.codegen_constrain(condition, assert_message.clone()); self.brillig_context.deallocate_single_addr(condition); self.brillig_context.deallocate_single_addr(left); self.brillig_context.deallocate_single_addr(right); @@ -805,7 +814,7 @@ impl<'block> BrilligBlock<'block> { ); self.brillig_context - .constrain_instruction(condition, Some("Array index out of bounds".to_owned())); + .codegen_constrain(condition, Some("Array index out of bounds".to_owned())); if should_deallocate_size { self.brillig_context.deallocate_single_addr(size_as_register); @@ -1506,10 +1515,8 @@ impl<'block> BrilligBlock<'block> { condition, BrilligBinaryOp::LessThanEquals, ); - self.brillig_context.constrain_instruction( - condition, - Some("attempt to add with overflow".to_string()), - ); + self.brillig_context + .codegen_constrain(condition, Some("attempt to add with overflow".to_string())); self.brillig_context.deallocate_single_addr(condition); } (BrilligBinaryOp::Sub, false) => { @@ -1522,7 +1529,7 @@ impl<'block> BrilligBlock<'block> { condition, BrilligBinaryOp::LessThanEquals, ); - self.brillig_context.constrain_instruction( + self.brillig_context.codegen_constrain( condition, Some("attempt to subtract with overflow".to_string()), ); @@ -1552,7 +1559,7 @@ impl<'block> BrilligBlock<'block> { BrilligBinaryOp::UnsignedDiv, ); ctx.binary_instruction(division, left, condition, BrilligBinaryOp::Equals); - ctx.constrain_instruction( + ctx.codegen_constrain( condition, Some("attempt to multiply with overflow".to_string()), ); @@ -1781,7 +1788,7 @@ pub(crate) fn type_of_binary_operation(lhs_type: &Type, rhs_type: &Type) -> Type (Type::Numeric(lhs_type), Type::Numeric(rhs_type)) => { assert_eq!( lhs_type, rhs_type, - "lhs and rhs types in a binary operation are always the same" + "lhs and rhs types in a binary operation are always the same but got {lhs_type} and {rhs_type}" ); Type::Numeric(*lhs_type) } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs index 15a2a531e78..4b97a61491d 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_directive.rs @@ -67,61 +67,105 @@ pub(crate) fn directive_invert() -> GeneratedBrillig { /// (a/b, a-a/b*b) /// } /// ``` -pub(crate) fn directive_quotient(mut bit_size: u32) -> GeneratedBrillig { +pub(crate) fn directive_quotient(bit_size: u32) -> GeneratedBrillig { // `a` is (0) (i.e register index 0) // `b` is (1) - if bit_size > FieldElement::max_num_bits() { - bit_size = FieldElement::max_num_bits(); - } - GeneratedBrillig { - byte_code: vec![ - BrilligOpcode::CalldataCopy { - destination_address: MemoryAddress::from(0), - size: 2, - offset: 0, - }, - 
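A compact model of the new `Constrain` dispatch in `BrilligBlock`: only user-defined static messages take the revert-data path, intrinsic messages stay as plain assert metadata, and dynamic messages are produced by the emitted call instead. The enum and function names below are stand-ins, not the real SSA types.

```rust
/// Simplified stand-ins for `ConstrainError` / `UserDefinedConstrainError`.
enum ConstrainErrorModel {
    Intrinsic(String),
    UserDefinedStatic(String),
    /// The message is produced by a separate call instruction at runtime.
    UserDefinedDynamic,
}

/// Returns (emit revert data?, static assert message), mirroring the new match arms.
fn plan_constrain(error: Option<ConstrainErrorModel>) -> (bool, Option<String>) {
    match error {
        Some(ConstrainErrorModel::Intrinsic(msg)) => (false, Some(msg)),
        Some(ConstrainErrorModel::UserDefinedStatic(msg)) => (true, Some(msg)),
        Some(ConstrainErrorModel::UserDefinedDynamic) | None => (false, None),
    }
}

fn main() {
    assert_eq!(plan_constrain(None), (false, None));
    assert!(plan_constrain(Some(ConstrainErrorModel::UserDefinedStatic("oops".into()))).0);
}
```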
BrilligOpcode::Cast { - destination: MemoryAddress(0), - source: MemoryAddress(0), - bit_size, - }, - BrilligOpcode::Cast { - destination: MemoryAddress(1), - source: MemoryAddress(1), - bit_size, - }, - //q = a/b is set into register (2) - BrilligOpcode::BinaryIntOp { - op: BinaryIntOp::Div, - lhs: MemoryAddress::from(0), - rhs: MemoryAddress::from(1), - destination: MemoryAddress::from(2), - bit_size, - }, - //(1)= q*b - BrilligOpcode::BinaryIntOp { - op: BinaryIntOp::Mul, - lhs: MemoryAddress::from(2), - rhs: MemoryAddress::from(1), - destination: MemoryAddress::from(1), - bit_size, - }, - //(1) = a-q*b - BrilligOpcode::BinaryIntOp { - op: BinaryIntOp::Sub, - lhs: MemoryAddress::from(0), - rhs: MemoryAddress::from(1), - destination: MemoryAddress::from(1), - bit_size, - }, - //(0) = q - BrilligOpcode::Mov { - destination: MemoryAddress::from(0), - source: MemoryAddress::from(2), - }, - BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 2 }, - ], - assert_messages: Default::default(), - locations: Default::default(), + + // TODO: The only difference between these implementations is the integer version will truncate the input to the `bit_size` via cast. + // Once we deduplicate brillig functions then we can modify this so that fields and integers share the same quotient function. + if bit_size >= FieldElement::max_num_bits() { + // Field version + GeneratedBrillig { + byte_code: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 2, + offset: 0, + }, + // No cast, since calldata is typed as field by default + //q = a/b is set into register (2) + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::IntegerDiv, // We want integer division, not field division! + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), + }, + //(1)= q*b + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::Mul, + lhs: MemoryAddress::from(2), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), + }, + //(1) = a-q*b + BrilligOpcode::BinaryFieldOp { + op: BinaryFieldOp::Sub, + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), + }, + //(0) = q + BrilligOpcode::Mov { + destination: MemoryAddress::from(0), + source: MemoryAddress::from(2), + }, + BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 2 }, + ], + assert_messages: Default::default(), + locations: Default::default(), + } + } else { + // Integer version + GeneratedBrillig { + byte_code: vec![ + BrilligOpcode::CalldataCopy { + destination_address: MemoryAddress::from(0), + size: 2, + offset: 0, + }, + BrilligOpcode::Cast { + destination: MemoryAddress(0), + source: MemoryAddress(0), + bit_size, + }, + BrilligOpcode::Cast { + destination: MemoryAddress(1), + source: MemoryAddress(1), + bit_size, + }, + //q = a/b is set into register (2) + BrilligOpcode::BinaryIntOp { + op: BinaryIntOp::Div, + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(2), + bit_size, + }, + //(1)= q*b + BrilligOpcode::BinaryIntOp { + op: BinaryIntOp::Mul, + lhs: MemoryAddress::from(2), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), + bit_size, + }, + //(1) = a-q*b + BrilligOpcode::BinaryIntOp { + op: BinaryIntOp::Sub, + lhs: MemoryAddress::from(0), + rhs: MemoryAddress::from(1), + destination: MemoryAddress::from(1), + bit_size, + }, + //(0) = q + BrilligOpcode::Mov { + destination: MemoryAddress::from(0), + source: MemoryAddress::from(2), + }, + 
BrilligOpcode::Stop { return_data_offset: 0, return_data_size: 2 }, + ], + assert_messages: Default::default(), + locations: Default::default(), + } } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs index b93693d9c79..3e1515b1eed 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_gen/brillig_slice_ops.rs @@ -465,7 +465,7 @@ mod tests { assert_eq!( vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())] .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.to_field()) .collect::>(), expected_return ); @@ -590,7 +590,7 @@ mod tests { assert_eq!( vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())] .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.to_field()) .collect::>(), expected_return ); @@ -686,7 +686,7 @@ mod tests { assert_eq!( vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())] .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.to_field()) .collect::>(), expected_return ); @@ -838,7 +838,7 @@ mod tests { assert_eq!( vm.get_memory()[return_data_offset..(return_data_offset + expected_return.len())] .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.to_field()) .collect::>(), expected_return ); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs index e5c731be679..7e37e1da434 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir.rs @@ -86,6 +86,8 @@ pub(crate) struct BrilligContext { next_section: usize, /// IR printer debug_show: DebugShow, + /// Counter for generating bigint ids in unconstrained functions + bigint_new_id: u32, } impl BrilligContext { @@ -98,9 +100,15 @@ impl BrilligContext { section_label: 0, next_section: 1, debug_show: DebugShow::new(enable_debug_trace), + bigint_new_id: 0, } } + pub(crate) fn get_new_bigint_id(&mut self) -> u32 { + let result = self.bigint_new_id; + self.bigint_new_id += 1; + result + } /// Adds a brillig instruction to the brillig byte code fn push_opcode(&mut self, opcode: BrilligOpcode) { self.obj.push_opcode(opcode); @@ -140,7 +148,7 @@ pub(crate) mod tests { &self, _public_key_x: &FieldElement, _public_key_y: &FieldElement, - _signature: &[u8], + _signature: &[u8; 64], _message: &[u8], ) -> Result { Ok(true) @@ -262,7 +270,7 @@ pub(crate) mod tests { // uses unresolved jumps which requires a block to be constructed in SSA and // we don't need this for Brillig IR tests context.push_opcode(BrilligOpcode::JumpIf { condition: r_equality, location: 8 }); - context.push_opcode(BrilligOpcode::Trap); + context.push_opcode(BrilligOpcode::Trap { revert_data_offset: 0, revert_data_size: 0 }); context.stop_instruction(); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs index 8ce15ba4e73..8a4f469f5c9 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/artifact.rs @@ -4,7 +4,7 @@ use std::collections::{BTreeMap, HashMap}; use crate::ssa::ir::dfg::CallStack; /// Represents a parameter or a return value of an entry point function. 
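The two `directive_quotient` variants above differ only in whether the inputs are cast down to `bit_size` first; both compute the same pair of results. A tiny arithmetic model (using `u128` in place of field/integer registers):

```rust
/// (q, r) = (a / b, a - q * b), matching the register-level steps in the directive.
fn quotient_directive_model(a: u128, b: u128) -> (u128, u128) {
    let q = a / b; // BinaryIntOp::Div (or BinaryFieldOp::IntegerDiv in the field version)
    let r = a - q * b; // Mul into register 1, then Sub from register 0
    (q, r)
}

fn main() {
    assert_eq!(quotient_directive_model(10, 3), (3, 1));
    assert_eq!(quotient_directive_model(12, 4), (3, 0));
}
```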
-#[derive(Debug, Clone)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)] pub(crate) enum BrilligParameter { /// A single address parameter or return value. Holds the bit size of the parameter. SingleAddr(u32), @@ -17,7 +17,7 @@ pub(crate) enum BrilligParameter { /// The result of compiling and linking brillig artifacts. /// This is ready to run bytecode with attached metadata. -#[derive(Debug)] +#[derive(Debug, Default)] pub(crate) struct GeneratedBrillig { pub(crate) byte_code: Vec, pub(crate) locations: BTreeMap, diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_control_flow.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_control_flow.rs index 116eaa5103f..f8f39f03df4 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_control_flow.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/codegen_control_flow.rs @@ -138,4 +138,48 @@ impl BrilligContext { self.enter_section(end_section); } + + /// Emits brillig bytecode to jump to a trap condition if `condition` + /// is false. The trap will include the given message as revert data. + pub(crate) fn codegen_constrain_with_revert_data( + &mut self, + condition: SingleAddrVariable, + assert_message: Option, + ) { + assert!(condition.bit_size == 1); + + self.codegen_if_not(condition.address, |ctx| { + let (revert_data_offset, revert_data_size) = + if let Some(assert_message) = assert_message { + let bytes = assert_message.as_bytes(); + for (i, byte) in bytes.iter().enumerate() { + ctx.const_instruction( + SingleAddrVariable::new(MemoryAddress(i), 8), + (*byte as usize).into(), + ); + } + (0, bytes.len()) + } else { + (0, 0) + }; + ctx.trap_instruction(revert_data_offset, revert_data_size); + }); + } + + /// Emits brillig bytecode to jump to a trap condition if `condition` + /// is false. + pub(crate) fn codegen_constrain( + &mut self, + condition: SingleAddrVariable, + assert_message: Option, + ) { + assert!(condition.bit_size == 1); + + self.codegen_if_not(condition.address, |ctx| { + ctx.trap_instruction(0, 0); + if let Some(assert_message) = assert_message { + ctx.obj.add_assert_message_to_last_opcode(assert_message); + } + }); + } } diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs index 4ca1144b6a4..41a6d1873e4 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/debug_show.rs @@ -113,10 +113,14 @@ impl DebugShow { DebugShow { enable_debug_trace } } - /// Emits brillig bytecode to jump to a trap condition if `condition` - /// is false. - pub(crate) fn constrain_instruction(&self, condition: MemoryAddress) { - debug_println!(self.enable_debug_trace, " ASSERT {} != 0", condition); + /// Emits a `trap` instruction. + pub(crate) fn trap_instruction(&self, revert_data_offset: usize, revert_data_size: usize) { + debug_println!( + self.enable_debug_trace, + " TRAP {}..{}", + revert_data_offset, + revert_data_offset + revert_data_size + ); } /// Emits a `mov` instruction. 
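A behavioural sketch of `codegen_constrain_with_revert_data`: nothing happens when the condition holds; on failure the static message's bytes become the trap's revert data (written from memory address 0, one byte per slot). Plain Rust types stand in for Brillig memory and the trap opcode here.

```rust
/// Ok(()) when the constraint holds; Err(revert data bytes) when it fails.
fn constrain_with_revert_data_model(
    condition: bool,
    assert_message: Option<&str>,
) -> Result<(), Vec<u8>> {
    if condition {
        return Ok(());
    }
    // On failure the static message's bytes are laid out one per memory slot starting at
    // address 0 (as 8-bit constants) and the trap reports that region as revert data.
    Err(assert_message.map(|msg| msg.as_bytes().to_vec()).unwrap_or_default())
}

fn main() {
    assert!(constrain_with_revert_data_model(true, Some("unused")).is_ok());
    assert_eq!(
        constrain_with_revert_data_model(false, Some("hi")),
        Err(vec![b'h', b'i'])
    );
}
```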
diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs index db872487fcc..88cf987325d 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/entry_point.rs @@ -23,6 +23,7 @@ impl BrilligContext { section_label: 0, next_section: 1, debug_show: DebugShow::new(false), + bigint_new_id: 0, }; context.codegen_entry_point(&arguments, &return_parameters); @@ -527,7 +528,7 @@ mod tests { let (vm, return_data_offset, return_data_size) = create_and_run_vm(calldata.clone(), &bytecode); assert_eq!(return_data_size, 1, "Return data size is incorrect"); - assert_eq!(vm.get_memory()[return_data_offset].value, FieldElement::from(1_usize)); + assert_eq!(vm.get_memory()[return_data_offset].to_field(), FieldElement::from(1_usize)); } #[test] @@ -569,7 +570,7 @@ mod tests { assert_eq!( memory[return_data_pointer..(return_data_pointer + flattened_array.len())] .iter() - .map(|mem_val| mem_val.value) + .map(|mem_val| mem_val.to_field()) .collect::>(), flattened_array ); diff --git a/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs b/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs index f305eb81b01..901ccc58036 100644 --- a/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs +++ b/compiler/noirc_evaluator/src/brillig/brillig_ir/instructions.rs @@ -215,29 +215,6 @@ impl BrilligContext { ); } - /// Emits brillig bytecode to jump to a trap condition if `condition` - /// is false. - pub(crate) fn constrain_instruction( - &mut self, - condition: SingleAddrVariable, - assert_message: Option, - ) { - self.debug_show.constrain_instruction(condition.address); - - assert!(condition.bit_size == 1); - - let (next_section, next_label) = self.reserve_next_section_label(); - self.add_unresolved_jump( - BrilligOpcode::JumpIf { condition: condition.address, location: 0 }, - next_label, - ); - self.push_opcode(BrilligOpcode::Trap); - if let Some(assert_message) = assert_message { - self.obj.add_assert_message_to_last_opcode(assert_message); - } - self.enter_section(next_section); - } - /// Adds a unresolved `Jump` to the bytecode. fn add_unresolved_jump( &mut self, @@ -488,6 +465,12 @@ impl BrilligContext { offset, }); } + + pub(super) fn trap_instruction(&mut self, revert_data_offset: usize, revert_data_size: usize) { + self.debug_show.trap_instruction(revert_data_offset, revert_data_size); + + self.push_opcode(BrilligOpcode::Trap { revert_data_offset, revert_data_size }); + } } /// Type to encapsulate the binary operation types in Brillig diff --git a/compiler/noirc_evaluator/src/ssa.rs b/compiler/noirc_evaluator/src/ssa.rs index a2eb67bd231..feeca5797b2 100644 --- a/compiler/noirc_evaluator/src/ssa.rs +++ b/compiler/noirc_evaluator/src/ssa.rs @@ -14,7 +14,9 @@ use crate::{ errors::{RuntimeError, SsaReport}, }; use acvm::acir::{ - circuit::{Circuit, ExpressionWidth, Program as AcirProgram, PublicInputs}, + circuit::{ + brillig::BrilligBytecode, Circuit, ExpressionWidth, Program as AcirProgram, PublicInputs, + }, native_types::Witness, }; @@ -35,14 +37,16 @@ pub mod ssa_gen; /// Optimize the given program by converting it into SSA /// form and performing optimizations there. When finished, -/// convert the final SSA into ACIR and return it. +/// convert the final SSA into an ACIR program and return it. +/// An ACIR program is made up of both ACIR functions +/// and Brillig functions for unconstrained execution. 
pub(crate) fn optimize_into_acir( program: Program, print_passes: bool, print_brillig_trace: bool, force_brillig_output: bool, print_timings: bool, -) -> Result, RuntimeError> { +) -> Result<(Vec, Vec), RuntimeError> { let abi_distinctness = program.return_distinctness; let ssa_gen_span = span!(Level::TRACE, "ssa_generation"); @@ -55,7 +59,7 @@ pub(crate) fn optimize_into_acir( .run_pass(Ssa::mem2reg, "After Mem2Reg:") .run_pass(Ssa::as_slice_optimization, "After `as_slice` optimization") .try_run_pass(Ssa::evaluate_assert_constant, "After Assert Constant:")? - .try_run_pass(Ssa::unroll_loops, "After Unrolling:")? + .try_run_pass(Ssa::unroll_loops_iteratively, "After Unrolling:")? .run_pass(Ssa::simplify_cfg, "After Simplifying:") .run_pass(Ssa::flatten_cfg, "After Flattening:") .run_pass(Ssa::remove_bit_shifts, "After Removing Bit Shifts:") @@ -100,6 +104,18 @@ pub struct SsaProgramArtifact { } impl SsaProgramArtifact { + fn new(unconstrained_functions: Vec) -> Self { + let program = AcirProgram { functions: Vec::default(), unconstrained_functions }; + Self { + program, + debug: Vec::default(), + warnings: Vec::default(), + main_input_witnesses: Vec::default(), + main_return_witnesses: Vec::default(), + names: Vec::default(), + } + } + fn add_circuit(&mut self, mut circuit_artifact: SsaCircuitArtifact, is_main: bool) { self.program.functions.push(circuit_artifact.circuit); self.debug.push(circuit_artifact.debug_info); @@ -131,7 +147,7 @@ pub fn create_program( let func_sigs = program.function_signatures.clone(); let recursive = program.recursive; - let generated_acirs = optimize_into_acir( + let (generated_acirs, generated_brillig) = optimize_into_acir( program, enable_ssa_logging, enable_brillig_logging, @@ -144,7 +160,7 @@ pub fn create_program( "The generated ACIRs should match the supplied function signatures" ); - let mut program_artifact = SsaProgramArtifact::default(); + let mut program_artifact = SsaProgramArtifact::new(generated_brillig); // For setting up the ABI we need separately specify main's input and return witnesses let mut is_main = true; for (acir, func_sig) in generated_acirs.into_iter().zip(func_sigs) { diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs index bcd62e3b062..b94e02e5119 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/acir_variable.rs @@ -1261,7 +1261,8 @@ impl AcirContext { let modulus = self.big_int_ctx.modulus(bigint.modulus_id()); let bytes_len = ((modulus - BigUint::from(1_u32)).bits() - 1) / 8 + 1; output_count = bytes_len as usize; - (field_inputs, vec![FieldElement::from(bytes_len as u128)]) + assert!(bytes_len == 32); + (field_inputs, vec![]) } BlackBoxFunc::BigIntFromLeBytes => { let invalid_input = "ICE - bigint operation requires 2 inputs"; @@ -1463,6 +1464,7 @@ impl AcirContext { id } + // TODO: Delete this method once we remove the `Brillig` opcode pub(crate) fn brillig( &mut self, predicate: AcirVar, @@ -1553,6 +1555,105 @@ impl AcirContext { Ok(outputs_var) } + #[allow(clippy::too_many_arguments)] + pub(crate) fn brillig_call( + &mut self, + predicate: AcirVar, + generated_brillig: &GeneratedBrillig, + inputs: Vec, + outputs: Vec, + attempt_execution: bool, + unsafe_return_values: bool, + brillig_function_index: u32, + ) -> Result, RuntimeError> { + let brillig_inputs = try_vecmap(inputs, |i| -> Result<_, InternalError> { + match i { + AcirValue::Var(var, 
_) => Ok(BrilligInputs::Single(self.var_to_expression(var)?)), + AcirValue::Array(vars) => { + let mut var_expressions: Vec = Vec::new(); + for var in vars { + self.brillig_array_input(&mut var_expressions, var)?; + } + Ok(BrilligInputs::Array(var_expressions)) + } + AcirValue::DynamicArray(AcirDynamicArray { block_id, .. }) => { + Ok(BrilligInputs::MemoryArray(block_id)) + } + } + })?; + + // Optimistically try executing the brillig now, if we can complete execution they just return the results. + // This is a temporary measure pending SSA optimizations being applied to Brillig which would remove constant-input opcodes (See #2066) + // + // We do _not_ want to do this in the situation where the `main` function is unconstrained, as if execution succeeds + // the entire program will be replaced with witness constraints to its outputs. + if attempt_execution { + if let Some(brillig_outputs) = + self.execute_brillig(&generated_brillig.byte_code, &brillig_inputs, &outputs) + { + return Ok(brillig_outputs); + } + } + + // Otherwise we must generate ACIR for it and execute at runtime. + let mut brillig_outputs = Vec::new(); + let outputs_var = vecmap(outputs, |output| match output { + AcirType::NumericType(_) => { + let witness_index = self.acir_ir.next_witness_index(); + brillig_outputs.push(BrilligOutputs::Simple(witness_index)); + let var = self.add_data(AcirVarData::Witness(witness_index)); + AcirValue::Var(var, output.clone()) + } + AcirType::Array(element_types, size) => { + let (acir_value, witnesses) = self.brillig_array_output(&element_types, size); + brillig_outputs.push(BrilligOutputs::Array(witnesses)); + acir_value + } + }); + let predicate = self.var_to_expression(predicate)?; + + self.acir_ir.brillig_call( + Some(predicate), + generated_brillig, + brillig_inputs, + brillig_outputs, + brillig_function_index, + ); + + fn range_constraint_value( + context: &mut AcirContext, + value: &AcirValue, + ) -> Result<(), RuntimeError> { + match value { + AcirValue::Var(var, typ) => { + let numeric_type = match typ { + AcirType::NumericType(numeric_type) => numeric_type, + _ => unreachable!("`AcirValue::Var` may only hold primitive values"), + }; + context.range_constrain_var(*var, numeric_type, None)?; + } + AcirValue::Array(values) => { + for value in values { + range_constraint_value(context, value)?; + } + } + AcirValue::DynamicArray(_) => { + unreachable!("Brillig opcodes cannot return dynamic arrays") + } + } + Ok(()) + } + + // This is a hack to ensure that if we're compiling a brillig entrypoint function then + // we don't also add a number of range constraints. 
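A rough model of the per-call-site decision `AcirContext::brillig_call` makes, under the assumption that `compile_time_result` stands in for a successful `execute_brillig` run on constant inputs; the enum and function below are illustrative only.

```rust
enum BrilligCallPlan {
    /// Execution completed at compile time; the call is replaced by its concrete outputs.
    ConstantFolded(Vec<u128>),
    /// Emit `Opcode::BrilligCall { id, .. }` and optionally range-constrain the outputs.
    EmitOpcode { function_index: u32, range_constrain_outputs: bool },
}

fn plan_brillig_call(
    attempt_execution: bool,
    compile_time_result: Option<Vec<u128>>,
    unsafe_return_values: bool,
    function_index: u32,
) -> BrilligCallPlan {
    // Optimistic path: only taken when requested and when the bytecode actually ran
    // to completion on the (constant) inputs.
    if attempt_execution {
        if let Some(outputs) = compile_time_result {
            return BrilligCallPlan::ConstantFolded(outputs);
        }
    }
    // Otherwise the call happens at runtime; range constraints are skipped only for
    // Brillig entry points (`unsafe_return_values == true`).
    BrilligCallPlan::EmitOpcode {
        function_index,
        range_constrain_outputs: !unsafe_return_values,
    }
}

fn main() {
    match plan_brillig_call(true, None, false, 3) {
        BrilligCallPlan::EmitOpcode { function_index: 3, range_constrain_outputs: true } => {}
        _ => panic!("expected a runtime call with range-constrained outputs"),
    }
}
```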
+ if !unsafe_return_values { + for output_var in &outputs_var { + range_constraint_value(self, output_var)?; + } + } + Ok(outputs_var) + } + fn brillig_array_input( &mut self, var_expressions: &mut Vec, @@ -1623,7 +1724,7 @@ impl AcirContext { let outputs_var = vecmap(outputs_types.iter(), |output| match output { AcirType::NumericType(_) => { let var = self.add_data(AcirVarData::Const( - memory.next().expect("Missing return data").value, + memory.next().expect("Missing return data").to_field(), )); AcirValue::Var(var, output.clone()) } @@ -1657,7 +1758,7 @@ impl AcirContext { AcirType::NumericType(_) => { let memory_value = memory_iter.next().expect("ICE: Unexpected end of memory"); - let var = self.add_data(AcirVarData::Const(memory_value.value)); + let var = self.add_data(AcirVarData::Const(memory_value.to_field())); array_values.push_back(AcirValue::Var(var, element_type.clone())); } } @@ -1763,6 +1864,7 @@ impl AcirContext { id: u32, inputs: Vec, output_count: usize, + predicate: AcirVar, ) -> Result, RuntimeError> { let inputs = self.prepare_inputs_for_black_box_func_call(inputs)?; let inputs = inputs @@ -1778,7 +1880,8 @@ impl AcirContext { let results = vecmap(&outputs, |witness_index| self.add_data(AcirVarData::Witness(*witness_index))); - self.acir_ir.push_opcode(Opcode::Call { id, inputs, outputs }); + let predicate = Some(self.var_to_expression(predicate)?); + self.acir_ir.push_opcode(Opcode::Call { id, inputs, outputs, predicate }); Ok(results) } } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs index b43110b2f5b..999ff2ddb5d 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/acir_ir/generated_acir.rs @@ -167,17 +167,27 @@ impl GeneratedAcir { BlackBoxFuncCall::XOR { lhs: inputs[0][0], rhs: inputs[1][0], output: outputs[0] } } BlackBoxFunc::RANGE => BlackBoxFuncCall::RANGE { input: inputs[0][0] }, - BlackBoxFunc::SHA256 => BlackBoxFuncCall::SHA256 { inputs: inputs[0].clone(), outputs }, - BlackBoxFunc::Blake2s => { - BlackBoxFuncCall::Blake2s { inputs: inputs[0].clone(), outputs } - } - BlackBoxFunc::Blake3 => BlackBoxFuncCall::Blake3 { inputs: inputs[0].clone(), outputs }, + BlackBoxFunc::SHA256 => BlackBoxFuncCall::SHA256 { + inputs: inputs[0].clone(), + outputs: outputs.try_into().expect("Compiler should generate correct size outputs"), + }, + BlackBoxFunc::Blake2s => BlackBoxFuncCall::Blake2s { + inputs: inputs[0].clone(), + outputs: outputs.try_into().expect("Compiler should generate correct size outputs"), + }, + BlackBoxFunc::Blake3 => BlackBoxFuncCall::Blake3 { + inputs: inputs[0].clone(), + outputs: outputs.try_into().expect("Compiler should generate correct size outputs"), + }, BlackBoxFunc::SchnorrVerify => { BlackBoxFuncCall::SchnorrVerify { public_key_x: inputs[0][0], public_key_y: inputs[1][0], // Schnorr signature is an r & s, 32 bytes each - signature: inputs[2].clone(), + signature: inputs[2] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), message: inputs[3].clone(), output: outputs[0], } @@ -195,24 +205,48 @@ impl GeneratedAcir { BlackBoxFunc::EcdsaSecp256k1 => { BlackBoxFuncCall::EcdsaSecp256k1 { // 32 bytes for each public key co-ordinate - public_key_x: inputs[0].clone(), - public_key_y: inputs[1].clone(), + public_key_x: inputs[0] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + public_key_y: inputs[1] + 
.clone() + .try_into() + .expect("Compiler should generate correct size inputs"), // (r,s) are both 32 bytes each, so signature // takes up 64 bytes - signature: inputs[2].clone(), - hashed_message: inputs[3].clone(), + signature: inputs[2] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + hashed_message: inputs[3] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), output: outputs[0], } } BlackBoxFunc::EcdsaSecp256r1 => { BlackBoxFuncCall::EcdsaSecp256r1 { // 32 bytes for each public key co-ordinate - public_key_x: inputs[0].clone(), - public_key_y: inputs[1].clone(), + public_key_x: inputs[0] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + public_key_y: inputs[1] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), // (r,s) are both 32 bytes each, so signature // takes up 64 bytes - signature: inputs[2].clone(), - hashed_message: inputs[3].clone(), + signature: inputs[2] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + hashed_message: inputs[3] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), output: outputs[0], } } @@ -240,15 +274,21 @@ impl GeneratedAcir { } }; - BlackBoxFuncCall::Keccak256VariableLength { + BlackBoxFuncCall::Keccak256 { inputs: inputs[0].clone(), var_message_size, - outputs, + outputs: outputs + .try_into() + .expect("Compiler should generate correct size outputs"), } } - BlackBoxFunc::Keccakf1600 => { - BlackBoxFuncCall::Keccakf1600 { inputs: inputs[0].clone(), outputs } - } + BlackBoxFunc::Keccakf1600 => BlackBoxFuncCall::Keccakf1600 { + inputs: inputs[0] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + outputs: outputs.try_into().expect("Compiler should generate correct size outputs"), + }, BlackBoxFunc::RecursiveAggregation => BlackBoxFuncCall::RecursiveAggregation { verification_key: inputs[0].clone(), proof: inputs[1].clone(), @@ -290,9 +330,15 @@ impl GeneratedAcir { len: constant_inputs[0].to_u128() as u32, }, BlackBoxFunc::Sha256Compression => BlackBoxFuncCall::Sha256Compression { - inputs: inputs[0].clone(), - hash_values: inputs[1].clone(), - outputs, + inputs: inputs[0] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + hash_values: inputs[1] + .clone() + .try_into() + .expect("Compiler should generate correct size inputs"), + outputs: outputs.try_into().expect("Compiler should generate correct size outputs"), }, }; @@ -543,6 +589,7 @@ impl GeneratedAcir { Ok(()) } + // TODO: Delete this method once we remove the `Brillig` opcode pub(crate) fn brillig( &mut self, predicate: Option, @@ -571,6 +618,37 @@ impl GeneratedAcir { } } + pub(crate) fn brillig_call( + &mut self, + predicate: Option, + generated_brillig: &GeneratedBrillig, + inputs: Vec, + outputs: Vec, + brillig_function_index: u32, + ) { + let opcode = + AcirOpcode::BrilligCall { id: brillig_function_index, inputs, outputs, predicate }; + self.push_opcode(opcode); + for (brillig_index, call_stack) in generated_brillig.locations.iter() { + self.locations.insert( + OpcodeLocation::Brillig { + acir_index: self.opcodes.len() - 1, + brillig_index: *brillig_index, + }, + call_stack.clone(), + ); + } + for (brillig_index, message) in generated_brillig.assert_messages.iter() { + self.assert_messages.insert( + OpcodeLocation::Brillig { + acir_index: self.opcodes.len() - 1, + brillig_index: *brillig_index, + }, + message.clone(), + 
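The `try_into().expect(...)` conversions above all rely on std's `TryFrom<Vec<T>> for [T; N]`; a minimal example with the same failure message, using `u8` in place of the real input type:

```rust
/// Convert a dynamically sized input column into the fixed-size array the opcode expects.
fn to_signature(bytes: Vec<u8>) -> [u8; 64] {
    bytes.try_into().expect("Compiler should generate correct size inputs")
}

fn main() {
    let sig = to_signature(vec![0u8; 64]);
    assert_eq!(sig.len(), 64);
    // A vector of any other length would panic with the same message the codegen uses.
}
```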
); + } + } + pub(crate) fn last_acir_opcode_location(&self) -> OpcodeLocation { OpcodeLocation::Acir(self.opcodes.len() - 1) } diff --git a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs index ff082791c02..839d6a38281 100644 --- a/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/acir_gen/mod.rs @@ -1,13 +1,14 @@ //! This file holds the pass to convert from Noir's SSA IR to ACIR. mod acir_ir; -use std::collections::HashSet; +use std::collections::{BTreeMap, HashSet}; use std::fmt::Debug; use self::acir_ir::acir_variable::{AcirContext, AcirType, AcirVar}; use super::function_builder::data_bus::DataBus; use super::ir::dfg::CallStack; -use super::ir::instruction::ConstrainError; +use super::ir::function::FunctionId; +use super::ir::instruction::{ConstrainError, UserDefinedConstrainError}; use super::{ ir::{ dfg::DataFlowGraph, @@ -28,6 +29,7 @@ use crate::errors::{InternalError, InternalWarning, RuntimeError, SsaReport}; use crate::ssa::ir::function::InlineType; pub(crate) use acir_ir::generated_acir::GeneratedAcir; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::native_types::Witness; use acvm::acir::BlackBoxFunc; use acvm::{ @@ -39,9 +41,54 @@ use im::Vector; use iter_extended::{try_vecmap, vecmap}; use noirc_frontend::Distinctness; +#[derive(Default)] +struct SharedContext { + /// Final list of Brillig functions which will be part of the final program + /// This is shared across `Context` structs as we want one list of Brillig + /// functions across all ACIR artifacts + generated_brillig: Vec, + + /// Maps SSA function index -> Final generated Brillig artifact index. + /// There can be Brillig functions specified in SSA which do not act as + /// entry points in ACIR (e.g. only called by other Brillig functions) + /// This mapping is necessary to use the correct function pointer for a Brillig call. + /// This uses the brillig parameters in the map since using slices with different lengths + /// needs to create different brillig entrypoints + brillig_generated_func_pointers: BTreeMap<(FunctionId, Vec), u32>, +} + +impl SharedContext { + fn generated_brillig_pointer( + &self, + func_id: FunctionId, + arguments: Vec, + ) -> Option<&u32> { + self.brillig_generated_func_pointers.get(&(func_id, arguments)) + } + + fn generated_brillig(&self, func_pointer: usize) -> &GeneratedBrillig { + &self.generated_brillig[func_pointer] + } + + fn insert_generated_brillig( + &mut self, + func_id: FunctionId, + arguments: Vec, + generated_pointer: u32, + code: GeneratedBrillig, + ) { + self.brillig_generated_func_pointers.insert((func_id, arguments), generated_pointer); + self.generated_brillig.push(code); + } + + fn new_generated_pointer(&self) -> u32 { + self.generated_brillig.len() as u32 + } +} + /// Context struct for the acir generation pass. /// May be similar to the Evaluator struct in the current SSA IR. -struct Context { +struct Context<'a> { /// Maps SSA values to `AcirVar`. /// /// This is needed so that we only create a single @@ -91,6 +138,9 @@ struct Context { max_block_id: u32, data_bus: DataBus, + + /// Contains state that is generated and also used across ACIR functions + shared_context: &'a mut SharedContext, } #[derive(Clone)] @@ -181,11 +231,12 @@ impl Ssa { self, brillig: &Brillig, abi_distinctness: Distinctness, - ) -> Result, RuntimeError> { + ) -> Result<(Vec, Vec), RuntimeError> { let mut acirs = Vec::new(); // TODO: can we parallelise this? 
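A self-contained model of the deduplication `SharedContext` provides: one generated artifact per `(FunctionId, Vec<BrilligParameter>)` pair, addressed by its index in the final Brillig function list. The aliases `Params`/`Bytecode` and the method name `get_or_generate` are stand-ins for the real types and the logic split across `generated_brillig_pointer` / `insert_generated_brillig`.

```rust
use std::collections::BTreeMap;

type FunctionId = u32;     // stand-in for the SSA `FunctionId`
type Params = Vec<String>; // stand-in for `Vec<BrilligParameter>`
type Bytecode = Vec<u8>;   // stand-in for `GeneratedBrillig`

#[derive(Default)]
struct SharedContextModel {
    generated_brillig: Vec<Bytecode>,
    func_pointers: BTreeMap<(FunctionId, Params), u32>,
}

impl SharedContextModel {
    /// Reuse the pointer for an already-generated (function, parameter shapes) pair,
    /// otherwise generate the bytecode once and record its index.
    fn get_or_generate(
        &mut self,
        id: FunctionId,
        params: Params,
        generate: impl FnOnce() -> Bytecode,
    ) -> u32 {
        if let Some(pointer) = self.func_pointers.get(&(id, params.clone())) {
            return *pointer;
        }
        let pointer = self.generated_brillig.len() as u32;
        self.generated_brillig.push(generate());
        self.func_pointers.insert((id, params), pointer);
        pointer
    }
}

fn main() {
    let mut ctx = SharedContextModel::default();
    let p0 = ctx.get_or_generate(1, vec!["Field".into(), "Field".into()], || vec![0xAA]);
    let p1 = ctx.get_or_generate(1, vec!["Field".into(), "Field".into()], || vec![0xBB]);
    let p2 = ctx.get_or_generate(2, vec!["Field".into()], || vec![0xCC]);
    assert_eq!((p0, p1, p2), (0, 0, 1)); // repeated calls share one artifact
    assert_eq!(ctx.generated_brillig.len(), 2);
}
```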
+ let mut shared_context = SharedContext::default(); for function in self.functions.values() { - let context = Context::new(); + let context = Context::new(&mut shared_context); if let Some(mut generated_acir) = context.convert_ssa_function(&self, function, brillig)? { @@ -194,6 +245,10 @@ impl Ssa { } } + let brillig = vecmap(shared_context.generated_brillig, |brillig| BrilligBytecode { + bytecode: brillig.byte_code, + }); + // TODO: check whether doing this for a single circuit's return witnesses is correct. // We probably need it for all foldable circuits, as any circuit being folded is essentially an entry point. However, I do not know how that // plays a part when we potentially want not inlined functions normally as part of the compiler. @@ -215,15 +270,15 @@ impl Ssa { .collect(); main_func_acir.return_witnesses = distinct_return_witness; - Ok(acirs) } - Distinctness::DuplicationAllowed => Ok(acirs), + Distinctness::DuplicationAllowed => {} } + Ok((acirs, brillig)) } } -impl Context { - fn new() -> Context { +impl<'a> Context<'a> { + fn new(shared_context: &'a mut SharedContext) -> Context<'a> { let mut acir_context = AcirContext::default(); let current_side_effects_enabled_var = acir_context.add_constant(FieldElement::one()); @@ -237,6 +292,7 @@ impl Context { internal_mem_block_lengths: HashMap::default(), max_block_id: 0, data_bus: DataBus::default(), + shared_context, } } @@ -307,18 +363,22 @@ impl Context { let outputs: Vec = vecmap(main_func.returns(), |result_id| dfg.type_of_value(*result_id).into()); - let code = self.gen_brillig_for(main_func, arguments, brillig)?; + let code = self.gen_brillig_for(main_func, arguments.clone(), brillig)?; // We specifically do not attempt execution of the brillig code being generated as this can result in it being // replaced with constraints on witnesses to the program outputs. 
- let output_values = self.acir_context.brillig( + let output_values = self.acir_context.brillig_call( self.current_side_effects_enabled_var, - code, + &code, inputs, outputs, false, true, + // We are guaranteed to have a Brillig function pointer of `0` as main itself is marked as unconstrained + 0, )?; + self.shared_context.insert_generated_brillig(main_func.id(), arguments, 0, code); + let output_vars: Vec<_> = output_values .iter() .flat_map(|value| value.clone().flatten()) @@ -472,8 +532,13 @@ impl Context { let assert_message = if let Some(error) = assert_message { match error.as_ref() { - ConstrainError::Static(string) => Some(string.clone()), - ConstrainError::Dynamic(call_instruction) => { + ConstrainError::Intrinsic(string) + | ConstrainError::UserDefined(UserDefinedConstrainError::Static(string)) => { + Some(string.clone()) + } + ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic( + call_instruction, + )) => { self.convert_ssa_call(call_instruction, dfg, ssa, brillig, &[])?; None } @@ -574,14 +639,15 @@ impl Context { sum + dfg.try_get_array_length(*result_id).unwrap_or(1) }); - let acir_program_id = ssa - .id_to_index + let acir_function_id = ssa + .entry_point_to_generated_index .get(id) .expect("ICE: should have an associated final index"); let output_vars = self.acir_context.call_acir_function( - *acir_program_id, + *acir_function_id, inputs, output_count, + self.current_side_effects_enabled_var, )?; let output_values = self.convert_vars_to_values(output_vars, dfg, result_ids); @@ -600,24 +666,53 @@ impl Context { ); } } - let inputs = vecmap(arguments, |arg| self.convert_value(*arg, dfg)); let arguments = self.gen_brillig_parameters(arguments, dfg); - let code = self.gen_brillig_for(func, arguments, brillig)?; - let outputs: Vec = vecmap(result_ids, |result_id| { dfg.type_of_value(*result_id).into() }); - let output_values = self.acir_context.brillig( - self.current_side_effects_enabled_var, - code, - inputs, - outputs, - true, - false, - )?; + // Check whether we have already generated Brillig for this function + // If we have, re-use the generated code to set-up the Brillig call. + let output_values = if let Some(generated_pointer) = self + .shared_context + .generated_brillig_pointer(*id, arguments.clone()) + { + let code = self + .shared_context + .generated_brillig(*generated_pointer as usize); + self.acir_context.brillig_call( + self.current_side_effects_enabled_var, + code, + inputs, + outputs, + true, + false, + *generated_pointer, + )? 
+ } else { + let code = + self.gen_brillig_for(func, arguments.clone(), brillig)?; + let generated_pointer = + self.shared_context.new_generated_pointer(); + let output_values = self.acir_context.brillig_call( + self.current_side_effects_enabled_var, + &code, + inputs, + outputs, + true, + false, + generated_pointer, + )?; + self.shared_context.insert_generated_brillig( + *id, + arguments, + generated_pointer, + code, + ); + output_values + }; // Compiler sanity check assert_eq!(result_ids.len(), output_values.len(), "ICE: The number of Brillig output values should match the result ids in SSA"); @@ -2446,19 +2541,27 @@ mod test { }, }; - fn build_basic_foo_with_return(builder: &mut FunctionBuilder, foo_id: FunctionId) { - // acir(fold) fn foo f1 { + fn build_basic_foo_with_return( + builder: &mut FunctionBuilder, + foo_id: FunctionId, + is_brillig_func: bool, + ) { + // fn foo f1 { // b0(v0: Field, v1: Field): // v2 = eq v0, v1 // constrain v2 == u1 0 // return v0 // } - builder.new_function("foo".into(), foo_id, InlineType::Fold); + if is_brillig_func { + builder.new_brillig_function("foo".into(), foo_id); + } else { + builder.new_function("foo".into(), foo_id, InlineType::Fold); + } let foo_v0 = builder.add_parameter(Type::field()); let foo_v1 = builder.add_parameter(Type::field()); let foo_equality_check = builder.insert_binary(foo_v0, BinaryOp::Eq, foo_v1); - let zero = builder.field_constant(0u128); + let zero = builder.numeric_constant(0u128, Type::unsigned(1)); builder.insert_constrain(foo_equality_check, zero, None); builder.terminate_with_return(vec![foo_v0]); } @@ -2492,11 +2595,11 @@ mod test { builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id); + build_basic_foo_with_return(&mut builder, foo_id, false); let ssa = builder.finish(); - let acir_functions = ssa + let (acir_functions, _) = ssa .into_acir(&Brillig::default(), noirc_frontend::Distinctness::Distinct) .expect("Should compile manually written SSA into ACIR"); // Expected result: @@ -2588,11 +2691,11 @@ mod test { builder.insert_constrain(main_call1_results[0], main_call2_results[0], None); builder.terminate_with_return(vec![]); - build_basic_foo_with_return(&mut builder, foo_id); + build_basic_foo_with_return(&mut builder, foo_id, false); let ssa = builder.finish(); - let acir_functions = ssa + let (acir_functions, _) = ssa .into_acir(&Brillig::default(), noirc_frontend::Distinctness::Distinct) .expect("Should compile manually written SSA into ACIR"); // The expected result should look very similar to the abvoe test expect that the input witnesses of the `Call` @@ -2679,11 +2782,11 @@ mod test { .to_vec(); builder.terminate_with_return(vec![foo_call[0]]); - build_basic_foo_with_return(&mut builder, foo_id); + build_basic_foo_with_return(&mut builder, foo_id, false); let ssa = builder.finish(); - let acir_functions = ssa + let (acir_functions, _) = ssa .into_acir(&Brillig::default(), noirc_frontend::Distinctness::Distinct) .expect("Should compile manually written SSA into ACIR"); @@ -2721,7 +2824,7 @@ mod test { expected_outputs: Vec, ) { match opcode { - Opcode::Call { id, inputs, outputs } => { + Opcode::Call { id, inputs, outputs, .. 
} => { assert_eq!( *id, expected_id, "Main was expected to call {expected_id} but got {}", @@ -2743,4 +2846,82 @@ mod test { _ => panic!("Expected only Call opcode"), } } + + // Test that given multiple calls to the same brillig function we generate only one bytecode + // and the appropriate Brillig call opcodes are generated + #[test] + fn multiple_brillig_calls_one_bytecode() { + // acir(inline) fn main f0 { + // b0(v0: Field, v1: Field): + // v3 = call f1(v0, v1) + // v4 = call f1(v0, v1) + // v5 = call f1(v0, v1) + // v6 = call f1(v0, v1) + // return + // } + // brillig fn foo f1 { + // b0(v0: Field, v1: Field): + // v2 = eq v0, v1 + // constrain v2 == u1 0 + // return v0 + // } + // brillig fn foo f2 { + // b0(v0: Field, v1: Field): + // v2 = eq v0, v1 + // constrain v2 == u1 0 + // return v0 + // } + let foo_id = Id::test_new(0); + let mut builder = FunctionBuilder::new("main".into(), foo_id); + let main_v0 = builder.add_parameter(Type::field()); + let main_v1 = builder.add_parameter(Type::field()); + + let foo_id = Id::test_new(1); + let foo = builder.import_function(foo_id); + let bar_id = Id::test_new(2); + let bar = builder.import_function(bar_id); + + // Insert multiple calls to the same Brillig function + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + // Interleave a call to a separate Brillig function to make sure that we can call multiple separate Brillig functions + builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(foo, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.insert_call(bar, vec![main_v0, main_v1], vec![Type::field()]).to_vec(); + builder.terminate_with_return(vec![]); + + build_basic_foo_with_return(&mut builder, foo_id, true); + build_basic_foo_with_return(&mut builder, bar_id, true); + + let ssa = builder.finish(); + let brillig = ssa.to_brillig(false); + println!("{}", ssa); + + let (acir_functions, brillig_functions) = ssa + .into_acir(&brillig, noirc_frontend::Distinctness::Distinct) + .expect("Should compile manually written SSA into ACIR"); + + assert_eq!(acir_functions.len(), 1, "Should only have a `main` ACIR function"); + assert_eq!( + brillig_functions.len(), + 2, + "Should only have generated a single Brillig function" + ); + + let main_acir = &acir_functions[0]; + let main_opcodes = main_acir.opcodes(); + assert_eq!(main_opcodes.len(), 6, "Should have four calls to f1 and two calls to f2"); + + // We should only have `BrilligCall` opcodes in `main` + for (i, opcode) in main_opcodes.iter().enumerate() { + match opcode { + Opcode::BrilligCall { id, .. } => { + let expected_id = if i == 3 || i == 5 { 1 } else { 0 }; + assert_eq!(*id, expected_id, "Expected an id of {expected_id} but got {id}"); + } + _ => panic!("Expected only Brillig call opcode"), + } + } + } } diff --git a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs index d3e5e506111..75a427397b6 100644 --- a/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/function_builder/mod.rs @@ -326,6 +326,12 @@ impl FunctionBuilder { self.insert_instruction(Instruction::DecrementRc { value }, None); } + /// Insert an enable_side_effects_if instruction. 
These are normally only automatically + /// inserted during the flattening pass when branching is removed. + pub(crate) fn insert_enable_side_effects_if(&mut self, condition: ValueId) { + self.insert_instruction(Instruction::EnableSideEffects { condition }, None); + } + /// Terminates the current block with the given terminator instruction /// if the current block does not already have a terminator instruction. fn terminate_block_with(&mut self, terminator: TerminatorInstruction) { diff --git a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs index 54a2ee893f4..8bc9a8de01e 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/instruction.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/instruction.rs @@ -360,9 +360,11 @@ impl Instruction { let lhs = f(*lhs); let rhs = f(*rhs); let assert_message = assert_message.as_ref().map(|error| match error.as_ref() { - ConstrainError::Dynamic(call_instr) => { + ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic(call_instr)) => { let new_instr = call_instr.map_values(f); - Box::new(ConstrainError::Dynamic(new_instr)) + Box::new(ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic( + new_instr, + ))) } _ => error.clone(), }); @@ -432,7 +434,10 @@ impl Instruction { f(*lhs); f(*rhs); if let Some(error) = assert_error.as_ref() { - if let ConstrainError::Dynamic(call_instr) = error.as_ref() { + if let ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic( + call_instr, + )) = error.as_ref() + { call_instr.for_each_value(f); } } @@ -654,6 +659,14 @@ impl Instruction { #[derive(Debug, PartialEq, Eq, Hash, Clone)] pub(crate) enum ConstrainError { // These are errors which have been hardcoded during SSA gen + Intrinsic(String), + // These are errors issued by the user + UserDefined(UserDefinedConstrainError), +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone)] +pub(crate) enum UserDefinedConstrainError { + // These are errors which come from static strings specified by a Noir program Static(String), // These are errors which come from runtime expressions specified by a Noir program // We store an `Instruction` as we want this Instruction to be atomic in SSA with @@ -663,7 +676,7 @@ pub(crate) enum ConstrainError { impl From for ConstrainError { fn from(value: String) -> Self { - ConstrainError::Static(value) + ConstrainError::Intrinsic(value) } } diff --git a/compiler/noirc_evaluator/src/ssa/ir/printer.rs b/compiler/noirc_evaluator/src/ssa/ir/printer.rs index ef5ba39d0e4..23395617c85 100644 --- a/compiler/noirc_evaluator/src/ssa/ir/printer.rs +++ b/compiler/noirc_evaluator/src/ssa/ir/printer.rs @@ -9,7 +9,10 @@ use iter_extended::vecmap; use super::{ basic_block::BasicBlockId, function::Function, - instruction::{ConstrainError, Instruction, InstructionId, TerminatorInstruction}, + instruction::{ + ConstrainError, Instruction, InstructionId, TerminatorInstruction, + UserDefinedConstrainError, + }, value::ValueId, }; @@ -211,10 +214,11 @@ fn display_constrain_error( f: &mut Formatter, ) -> Result { match error { - ConstrainError::Static(assert_message_string) => { + ConstrainError::Intrinsic(assert_message_string) + | ConstrainError::UserDefined(UserDefinedConstrainError::Static(assert_message_string)) => { writeln!(f, "{assert_message_string:?}") } - ConstrainError::Dynamic(assert_message_call) => { + ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic(assert_message_call)) => { display_instruction_inner(function, assert_message_call, f) } } diff --git 
a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs index 6cac8c91bc3..5a7134f3486 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs @@ -607,4 +607,55 @@ mod test { assert_eq!(main.dfg[instructions[4]], Instruction::Constrain(v1, v_true, None)); assert_eq!(main.dfg[instructions[5]], Instruction::Constrain(v2, v_false, None)); } + + // Regression for #4600 + #[test] + fn array_get_regression() { + // fn main f0 { + // b0(v0: u1, v1: u64): + // enable_side_effects_if v0 + // v2 = array_get [Field 0, Field 1], index v1 + // v3 = not v0 + // enable_side_effects_if v3 + // v4 = array_get [Field 0, Field 1], index v1 + // } + // + // We want to make sure after constant folding both array_gets remain since they are + // under different enable_side_effects_if contexts and thus one may be disabled while + // the other is not. If one is removed, it is possible e.g. v4 is replaced with v2 which + // is disabled (only gets from index 0) and thus returns the wrong result. + let main_id = Id::test_new(0); + + // Compiling main + let mut builder = FunctionBuilder::new("main".into(), main_id); + let v0 = builder.add_parameter(Type::bool()); + let v1 = builder.add_parameter(Type::unsigned(64)); + + builder.insert_enable_side_effects_if(v0); + + let zero = builder.field_constant(0u128); + let one = builder.field_constant(1u128); + + let typ = Type::Array(Rc::new(vec![Type::field()]), 2); + let array = builder.array_constant(vec![zero, one].into(), typ); + + let _v2 = builder.insert_array_get(array, v1, Type::field()); + let v3 = builder.insert_not(v0); + + builder.insert_enable_side_effects_if(v3); + let _v4 = builder.insert_array_get(array, v1, Type::field()); + + // Expected output is unchanged + let ssa = builder.finish(); + let main = ssa.main(); + let instructions = main.dfg[main.entry_block()].instructions(); + let starting_instruction_count = instructions.len(); + assert_eq!(starting_instruction_count, 5); + + let ssa = ssa.fold_constants(); + let main = ssa.main(); + let instructions = main.dfg[main.entry_block()].instructions(); + let ending_instruction_count = instructions.len(); + assert_eq!(starting_instruction_count, ending_instruction_count); + } } diff --git a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs index ca6527eb0ec..aa0368cc2dd 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/defunctionalize.rs @@ -14,7 +14,7 @@ use crate::ssa::{ ir::{ basic_block::BasicBlockId, function::{Function, FunctionId, Signature}, - instruction::{BinaryOp, ConstrainError, Instruction}, + instruction::{BinaryOp, ConstrainError, Instruction, UserDefinedConstrainError}, types::{NumericType, Type}, value::{Value, ValueId}, }, @@ -93,10 +93,9 @@ impl DefunctionalizationContext { // Constrain instruction potentially hold a call instruction themselves // thus we need to account for them. 
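A behavioural model of why the regression test above must keep both `array_get`s: under a disabled `enable_side_effects_if` predicate the read is effectively forced to index 0 (per the test's comment), so folding the two reads together could return the wrong value. This is a toy illustration, not compiler code.

```rust
/// Behavioural model of a predicated `array_get`: with side effects disabled the read is
/// forced in-range (index 0), so it is not interchangeable with the same-looking read
/// performed under an enabled predicate.
fn predicated_array_get(array: &[u64; 2], index: u64, side_effects_enabled: bool) -> u64 {
    let effective_index = if side_effects_enabled { index } else { 0 };
    array[effective_index as usize]
}

fn main() {
    let array = [0, 1];
    assert_eq!(predicated_array_get(&array, 1, true), 1);
    assert_eq!(predicated_array_get(&array, 1, false), 0); // folding the two would be wrong
}
```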
Instruction::Constrain(_, _, Some(constrain_error)) => { - if let ConstrainError::Dynamic(Instruction::Call { - func: target_func_id, - arguments, - }) = constrain_error.as_ref() + if let ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic( + Instruction::Call { func: target_func_id, arguments }, + )) = constrain_error.as_ref() { (*target_func_id, arguments) } else { @@ -138,9 +137,11 @@ impl DefunctionalizationContext { if let Instruction::Constrain(lhs, rhs, constrain_error_call) = instruction { let new_error_call = if let Some(error) = constrain_error_call { match error.as_ref() { - ConstrainError::Dynamic(_) => { - Some(Box::new(ConstrainError::Dynamic(new_instruction))) - } + ConstrainError::UserDefined( + UserDefinedConstrainError::Dynamic(_), + ) => Some(Box::new(ConstrainError::UserDefined( + UserDefinedConstrainError::Dynamic(new_instruction), + ))), _ => None, } } else { diff --git a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs index 8110e3469f1..6cf155f85ab 100644 --- a/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs +++ b/compiler/noirc_evaluator/src/ssa/opt/unrolling.rs @@ -34,10 +34,41 @@ use crate::{ use fxhash::FxHashMap as HashMap; impl Ssa { - /// Unroll all loops in each SSA function. + /// Loop unrolling can return errors, since ACIR functions need to be fully unrolled. + /// This meta-pass will keep trying to unroll loops and simplifying the SSA until no more errors are found. + pub(crate) fn unroll_loops_iteratively(mut ssa: Ssa) -> Result { + // Try to unroll loops first: + let mut unroll_errors; + (ssa, unroll_errors) = ssa.try_to_unroll_loops(); + + // Keep unrolling until no more errors are found + while !unroll_errors.is_empty() { + let prev_unroll_err_count = unroll_errors.len(); + + // Simplify the SSA before retrying + + // Do a mem2reg after the last unroll to aid simplify_cfg + ssa = ssa.mem2reg(); + ssa = ssa.simplify_cfg(); + // Do another mem2reg after simplify_cfg to aid the next unroll + ssa = ssa.mem2reg(); + + // Unroll again + (ssa, unroll_errors) = ssa.try_to_unroll_loops(); + // If we didn't manage to unroll any more loops, exit + if unroll_errors.len() >= prev_unroll_err_count { + return Err(unroll_errors.swap_remove(0)); + } + } + Ok(ssa) + } + + /// Tries to unroll all loops in each SSA function. /// If any loop cannot be unrolled, it is left as-is or in a partially unrolled state. + /// Returns the ssa along with all unrolling errors encountered #[tracing::instrument(level = "trace", skip(self))] - pub(crate) fn unroll_loops(mut self) -> Result { + pub(crate) fn try_to_unroll_loops(mut self) -> (Ssa, Vec) { + let mut errors = vec![]; for function in self.functions.values_mut() { // Loop unrolling in brillig can lead to a code explosion currently. This can // also be true for ACIR, but we have no alternative to unrolling in ACIR. @@ -46,12 +77,9 @@ impl Ssa { continue; } - // This check is always true with the addition of the above guard, but I'm - // keeping it in case the guard on brillig functions is ever removed. - let abort_on_error = matches!(function.runtime(), RuntimeType::Acir(_)); - find_all_loops(function).unroll_each_loop(function, abort_on_error)?; + errors.extend(find_all_loops(function).unroll_each_loop(function)); } - Ok(self) + (self, errors) } } @@ -115,34 +143,32 @@ fn find_all_loops(function: &Function) -> Loops { impl Loops { /// Unroll all loops within a given function. 
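`unroll_loops_iteratively` above replaces the previous all-or-nothing unrolling entry point: it retries unrolling after interleaved `mem2reg`/`simplify_cfg` passes and bails out only when a retry fails to reduce the number of unroll errors, which also guarantees termination because the error count must strictly decrease on every iteration. The control flow in isolation, using hypothetical `State`/`Error` types in place of `Ssa` and `RuntimeError`:

```rust
#[derive(Debug)]
struct Error(&'static str);

struct State {
    failures: usize, // stand-in for "loops we could not unroll yet"
}

/// Stand-in for `try_to_unroll_loops`: returns the state plus any remaining errors.
fn try_pass(state: State) -> (State, Vec<Error>) {
    let errors = (0..state.failures).map(|_| Error("unknown loop bound")).collect();
    (state, errors)
}

/// Stand-in for the simplification passes run between retries.
fn simplify(mut state: State) -> State {
    state.failures = state.failures.saturating_sub(1);
    state
}

fn run_iteratively(state: State) -> Result<State, Error> {
    let (mut state, mut errors) = try_pass(state);
    while !errors.is_empty() {
        let previous = errors.len();
        state = simplify(state);
        (state, errors) = try_pass(state);
        // No progress since the last attempt: give up and surface the first error.
        if errors.len() >= previous {
            return Err(errors.swap_remove(0));
        }
    }
    Ok(state)
}

fn main() {
    assert!(run_iteratively(State { failures: 3 }).is_ok());
    assert!(run_iteratively(State { failures: 0 }).is_ok());
}
```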
/// Any loops which fail to be unrolled (due to using non-constant indices) will be unmodified. - fn unroll_each_loop( - mut self, - function: &mut Function, - abort_on_error: bool, - ) -> Result<(), RuntimeError> { + fn unroll_each_loop(mut self, function: &mut Function) -> Vec { + let mut unroll_errors = vec![]; while let Some(next_loop) = self.yet_to_unroll.pop() { // If we've previously modified a block in this loop we need to refresh the context. // This happens any time we have nested loops. if next_loop.blocks.iter().any(|block| self.modified_blocks.contains(block)) { let mut new_context = find_all_loops(function); new_context.failed_to_unroll = self.failed_to_unroll; - return new_context.unroll_each_loop(function, abort_on_error); + return unroll_errors + .into_iter() + .chain(new_context.unroll_each_loop(function)) + .collect(); } // Don't try to unroll the loop again if it is known to fail if !self.failed_to_unroll.contains(&next_loop.header) { match unroll_loop(function, &self.cfg, &next_loop) { Ok(_) => self.modified_blocks.extend(next_loop.blocks), - Err(call_stack) if abort_on_error => { - return Err(RuntimeError::UnknownLoopBound { call_stack }); - } - Err(_) => { + Err(call_stack) => { self.failed_to_unroll.insert(next_loop.header); + unroll_errors.push(RuntimeError::UnknownLoopBound { call_stack }); } } } } - Ok(()) + unroll_errors } } @@ -585,7 +611,8 @@ mod tests { // } // The final block count is not 1 because unrolling creates some unnecessary jmps. // If a simplify cfg pass is ran afterward, the expected block count will be 1. - let ssa = ssa.unroll_loops().expect("All loops should be unrolled"); + let (ssa, errors) = ssa.try_to_unroll_loops(); + assert_eq!(errors.len(), 0, "All loops should be unrolled"); assert_eq!(ssa.main().reachable_blocks().len(), 5); } @@ -634,6 +661,7 @@ mod tests { assert_eq!(ssa.main().reachable_blocks().len(), 4); // Expected that we failed to unroll the loop - assert!(ssa.unroll_loops().is_err()); + let (_, errors) = ssa.try_to_unroll_loops(); + assert_eq!(errors.len(), 1, "Expected to fail to unroll loop"); } } diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs index 3fe52f7f0e5..2a4b5276547 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/mod.rs @@ -29,7 +29,9 @@ use super::{ function_builder::data_bus::DataBus, ir::{ function::RuntimeType, - instruction::{BinaryOp, ConstrainError, Instruction, TerminatorInstruction}, + instruction::{ + BinaryOp, ConstrainError, Instruction, TerminatorInstruction, UserDefinedConstrainError, + }, types::Type, value::ValueId, }, @@ -241,6 +243,7 @@ impl<'a> FunctionContext<'a> { Ok(Tree::Branch(vec![string, field_count.into(), fields])) } + ast::Literal::Unit => Ok(Self::unit_value()), } } @@ -707,7 +710,9 @@ impl<'a> FunctionContext<'a> { if let ast::Expression::Literal(ast::Literal::Str(assert_message)) = assert_message_expr.as_ref() { - return Ok(Some(Box::new(ConstrainError::Static(assert_message.to_string())))); + return Ok(Some(Box::new(ConstrainError::UserDefined( + UserDefinedConstrainError::Static(assert_message.to_string()), + )))); } let ast::Expression::Call(call) = assert_message_expr.as_ref() else { @@ -733,7 +738,7 @@ impl<'a> FunctionContext<'a> { } let instr = Instruction::Call { func, arguments }; - Ok(Some(Box::new(ConstrainError::Dynamic(instr)))) + Ok(Some(Box::new(ConstrainError::UserDefined(UserDefinedConstrainError::Dynamic(instr))))) } fn codegen_assign(&mut self, 
assign: &ast::Assign) -> Result { diff --git a/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs b/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs index 9995c031145..b05a2cbc741 100644 --- a/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs +++ b/compiler/noirc_evaluator/src/ssa/ssa_gen/program.rs @@ -3,7 +3,7 @@ use std::{collections::BTreeMap, fmt::Display}; use iter_extended::btree_map; use crate::ssa::ir::{ - function::{Function, FunctionId}, + function::{Function, FunctionId, RuntimeType}, map::AtomicCounter, }; @@ -12,7 +12,11 @@ pub(crate) struct Ssa { pub(crate) functions: BTreeMap, pub(crate) main_id: FunctionId, pub(crate) next_id: AtomicCounter, - pub(crate) id_to_index: BTreeMap, + /// Maps SSA entry point function ID -> Final generated ACIR artifact index. + /// There can be functions specified in SSA which do not act as ACIR entry points. + /// This mapping is necessary to use the correct function pointer for an ACIR call, + /// as the final program artifact will be a list of only entry point functions. + pub(crate) entry_point_to_generated_index: BTreeMap, } impl Ssa { @@ -27,9 +31,26 @@ impl Ssa { (f.id(), f) }); - let id_to_index = btree_map(functions.iter().enumerate(), |(i, (id, _))| (*id, i as u32)); + let entry_point_to_generated_index = btree_map( + functions + .iter() + .filter(|(_, func)| { + let runtime = func.runtime(); + match func.runtime() { + RuntimeType::Acir(_) => runtime.is_entry_point() || func.id() == main_id, + RuntimeType::Brillig => false, + } + }) + .enumerate(), + |(i, (id, _))| (*id, i as u32), + ); - Self { functions, main_id, next_id: AtomicCounter::starting_after(max_id), id_to_index } + Self { + functions, + main_id, + next_id: AtomicCounter::starting_after(max_id), + entry_point_to_generated_index, + } } /// Returns the entry-point function of the program diff --git a/compiler/noirc_frontend/Cargo.toml b/compiler/noirc_frontend/Cargo.toml index 03b92e15032..7a23585bd23 100644 --- a/compiler/noirc_frontend/Cargo.toml +++ b/compiler/noirc_frontend/Cargo.toml @@ -17,6 +17,7 @@ iter-extended.workspace = true chumsky.workspace = true thiserror.workspace = true smol_str.workspace = true +im.workspace = true serde_json.workspace = true serde.workspace = true rustc-hash = "1.1.0" @@ -24,9 +25,16 @@ small-ord-set = "0.1.3" regex = "1.9.1" tracing.workspace = true petgraph = "0.6" +lalrpop-util = { version = "0.20.2", features = ["lexer"] } [dev-dependencies] base64.workspace = true strum = "0.24" strum_macros = "0.24" tempfile.workspace = true + +[build-dependencies] +lalrpop = "0.20.2" + +[features] +experimental_parser = [] diff --git a/compiler/noirc_frontend/build.rs b/compiler/noirc_frontend/build.rs new file mode 100644 index 00000000000..eb896a377ae --- /dev/null +++ b/compiler/noirc_frontend/build.rs @@ -0,0 +1,28 @@ +use std::fs::{read_to_string, File}; +use std::io::Write; + +fn main() { + lalrpop::Configuration::new() + .emit_rerun_directives(true) + .use_cargo_dir_conventions() + .process() + .unwrap(); + + // here, we get a lint error from "extern crate core" so patching that until lalrpop does + // (adding cfg directives appears to be unsupported by lalrpop) + let out_dir = std::env::var("OUT_DIR").unwrap(); + let parser_path = std::path::Path::new(&out_dir).join("noir_parser.rs"); + let content_str = read_to_string(parser_path.clone()).unwrap(); + let mut parser_file = File::create(parser_path).unwrap(); + for line in content_str.lines() { + if line.contains("extern crate core") { + parser_file + .write_all( + format!("{}\n", 
line.replace("extern crate core", "use core")).as_bytes(), + ) + .unwrap(); + } else { + parser_file.write_all(format!("{}\n", line).as_bytes()).unwrap(); + } + } +} diff --git a/compiler/noirc_frontend/src/ast/expression.rs b/compiler/noirc_frontend/src/ast/expression.rs index 0e5919bf7db..755739af8fe 100644 --- a/compiler/noirc_frontend/src/ast/expression.rs +++ b/compiler/noirc_frontend/src/ast/expression.rs @@ -387,6 +387,9 @@ pub struct FunctionDefinition { /// True if this function was defined with the 'unconstrained' keyword pub is_unconstrained: bool, + /// True if this function was defined with the 'comptime' keyword + pub is_comptime: bool, + /// Indicate if this function was defined with the 'pub' keyword pub visibility: ItemVisibility, @@ -679,10 +682,12 @@ impl FunctionDefinition { span: ident.span().merge(unresolved_type.span.unwrap()), }) .collect(); + FunctionDefinition { name: name.clone(), attributes: Attributes::empty(), is_unconstrained: false, + is_comptime: false, visibility: ItemVisibility::Private, generics: generics.clone(), parameters: p, diff --git a/compiler/noirc_frontend/src/ast/mod.rs b/compiler/noirc_frontend/src/ast/mod.rs index 4547dc2a176..254ec4a7590 100644 --- a/compiler/noirc_frontend/src/ast/mod.rs +++ b/compiler/noirc_frontend/src/ast/mod.rs @@ -112,6 +112,9 @@ pub enum UnresolvedTypeData { /*env:*/ Box, ), + // The type of quoted code for metaprogramming + Code, + Unspecified, // This is for when the user declares a variable without specifying it's type Error, } @@ -200,6 +203,7 @@ impl std::fmt::Display for UnresolvedTypeData { } } MutableReference(element) => write!(f, "&mut {element}"), + Code => write!(f, "Code"), Unit => write!(f, "()"), Error => write!(f, "error"), Unspecified => write!(f, "unspecified"), diff --git a/compiler/noirc_frontend/src/ast/statement.rs b/compiler/noirc_frontend/src/ast/statement.rs index dea9fc0f3d3..753b5a31d32 100644 --- a/compiler/noirc_frontend/src/ast/statement.rs +++ b/compiler/noirc_frontend/src/ast/statement.rs @@ -2,6 +2,7 @@ use std::fmt::Display; use std::sync::atomic::{AtomicU32, Ordering}; use crate::lexer::token::SpannedToken; +use crate::macros_api::SecondaryAttribute; use crate::parser::{ParserError, ParserErrorReason}; use crate::token::Token; use crate::{ @@ -107,7 +108,7 @@ impl StatementKind { pub fn new_let( ((pattern, r#type), expression): ((Pattern, UnresolvedType), Expression), ) -> StatementKind { - StatementKind::Let(LetStatement { pattern, r#type, expression }) + StatementKind::Let(LetStatement { pattern, r#type, expression, attributes: vec![] }) } /// Create a Statement::Assign value, desugaring any combined operators like += if needed. 
@@ -405,13 +406,17 @@ pub struct LetStatement { pub pattern: Pattern, pub r#type: UnresolvedType, pub expression: Expression, + pub attributes: Vec, } impl LetStatement { pub fn new_let( - ((pattern, r#type), expression): ((Pattern, UnresolvedType), Expression), + (((pattern, r#type), expression), attributes): ( + ((Pattern, UnresolvedType), Expression), + Vec, + ), ) -> LetStatement { - LetStatement { pattern, r#type, expression } + LetStatement { pattern, r#type, expression, attributes } } } @@ -568,6 +573,7 @@ impl ForRange { pattern: Pattern::Identifier(array_ident.clone()), r#type: UnresolvedType::unspecified(), expression: array, + attributes: vec![], }), span: array_span, }; @@ -610,6 +616,7 @@ impl ForRange { pattern: Pattern::Identifier(identifier), r#type: UnresolvedType::unspecified(), expression: Expression::new(loop_element, array_span), + attributes: vec![], }), span: array_span, }; diff --git a/compiler/noirc_frontend/src/debug/mod.rs b/compiler/noirc_frontend/src/debug/mod.rs index 71e0d44b478..67b52071d7b 100644 --- a/compiler/noirc_frontend/src/debug/mod.rs +++ b/compiler/noirc_frontend/src/debug/mod.rs @@ -145,6 +145,7 @@ impl DebugInstrumenter { pattern: ast::Pattern::Identifier(ident("__debug_expr", ret_expr.span)), r#type: ast::UnresolvedType::unspecified(), expression: ret_expr.clone(), + attributes: vec![], }), span: ret_expr.span, }; @@ -248,6 +249,7 @@ impl DebugInstrumenter { }), span: let_stmt.expression.span, }, + attributes: vec![], }), span: *span, } @@ -273,6 +275,7 @@ impl DebugInstrumenter { pattern: ast::Pattern::Identifier(ident("__debug_expr", assign_stmt.expression.span)), r#type: ast::UnresolvedType::unspecified(), expression: assign_stmt.expression.clone(), + attributes: vec![], }); let expression_span = assign_stmt.expression.span; let new_assign_stmt = match &assign_stmt.lvalue { diff --git a/compiler/noirc_frontend/src/hir/comptime/hir_to_ast.rs b/compiler/noirc_frontend/src/hir/comptime/hir_to_ast.rs new file mode 100644 index 00000000000..8ffcbce7d62 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/comptime/hir_to_ast.rs @@ -0,0 +1,350 @@ +use iter_extended::vecmap; +use noirc_errors::{Span, Spanned}; + +use crate::ast::{ConstrainStatement, Expression, Statement, StatementKind}; +use crate::hir_def::expr::{HirArrayLiteral, HirExpression, HirIdent}; +use crate::hir_def::stmt::{HirLValue, HirPattern, HirStatement}; +use crate::macros_api::HirLiteral; +use crate::node_interner::{ExprId, NodeInterner, StmtId}; +use crate::{ + ArrayLiteral, AssignStatement, BlockExpression, CallExpression, CastExpression, ConstrainKind, + ConstructorExpression, ExpressionKind, ForLoopStatement, ForRange, Ident, IfExpression, + IndexExpression, InfixExpression, LValue, Lambda, LetStatement, Literal, + MemberAccessExpression, MethodCallExpression, Path, Pattern, PrefixExpression, Type, + UnresolvedType, UnresolvedTypeData, UnresolvedTypeExpression, +}; + +// TODO: +// - Full path for idents & types +// - Assert/AssertEq information lost +// - The type name span is lost in constructor patterns & expressions +// - All type spans are lost +// - Type::TypeVariable has no equivalent in the Ast + +impl StmtId { + #[allow(unused)] + fn to_ast(self, interner: &NodeInterner) -> Statement { + let statement = interner.statement(&self); + let span = interner.statement_span(&self); + + let kind = match statement { + HirStatement::Let(let_stmt) => { + let pattern = let_stmt.pattern.into_ast(interner); + let r#type = interner.id_type(let_stmt.expression).to_ast(); + let expression 
= let_stmt.expression.to_ast(interner); + StatementKind::Let(LetStatement { + pattern, + r#type, + expression, + attributes: Vec::new(), + }) + } + HirStatement::Constrain(constrain) => { + let expr = constrain.0.to_ast(interner); + let message = constrain.2.map(|message| message.to_ast(interner)); + + // TODO: Find difference in usage between Assert & AssertEq + StatementKind::Constrain(ConstrainStatement(expr, message, ConstrainKind::Assert)) + } + HirStatement::Assign(assign) => StatementKind::Assign(AssignStatement { + lvalue: assign.lvalue.into_ast(interner), + expression: assign.expression.to_ast(interner), + }), + HirStatement::For(for_stmt) => StatementKind::For(ForLoopStatement { + identifier: for_stmt.identifier.to_ast(interner), + range: ForRange::Range( + for_stmt.start_range.to_ast(interner), + for_stmt.end_range.to_ast(interner), + ), + block: for_stmt.block.to_ast(interner), + span, + }), + HirStatement::Break => StatementKind::Break, + HirStatement::Continue => StatementKind::Continue, + HirStatement::Expression(expr) => StatementKind::Expression(expr.to_ast(interner)), + HirStatement::Semi(expr) => StatementKind::Semi(expr.to_ast(interner)), + HirStatement::Error => StatementKind::Error, + }; + + Statement { kind, span } + } +} + +impl ExprId { + #[allow(unused)] + fn to_ast(self, interner: &NodeInterner) -> Expression { + let expression = interner.expression(&self); + let span = interner.expr_span(&self); + + let kind = match expression { + HirExpression::Ident(ident) => { + let path = Path::from_ident(ident.to_ast(interner)); + ExpressionKind::Variable(path) + } + HirExpression::Literal(HirLiteral::Array(array)) => { + let array = array.into_ast(interner, span); + ExpressionKind::Literal(Literal::Array(array)) + } + HirExpression::Literal(HirLiteral::Slice(array)) => { + let array = array.into_ast(interner, span); + ExpressionKind::Literal(Literal::Slice(array)) + } + HirExpression::Literal(HirLiteral::Bool(value)) => { + ExpressionKind::Literal(Literal::Bool(value)) + } + HirExpression::Literal(HirLiteral::Integer(value, sign)) => { + ExpressionKind::Literal(Literal::Integer(value, sign)) + } + HirExpression::Literal(HirLiteral::Str(string)) => { + ExpressionKind::Literal(Literal::Str(string)) + } + HirExpression::Literal(HirLiteral::FmtStr(string, _exprs)) => { + // TODO: Is throwing away the exprs here valid? 
+ ExpressionKind::Literal(Literal::FmtStr(string)) + } + HirExpression::Literal(HirLiteral::Unit) => ExpressionKind::Literal(Literal::Unit), + HirExpression::Block(expr) => { + let statements = vecmap(expr.statements, |statement| statement.to_ast(interner)); + ExpressionKind::Block(BlockExpression { statements }) + } + HirExpression::Prefix(prefix) => ExpressionKind::Prefix(Box::new(PrefixExpression { + operator: prefix.operator, + rhs: prefix.rhs.to_ast(interner), + })), + HirExpression::Infix(infix) => ExpressionKind::Infix(Box::new(InfixExpression { + lhs: infix.lhs.to_ast(interner), + operator: Spanned::from(infix.operator.location.span, infix.operator.kind), + rhs: infix.rhs.to_ast(interner), + })), + HirExpression::Index(index) => ExpressionKind::Index(Box::new(IndexExpression { + collection: index.collection.to_ast(interner), + index: index.index.to_ast(interner), + })), + HirExpression::Constructor(constructor) => { + let type_name = constructor.r#type.borrow().name.to_string(); + let type_name = Path::from_single(type_name, span); + let fields = + vecmap(constructor.fields, |(name, expr)| (name, expr.to_ast(interner))); + + ExpressionKind::Constructor(Box::new(ConstructorExpression { type_name, fields })) + } + HirExpression::MemberAccess(access) => { + ExpressionKind::MemberAccess(Box::new(MemberAccessExpression { + lhs: access.lhs.to_ast(interner), + rhs: access.rhs, + })) + } + HirExpression::Call(call) => { + let func = Box::new(call.func.to_ast(interner)); + let arguments = vecmap(call.arguments, |arg| arg.to_ast(interner)); + ExpressionKind::Call(Box::new(CallExpression { func, arguments })) + } + HirExpression::MethodCall(method_call) => { + ExpressionKind::MethodCall(Box::new(MethodCallExpression { + object: method_call.object.to_ast(interner), + method_name: method_call.method, + arguments: vecmap(method_call.arguments, |arg| arg.to_ast(interner)), + })) + } + HirExpression::Cast(cast) => { + let lhs = cast.lhs.to_ast(interner); + let r#type = cast.r#type.to_ast(); + ExpressionKind::Cast(Box::new(CastExpression { lhs, r#type })) + } + HirExpression::If(if_expr) => ExpressionKind::If(Box::new(IfExpression { + condition: if_expr.condition.to_ast(interner), + consequence: if_expr.consequence.to_ast(interner), + alternative: if_expr.alternative.map(|expr| expr.to_ast(interner)), + })), + HirExpression::Tuple(fields) => { + ExpressionKind::Tuple(vecmap(fields, |field| field.to_ast(interner))) + } + HirExpression::Lambda(lambda) => { + let parameters = vecmap(lambda.parameters, |(pattern, typ)| { + (pattern.into_ast(interner), typ.to_ast()) + }); + let return_type = lambda.return_type.to_ast(); + let body = lambda.body.to_ast(interner); + ExpressionKind::Lambda(Box::new(Lambda { parameters, return_type, body })) + } + HirExpression::Quote(block) => ExpressionKind::Quote(block), + HirExpression::Error => ExpressionKind::Error, + }; + + Expression::new(kind, span) + } +} + +impl HirPattern { + fn into_ast(self, interner: &NodeInterner) -> Pattern { + match self { + HirPattern::Identifier(ident) => Pattern::Identifier(ident.to_ast(interner)), + HirPattern::Mutable(pattern, location) => { + let pattern = Box::new(pattern.into_ast(interner)); + Pattern::Mutable(pattern, location.span, false) + } + HirPattern::Tuple(patterns, location) => { + let patterns = vecmap(patterns, |pattern| pattern.into_ast(interner)); + Pattern::Tuple(patterns, location.span) + } + HirPattern::Struct(typ, patterns, location) => { + let patterns = + vecmap(patterns, |(name, pattern)| (name, 
pattern.into_ast(interner))); + let name = match typ.follow_bindings() { + Type::Struct(struct_def, _) => { + let struct_def = struct_def.borrow(); + struct_def.name.0.contents.clone() + } + // This pass shouldn't error so if the type isn't a struct we just get a string + // representation of any other type and use that. We're relying on name + // resolution to fail later when this Ast is re-converted to Hir. + other => other.to_string(), + }; + // The name span is lost here + let path = Path::from_single(name, location.span); + Pattern::Struct(path, patterns, location.span) + } + } + } +} + +impl HirIdent { + fn to_ast(&self, interner: &NodeInterner) -> Ident { + let name = interner.definition_name(self.id).to_owned(); + Ident(Spanned::from(self.location.span, name)) + } +} + +impl Type { + fn to_ast(&self) -> UnresolvedType { + let typ = match self { + Type::FieldElement => UnresolvedTypeData::FieldElement, + Type::Array(length, element) => { + let length = length.to_type_expression(); + let element = Box::new(element.to_ast()); + UnresolvedTypeData::Array(length, element) + } + Type::Slice(element) => { + let element = Box::new(element.to_ast()); + UnresolvedTypeData::Slice(element) + } + Type::Integer(sign, bit_size) => UnresolvedTypeData::Integer(*sign, *bit_size), + Type::Bool => UnresolvedTypeData::Bool, + Type::String(length) => { + let length = length.to_type_expression(); + UnresolvedTypeData::String(Some(length)) + } + Type::FmtString(length, element) => { + let length = length.to_type_expression(); + let element = Box::new(element.to_ast()); + UnresolvedTypeData::FormatString(length, element) + } + Type::Unit => UnresolvedTypeData::Unit, + Type::Tuple(fields) => { + let fields = vecmap(fields, |field| field.to_ast()); + UnresolvedTypeData::Tuple(fields) + } + Type::Struct(def, generics) => { + let struct_def = def.borrow(); + let generics = vecmap(generics, |generic| generic.to_ast()); + let name = Path::from_ident(struct_def.name.clone()); + UnresolvedTypeData::Named(name, generics, false) + } + Type::Alias(type_def, generics) => { + // Keep the alias name instead of expanding this in case the + // alias' definition was changed + let type_def = type_def.borrow(); + let generics = vecmap(generics, |generic| generic.to_ast()); + let name = Path::from_ident(type_def.name.clone()); + UnresolvedTypeData::Named(name, generics, false) + } + Type::TypeVariable(_, _) => todo!("Convert Type::TypeVariable Hir -> Ast"), + Type::TraitAsType(_, name, generics) => { + let generics = vecmap(generics, |generic| generic.to_ast()); + let name = Path::from_single(name.as_ref().clone(), Span::default()); + UnresolvedTypeData::TraitAsType(name, generics) + } + Type::NamedGeneric(_, name) => { + let name = Path::from_single(name.as_ref().clone(), Span::default()); + UnresolvedTypeData::TraitAsType(name, Vec::new()) + } + Type::Function(args, ret, env) => { + let args = vecmap(args, |arg| arg.to_ast()); + let ret = Box::new(ret.to_ast()); + let env = Box::new(env.to_ast()); + UnresolvedTypeData::Function(args, ret, env) + } + Type::MutableReference(element) => { + let element = Box::new(element.to_ast()); + UnresolvedTypeData::MutableReference(element) + } + // Type::Forall is only for generic functions which don't store a type + // in their Ast so they don't need to call to_ast for their Forall type. + // Since there is no UnresolvedTypeData equivalent for Type::Forall, we use + // this to ignore this case since it shouldn't be needed anyway. 
+ Type::Forall(_, typ) => return typ.to_ast(), + Type::Constant(_) => panic!("Type::Constant where a type was expected: {self:?}"), + Type::Code => UnresolvedTypeData::Code, + Type::Error => UnresolvedTypeData::Error, + }; + + UnresolvedType { typ, span: None } + } + + fn to_type_expression(&self) -> UnresolvedTypeExpression { + let span = Span::default(); + + match self.follow_bindings() { + Type::Constant(length) => UnresolvedTypeExpression::Constant(length, span), + Type::NamedGeneric(_, name) => { + let path = Path::from_single(name.as_ref().clone(), span); + UnresolvedTypeExpression::Variable(path) + } + // TODO: This should be turned into a proper error. + other => panic!("Cannot represent {other:?} as type expression"), + } + } +} + +impl HirLValue { + fn into_ast(self, interner: &NodeInterner) -> LValue { + match self { + HirLValue::Ident(ident, _) => LValue::Ident(ident.to_ast(interner)), + HirLValue::MemberAccess { object, field_name, field_index: _, typ: _, location } => { + let object = Box::new(object.into_ast(interner)); + LValue::MemberAccess { object, field_name, span: location.span } + } + HirLValue::Index { array, index, typ: _, location } => { + let array = Box::new(array.into_ast(interner)); + let index = index.to_ast(interner); + LValue::Index { array, index, span: location.span } + } + HirLValue::Dereference { lvalue, element_type: _, location } => { + let lvalue = Box::new(lvalue.into_ast(interner)); + LValue::Dereference(lvalue, location.span) + } + } + } +} + +impl HirArrayLiteral { + fn into_ast(self, interner: &NodeInterner, span: Span) -> ArrayLiteral { + match self { + HirArrayLiteral::Standard(elements) => { + ArrayLiteral::Standard(vecmap(elements, |element| element.to_ast(interner))) + } + HirArrayLiteral::Repeated { repeated_element, length } => { + let repeated_element = Box::new(repeated_element.to_ast(interner)); + let length = match length { + Type::Constant(length) => { + let literal = Literal::Integer((length as u128).into(), false); + let kind = ExpressionKind::Literal(literal); + Box::new(Expression::new(kind, span)) + } + other => panic!("Cannot convert non-constant type for repeated array literal from Hir -> Ast: {other:?}"), + }; + ArrayLiteral::Repeated { repeated_element, length } + } + } + } +} diff --git a/compiler/noirc_frontend/src/hir/comptime/interpreter.rs b/compiler/noirc_frontend/src/hir/comptime/interpreter.rs new file mode 100644 index 00000000000..81050073008 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/comptime/interpreter.rs @@ -0,0 +1,1282 @@ +use std::{borrow::Cow, collections::hash_map::Entry, rc::Rc}; + +use acvm::FieldElement; +use im::Vector; +use iter_extended::{try_vecmap, vecmap}; +use noirc_errors::Location; +use rustc_hash::{FxHashMap as HashMap, FxHashSet as HashSet}; + +use crate::{ + hir_def::{ + expr::{ + HirArrayLiteral, HirBlockExpression, HirCallExpression, HirCastExpression, + HirConstructorExpression, HirIdent, HirIfExpression, HirIndexExpression, + HirInfixExpression, HirLambda, HirMemberAccess, HirMethodCallExpression, + HirPrefixExpression, + }, + stmt::{ + HirAssignStatement, HirConstrainStatement, HirForStatement, HirLValue, HirLetStatement, + HirPattern, + }, + }, + macros_api::{HirExpression, HirLiteral, HirStatement, NodeInterner}, + node_interner::{DefinitionId, DefinitionKind, ExprId, FuncId, StmtId}, + BinaryOpKind, BlockExpression, FunctionKind, IntegerBitSize, Shared, Signedness, Type, + TypeBinding, TypeBindings, TypeVariableKind, +}; + +#[allow(unused)] +pub(crate) struct 
Interpreter<'interner> { + /// To expand macros the Interpreter may mutate hir nodes within the NodeInterner + interner: &'interner mut NodeInterner, + + /// Each value currently in scope in the interpreter. + /// Each element of the Vec represents a scope with every scope together making + /// up all currently visible definitions. + scopes: Vec>, + + /// True if we've expanded any macros into any functions and will need + /// to redo name resolution & type checking for that function. + changed_functions: HashSet, + + /// True if we've expanded any macros into global scope and will need + /// to redo name resolution & type checking for everything. + changed_globally: bool, + + in_loop: bool, +} + +#[allow(unused)] +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum Value { + Unit, + Bool(bool), + Field(FieldElement), + I8(i8), + I32(i32), + I64(i64), + U8(u8), + U32(u32), + U64(u64), + String(Rc), + Function(FuncId, Type), + Closure(HirLambda, Vec, Type), + Tuple(Vec), + Struct(HashMap, Value>, Type), + Pointer(Shared), + Array(Vector, Type), + Slice(Vector, Type), + Code(Rc), +} + +/// The possible errors that can halt the interpreter. +#[allow(unused)] +#[derive(Debug)] +pub(crate) enum InterpreterError { + ArgumentCountMismatch { expected: usize, actual: usize, call_location: Location }, + TypeMismatch { expected: Type, value: Value, location: Location }, + NoValueForId { id: DefinitionId, location: Location }, + IntegerOutOfRangeForType { value: FieldElement, typ: Type, location: Location }, + ErrorNodeEncountered { location: Location }, + NonFunctionCalled { value: Value, location: Location }, + NonBoolUsedInIf { value: Value, location: Location }, + NonBoolUsedInConstrain { value: Value, location: Location }, + FailingConstraint { message: Option, location: Location }, + NoMethodFound { object: Value, typ: Type, location: Location }, + NonIntegerUsedInLoop { value: Value, location: Location }, + NonPointerDereferenced { value: Value, location: Location }, + NonTupleOrStructInMemberAccess { value: Value, location: Location }, + NonArrayIndexed { value: Value, location: Location }, + NonIntegerUsedAsIndex { value: Value, location: Location }, + NonIntegerIntegerLiteral { typ: Type, location: Location }, + NonIntegerArrayLength { typ: Type, location: Location }, + NonNumericCasted { value: Value, location: Location }, + IndexOutOfBounds { index: usize, length: usize, location: Location }, + ExpectedStructToHaveField { value: Value, field_name: String, location: Location }, + TypeUnsupported { typ: Type, location: Location }, + InvalidValueForUnary { value: Value, operator: &'static str, location: Location }, + InvalidValuesForBinary { lhs: Value, rhs: Value, operator: &'static str, location: Location }, + CastToNonNumericType { typ: Type, location: Location }, + + // Perhaps this should be unreachable! due to type checking also preventing this error? + // Currently it and the Continue variant are the only interpreter errors without a Location field + BreakNotInLoop, + ContinueNotInLoop, + + // These cases are not errors but prevent us from running more code + // until the loop can be resumed properly. 
+ Break, + Continue, +} + +#[allow(unused)] +type IResult = std::result::Result; + +#[allow(unused)] +impl<'a> Interpreter<'a> { + pub(crate) fn new(interner: &'a mut NodeInterner) -> Self { + Self { + interner, + scopes: vec![HashMap::default()], + changed_functions: HashSet::default(), + changed_globally: false, + in_loop: false, + } + } + + pub(crate) fn call_function( + &mut self, + function: FuncId, + arguments: Vec<(Value, Location)>, + call_location: Location, + ) -> IResult { + let previous_state = self.enter_function(); + + let meta = self.interner.function_meta(&function); + if meta.kind != FunctionKind::Normal { + todo!("Evaluation for {:?} is unimplemented", meta.kind); + } + + if meta.parameters.len() != arguments.len() { + return Err(InterpreterError::ArgumentCountMismatch { + expected: meta.parameters.len(), + actual: arguments.len(), + call_location, + }); + } + + let parameters = meta.parameters.0.clone(); + for ((parameter, typ, _), (argument, arg_location)) in parameters.iter().zip(arguments) { + self.define_pattern(parameter, typ, argument, arg_location)?; + } + + let function_body = self.interner.function(&function).as_expr(); + let result = self.evaluate(function_body)?; + + self.exit_function(previous_state); + Ok(result) + } + + fn call_closure( + &mut self, + closure: HirLambda, + // TODO: How to define environment here? + _environment: Vec, + arguments: Vec<(Value, Location)>, + call_location: Location, + ) -> IResult { + let previous_state = self.enter_function(); + + if closure.parameters.len() != arguments.len() { + return Err(InterpreterError::ArgumentCountMismatch { + expected: closure.parameters.len(), + actual: arguments.len(), + call_location, + }); + } + + let parameters = closure.parameters.iter().zip(arguments); + for ((parameter, typ), (argument, arg_location)) in parameters { + self.define_pattern(parameter, typ, argument, arg_location)?; + } + + let result = self.evaluate(closure.body)?; + + self.exit_function(previous_state); + Ok(result) + } + + /// Enters a function, pushing a new scope and resetting any required state. + /// Returns the previous values of the internal state, to be reset when + /// `exit_function` is called. 
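As the doc comment above describes, entering a function saves the interpreter's per-call state (every scope except the global one, plus the `in_loop` flag) and `exit_function` restores it afterwards, so a callee cannot observe the caller's locals but can still reach globals. A simplified, self-contained mirror of that mechanism, where the `u32`/`i64` key and value types are placeholders for `DefinitionId` and `Value`:

```rust
use std::collections::HashMap;

struct MiniInterpreter {
    // scopes[0] is the global scope; later entries are the current call's locals.
    scopes: Vec<HashMap<u32, i64>>,
    in_loop: bool,
}

impl MiniInterpreter {
    fn enter_function(&mut self) -> (bool, Vec<HashMap<u32, i64>>) {
        // Stash every scope except the global one and start a fresh local scope,
        // so the callee cannot see the caller's locals.
        let saved = self.scopes.drain(1..).collect();
        self.scopes.push(HashMap::new());
        (std::mem::take(&mut self.in_loop), saved)
    }

    fn exit_function(&mut self, (in_loop, mut saved): (bool, Vec<HashMap<u32, i64>>)) {
        self.in_loop = in_loop;
        self.scopes.truncate(1); // drop the callee's scopes
        self.scopes.append(&mut saved); // restore the caller's locals
    }
}

fn main() {
    let mut interp = MiniInterpreter { scopes: vec![HashMap::new()], in_loop: false };
    interp.scopes[0].insert(0, 42); // a "global"
    interp.scopes.push(HashMap::from([(1, 7)])); // a caller-local

    let saved = interp.enter_function();
    assert!(interp.scopes.iter().all(|scope| !scope.contains_key(&1))); // local is hidden
    interp.exit_function(saved);
    assert_eq!(interp.scopes[1].get(&1), Some(&7)); // caller state restored
}
```

The real `enter_function`/`exit_function` below follow exactly this save-and-restore shape.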
+ fn enter_function(&mut self) -> (bool, Vec>) { + // Drain every scope except the global scope + let scope = self.scopes.drain(1..).collect(); + self.push_scope(); + (std::mem::take(&mut self.in_loop), scope) + } + + fn exit_function(&mut self, mut state: (bool, Vec>)) { + self.in_loop = state.0; + + // Keep only the global scope + self.scopes.truncate(1); + self.scopes.append(&mut state.1); + } + + fn push_scope(&mut self) { + self.scopes.push(HashMap::default()); + } + + fn pop_scope(&mut self) { + self.scopes.pop(); + } + + fn current_scope_mut(&mut self) -> &mut HashMap { + // the global scope is always at index zero, so this is always Some + self.scopes.last_mut().unwrap() + } + + fn define_pattern( + &mut self, + pattern: &HirPattern, + typ: &Type, + argument: Value, + location: Location, + ) -> IResult<()> { + match pattern { + HirPattern::Identifier(identifier) => { + self.define(identifier.id, typ, argument, location) + } + HirPattern::Mutable(pattern, _) => { + self.define_pattern(pattern, typ, argument, location) + } + HirPattern::Tuple(pattern_fields, _) => match (argument, typ) { + (Value::Tuple(fields), Type::Tuple(type_fields)) + if fields.len() == pattern_fields.len() => + { + for ((pattern, typ), argument) in + pattern_fields.iter().zip(type_fields).zip(fields) + { + self.define_pattern(pattern, typ, argument, location)?; + } + Ok(()) + } + (value, _) => { + Err(InterpreterError::TypeMismatch { expected: typ.clone(), value, location }) + } + }, + HirPattern::Struct(struct_type, pattern_fields, _) => { + self.type_check(typ, &argument, location)?; + self.type_check(struct_type, &argument, location)?; + + match argument { + Value::Struct(fields, struct_type) if fields.len() == pattern_fields.len() => { + for (field_name, field_pattern) in pattern_fields { + let field = fields.get(&field_name.0.contents).ok_or_else(|| { + InterpreterError::ExpectedStructToHaveField { + value: Value::Struct(fields.clone(), struct_type.clone()), + field_name: field_name.0.contents.clone(), + location, + } + })?; + + let field_type = field.get_type().into_owned(); + self.define_pattern( + field_pattern, + &field_type, + field.clone(), + location, + )?; + } + Ok(()) + } + value => Err(InterpreterError::TypeMismatch { + expected: typ.clone(), + value, + location, + }), + } + } + } + } + + /// Define a new variable in the current scope + fn define( + &mut self, + id: DefinitionId, + typ: &Type, + argument: Value, + location: Location, + ) -> IResult<()> { + self.type_check(typ, &argument, location)?; + self.current_scope_mut().insert(id, argument); + Ok(()) + } + + /// Mutate an existing variable, potentially from a prior scope. 
+ /// Also type checks the value being assigned + fn checked_mutate( + &mut self, + id: DefinitionId, + typ: &Type, + argument: Value, + location: Location, + ) -> IResult<()> { + self.type_check(typ, &argument, location)?; + for scope in self.scopes.iter_mut().rev() { + if let Entry::Occupied(mut entry) = scope.entry(id) { + entry.insert(argument); + return Ok(()); + } + } + Err(InterpreterError::NoValueForId { id, location }) + } + + /// Mutate an existing variable, potentially from a prior scope + fn mutate(&mut self, id: DefinitionId, argument: Value, location: Location) -> IResult<()> { + for scope in self.scopes.iter_mut().rev() { + if let Entry::Occupied(mut entry) = scope.entry(id) { + entry.insert(argument); + return Ok(()); + } + } + Err(InterpreterError::NoValueForId { id, location }) + } + + fn lookup(&self, ident: &HirIdent) -> IResult { + for scope in self.scopes.iter().rev() { + if let Some(value) = scope.get(&ident.id) { + return Ok(value.clone()); + } + } + + Err(InterpreterError::NoValueForId { id: ident.id, location: ident.location }) + } + + fn lookup_id(&self, id: DefinitionId, location: Location) -> IResult { + for scope in self.scopes.iter().rev() { + if let Some(value) = scope.get(&id) { + return Ok(value.clone()); + } + } + + Err(InterpreterError::NoValueForId { id, location }) + } + + fn type_check(&self, typ: &Type, value: &Value, location: Location) -> IResult<()> { + let typ = typ.follow_bindings(); + let value_type = value.get_type(); + + typ.try_unify(&value_type, &mut TypeBindings::new()).map_err(|_| { + InterpreterError::TypeMismatch { expected: typ, value: value.clone(), location } + }) + } + + /// Evaluate an expression and return the result + fn evaluate(&mut self, id: ExprId) -> IResult { + match self.interner.expression(&id) { + HirExpression::Ident(ident) => self.evaluate_ident(ident, id), + HirExpression::Literal(literal) => self.evaluate_literal(literal, id), + HirExpression::Block(block) => self.evaluate_block(block), + HirExpression::Prefix(prefix) => self.evaluate_prefix(prefix, id), + HirExpression::Infix(infix) => self.evaluate_infix(infix, id), + HirExpression::Index(index) => self.evaluate_index(index, id), + HirExpression::Constructor(constructor) => self.evaluate_constructor(constructor, id), + HirExpression::MemberAccess(access) => self.evaluate_access(access, id), + HirExpression::Call(call) => self.evaluate_call(call, id), + HirExpression::MethodCall(call) => self.evaluate_method_call(call, id), + HirExpression::Cast(cast) => self.evaluate_cast(cast, id), + HirExpression::If(if_) => self.evaluate_if(if_, id), + HirExpression::Tuple(tuple) => self.evaluate_tuple(tuple), + HirExpression::Lambda(lambda) => self.evaluate_lambda(lambda, id), + HirExpression::Quote(block) => Ok(Value::Code(Rc::new(block))), + HirExpression::Error => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::ErrorNodeEncountered { location }) + } + } + } + + fn evaluate_ident(&mut self, ident: HirIdent, id: ExprId) -> IResult { + let definition = self.interner.definition(ident.id); + + match &definition.kind { + DefinitionKind::Function(function_id) => { + let typ = self.interner.id_type(id); + Ok(Value::Function(*function_id, typ)) + } + DefinitionKind::Local(_) => dbg!(self.lookup(&ident)), + DefinitionKind::Global(global_id) => { + let let_ = self.interner.get_global_let_statement(*global_id).unwrap(); + self.evaluate_let(let_)?; + self.lookup(&ident) + } + DefinitionKind::GenericType(type_variable) => { + let value = match 
&*type_variable.borrow() { + TypeBinding::Unbound(_) => None, + TypeBinding::Bound(binding) => binding.evaluate_to_u64(), + }; + + if let Some(value) = value { + let typ = self.interner.id_type(id); + self.evaluate_integer((value as u128).into(), false, id) + } else { + let location = self.interner.expr_location(&id); + let typ = Type::TypeVariable(type_variable.clone(), TypeVariableKind::Normal); + Err(InterpreterError::NonIntegerArrayLength { typ, location }) + } + } + } + } + + fn evaluate_literal(&mut self, literal: HirLiteral, id: ExprId) -> IResult { + match literal { + HirLiteral::Unit => Ok(Value::Unit), + HirLiteral::Bool(value) => Ok(Value::Bool(value)), + HirLiteral::Integer(value, is_negative) => { + self.evaluate_integer(value, is_negative, id) + } + HirLiteral::Str(string) => Ok(Value::String(Rc::new(string))), + HirLiteral::FmtStr(_, _) => todo!("Evaluate format strings"), + HirLiteral::Array(array) => self.evaluate_array(array, id), + HirLiteral::Slice(array) => self.evaluate_slice(array, id), + } + } + + fn evaluate_integer( + &self, + value: FieldElement, + is_negative: bool, + id: ExprId, + ) -> IResult { + let typ = self.interner.id_type(id).follow_bindings(); + let location = self.interner.expr_location(&id); + + if let Type::FieldElement = &typ { + Ok(Value::Field(value)) + } else if let Type::Integer(sign, bit_size) = &typ { + match (sign, bit_size) { + (Signedness::Unsigned, IntegerBitSize::One) => { + return Err(InterpreterError::TypeUnsupported { typ, location }); + } + (Signedness::Unsigned, IntegerBitSize::Eight) => { + let value: u8 = + value.try_to_u64().and_then(|value| value.try_into().ok()).ok_or( + InterpreterError::IntegerOutOfRangeForType { value, typ, location }, + )?; + let value = if is_negative { 0u8.wrapping_sub(value) } else { value }; + Ok(Value::U8(value)) + } + (Signedness::Unsigned, IntegerBitSize::ThirtyTwo) => { + let value: u32 = + value.try_to_u64().and_then(|value| value.try_into().ok()).ok_or( + InterpreterError::IntegerOutOfRangeForType { value, typ, location }, + )?; + let value = if is_negative { 0u32.wrapping_sub(value) } else { value }; + Ok(Value::U32(value)) + } + (Signedness::Unsigned, IntegerBitSize::SixtyFour) => { + let value: u64 = + value.try_to_u64().ok_or(InterpreterError::IntegerOutOfRangeForType { + value, + typ, + location, + })?; + let value = if is_negative { 0u64.wrapping_sub(value) } else { value }; + Ok(Value::U64(value)) + } + (Signedness::Signed, IntegerBitSize::One) => { + return Err(InterpreterError::TypeUnsupported { typ, location }); + } + (Signedness::Signed, IntegerBitSize::Eight) => { + let value: i8 = + value.try_to_u64().and_then(|value| value.try_into().ok()).ok_or( + InterpreterError::IntegerOutOfRangeForType { value, typ, location }, + )?; + let value = if is_negative { -value } else { value }; + Ok(Value::I8(value)) + } + (Signedness::Signed, IntegerBitSize::ThirtyTwo) => { + let value: i32 = + value.try_to_u64().and_then(|value| value.try_into().ok()).ok_or( + InterpreterError::IntegerOutOfRangeForType { value, typ, location }, + )?; + let value = if is_negative { -value } else { value }; + Ok(Value::I32(value)) + } + (Signedness::Signed, IntegerBitSize::SixtyFour) => { + let value: i64 = + value.try_to_u64().and_then(|value| value.try_into().ok()).ok_or( + InterpreterError::IntegerOutOfRangeForType { value, typ, location }, + )?; + let value = if is_negative { -value } else { value }; + Ok(Value::I64(value)) + } + } + } else { + Err(InterpreterError::NonIntegerIntegerLiteral { typ, location }) + } + 
} + + fn evaluate_block(&mut self, mut block: HirBlockExpression) -> IResult { + let last_statement = block.statements.pop(); + self.push_scope(); + + for statement in block.statements { + self.evaluate_statement(statement)?; + } + + let result = if let Some(statement) = last_statement { + self.evaluate_statement(statement) + } else { + Ok(Value::Unit) + }; + + self.pop_scope(); + result + } + + fn evaluate_array(&mut self, array: HirArrayLiteral, id: ExprId) -> IResult { + let typ = self.interner.id_type(id); + + match array { + HirArrayLiteral::Standard(elements) => { + let elements = elements + .into_iter() + .map(|id| self.evaluate(id)) + .collect::>>()?; + + Ok(Value::Array(elements, typ)) + } + HirArrayLiteral::Repeated { repeated_element, length } => { + let element = self.evaluate(repeated_element)?; + + if let Some(length) = length.evaluate_to_u64() { + let elements = (0..length).map(|_| element.clone()).collect(); + Ok(Value::Array(elements, typ)) + } else { + let location = self.interner.expr_location(&id); + Err(InterpreterError::NonIntegerArrayLength { typ: length, location }) + } + } + } + } + + fn evaluate_slice(&mut self, array: HirArrayLiteral, id: ExprId) -> IResult { + self.evaluate_array(array, id).map(|value| match value { + Value::Array(array, typ) => Value::Slice(array, typ), + other => unreachable!("Non-array value returned from evaluate array: {other:?}"), + }) + } + + fn evaluate_prefix(&mut self, prefix: HirPrefixExpression, id: ExprId) -> IResult { + let rhs = self.evaluate(prefix.rhs)?; + match prefix.operator { + crate::UnaryOp::Minus => match rhs { + Value::Field(value) => Ok(Value::Field(FieldElement::zero() - value)), + Value::I8(value) => Ok(Value::I8(-value)), + Value::I32(value) => Ok(Value::I32(-value)), + Value::I64(value) => Ok(Value::I64(-value)), + Value::U8(value) => Ok(Value::U8(0 - value)), + Value::U32(value) => Ok(Value::U32(0 - value)), + Value::U64(value) => Ok(Value::U64(0 - value)), + value => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::InvalidValueForUnary { + value, + location, + operator: "minus", + }) + } + }, + crate::UnaryOp::Not => match rhs { + Value::Bool(value) => Ok(Value::Bool(!value)), + Value::I8(value) => Ok(Value::I8(!value)), + Value::I32(value) => Ok(Value::I32(!value)), + Value::I64(value) => Ok(Value::I64(!value)), + Value::U8(value) => Ok(Value::U8(!value)), + Value::U32(value) => Ok(Value::U32(!value)), + Value::U64(value) => Ok(Value::U64(!value)), + value => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::InvalidValueForUnary { value, location, operator: "not" }) + } + }, + crate::UnaryOp::MutableReference => Ok(Value::Pointer(Shared::new(rhs))), + crate::UnaryOp::Dereference { implicitly_added: _ } => match rhs { + Value::Pointer(element) => Ok(element.borrow().clone()), + value => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::NonPointerDereferenced { value, location }) + } + }, + } + } + + fn evaluate_infix(&mut self, infix: HirInfixExpression, id: ExprId) -> IResult { + let lhs = self.evaluate(infix.lhs)?; + let rhs = self.evaluate(infix.rhs)?; + + // TODO: Need to account for operator overloading + assert!( + self.interner.get_selected_impl_for_expression(id).is_none(), + "Operator overloading is unimplemented in the interpreter" + ); + + use InterpreterError::InvalidValuesForBinary; + match infix.operator.kind { + BinaryOpKind::Add => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Field(lhs + 
rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs + rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs + rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs + rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs + rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs + rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs + rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "+" }) + } + }, + BinaryOpKind::Subtract => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Field(lhs - rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs - rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs - rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs - rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs - rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs - rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs - rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "-" }) + } + }, + BinaryOpKind::Multiply => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Field(lhs * rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs * rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs * rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs * rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs * rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs * rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs * rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "*" }) + } + }, + BinaryOpKind::Divide => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Field(lhs / rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs / rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs / rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs / rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs / rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs / rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs / rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "/" }) + } + }, + BinaryOpKind::Equal => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs == rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs == rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "==" }) + } + }, + BinaryOpKind::NotEqual => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::U8(lhs), 
Value::U8(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs != rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs != rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "!=" }) + } + }, + BinaryOpKind::Less => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs < rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs < rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "<" }) + } + }, + BinaryOpKind::LessEqual => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs <= rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "<=" }) + } + }, + BinaryOpKind::Greater => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs > rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs > rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: ">" }) + } + }, + BinaryOpKind::GreaterEqual => match (lhs, rhs) { + (Value::Field(lhs), Value::Field(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::Bool(lhs >= rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: ">=" }) + } + }, + BinaryOpKind::And => match (lhs, rhs) { + (Value::Bool(lhs), Value::Bool(rhs)) => Ok(Value::Bool(lhs & rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs & rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs & rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs & rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs & rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs & rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs & rhs)), + (lhs, rhs) => { + 
let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "&" }) + } + }, + BinaryOpKind::Or => match (lhs, rhs) { + (Value::Bool(lhs), Value::Bool(rhs)) => Ok(Value::Bool(lhs | rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs | rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs | rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs | rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs | rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs | rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs | rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "|" }) + } + }, + BinaryOpKind::Xor => match (lhs, rhs) { + (Value::Bool(lhs), Value::Bool(rhs)) => Ok(Value::Bool(lhs ^ rhs)), + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs ^ rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs ^ rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs ^ rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs ^ rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs ^ rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs ^ rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "^" }) + } + }, + BinaryOpKind::ShiftRight => match (lhs, rhs) { + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs >> rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs >> rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs >> rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs >> rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs >> rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs >> rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: ">>" }) + } + }, + BinaryOpKind::ShiftLeft => match (lhs, rhs) { + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs << rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs << rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs << rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs << rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs << rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs << rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "<<" }) + } + }, + BinaryOpKind::Modulo => match (lhs, rhs) { + (Value::I8(lhs), Value::I8(rhs)) => Ok(Value::I8(lhs % rhs)), + (Value::I32(lhs), Value::I32(rhs)) => Ok(Value::I32(lhs % rhs)), + (Value::I64(lhs), Value::I64(rhs)) => Ok(Value::I64(lhs % rhs)), + (Value::U8(lhs), Value::U8(rhs)) => Ok(Value::U8(lhs % rhs)), + (Value::U32(lhs), Value::U32(rhs)) => Ok(Value::U32(lhs % rhs)), + (Value::U64(lhs), Value::U64(rhs)) => Ok(Value::U64(lhs % rhs)), + (lhs, rhs) => { + let location = self.interner.expr_location(&id); + Err(InvalidValuesForBinary { lhs, rhs, location, operator: "%" }) + } + }, + } + } + + fn evaluate_index(&mut self, index: HirIndexExpression, id: ExprId) -> IResult { + let array = self.evaluate(index.collection)?; + let index = self.evaluate(index.index)?; + + let location = self.interner.expr_location(&id); + let (array, index) = self.bounds_check(array, index, location)?; + + Ok(array[index].clone()) + } + 
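`Value::Array` and `Value::Slice` keep their elements in an `im::Vector` (the `im` dependency added to `noirc_frontend`'s Cargo.toml earlier in this diff), so the plain `array[index].clone()` in `evaluate_index` stays cheap: cloning an `im::Vector` shares the underlying chunks rather than copying every element. A standalone demonstration of those two properties, independent of the interpreter's `Value` type:

```rust
use im::Vector;

fn main() {
    let original: Vector<i64> = (0..8).collect();

    // Cloning is cheap: both vectors share their backing structure until one is mutated.
    let mut extended = original.clone();
    extended.push_back(8);

    // Indexing works like a plain slice, which is all `evaluate_index` needs.
    assert_eq!(original[3], 3);
    assert_eq!(original.len(), 8);
    assert_eq!(extended.len(), 9);
}
```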
+ /// Bounds check the given array and index pair. + /// This will also ensure the given arguments are in fact an array and integer. + fn bounds_check( + &self, + array: Value, + index: Value, + location: Location, + ) -> IResult<(Vector, usize)> { + let collection = match array { + Value::Array(array, _) => array, + Value::Slice(array, _) => array, + value => { + return Err(InterpreterError::NonArrayIndexed { value, location }); + } + }; + + let index = match index { + Value::Field(value) => { + value.try_to_u64().expect("index could not fit into u64") as usize + } + Value::I8(value) => value as usize, + Value::I32(value) => value as usize, + Value::I64(value) => value as usize, + Value::U8(value) => value as usize, + Value::U32(value) => value as usize, + Value::U64(value) => value as usize, + value => { + return Err(InterpreterError::NonIntegerUsedAsIndex { value, location }); + } + }; + + if index >= collection.len() { + use InterpreterError::IndexOutOfBounds; + return Err(IndexOutOfBounds { index, location, length: collection.len() }); + } + + Ok((collection, index)) + } + + fn evaluate_constructor( + &mut self, + constructor: HirConstructorExpression, + id: ExprId, + ) -> IResult { + let fields = constructor + .fields + .into_iter() + .map(|(name, expr)| { + let field_value = self.evaluate(expr)?; + Ok((Rc::new(name.0.contents), field_value)) + }) + .collect::>()?; + + let typ = self.interner.id_type(id); + Ok(Value::Struct(fields, typ)) + } + + fn evaluate_access(&mut self, access: HirMemberAccess, id: ExprId) -> IResult { + let (fields, struct_type) = match self.evaluate(access.lhs)? { + Value::Struct(fields, typ) => (fields, typ), + value => { + let location = self.interner.expr_location(&id); + return Err(InterpreterError::NonTupleOrStructInMemberAccess { value, location }); + } + }; + + fields.get(&access.rhs.0.contents).cloned().ok_or_else(|| { + let location = self.interner.expr_location(&id); + let value = Value::Struct(fields, struct_type); + let field_name = access.rhs.0.contents; + InterpreterError::ExpectedStructToHaveField { value, field_name, location } + }) + } + + fn evaluate_call(&mut self, call: HirCallExpression, id: ExprId) -> IResult { + let function = self.evaluate(call.func)?; + let arguments = try_vecmap(call.arguments, |arg| { + Ok((self.evaluate(arg)?, self.interner.expr_location(&arg))) + })?; + let location = self.interner.expr_location(&id); + + match function { + Value::Function(function_id, _) => self.call_function(function_id, arguments, location), + Value::Closure(closure, env, _) => self.call_closure(closure, env, arguments, location), + value => Err(InterpreterError::NonFunctionCalled { value, location }), + } + } + + fn evaluate_method_call( + &mut self, + call: HirMethodCallExpression, + id: ExprId, + ) -> IResult { + let object = self.evaluate(call.object)?; + let arguments = try_vecmap(call.arguments, |arg| { + Ok((self.evaluate(arg)?, self.interner.expr_location(&arg))) + })?; + let location = self.interner.expr_location(&id); + + let typ = object.get_type().follow_bindings(); + let method_name = &call.method.0.contents; + + // TODO: Traits + let method = match &typ { + Type::Struct(struct_def, _) => { + self.interner.lookup_method(&typ, struct_def.borrow().id, method_name, false) + } + _ => self.interner.lookup_primitive_method(&typ, method_name), + }; + + if let Some(method) = method { + self.call_function(method, arguments, location) + } else { + Err(InterpreterError::NoMethodFound { object, typ, location }) + } + } + + fn evaluate_cast(&mut 
self, cast: HirCastExpression, id: ExprId) -> IResult { + macro_rules! signed_int_to_field { + ($x:expr) => {{ + // Need to convert the signed integer to an i128 before + // we negate it to preserve the MIN value. + let mut value = $x as i128; + let is_negative = value < 0; + if is_negative { + value = -value; + } + ((value as u128).into(), is_negative) + }}; + } + + let (mut lhs, lhs_is_negative) = match self.evaluate(cast.lhs)? { + Value::Field(value) => (value, false), + Value::U8(value) => ((value as u128).into(), false), + Value::U32(value) => ((value as u128).into(), false), + Value::U64(value) => ((value as u128).into(), false), + Value::I8(value) => signed_int_to_field!(value), + Value::I32(value) => signed_int_to_field!(value), + Value::I64(value) => signed_int_to_field!(value), + Value::Bool(value) => { + (if value { FieldElement::one() } else { FieldElement::zero() }, false) + } + value => { + let location = self.interner.expr_location(&id); + return Err(InterpreterError::NonNumericCasted { value, location }); + } + }; + + macro_rules! cast_to_int { + ($x:expr, $method:ident, $typ:ty, $f:ident) => {{ + let mut value = $x.$method() as $typ; + if lhs_is_negative { + value = 0 - value; + } + Ok(Value::$f(value)) + }}; + } + + // Now actually cast the lhs, bit casting and wrapping as necessary + match cast.r#type.follow_bindings() { + Type::FieldElement => { + if lhs_is_negative { + lhs = FieldElement::zero() - lhs; + } + Ok(Value::Field(lhs)) + } + Type::Integer(sign, bit_size) => match (sign, bit_size) { + (Signedness::Unsigned, IntegerBitSize::One) => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::TypeUnsupported { typ: cast.r#type, location }) + } + (Signedness::Unsigned, IntegerBitSize::Eight) => cast_to_int!(lhs, to_u128, u8, U8), + (Signedness::Unsigned, IntegerBitSize::ThirtyTwo) => { + cast_to_int!(lhs, to_u128, u32, U32) + } + (Signedness::Unsigned, IntegerBitSize::SixtyFour) => { + cast_to_int!(lhs, to_u128, u64, U64) + } + (Signedness::Signed, IntegerBitSize::One) => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::TypeUnsupported { typ: cast.r#type, location }) + } + (Signedness::Signed, IntegerBitSize::Eight) => cast_to_int!(lhs, to_i128, i8, I8), + (Signedness::Signed, IntegerBitSize::ThirtyTwo) => { + cast_to_int!(lhs, to_i128, i32, I32) + } + (Signedness::Signed, IntegerBitSize::SixtyFour) => { + cast_to_int!(lhs, to_i128, i64, I64) + } + }, + Type::Bool => Ok(Value::Bool(!lhs.is_zero() || lhs_is_negative)), + typ => { + let location = self.interner.expr_location(&id); + Err(InterpreterError::CastToNonNumericType { typ, location }) + } + } + } + + fn evaluate_if(&mut self, if_: HirIfExpression, id: ExprId) -> IResult { + let condition = match self.evaluate(if_.condition)? 
{ + Value::Bool(value) => value, + value => { + let location = self.interner.expr_location(&id); + return Err(InterpreterError::NonBoolUsedInIf { value, location }); + } + }; + + self.push_scope(); + + let result = if condition { + if if_.alternative.is_some() { + self.evaluate(if_.consequence) + } else { + self.evaluate(if_.consequence)?; + Ok(Value::Unit) + } + } else { + match if_.alternative { + Some(alternative) => self.evaluate(alternative), + None => Ok(Value::Unit), + } + }; + + self.pop_scope(); + result + } + + fn evaluate_tuple(&mut self, tuple: Vec) -> IResult { + let fields = try_vecmap(tuple, |field| self.evaluate(field))?; + Ok(Value::Tuple(fields)) + } + + fn evaluate_lambda(&mut self, lambda: HirLambda, id: ExprId) -> IResult { + let location = self.interner.expr_location(&id); + let environment = + try_vecmap(&lambda.captures, |capture| self.lookup_id(capture.ident.id, location))?; + + let typ = self.interner.id_type(id); + Ok(Value::Closure(lambda, environment, typ)) + } + + fn evaluate_statement(&mut self, statement: StmtId) -> IResult { + match self.interner.statement(&statement) { + HirStatement::Let(let_) => self.evaluate_let(let_), + HirStatement::Constrain(constrain) => self.evaluate_constrain(constrain), + HirStatement::Assign(assign) => self.evaluate_assign(assign), + HirStatement::For(for_) => self.evaluate_for(for_), + HirStatement::Break => self.evaluate_break(), + HirStatement::Continue => self.evaluate_continue(), + HirStatement::Expression(expression) => self.evaluate(expression), + HirStatement::Semi(expression) => { + self.evaluate(expression)?; + Ok(Value::Unit) + } + HirStatement::Error => { + let location = self.interner.id_location(statement); + Err(InterpreterError::ErrorNodeEncountered { location }) + } + } + } + + fn evaluate_let(&mut self, let_: HirLetStatement) -> IResult { + let rhs = self.evaluate(let_.expression)?; + let location = self.interner.expr_location(&let_.expression); + self.define_pattern(&let_.pattern, &let_.r#type, rhs, location)?; + Ok(Value::Unit) + } + + fn evaluate_constrain(&mut self, constrain: HirConstrainStatement) -> IResult { + match self.evaluate(constrain.0)? { + Value::Bool(true) => Ok(Value::Unit), + Value::Bool(false) => { + let location = self.interner.expr_location(&constrain.0); + let message = constrain.2.and_then(|expr| self.evaluate(expr).ok()); + Err(InterpreterError::FailingConstraint { location, message }) + } + value => { + let location = self.interner.expr_location(&constrain.0); + Err(InterpreterError::NonBoolUsedInConstrain { value, location }) + } + } + } + + fn evaluate_assign(&mut self, assign: HirAssignStatement) -> IResult { + let rhs = self.evaluate(assign.expression)?; + self.store_lvalue(assign.lvalue, rhs)?; + Ok(Value::Unit) + } + + fn store_lvalue(&mut self, lvalue: HirLValue, rhs: Value) -> IResult<()> { + match lvalue { + HirLValue::Ident(ident, typ) => { + self.checked_mutate(ident.id, &typ, rhs, ident.location) + } + HirLValue::Dereference { lvalue, element_type: _, location } => { + match self.evaluate_lvalue(&lvalue)? { + Value::Pointer(value) => { + *value.borrow_mut() = rhs; + Ok(()) + } + value => Err(InterpreterError::NonPointerDereferenced { value, location }), + } + } + HirLValue::MemberAccess { object, field_name, field_index, typ: _, location } => { + let index = field_index.expect("The field index should be set after type checking"); + match self.evaluate_lvalue(&object)? 
{ + Value::Tuple(mut fields) => { + fields[index] = rhs; + self.store_lvalue(*object, Value::Tuple(fields)) + } + Value::Struct(mut fields, typ) => { + fields.insert(Rc::new(field_name.0.contents), rhs); + self.store_lvalue(*object, Value::Struct(fields, typ)) + } + value => { + Err(InterpreterError::NonTupleOrStructInMemberAccess { value, location }) + } + } + } + HirLValue::Index { array, index, typ: _, location } => { + let array_value = self.evaluate_lvalue(&array)?; + let index = self.evaluate(index)?; + + let constructor = match &array_value { + Value::Array(..) => Value::Array, + _ => Value::Slice, + }; + + let typ = array_value.get_type().into_owned(); + let (elements, index) = self.bounds_check(array_value, index, location)?; + + let new_array = constructor(elements.update(index, rhs), typ); + self.store_lvalue(*array, new_array) + } + } + } + + fn evaluate_lvalue(&mut self, lvalue: &HirLValue) -> IResult { + match lvalue { + HirLValue::Ident(ident, _) => self.lookup(ident), + HirLValue::Dereference { lvalue, element_type: _, location } => { + match self.evaluate_lvalue(lvalue)? { + Value::Pointer(value) => Ok(value.borrow().clone()), + value => { + Err(InterpreterError::NonPointerDereferenced { value, location: *location }) + } + } + } + HirLValue::MemberAccess { object, field_name, field_index, typ: _, location } => { + let index = field_index.expect("The field index should be set after type checking"); + + match self.evaluate_lvalue(object)? { + Value::Tuple(mut values) => Ok(values.swap_remove(index)), + Value::Struct(fields, _) => Ok(fields[&field_name.0.contents].clone()), + value => Err(InterpreterError::NonTupleOrStructInMemberAccess { + value, + location: *location, + }), + } + } + HirLValue::Index { array, index, typ: _, location } => { + let array = self.evaluate_lvalue(array)?; + let index = self.evaluate(*index)?; + let (elements, index) = self.bounds_check(array, index, *location)?; + Ok(elements[index].clone()) + } + } + } + + fn evaluate_for(&mut self, for_: HirForStatement) -> IResult { + // i128 can store all values from i8 - u64 + let get_index = |this: &mut Self, expr| -> IResult<(_, fn(_) -> _)> { + match this.evaluate(expr)? 
{ + Value::I8(value) => Ok((value as i128, |i| Value::I8(i as i8))), + Value::I32(value) => Ok((value as i128, |i| Value::I32(i as i32))), + Value::I64(value) => Ok((value as i128, |i| Value::I64(i as i64))), + Value::U8(value) => Ok((value as i128, |i| Value::U8(i as u8))), + Value::U32(value) => Ok((value as i128, |i| Value::U32(i as u32))), + Value::U64(value) => Ok((value as i128, |i| Value::U64(i as u64))), + value => { + let location = this.interner.expr_location(&expr); + Err(InterpreterError::NonIntegerUsedInLoop { value, location }) + } + } + }; + + let (start, make_value) = get_index(self, for_.start_range)?; + let (end, _) = get_index(self, for_.end_range)?; + let was_in_loop = std::mem::replace(&mut self.in_loop, true); + + for i in start..end { + self.push_scope(); + self.current_scope_mut().insert(for_.identifier.id, make_value(i)); + + match self.evaluate(for_.block) { + Ok(_) => (), + Err(InterpreterError::Break) => break, + Err(InterpreterError::Continue) => continue, + Err(other) => return Err(other), + } + self.pop_scope(); + } + + self.in_loop = was_in_loop; + Ok(Value::Unit) + } + + fn evaluate_break(&mut self) -> IResult { + if self.in_loop { + Err(InterpreterError::Break) + } else { + Err(InterpreterError::BreakNotInLoop) + } + } + + fn evaluate_continue(&mut self) -> IResult { + if self.in_loop { + Err(InterpreterError::Continue) + } else { + Err(InterpreterError::ContinueNotInLoop) + } + } +} + +impl Value { + fn get_type(&self) -> Cow { + Cow::Owned(match self { + Value::Unit => Type::Unit, + Value::Bool(_) => Type::Bool, + Value::Field(_) => Type::FieldElement, + Value::I8(_) => Type::Integer(Signedness::Signed, IntegerBitSize::Eight), + Value::I32(_) => Type::Integer(Signedness::Signed, IntegerBitSize::ThirtyTwo), + Value::I64(_) => Type::Integer(Signedness::Signed, IntegerBitSize::SixtyFour), + Value::U8(_) => Type::Integer(Signedness::Unsigned, IntegerBitSize::Eight), + Value::U32(_) => Type::Integer(Signedness::Unsigned, IntegerBitSize::ThirtyTwo), + Value::U64(_) => Type::Integer(Signedness::Unsigned, IntegerBitSize::SixtyFour), + Value::String(value) => { + let length = Type::Constant(value.len() as u64); + Type::String(Box::new(length)) + } + Value::Function(_, typ) => return Cow::Borrowed(typ), + Value::Closure(_, _, typ) => return Cow::Borrowed(typ), + Value::Tuple(fields) => { + Type::Tuple(vecmap(fields, |field| field.get_type().into_owned())) + } + Value::Struct(_, typ) => return Cow::Borrowed(typ), + Value::Array(_, typ) => return Cow::Borrowed(typ), + Value::Slice(_, typ) => return Cow::Borrowed(typ), + Value::Code(_) => Type::Code, + Value::Pointer(element) => { + let element = element.borrow().get_type().into_owned(); + Type::MutableReference(Box::new(element)) + } + }) + } +} diff --git a/compiler/noirc_frontend/src/hir/comptime/mod.rs b/compiler/noirc_frontend/src/hir/comptime/mod.rs new file mode 100644 index 00000000000..83aaddaa405 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/comptime/mod.rs @@ -0,0 +1,3 @@ +mod hir_to_ast; +mod interpreter; +mod tests; diff --git a/compiler/noirc_frontend/src/hir/comptime/tests.rs b/compiler/noirc_frontend/src/hir/comptime/tests.rs new file mode 100644 index 00000000000..016e7079886 --- /dev/null +++ b/compiler/noirc_frontend/src/hir/comptime/tests.rs @@ -0,0 +1,166 @@ +#![cfg(test)] + +use noirc_errors::Location; + +use super::interpreter::{Interpreter, InterpreterError, Value}; +use crate::hir::type_check::test::type_check_src_code; + +fn interpret_helper(src: &str, func_namespace: Vec) -> Result { 
+ let (mut interner, main_id) = type_check_src_code(src, func_namespace); + let mut interpreter = Interpreter::new(&mut interner); + + let no_location = Location::dummy(); + interpreter.call_function(main_id, Vec::new(), no_location) +} + +fn interpret(src: &str, func_namespace: Vec) -> Value { + interpret_helper(src, func_namespace).unwrap_or_else(|error| { + panic!("Expected interpreter to exit successfully, but found {error:?}") + }) +} + +fn interpret_expect_error(src: &str, func_namespace: Vec) -> InterpreterError { + interpret_helper(src, func_namespace).expect_err("Expected interpreter to error") +} + +#[test] +fn interpreter_works() { + let program = "fn main() -> pub Field { 3 }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::Field(3u128.into())); +} + +#[test] +fn mutation_works() { + let program = "fn main() -> pub i8 { + let mut x = 3; + x = 4; + x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::I8(4)); +} + +#[test] +fn mutating_references() { + let program = "fn main() -> pub i32 { + let x = &mut 3; + *x = 4; + *x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::I32(4)); +} + +#[test] +fn mutating_mutable_references() { + let program = "fn main() -> pub i64 { + let mut x = &mut 3; + *x = 4; + *x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::I64(4)); +} + +#[test] +fn mutating_arrays() { + let program = "fn main() -> pub u8 { + let mut a1 = [1, 2, 3, 4]; + a1[1] = 22; + a1[1] + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::U8(22)); +} + +#[test] +fn for_loop() { + let program = "fn main() -> pub u8 { + let mut x = 0; + for i in 0 .. 6 { + x += i; + } + x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::U8(15)); +} + +#[test] +fn for_loop_with_break() { + let program = "unconstrained fn main() -> pub u32 { + let mut x = 0; + for i in 0 .. 6 { + if i == 4 { + break; + } + x += i; + } + x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::U32(6)); +} + +#[test] +fn for_loop_with_continue() { + let program = "unconstrained fn main() -> pub u64 { + let mut x = 0; + for i in 0 .. 6 { + if i == 4 { + continue; + } + x += i; + } + x + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::U64(11)); +} + +#[test] +fn assert() { + let program = "fn main() { + assert(1 == 1); + }"; + let result = interpret(program, vec!["main".into()]); + assert_eq!(result, Value::Unit); +} + +#[test] +fn assert_fail() { + let program = "fn main() { + assert(1 == 2); + }"; + let result = interpret_expect_error(program, vec!["main".into()]); + assert!(matches!(result, InterpreterError::FailingConstraint { .. 
})); +} + +#[test] +fn lambda() { + let program = "fn main() -> pub u8 { + let f = |x: u8| x + 1; + f(1) + }"; + let result = interpret(program, vec!["main".into()]); + assert!(matches!(result, Value::U8(2))); +} + +#[test] +fn non_deterministic_recursion() { + let program = " + fn main() -> pub u64 { + fib(10) + } + + fn fib(x: u64) -> u64 { + if x <= 1 { + x + } else { + fib(x - 1) + fib(x - 2) + } + }"; + let result = interpret(program, vec!["main".into(), "fib".into()]); + assert_eq!(result, Value::U64(55)); +} diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs index 90aa4baee7c..463b8a4b329 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_crate.rs @@ -256,20 +256,6 @@ impl DefCollector { // Add the current crate to the collection of DefMaps context.def_maps.insert(crate_id, def_collector.def_map); - // TODO(#4653): generalize this function - for macro_processor in macro_processors { - macro_processor - .process_collected_defs( - &crate_id, - context, - &def_collector.collected_traits_impls, - &mut def_collector.collected_functions, - ) - .unwrap_or_else(|(macro_err, file_id)| { - errors.push((macro_err.into(), file_id)); - }); - } - inject_prelude(crate_id, context, crate_root, &mut def_collector.collected_imports); for submodule in submodules { inject_prelude( diff --git a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs index fcb20c740c7..e3c79e39d31 100644 --- a/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs +++ b/compiler/noirc_frontend/src/hir/def_collector/dc_mod.rs @@ -102,8 +102,12 @@ impl<'a> ModCollector<'a> { for global in globals { let name = global.pattern.name_ident().clone(); - let global_id = - context.def_interner.push_empty_global(name.clone(), self.module_id, self.file_id); + let global_id = context.def_interner.push_empty_global( + name.clone(), + self.module_id, + self.file_id, + global.attributes.clone(), + ); // Add the statement to the scope so its path can be looked up later let result = self.def_collector.def_map.modules[self.module_id.0] @@ -412,6 +416,7 @@ impl<'a> ModCollector<'a> { // TODO(Maddiaa): Investigate trait implementations with attributes see: https://github.com/noir-lang/noir/issues/2629 attributes: crate::token::Attributes::empty(), is_unconstrained: false, + is_comptime: false, }; let location = Location::new(name.span(), self.file_id); @@ -455,6 +460,7 @@ impl<'a> ModCollector<'a> { name.clone(), trait_id.0.local_id, self.file_id, + vec![], ); if let Err((first_def, second_def)) = self.def_collector.def_map.modules diff --git a/compiler/noirc_frontend/src/hir/def_map/mod.rs b/compiler/noirc_frontend/src/hir/def_map/mod.rs index 157227f763e..7c0090ff95b 100644 --- a/compiler/noirc_frontend/src/hir/def_map/mod.rs +++ b/compiler/noirc_frontend/src/hir/def_map/mod.rs @@ -2,13 +2,13 @@ use crate::graph::CrateId; use crate::hir::def_collector::dc_crate::{CompilationError, DefCollector}; use crate::hir::Context; use crate::macros_api::MacroProcessor; -use crate::node_interner::{FuncId, NodeInterner, StructId}; +use crate::node_interner::{FuncId, GlobalId, NodeInterner, StructId}; use crate::parser::{parse_program, ParsedModule, ParserError}; use crate::token::{FunctionAttribute, SecondaryAttribute, TestScope}; use arena::{Arena, Index}; use fm::{FileId, FileManager}; use noirc_errors::Location; -use 
std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; mod module_def; pub use module_def::*; mod item_scope; @@ -217,20 +217,37 @@ impl CrateDefMap { }) .collect(); - let events = module - .type_definitions() - .filter_map(|id| { - id.as_type().filter(|struct_id| { - interner - .struct_attributes(struct_id) - .iter() - .any(|attr| attr == &SecondaryAttribute::Event) - }) - }) - .collect(); + let mut outputs = + ContractOutputs { structs: HashMap::new(), globals: HashMap::new() }; + + interner.get_all_globals().iter().for_each(|global_info| { + interner.global_attributes(&global_info.id).iter().for_each(|attr| { + if let SecondaryAttribute::Abi(tag) = attr { + if let Some(tagged) = outputs.globals.get_mut(tag) { + tagged.push(global_info.id); + } else { + outputs.globals.insert(tag.to_string(), vec![global_info.id]); + } + } + }); + }); + + module.type_definitions().for_each(|id| { + if let ModuleDefId::TypeId(struct_id) = id { + interner.struct_attributes(&struct_id).iter().for_each(|attr| { + if let SecondaryAttribute::Abi(tag) = attr { + if let Some(tagged) = outputs.structs.get_mut(tag) { + tagged.push(struct_id); + } else { + outputs.structs.insert(tag.to_string(), vec![struct_id]); + } + } + }); + } + }); let name = self.get_module_path(id, module.parent); - Some(Contract { name, location: module.location, functions, events }) + Some(Contract { name, location: module.location, functions, outputs }) } else { None } @@ -283,6 +300,11 @@ pub struct ContractFunctionMeta { pub is_entry_point: bool, } +pub struct ContractOutputs { + pub structs: HashMap>, + pub globals: HashMap>, +} + /// A 'contract' in Noir source code with a given name, functions and events. /// This is not an AST node, it is just a convenient form to return for CrateDefMap::get_all_contracts. pub struct Contract { @@ -290,7 +312,7 @@ pub struct Contract { pub name: String, pub location: Location, pub functions: Vec, - pub events: Vec, + pub outputs: ContractOutputs, } /// Given a FileId, fetch the File, from the FileManager and parse it's content diff --git a/compiler/noirc_frontend/src/hir/mod.rs b/compiler/noirc_frontend/src/hir/mod.rs index 00bcb0cdebf..55dc22d6c5d 100644 --- a/compiler/noirc_frontend/src/hir/mod.rs +++ b/compiler/noirc_frontend/src/hir/mod.rs @@ -1,3 +1,4 @@ +pub mod comptime; pub mod def_collector; pub mod def_map; pub mod resolution; @@ -26,7 +27,7 @@ pub type ParsedFiles = HashMap)>; pub struct Context<'file_manager, 'parsed_files> { pub def_interner: NodeInterner, pub crate_graph: CrateGraph, - pub(crate) def_maps: BTreeMap, + pub def_maps: BTreeMap, // In the WASM context, we take ownership of the file manager, // which is why this needs to be a Cow. In all use-cases, the file manager // is read-only however, once it has been passed to the Context. @@ -157,7 +158,8 @@ impl Context<'_, '_> { } } - /// Recursively walks down the crate dependency graph from crate_id until we reach requested crate + /// Tries to find the requested crate in the current one's dependencies, + /// otherwise walks down the crate dependency graph from crate_id until we reach it. /// This is needed in case a library (lib1) re-export a structure defined in another library (lib2) /// In that case, we will get [lib1,lib2] when looking for a struct defined in lib2, /// re-exported by lib1 and used by the main crate. 
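Aside (not part of the diff): the doc comment above describes a two-phase lookup: check the crate's direct dependencies for the target first, and only then recurse through transitive dependencies, building up the path of crate names through which the item is re-exported. A minimal sketch of the same idea over a toy dependency graph follows; the `find_path` helper and the plain `HashMap` graph are illustrative stand-ins, not the compiler's `CrateGraph` API.

use std::collections::HashMap;

type CrateId = u32;

// Returns the chain of dependency names leading from `from` to `target`, if one exists.
fn find_path(
    graph: &HashMap<CrateId, Vec<(String, CrateId)>>,
    from: CrateId,
    target: CrateId,
) -> Option<Vec<String>> {
    let deps = graph.get(&from)?;
    // Phase 1: prefer a direct dependency edge.
    if let Some((name, _)) = deps.iter().find(|(_, id)| *id == target) {
        return Some(vec![name.clone()]);
    }
    // Phase 2: otherwise recurse and prepend the dependency we went through.
    deps.iter().find_map(|(name, id)| {
        let mut path = find_path(graph, *id, target)?;
        path.insert(0, name.clone());
        Some(path)
    })
}

fn main() {
    // main(0) -> lib1(1) -> lib2(2); an item defined in lib2 is re-exported by lib1.
    let mut graph = HashMap::new();
    graph.insert(0, vec![("lib1".to_string(), 1)]);
    graph.insert(1, vec![("lib2".to_string(), 2)]);
    graph.insert(2, vec![]);
    assert_eq!(find_path(&graph, 0, 2), Some(vec!["lib1".to_string(), "lib2".to_string()]));
}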
@@ -167,16 +169,26 @@ impl Context<'_, '_> { crate_id: &CrateId, target_crate_id: &CrateId, ) -> Option> { - for dep in &self.crate_graph[crate_id].dependencies { - if &dep.crate_id == target_crate_id { - return Some(vec![dep.name.to_string()]); - } - if let Some(mut path) = self.find_dependencies(&dep.crate_id, target_crate_id) { - path.insert(0, dep.name.to_string()); - return Some(path); - } - } - None + self.crate_graph[crate_id] + .dependencies + .iter() + .find_map(|dep| { + if &dep.crate_id == target_crate_id { + Some(vec![dep.name.to_string()]) + } else { + None + } + }) + .or_else(|| { + self.crate_graph[crate_id].dependencies.iter().find_map(|dep| { + if let Some(mut path) = self.find_dependencies(&dep.crate_id, target_crate_id) { + path.insert(0, dep.name.to_string()); + Some(path) + } else { + None + } + }) + }) } pub fn function_meta(&self, func_id: &FuncId) -> &FuncMeta { @@ -241,7 +253,7 @@ impl Context<'_, '_> { .get_all_contracts(&self.def_interner) } - fn module(&self, module_id: def_map::ModuleId) -> &def_map::ModuleData { + pub fn module(&self, module_id: def_map::ModuleId) -> &def_map::ModuleData { module_id.module(&self.def_maps) } } diff --git a/compiler/noirc_frontend/src/hir/resolution/errors.rs b/compiler/noirc_frontend/src/hir/resolution/errors.rs index d5b0c612f90..71e3f3482fc 100644 --- a/compiler/noirc_frontend/src/hir/resolution/errors.rs +++ b/compiler/noirc_frontend/src/hir/resolution/errors.rs @@ -76,6 +76,8 @@ pub enum ResolverError { NestedSlices { span: Span }, #[error("#[recursive] attribute is only allowed on entry points to a program")] MisplacedRecursiveAttribute { ident: Ident }, + #[error("#[abi(tag)] attribute is only allowed in contracts")] + AbiAttributeOusideContract { span: Span }, #[error("Usage of the `#[foreign]` or `#[builtin]` function attributes are not allowed outside of the Noir standard library")] LowLevelFunctionOutsideOfStdlib { ident: Ident }, #[error("Dependency cycle found, '{item}' recursively depends on itself: {cycle} ")] @@ -303,6 +305,13 @@ impl From for Diagnostic { diag.add_note("The `#[recursive]` attribute specifies to the backend whether it should use a prover which generates proofs that are friendly for recursive verification in another circuit".to_owned()); diag } + ResolverError::AbiAttributeOusideContract { span } => { + Diagnostic::simple_error( + "#[abi(tag)] attributes can only be used in contracts".to_string(), + "misplaced #[abi(tag)] attribute".to_string(), + span, + ) + }, ResolverError::LowLevelFunctionOutsideOfStdlib { ident } => Diagnostic::simple_error( "Definition of low-level function outside of standard library".into(), "Usage of the `#[foreign]` or `#[builtin]` function attributes are not allowed outside of the Noir standard library".into(), diff --git a/compiler/noirc_frontend/src/hir/resolution/resolver.rs b/compiler/noirc_frontend/src/hir/resolution/resolver.rs index f2b8212db7a..01d477e9d4c 100644 --- a/compiler/noirc_frontend/src/hir/resolution/resolver.rs +++ b/compiler/noirc_frontend/src/hir/resolution/resolver.rs @@ -19,6 +19,7 @@ use crate::hir_def::expr::{ }; use crate::hir_def::traits::{Trait, TraitConstraint}; +use crate::macros_api::SecondaryAttribute; use crate::token::{Attributes, FunctionAttribute}; use regex::Regex; use std::collections::{BTreeMap, HashSet}; @@ -245,6 +246,7 @@ impl<'a> Resolver<'a> { name: name.clone(), attributes: Attributes::empty(), is_unconstrained: false, + is_comptime: false, visibility: ItemVisibility::Public, // Trait functions are always public generics: 
generics.clone(), parameters: vecmap(parameters, |(name, typ)| Param { @@ -501,6 +503,7 @@ impl<'a> Resolver<'a> { let fields = self.resolve_type_inner(*fields, new_variables); Type::FmtString(Box::new(resolved_size), Box::new(fields)) } + Code => Type::Code, Unit => Type::Unit, Unspecified => Type::Error, Error => Type::Error, @@ -617,7 +620,17 @@ impl<'a> Resolver<'a> { match self.lookup_struct_or_error(path) { Some(struct_type) => { let expected_generic_count = struct_type.borrow().generics.len(); - + if !self.in_contract + && self + .interner + .struct_attributes(&struct_type.borrow().id) + .iter() + .any(|attr| matches!(attr, SecondaryAttribute::Abi(_))) + { + self.push_err(ResolverError::AbiAttributeOusideContract { + span: struct_type.borrow().name.span(), + }); + } self.verify_generics_count(expected_generic_count, &mut args, span, || { struct_type.borrow().to_string() }); @@ -916,6 +929,7 @@ impl<'a> Resolver<'a> { let name_ident = HirIdent::non_trait_method(id, location); let attributes = func.attributes().clone(); + let should_fold = attributes.is_foldable(); let mut generics = vecmap(&self.generics, |(_, typevar, _)| typevar.clone()); let mut parameters = vec![]; @@ -996,8 +1010,6 @@ impl<'a> Resolver<'a> { .map(|(name, typevar, _span)| (name.clone(), typevar.clone())) .collect(); - let should_fold = attributes.is_foldable(); - FuncMeta { name: name_ident, kind: func.kind, @@ -1026,7 +1038,7 @@ impl<'a> Resolver<'a> { /// True if the 'pub' keyword is allowed on parameters in this function /// 'pub' on function parameters is only allowed for entry point functions fn pub_allowed(&self, func: &NoirFunction) -> bool { - self.is_entry_point_function(func) + self.is_entry_point_function(func) || func.attributes().is_foldable() } fn is_entry_point_function(&self, func: &NoirFunction) -> bool { @@ -1167,10 +1179,19 @@ impl<'a> Resolver<'a> { let global_id = self.interner.next_global_id(); let definition = DefinitionKind::Global(global_id); + if !self.in_contract + && let_stmt.attributes.iter().any(|attr| matches!(attr, SecondaryAttribute::Abi(_))) + { + self.push_err(ResolverError::AbiAttributeOusideContract { + span: let_stmt.pattern.span(), + }); + } + HirStatement::Let(HirLetStatement { pattern: self.resolve_pattern(let_stmt.pattern, definition), r#type: self.resolve_type(let_stmt.r#type), expression, + attributes: let_stmt.attributes, }) } @@ -1183,6 +1204,7 @@ impl<'a> Resolver<'a> { pattern: self.resolve_pattern(let_stmt.pattern, definition), r#type: self.resolve_type(let_stmt.r#type), expression, + attributes: let_stmt.attributes, }) } StatementKind::Constrain(constrain_stmt) => { diff --git a/compiler/noirc_frontend/src/hir/type_check/mod.rs b/compiler/noirc_frontend/src/hir/type_check/mod.rs index 926dac30bcd..b8931ce56b9 100644 --- a/compiler/noirc_frontend/src/hir/type_check/mod.rs +++ b/compiler/noirc_frontend/src/hir/type_check/mod.rs @@ -51,8 +51,7 @@ pub fn type_check_func(interner: &mut NodeInterner, func_id: FuncId) -> Vec, ) { let meta = type_checker.interner.function_meta(&func_id); - if (meta.is_entry_point || meta.should_fold) && !param.1.is_valid_for_program_input() { + if (meta.is_entry_point && !param.1.is_valid_for_program_input()) + || (meta.should_fold && !param.1.is_valid_non_inlined_function_input()) + { let span = param.0.span(); errors.push(TypeCheckError::InvalidTypeForEntryPoint { span }); } @@ -424,12 +425,12 @@ impl<'interner> TypeChecker<'interner> { // XXX: These tests are all manual currently. 
/// We can either build a test apparatus or pass raw code through the resolver #[cfg(test)] -mod test { +pub mod test { use std::collections::{BTreeMap, HashMap}; use std::vec; use fm::FileId; - use iter_extended::vecmap; + use iter_extended::btree_map; use noirc_errors::{Location, Span}; use crate::graph::CrateId; @@ -504,6 +505,7 @@ mod test { pattern: Identifier(z), r#type: Type::FieldElement, expression: expr_id, + attributes: vec![], }; let stmt_id = interner.push_stmt(HirStatement::Let(let_stmt)); let expr_id = interner @@ -600,7 +602,7 @@ mod test { "#; - type_check_src_code(src, vec![String::from("main"), String::from("foo")]); + type_check_src_code(src, vec![String::from("main")]); } #[test] fn basic_closure() { @@ -611,7 +613,7 @@ mod test { } "#; - type_check_src_code(src, vec![String::from("main"), String::from("foo")]); + type_check_src_code(src, vec![String::from("main")]); } #[test] @@ -632,12 +634,23 @@ mod test { #[fold] fn fold(x: &mut Field) -> Field { *x - } + } "#; type_check_src_code_errors_expected(src, vec![String::from("fold")], 1); } + #[test] + fn fold_numeric_generic() { + let src = r#" + #[fold] + fn fold(x: T) -> T { + x + } + "#; + + type_check_src_code(src, vec![String::from("fold")]); + } // This is the same Stub that is in the resolver, maybe we can pull this out into a test module and re-use? struct TestPathResolver(HashMap); @@ -671,8 +684,8 @@ mod test { } } - fn type_check_src_code(src: &str, func_namespace: Vec) { - type_check_src_code_errors_expected(src, func_namespace, 0); + pub fn type_check_src_code(src: &str, func_namespace: Vec) -> (NodeInterner, FuncId) { + type_check_src_code_errors_expected(src, func_namespace, 0) } // This function assumes that there is only one function and this is the @@ -681,7 +694,7 @@ mod test { src: &str, func_namespace: Vec, expected_num_type_check_errs: usize, - ) { + ) -> (NodeInterner, FuncId) { let (program, errors) = parse_program(src); let mut interner = NodeInterner::default(); interner.populate_dummy_operator_traits(); @@ -694,14 +707,16 @@ mod test { errors ); - let main_id = interner.push_test_function_definition("main".into()); + let func_ids = btree_map(&func_namespace, |name| { + (name.to_string(), interner.push_test_function_definition(name.into())) + }); - let func_ids = - vecmap(&func_namespace, |name| interner.push_test_function_definition(name.into())); + let main_id = + *func_ids.get("main").unwrap_or_else(|| func_ids.first_key_value().unwrap().1); let mut path_resolver = TestPathResolver(HashMap::new()); - for (name, id) in func_namespace.into_iter().zip(func_ids.clone()) { - path_resolver.insert_func(name.to_owned(), id); + for (name, id) in func_ids.iter() { + path_resolver.insert_func(name.to_owned(), *id); } let mut def_maps = BTreeMap::new(); @@ -721,20 +736,24 @@ mod test { }, ); - let func_meta = vecmap(program.into_sorted().functions, |nf| { + for nf in program.into_sorted().functions { let resolver = Resolver::new(&mut interner, &path_resolver, &def_maps, file); - let (hir_func, func_meta, resolver_errors) = resolver.resolve_function(nf, main_id); - assert_eq!(resolver_errors, vec![]); - (hir_func, func_meta) - }); - for ((hir_func, meta), func_id) in func_meta.into_iter().zip(func_ids.clone()) { - interner.update_fn(func_id, hir_func); - interner.push_fn_meta(meta, func_id); + let function_id = *func_ids.get(nf.name()).unwrap(); + let (hir_func, func_meta, resolver_errors) = resolver.resolve_function(nf, function_id); + + interner.push_fn_meta(func_meta, function_id); + 
interner.update_fn(function_id, hir_func); + assert_eq!(resolver_errors, vec![]); } // Type check section - let errors = super::type_check_func(&mut interner, func_ids.first().cloned().unwrap()); + let mut errors = Vec::new(); + + for function in func_ids.values() { + errors.extend(super::type_check_func(&mut interner, *function)); + } + assert_eq!( errors.len(), expected_num_type_check_errs, @@ -743,5 +762,7 @@ mod test { errors.len(), errors ); + + (interner, main_id) } } diff --git a/compiler/noirc_frontend/src/hir_def/expr.rs b/compiler/noirc_frontend/src/hir_def/expr.rs index c2f6031bf6d..eb4ebf3f913 100644 --- a/compiler/noirc_frontend/src/hir_def/expr.rs +++ b/compiler/noirc_frontend/src/hir_def/expr.rs @@ -260,7 +260,7 @@ impl HirBlockExpression { } /// A variable captured inside a closure -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct HirCapturedVar { pub ident: HirIdent, @@ -274,7 +274,7 @@ pub struct HirCapturedVar { pub transitive_capture_index: Option, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct HirLambda { pub parameters: Vec<(HirPattern, Type)>, pub return_type: Type, diff --git a/compiler/noirc_frontend/src/hir_def/function.rs b/compiler/noirc_frontend/src/hir_def/function.rs index a3bbc9445a8..97f4b6a1616 100644 --- a/compiler/noirc_frontend/src/hir_def/function.rs +++ b/compiler/noirc_frontend/src/hir_def/function.rs @@ -24,8 +24,8 @@ impl HirFunction { HirFunction(expr_id) } - pub const fn as_expr(&self) -> &ExprId { - &self.0 + pub const fn as_expr(&self) -> ExprId { + self.0 } pub fn block(&self, interner: &NodeInterner) -> HirBlockExpression { diff --git a/compiler/noirc_frontend/src/hir_def/stmt.rs b/compiler/noirc_frontend/src/hir_def/stmt.rs index c5e287b393c..37e3651a9b2 100644 --- a/compiler/noirc_frontend/src/hir_def/stmt.rs +++ b/compiler/noirc_frontend/src/hir_def/stmt.rs @@ -1,4 +1,5 @@ use super::expr::HirIdent; +use crate::macros_api::SecondaryAttribute; use crate::node_interner::ExprId; use crate::{Ident, Type}; use fm::FileId; @@ -26,6 +27,7 @@ pub struct HirLetStatement { pub pattern: HirPattern, pub r#type: Type, pub expression: ExprId, + pub attributes: Vec, } impl HirLetStatement { @@ -59,7 +61,7 @@ pub struct HirAssignStatement { #[derive(Debug, Clone)] pub struct HirConstrainStatement(pub ExprId, pub FileId, pub Option); -#[derive(Debug, Clone, Hash)] +#[derive(Debug, Clone, Hash, PartialEq, Eq)] pub enum HirPattern { Identifier(HirIdent), Mutable(Box, Location), diff --git a/compiler/noirc_frontend/src/hir_def/types.rs b/compiler/noirc_frontend/src/hir_def/types.rs index ec8b54c33b8..6aced6cced4 100644 --- a/compiler/noirc_frontend/src/hir_def/types.rs +++ b/compiler/noirc_frontend/src/hir_def/types.rs @@ -726,6 +726,54 @@ impl Type { } } + /// True if this type can be used as a parameter to an ACIR function that is not `main` or a contract function. + /// This encapsulates functions for which we may not want to inline during compilation. + /// + /// The inputs allowed for a function entry point differ from those allowed as input to a program as there are + /// certain types which through compilation we know what their size should be. + /// This includes types such as numeric generics. 
+ pub(crate) fn is_valid_non_inlined_function_input(&self) -> bool { + match self { + // Type::Error is allowed as usual since it indicates an error was already issued and + // we don't need to issue further errors about this likely unresolved type + Type::FieldElement + | Type::Integer(_, _) + | Type::Bool + | Type::Unit + | Type::Constant(_) + | Type::TypeVariable(_, _) + | Type::NamedGeneric(_, _) + | Type::Error => true, + + Type::FmtString(_, _) + // To enable this we would need to determine the size of the closure outputs at compile-time. + // This is possible as long as the output size is not dependent upon a witness condition. + | Type::Function(_, _, _) + | Type::Slice(_) + | Type::MutableReference(_) + | Type::Forall(_, _) + // TODO: probably can allow code as it is all compile time + | Type::Code + | Type::TraitAsType(..) => false, + + Type::Alias(alias, generics) => { + let alias = alias.borrow(); + alias.get_type(generics).is_valid_non_inlined_function_input() + } + + Type::Array(length, element) => { + length.is_valid_non_inlined_function_input() && element.is_valid_non_inlined_function_input() + } + Type::String(length) => length.is_valid_non_inlined_function_input(), + Type::Tuple(elements) => elements.iter().all(|elem| elem.is_valid_non_inlined_function_input()), + Type::Struct(definition, generics) => definition + .borrow() + .get_fields(generics) + .into_iter() + .all(|(_, field)| field.is_valid_non_inlined_function_input()), + } + } + /// Returns the number of `Forall`-quantified type variables on this type. /// Returns 0 if this is not a Type::Forall pub fn generic_count(&self) -> usize { diff --git a/compiler/noirc_frontend/src/lexer/lexer.rs b/compiler/noirc_frontend/src/lexer/lexer.rs index 265b9e4b5a3..2d1ebf530e3 100644 --- a/compiler/noirc_frontend/src/lexer/lexer.rs +++ b/compiler/noirc_frontend/src/lexer/lexer.rs @@ -2,7 +2,9 @@ use crate::token::{Attribute, DocStyle}; use super::{ errors::LexerErrorKind, - token::{IntType, Keyword, SpannedToken, Token, Tokens}, + token::{ + token_to_borrowed_token, BorrowedToken, IntType, Keyword, SpannedToken, Token, Tokens, + }, }; use acvm::FieldElement; use noirc_errors::{Position, Span}; @@ -21,6 +23,21 @@ pub struct Lexer<'a> { pub type SpannedTokenResult = Result; +pub(crate) fn from_spanned_token_result( + token_result: &SpannedTokenResult, +) -> Result<(usize, BorrowedToken<'_>, usize), LexerErrorKind> { + token_result + .as_ref() + .map(|spanned_token| { + ( + spanned_token.to_span().start() as usize, + token_to_borrowed_token(spanned_token.into()), + spanned_token.to_span().end() as usize, + ) + }) + .map_err(Clone::clone) +} + impl<'a> Lexer<'a> { /// Given a source file of noir code, return all the tokens in the file /// in order, along with any lexing errors that occurred. @@ -94,7 +111,7 @@ impl<'a> Lexer<'a> { fn next_token(&mut self) -> SpannedTokenResult { match self.next_char() { - Some(x) if x.is_whitespace() => { + Some(x) if Self::is_code_whitespace(x) => { let spanned = self.eat_whitespace(x); if self.skip_whitespaces { self.next_token() @@ -560,16 +577,21 @@ impl<'a> Lexer<'a> { } } + fn is_code_whitespace(c: char) -> bool { + c == '\t' || c == '\n' || c == '\r' || c == ' ' + } + /// Skips white space. 
They are not significant in the source language fn eat_whitespace(&mut self, initial_char: char) -> SpannedToken { let start = self.position; - let whitespace = self.eat_while(initial_char.into(), |ch| ch.is_whitespace()); + let whitespace = self.eat_while(initial_char.into(), Self::is_code_whitespace); SpannedToken::new(Token::Whitespace(whitespace), Span::inclusive(start, self.position)) } } impl<'a> Iterator for Lexer<'a> { type Item = SpannedTokenResult; + fn next(&mut self) -> Option { if self.done { None @@ -578,10 +600,12 @@ impl<'a> Iterator for Lexer<'a> { } } } + #[cfg(test)] mod tests { use super::*; use crate::token::{FunctionAttribute, SecondaryAttribute, TestScope}; + #[test] fn test_single_double_char() { let input = "! != + ( ) { } [ ] | , ; : :: < <= > >= & - -> . .. % / * = == << >>"; diff --git a/compiler/noirc_frontend/src/lexer/token.rs b/compiler/noirc_frontend/src/lexer/token.rs index f8378cdd84b..b4b785f2c81 100644 --- a/compiler/noirc_frontend/src/lexer/token.rs +++ b/compiler/noirc_frontend/src/lexer/token.rs @@ -9,12 +9,105 @@ use crate::lexer::errors::LexerErrorKind; /// smallest unit of grammar. A parser may (will) decide to parse /// items differently depending on the Tokens present but will /// never parse the same ordering of identical tokens differently. +#[derive(PartialEq, Eq, Hash, Debug, Clone, PartialOrd, Ord)] +pub enum BorrowedToken<'input> { + Ident(&'input str), + Int(FieldElement), + Bool(bool), + Str(&'input str), + /// the u8 is the number of hashes, i.e. r###.. + RawStr(&'input str, u8), + FmtStr(&'input str), + Keyword(Keyword), + IntType(IntType), + Attribute(Attribute), + LineComment(&'input str, Option), + BlockComment(&'input str, Option), + /// < + Less, + /// <= + LessEqual, + /// > + Greater, + /// >= + GreaterEqual, + /// == + Equal, + /// != + NotEqual, + /// + + Plus, + /// - + Minus, + /// * + Star, + /// / + Slash, + /// % + Percent, + /// & + Ampersand, + /// ^ + Caret, + /// << + ShiftLeft, + /// >> + ShiftRight, + /// . + Dot, + /// .. + DoubleDot, + /// ( + LeftParen, + /// ) + RightParen, + /// { + LeftBrace, + /// } + RightBrace, + /// [ + LeftBracket, + /// ] + RightBracket, + /// -> + Arrow, + /// | + Pipe, + /// # + Pound, + /// , + Comma, + /// : + Colon, + /// :: + DoubleColon, + /// ; + Semicolon, + /// ! + Bang, + /// = + Assign, + #[allow(clippy::upper_case_acronyms)] + EOF, + + Whitespace(&'input str), + + /// An invalid character is one that is not in noir's language or grammar. + /// + /// We don't report invalid tokens in the source as errors until parsing to + /// avoid reporting the error twice (once while lexing, again when it is encountered + /// during parsing). Reporting during lexing then removing these from the token stream + /// would not be equivalent as it would change the resulting parse. + Invalid(char), +} + #[derive(PartialEq, Eq, Hash, Debug, Clone, PartialOrd, Ord)] pub enum Token { Ident(String), Int(FieldElement), Bool(bool), Str(String), + /// the u8 is the number of hashes, i.e. r###.. 
RawStr(String, u8), FmtStr(String), Keyword(Keyword), @@ -100,6 +193,57 @@ pub enum Token { Invalid(char), } +pub fn token_to_borrowed_token(token: &Token) -> BorrowedToken<'_> { + match token { + Token::Ident(ref s) => BorrowedToken::Ident(s), + Token::Int(n) => BorrowedToken::Int(*n), + Token::Bool(b) => BorrowedToken::Bool(*b), + Token::Str(ref b) => BorrowedToken::Str(b), + Token::FmtStr(ref b) => BorrowedToken::FmtStr(b), + Token::RawStr(ref b, hashes) => BorrowedToken::RawStr(b, *hashes), + Token::Keyword(k) => BorrowedToken::Keyword(*k), + Token::Attribute(ref a) => BorrowedToken::Attribute(a.clone()), + Token::LineComment(ref s, _style) => BorrowedToken::LineComment(s, *_style), + Token::BlockComment(ref s, _style) => BorrowedToken::BlockComment(s, *_style), + Token::IntType(ref i) => BorrowedToken::IntType(i.clone()), + Token::Less => BorrowedToken::Less, + Token::LessEqual => BorrowedToken::LessEqual, + Token::Greater => BorrowedToken::Greater, + Token::GreaterEqual => BorrowedToken::GreaterEqual, + Token::Equal => BorrowedToken::Equal, + Token::NotEqual => BorrowedToken::NotEqual, + Token::Plus => BorrowedToken::Plus, + Token::Minus => BorrowedToken::Minus, + Token::Star => BorrowedToken::Star, + Token::Slash => BorrowedToken::Slash, + Token::Percent => BorrowedToken::Percent, + Token::Ampersand => BorrowedToken::Ampersand, + Token::Caret => BorrowedToken::Caret, + Token::ShiftLeft => BorrowedToken::ShiftLeft, + Token::ShiftRight => BorrowedToken::ShiftRight, + Token::Dot => BorrowedToken::Dot, + Token::DoubleDot => BorrowedToken::DoubleDot, + Token::LeftParen => BorrowedToken::LeftParen, + Token::RightParen => BorrowedToken::RightParen, + Token::LeftBrace => BorrowedToken::LeftBrace, + Token::RightBrace => BorrowedToken::RightBrace, + Token::LeftBracket => BorrowedToken::LeftBracket, + Token::RightBracket => BorrowedToken::RightBracket, + Token::Arrow => BorrowedToken::Arrow, + Token::Pipe => BorrowedToken::Pipe, + Token::Pound => BorrowedToken::Pound, + Token::Comma => BorrowedToken::Comma, + Token::Colon => BorrowedToken::Colon, + Token::DoubleColon => BorrowedToken::DoubleColon, + Token::Semicolon => BorrowedToken::Semicolon, + Token::Assign => BorrowedToken::Assign, + Token::Bang => BorrowedToken::Bang, + Token::EOF => BorrowedToken::EOF, + Token::Invalid(c) => BorrowedToken::Invalid(*c), + Token::Whitespace(ref s) => BorrowedToken::Whitespace(s), + } +} + #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord)] pub enum DocStyle { Outer, @@ -126,6 +270,12 @@ impl From for Token { } } +impl<'a> From<&'a SpannedToken> for &'a Token { + fn from(spt: &'a SpannedToken) -> Self { + &spt.0.contents + } +} + impl SpannedToken { pub fn new(token: Token, span: Span) -> SpannedToken { SpannedToken(Spanned::from(span, token)) @@ -510,6 +660,7 @@ impl Attribute { Attribute::Secondary(SecondaryAttribute::ContractLibraryMethod) } ["event"] => Attribute::Secondary(SecondaryAttribute::Event), + ["abi", tag] => Attribute::Secondary(SecondaryAttribute::Abi(tag.to_string())), ["export"] => Attribute::Secondary(SecondaryAttribute::Export), ["deprecated", name] => { if !name.starts_with('"') && !name.ends_with('"') { @@ -604,6 +755,7 @@ pub enum SecondaryAttribute { Export, Field(String), Custom(String), + Abi(String), } impl fmt::Display for SecondaryAttribute { @@ -618,6 +770,7 @@ impl fmt::Display for SecondaryAttribute { SecondaryAttribute::Event => write!(f, "#[event]"), SecondaryAttribute::Export => write!(f, "#[export]"), SecondaryAttribute::Field(ref k) => write!(f, 
"#[field({k})]"), + SecondaryAttribute::Abi(ref k) => write!(f, "#[abi({k})]"), } } } @@ -640,7 +793,9 @@ impl AsRef for SecondaryAttribute { match self { SecondaryAttribute::Deprecated(Some(string)) => string, SecondaryAttribute::Deprecated(None) => "", - SecondaryAttribute::Custom(string) | SecondaryAttribute::Field(string) => string, + SecondaryAttribute::Custom(string) + | SecondaryAttribute::Field(string) + | SecondaryAttribute::Abi(string) => string, SecondaryAttribute::ContractLibraryMethod => "", SecondaryAttribute::Event | SecondaryAttribute::Export => "", } @@ -684,6 +839,7 @@ pub enum Keyword { ReturnData, String, Struct, + Super, Trait, Type, Unchecked, @@ -728,6 +884,7 @@ impl fmt::Display for Keyword { Keyword::ReturnData => write!(f, "return_data"), Keyword::String => write!(f, "str"), Keyword::Struct => write!(f, "struct"), + Keyword::Super => write!(f, "super"), Keyword::Trait => write!(f, "trait"), Keyword::Type => write!(f, "type"), Keyword::Unchecked => write!(f, "unchecked"), @@ -775,6 +932,7 @@ impl Keyword { "return_data" => Keyword::ReturnData, "str" => Keyword::String, "struct" => Keyword::Struct, + "super" => Keyword::Super, "trait" => Keyword::Trait, "type" => Keyword::Type, "unchecked" => Keyword::Unchecked, diff --git a/compiler/noirc_frontend/src/lib.rs b/compiler/noirc_frontend/src/lib.rs index 6ce6f4325e4..93d7960faf5 100644 --- a/compiler/noirc_frontend/src/lib.rs +++ b/compiler/noirc_frontend/src/lib.rs @@ -45,7 +45,6 @@ pub mod macros_api { pub use noirc_errors::Span; pub use crate::graph::CrateId; - use crate::hir::def_collector::dc_crate::{UnresolvedFunctions, UnresolvedTraitImpl}; pub use crate::hir::def_collector::errors::MacroError; pub use crate::hir_def::expr::{HirExpression, HirLiteral}; pub use crate::hir_def::stmt::HirStatement; @@ -76,15 +75,6 @@ pub mod macros_api { context: &HirContext, ) -> Result; - // TODO(#4653): generalize this function - fn process_collected_defs( - &self, - _crate_id: &CrateId, - _context: &mut HirContext, - _collected_trait_impls: &[UnresolvedTraitImpl], - _collected_functions: &mut [UnresolvedFunctions], - ) -> Result<(), (MacroError, FileId)>; - /// Function to manipulate the AST after type checking has been completed. /// The AST after type checking has been done is called the HIR. fn process_typed_ast( diff --git a/compiler/noirc_frontend/src/monomorphization/ast.rs b/compiler/noirc_frontend/src/monomorphization/ast.rs index 7d20c2bcfee..d9c33d8604e 100644 --- a/compiler/noirc_frontend/src/monomorphization/ast.rs +++ b/compiler/noirc_frontend/src/monomorphization/ast.rs @@ -92,6 +92,7 @@ pub enum Literal { Slice(ArrayLiteral), Integer(FieldElement, Type, Location), Bool(bool), + Unit, Str(String), FmtStr(String, u64, Box), } diff --git a/compiler/noirc_frontend/src/monomorphization/mod.rs b/compiler/noirc_frontend/src/monomorphization/mod.rs index 6aa0abce152..20b9c0885bf 100644 --- a/compiler/noirc_frontend/src/monomorphization/mod.rs +++ b/compiler/noirc_frontend/src/monomorphization/mod.rs @@ -52,14 +52,13 @@ struct LambdaContext { /// This struct holds the FIFO queue of functions to monomorphize, which is added to /// whenever a new (function, type) combination is encountered. struct Monomorphizer<'interner> { - /// Globals are keyed by their unique ID and expected type so that we can monomorphize - /// a new version of the global for each type. Note that 'global' here means 'globally - /// visible' and thus includes both functions and global variables. 
+ /// Functions are keyed by their unique ID and expected type so that we can monomorphize + /// a new version of the function for each type. /// /// Using nested HashMaps here lets us avoid cloning HirTypes when calling .get() - globals: HashMap>, + functions: HashMap>, - /// Unlike globals, locals are only keyed by their unique ID because they are never + /// Unlike functions, locals are only keyed by their unique ID because they are never /// duplicated during monomorphization. Doing so would allow them to be used polymorphically /// but would also cause them to be re-evaluated which is a performance trap that would /// confuse users. @@ -165,7 +164,7 @@ pub fn monomorphize_debug( impl<'interner> Monomorphizer<'interner> { fn new(interner: &'interner mut NodeInterner, debug_type_tracker: DebugTypeTracker) -> Self { Monomorphizer { - globals: HashMap::new(), + functions: HashMap::new(), locals: HashMap::new(), queue: VecDeque::new(), finished_functions: BTreeMap::new(), @@ -203,7 +202,7 @@ impl<'interner> Monomorphizer<'interner> { trait_method: Option, ) -> Definition { let typ = typ.follow_bindings(); - match self.globals.get(&id).and_then(|inner_map| inner_map.get(&typ)) { + match self.functions.get(&id).and_then(|inner_map| inner_map.get(&typ)) { Some(id) => Definition::Function(*id), None => { // Function has not been monomorphized yet @@ -251,8 +250,8 @@ impl<'interner> Monomorphizer<'interner> { } /// Prerequisite: typ = typ.follow_bindings() - fn define_global(&mut self, id: node_interner::FuncId, typ: HirType, new_id: FuncId) { - self.globals.entry(id).or_default().insert(typ, new_id); + fn define_function(&mut self, id: node_interner::FuncId, typ: HirType, new_id: FuncId) { + self.functions.entry(id).or_default().insert(typ, new_id); } fn compile_main( @@ -284,12 +283,18 @@ impl<'interner> Monomorphizer<'interner> { } let meta = self.interner.function_meta(&f).clone(); - let func_sig = meta.function_signature(); + let mut func_sig = meta.function_signature(); + // Follow the bindings of the function signature for entry points + // which are not `main` such as foldable functions. + for param in func_sig.0.iter_mut() { + param.1 = param.1.follow_bindings(); + } + func_sig.1 = func_sig.1.map(|return_type| return_type.follow_bindings()); let modifiers = self.interner.function_modifiers(&f); let name = self.interner.function_name(&f).to_owned(); - let body_expr_id = *self.interner.function(&f).as_expr(); + let body_expr_id = self.interner.function(&f).as_expr(); let body_return_type = self.interner.id_type(body_expr_id); let return_type = match meta.return_type() { Type::TraitAsType(..) => &body_return_type, @@ -786,7 +791,7 @@ impl<'interner> Monomorphizer<'interner> { }) } - /// A local (ie non-global) ident only + /// A local (ie non-function) ident only fn local_ident( &mut self, ident: &HirIdent, @@ -1280,7 +1285,7 @@ impl<'interner> Monomorphizer<'interner> { trait_method: Option, ) -> FuncId { let new_id = self.next_function_id(); - self.define_global(id, function_type.clone(), new_id); + self.define_function(id, function_type.clone(), new_id); let bindings = self.interner.get_instantiation_bindings(expr_id); let bindings = self.follow_bindings(bindings); @@ -1553,9 +1558,7 @@ impl<'interner> Monomorphizer<'interner> { ast::Expression::Literal(ast::Literal::Integer(0_u128.into(), typ, location)) } ast::Type::Bool => ast::Expression::Literal(ast::Literal::Bool(false)), - // There is no unit literal currently. Replace it with 'false' since it should be ignored - // anyway. 
- ast::Type::Unit => ast::Expression::Literal(ast::Literal::Bool(false)), + ast::Type::Unit => ast::Expression::Literal(ast::Literal::Unit), ast::Type::Array(length, element_type) => { let element = self.zeroed_value_of_type(element_type.as_ref(), location); ast::Expression::Literal(ast::Literal::Array(ast::ArrayLiteral { diff --git a/compiler/noirc_frontend/src/monomorphization/printer.rs b/compiler/noirc_frontend/src/monomorphization/printer.rs index c253bfe7930..ea8f079cc2f 100644 --- a/compiler/noirc_frontend/src/monomorphization/printer.rs +++ b/compiler/noirc_frontend/src/monomorphization/printer.rs @@ -110,6 +110,9 @@ impl AstPrinter { s.fmt(f)?; write!(f, "\"") } + super::ast::Literal::Unit => { + write!(f, "()") + } } } diff --git a/compiler/noirc_frontend/src/node_interner.rs b/compiler/noirc_frontend/src/node_interner.rs index dcfceccdb57..5b375be8d56 100644 --- a/compiler/noirc_frontend/src/node_interner.rs +++ b/compiler/noirc_frontend/src/node_interner.rs @@ -146,6 +146,7 @@ pub struct NodeInterner { // Maps GlobalId -> GlobalInfo // NOTE: currently only used for checking repeat globals and restricting their scope to a module globals: Vec, + global_attributes: HashMap>, next_type_variable_id: std::cell::Cell, @@ -222,10 +223,10 @@ pub enum TraitImplKind { /// /// Additionally, types can define specialized impls with methods of the same name /// as long as these specialized impls do not overlap. E.g. `impl Struct` and `impl Struct` -#[derive(Default, Debug)] +#[derive(Default, Debug, Clone)] pub struct Methods { - direct: Vec, - trait_impl_methods: Vec, + pub direct: Vec, + pub trait_impl_methods: Vec, } /// All the information from a function that is filled out during definition collection rather than @@ -241,6 +242,8 @@ pub struct FunctionModifiers { pub attributes: Attributes, pub is_unconstrained: bool, + + pub is_comptime: bool, } impl FunctionModifiers { @@ -253,6 +256,7 @@ impl FunctionModifiers { visibility: ItemVisibility::Public, attributes: Attributes::empty(), is_unconstrained: false, + is_comptime: false, } } } @@ -480,6 +484,7 @@ impl Default for NodeInterner { field_indices: HashMap::new(), next_type_variable_id: std::cell::Cell::new(0), globals: Vec::new(), + global_attributes: HashMap::new(), struct_methods: HashMap::new(), primitive_methods: HashMap::new(), type_alias_ref: Vec::new(), @@ -647,11 +652,13 @@ impl NodeInterner { local_id: LocalModuleId, let_statement: StmtId, file: FileId, + attributes: Vec, ) -> GlobalId { let id = GlobalId(self.globals.len()); let location = Location::new(ident.span(), file); let name = ident.to_string(); let definition_id = self.push_definition(name, false, DefinitionKind::Global(id), location); + self.globals.push(GlobalInfo { id, definition_id, @@ -660,6 +667,7 @@ impl NodeInterner { let_statement, location, }); + self.global_attributes.insert(id, attributes); id } @@ -673,9 +681,10 @@ impl NodeInterner { name: Ident, local_id: LocalModuleId, file: FileId, + attributes: Vec, ) -> GlobalId { let statement = self.push_stmt(HirStatement::Error); - self.push_global(name, local_id, statement, file) + self.push_global(name, local_id, statement, file, attributes) } /// Intern an empty function. 
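Aside (not part of the diff): the interner changes above attach a `Vec<SecondaryAttribute>` to every global at `push_global` time and expose it through a side map keyed by `GlobalId`, which the def-map code then uses to group `#[abi(tag)]` globals into `ContractOutputs`. A self-contained sketch of that flow with toy types; `Interner`, `Attribute`, and `abi_globals` here are illustrative assumptions, not the compiler's API.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct GlobalId(usize);

#[derive(Debug)]
enum Attribute {
    Abi(String),
    Other,
}

#[derive(Default)]
struct Interner {
    globals: Vec<GlobalId>,
    // Attributes live in a side map keyed by the global's id.
    global_attributes: HashMap<GlobalId, Vec<Attribute>>,
}

impl Interner {
    fn push_global(&mut self, attributes: Vec<Attribute>) -> GlobalId {
        let id = GlobalId(self.globals.len());
        self.globals.push(id);
        self.global_attributes.insert(id, attributes);
        id
    }

    // Group globals by their #[abi(tag)] tag, mirroring the contract-outputs collection.
    fn abi_globals(&self) -> HashMap<String, Vec<GlobalId>> {
        let mut outputs: HashMap<String, Vec<GlobalId>> = HashMap::new();
        for (id, attrs) in &self.global_attributes {
            for attr in attrs {
                if let Attribute::Abi(tag) = attr {
                    outputs.entry(tag.clone()).or_default().push(*id);
                }
            }
        }
        outputs
    }
}

fn main() {
    let mut interner = Interner::default();
    let a = interner.push_global(vec![Attribute::Abi("storage".to_string())]);
    let _b = interner.push_global(vec![Attribute::Other]);
    assert_eq!(interner.abi_globals()["storage"], vec![a]);
}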
@@ -753,6 +762,7 @@ impl NodeInterner { visibility: function.visibility, attributes: function.attributes.clone(), is_unconstrained: function.is_unconstrained, + is_comptime: function.is_comptime, }; self.push_function_definition(id, modifiers, module, location) } @@ -838,6 +848,10 @@ impl NodeInterner { &self.struct_attributes[struct_id] } + pub fn global_attributes(&self, global_id: &GlobalId) -> &[SecondaryAttribute] { + &self.global_attributes[global_id] + } + /// Returns the interned statement corresponding to `stmt_id` pub fn statement(&self, stmt_id: &StmtId) -> HirStatement { let def = @@ -907,10 +921,32 @@ impl NodeInterner { self.id_location(expr_id) } + pub fn statement_span(&self, stmt_id: &StmtId) -> Span { + self.id_location(stmt_id).span + } + pub fn get_struct(&self, id: StructId) -> Shared { self.structs[&id].clone() } + pub fn get_struct_methods(&self, id: StructId) -> Vec { + self.struct_methods + .keys() + .filter_map(|(key_id, name)| { + if key_id == &id { + Some( + self.struct_methods + .get(&(*key_id, name.clone())) + .expect("get_struct_methods given invalid StructId") + .clone(), + ) + } else { + None + } + }) + .collect() + } + pub fn get_trait(&self, id: TraitId) -> &Trait { &self.traits[&id] } diff --git a/compiler/noirc_frontend/src/noir_parser.lalrpop b/compiler/noirc_frontend/src/noir_parser.lalrpop new file mode 100644 index 00000000000..c8d293fb72f --- /dev/null +++ b/compiler/noirc_frontend/src/noir_parser.lalrpop @@ -0,0 +1,164 @@ +use noirc_errors::Span; + +use crate::lexer::token::BorrowedToken; +use crate::lexer::token as noir_token; +use crate::lexer::errors::LexerErrorKind; +use crate::parser::TopLevelStatement; +use crate::{Ident, Path, PathKind, UseTree, UseTreeKind}; + +use lalrpop_util::ErrorRecovery; + +grammar<'input, 'err>(input: &'input str, errors: &'err mut [ErrorRecovery, &'static str>]); + +extern { + type Location = usize; + + type Error = LexerErrorKind; + + // NOTE: each token needs a terminal defined + enum BorrowedToken<'input> { + string => BorrowedToken::Str(<&'input str>), + ident => BorrowedToken::Ident(<&'input str>), + + // symbols + "<" => BorrowedToken::Less, + "<=" => BorrowedToken::LessEqual, + ">" => BorrowedToken::Greater, + ">=" => BorrowedToken::GreaterEqual, + "==" => BorrowedToken::Equal, + "!=" => BorrowedToken::NotEqual, + "+" => BorrowedToken::Plus, + "-" => BorrowedToken::Minus, + "*" => BorrowedToken::Star, + "/" => BorrowedToken::Slash, + "%" => BorrowedToken::Percent, + "&" => BorrowedToken::Ampersand, + "^" => BorrowedToken::Caret, + "<<" => BorrowedToken::ShiftLeft, + ">>" => BorrowedToken::ShiftRight, + "." => BorrowedToken::Dot, + ".." => BorrowedToken::DoubleDot, + "(" => BorrowedToken::LeftParen, + ")" => BorrowedToken::RightParen, + "{" => BorrowedToken::LeftBrace, + "}" => BorrowedToken::RightBrace, + "[" => BorrowedToken::LeftBracket, + "]" => BorrowedToken::RightBracket, + "->" => BorrowedToken::Arrow, + "|" => BorrowedToken::Pipe, + "#" => BorrowedToken::Pound, + "," => BorrowedToken::Comma, + ":" => BorrowedToken::Colon, + "::" => BorrowedToken::DoubleColon, + ";" => BorrowedToken::Semicolon, + "!" 
=> BorrowedToken::Bang, + "=" => BorrowedToken::Assign, + // keywords + "as" => BorrowedToken::Keyword(noir_token::Keyword::As), + "assert" => BorrowedToken::Keyword(noir_token::Keyword::Assert), + "assert_eq" => BorrowedToken::Keyword(noir_token::Keyword::AssertEq), + "bool" => BorrowedToken::Keyword(noir_token::Keyword::Bool), + "break" => BorrowedToken::Keyword(noir_token::Keyword::Break), + "call_data" => BorrowedToken::Keyword(noir_token::Keyword::CallData), + "char" => BorrowedToken::Keyword(noir_token::Keyword::Char), + "comptime" => BorrowedToken::Keyword(noir_token::Keyword::CompTime), + "constrain" => BorrowedToken::Keyword(noir_token::Keyword::Constrain), + "continue" => BorrowedToken::Keyword(noir_token::Keyword::Continue), + "contract" => BorrowedToken::Keyword(noir_token::Keyword::Contract), + "crate" => BorrowedToken::Keyword(noir_token::Keyword::Crate), + "dep" => BorrowedToken::Keyword(noir_token::Keyword::Dep), + "distinct" => BorrowedToken::Keyword(noir_token::Keyword::Distinct), + "else" => BorrowedToken::Keyword(noir_token::Keyword::Else), + "Field" => BorrowedToken::Keyword(noir_token::Keyword::Field), + "fn" => BorrowedToken::Keyword(noir_token::Keyword::Fn), + "for" => BorrowedToken::Keyword(noir_token::Keyword::For), + "fmtstr" => BorrowedToken::Keyword(noir_token::Keyword::FormatString), + "global" => BorrowedToken::Keyword(noir_token::Keyword::Global), + "if" => BorrowedToken::Keyword(noir_token::Keyword::If), + "impl" => BorrowedToken::Keyword(noir_token::Keyword::Impl), + "in" => BorrowedToken::Keyword(noir_token::Keyword::In), + "let" => BorrowedToken::Keyword(noir_token::Keyword::Let), + "mod" => BorrowedToken::Keyword(noir_token::Keyword::Mod), + "mut" => BorrowedToken::Keyword(noir_token::Keyword::Mut), + "pub" => BorrowedToken::Keyword(noir_token::Keyword::Pub), + "quote" => BorrowedToken::Keyword(noir_token::Keyword::Quote), + "return" => BorrowedToken::Keyword(noir_token::Keyword::Return), + "return_data" => BorrowedToken::Keyword(noir_token::Keyword::ReturnData), + "str" => BorrowedToken::Keyword(noir_token::Keyword::String), + "struct" => BorrowedToken::Keyword(noir_token::Keyword::Struct), + "trait" => BorrowedToken::Keyword(noir_token::Keyword::Trait), + "type" => BorrowedToken::Keyword(noir_token::Keyword::Type), + "unchecked" => BorrowedToken::Keyword(noir_token::Keyword::Unchecked), + "unconstrained" => BorrowedToken::Keyword(noir_token::Keyword::Unconstrained), + "use" => BorrowedToken::Keyword(noir_token::Keyword::Use), + "where" => BorrowedToken::Keyword(noir_token::Keyword::Where), + "while" => BorrowedToken::Keyword(noir_token::Keyword::While), + // bool + "true" => BorrowedToken::Bool(true), + "false" => BorrowedToken::Bool(false), + + r"[\t\r\n ]+" => BorrowedToken::Whitespace(_), + + EOF => BorrowedToken::EOF, + } +} + +pub(crate) TopLevelStatement: TopLevelStatement = { + "use" r"[\t\r\n ]+" ";" EOF => { + TopLevelStatement::Import(use_tree) + } +} + +UseTree: UseTree = { + // path::to::ident as SomeAlias + => { + let ident = prefix.pop(); + let kind = UseTreeKind::Path(ident, alias); + UseTree { prefix, kind } + }, +} + +pub(crate) Path: Path = { + "crate" "::" => { + let kind = PathKind::Crate; + let span = Span::from(lo as u32..hi as u32); + Path { segments, kind, span } + }, + + "dep" "::" => { + let kind = PathKind::Dep; + let span = Span::from(lo as u32..hi as u32); + Path { segments, kind, span } + }, + + => { + segments.insert(0, id); + let kind = PathKind::Plain; + let span = Span::from(lo as u32..hi as u32); + Path { segments, 
kind, span } + }, +} + +PathSegments: Vec = { + )*> => { + segments + } +} + +Alias: Ident = { + r"[\t\r\n ]+" "as" r"[\t\r\n ]+" => <>, +} + +Ident: Ident = { + => { + let token = noir_token::Token::Ident(i.to_string()); + let span = Span::from(lo as u32..hi as u32); + Ident::from_token(token, span) + }, +} + +Bool: BorrowedToken<'input> = { + "true" => BorrowedToken::Bool(true), + "false" => BorrowedToken::Bool(false), +}; + diff --git a/compiler/noirc_frontend/src/parser/errors.rs b/compiler/noirc_frontend/src/parser/errors.rs index 43a1f96f13f..895d4e07bbd 100644 --- a/compiler/noirc_frontend/src/parser/errors.rs +++ b/compiler/noirc_frontend/src/parser/errors.rs @@ -110,25 +110,31 @@ impl ParserError { impl std::fmt::Display for ParserError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let reason_str: String = if self.reason.is_none() { + "".to_string() + } else { + format!("\nreason: {}", Diagnostic::from(self.clone())) + }; let mut expected = vecmap(&self.expected_tokens, ToString::to_string); expected.append(&mut vecmap(&self.expected_labels, |label| format!("{label}"))); if expected.is_empty() { - write!(f, "Unexpected {} in input", self.found) + write!(f, "Unexpected {} in input{}", self.found, reason_str) } else if expected.len() == 1 { let first = expected.first().unwrap(); let vowel = "aeiou".contains(first.chars().next().unwrap()); write!( f, - "Expected a{} {} but found {}", + "Expected a{} {} but found {}{}", if vowel { "n" } else { "" }, first, - self.found + self.found, + reason_str ) } else { let expected = expected.iter().map(ToString::to_string).collect::>().join(", "); - write!(f, "Unexpected {}, expected one of {}", self.found, expected) + write!(f, "Unexpected {}, expected one of {}{}", self.found, expected, reason_str) } } } diff --git a/compiler/noirc_frontend/src/parser/mod.rs b/compiler/noirc_frontend/src/parser/mod.rs index ea96dee8a47..80c5f47f07b 100644 --- a/compiler/noirc_frontend/src/parser/mod.rs +++ b/compiler/noirc_frontend/src/parser/mod.rs @@ -97,14 +97,14 @@ where /// Sequence the two parsers. /// Fails if the first parser fails, otherwise forces /// the second parser to succeed while logging any errors. 
-fn then_commit<'a, P1, P2, T1, T2: 'a>( +fn then_commit<'a, P1, P2, T1, T2>( first_parser: P1, second_parser: P2, ) -> impl NoirParser<(T1, T2)> + 'a where P1: NoirParser + 'a, P2: NoirParser + 'a, - T2: Clone + Recoverable, + T2: Clone + Recoverable + 'a, { let second_parser = skip_then_retry_until(second_parser) .map_with_span(|option, span| option.unwrap_or_else(|| Recoverable::error(span))); @@ -112,14 +112,15 @@ where first_parser.then(second_parser) } -fn then_commit_ignore<'a, P1, P2, T1: 'a, T2: 'a>( +fn then_commit_ignore<'a, P1, P2, T1, T2>( first_parser: P1, second_parser: P2, ) -> impl NoirParser + 'a where P1: NoirParser + 'a, P2: NoirParser + 'a, - T2: Clone, + T1: 'a, + T2: Clone + 'a, { let second_parser = skip_then_retry_until(second_parser); first_parser.then_ignore(second_parser) @@ -140,10 +141,10 @@ where first_parser.ignore_then(second_parser) } -fn skip_then_retry_until<'a, P, T: 'a>(parser: P) -> impl NoirParser> + 'a +fn skip_then_retry_until<'a, P, T>(parser: P) -> impl NoirParser> + 'a where P: NoirParser + 'a, - T: Clone, + T: Clone + 'a, { let terminators = [ Token::EOF, diff --git a/compiler/noirc_frontend/src/parser/parser.rs b/compiler/noirc_frontend/src/parser/parser.rs index cdfa16400ae..5706c3ef12f 100644 --- a/compiler/noirc_frontend/src/parser/parser.rs +++ b/compiler/noirc_frontend/src/parser/parser.rs @@ -35,7 +35,7 @@ use super::{spanned, Item, ItemKind}; use crate::ast::{ Expression, ExpressionKind, LetStatement, StatementKind, UnresolvedType, UnresolvedTypeData, }; -use crate::lexer::Lexer; +use crate::lexer::{lexer::from_spanned_token_result, Lexer}; use crate::parser::{force, ignore_then_commit, statement_recovery}; use crate::token::{Keyword, Token, TokenKind}; use crate::{ @@ -47,6 +47,7 @@ use crate::{ use chumsky::prelude::*; use iter_extended::vecmap; +use lalrpop_util::lalrpop_mod; use noirc_errors::{Span, Spanned}; mod assertion; @@ -59,6 +60,9 @@ mod primitives; mod structs; mod traits; +// synthesized by LALRPOP +lalrpop_mod!(pub noir_parser); + #[cfg(test)] mod test_helpers; @@ -77,8 +81,79 @@ pub fn parse_program(source_program: &str) -> (ParsedModule, Vec) { let (module, mut parsing_errors) = program().parse_recovery_verbose(tokens); parsing_errors.extend(lexing_errors.into_iter().map(Into::into)); + let parsed_module = module.unwrap_or(ParsedModule { items: vec![] }); + + if cfg!(feature = "experimental_parser") { + for parsed_item in &parsed_module.items { + if lalrpop_parser_supports_kind(&parsed_item.kind) { + match &parsed_item.kind { + ItemKind::Import(parsed_use_tree) => { + prototype_parse_use_tree(Some(parsed_use_tree), source_program); + } + // other kinds prevented by lalrpop_parser_supports_kind + _ => unreachable!(), + } + } + } + } + (parsed_module, parsing_errors) +} + +fn prototype_parse_use_tree(expected_use_tree_opt: Option<&UseTree>, input: &str) { + // TODO(https://github.com/noir-lang/noir/issues/4777): currently skipping + // recursive use trees, e.g. 
"use std::{foo, bar}" + if input.contains('{') { + return; + } + + let mut lexer = Lexer::new(input); + lexer = lexer.skip_whitespaces(false); + let mut errors = Vec::new(); + + // NOTE: this is a hack to get the references working + // => this likely means that we'll want to propagate the <'input> lifetime further into Token + let lexer_result = lexer.collect::>(); + let referenced_lexer_result = lexer_result.iter().map(from_spanned_token_result); + + let calculated = noir_parser::TopLevelStatementParser::new().parse( + input, + &mut errors, + referenced_lexer_result, + ); + + if let Some(expected_use_tree) = expected_use_tree_opt { + assert!( + calculated.is_ok(), + "calculated not Ok(_): {:?}\n\nlexer: {:?}\n\ninput: {:?}", + calculated, + lexer_result, + input + ); + + match calculated.unwrap() { + TopLevelStatement::Import(parsed_use_tree) => { + assert_eq!(expected_use_tree, &parsed_use_tree); + } + unexpected_calculated => { + panic!( + "expected a TopLevelStatement::Import, but found: {:?}", + unexpected_calculated + ) + } + } + } else { + assert!( + calculated.is_err(), + "calculated not Err(_): {:?}\n\nlexer: {:?}\n\ninput: {:?}", + calculated, + lexer_result, + input + ); + } +} - (module.unwrap_or(ParsedModule { items: vec![] }), parsing_errors) +fn lalrpop_parser_supports_kind(kind: &ItemKind) -> bool { + matches!(kind, ItemKind::Import(_)) } /// program: module EOF @@ -158,14 +233,17 @@ fn implementation() -> impl NoirParser { /// global_declaration: 'global' ident global_type_annotation '=' literal fn global_declaration() -> impl NoirParser { - let p = ignore_then_commit( - keyword(Keyword::Global).labelled(ParsingRuleLabel::Global), - ident().map(Pattern::Identifier), - ); + let p = attributes::attributes() + .then_ignore(keyword(Keyword::Global).labelled(ParsingRuleLabel::Global)) + .then(ident().map(Pattern::Identifier)); let p = then_commit(p, optional_type_annotation()); let p = then_commit_ignore(p, just(Token::Assign)); let p = then_commit(p, expression()); - p.map(LetStatement::new_let).map(TopLevelStatement::Global) + p.validate(|(((attributes, pattern), r#type), expression), span, emit| { + let global_attributes = attributes::validate_secondary_attributes(attributes, span, emit); + LetStatement { pattern, r#type, expression, attributes: global_attributes } + }) + .map(TopLevelStatement::Global) } /// submodule: 'mod' ident '{' module '}' @@ -1509,33 +1587,53 @@ mod test { #[test] fn parse_use() { - parse_all( - use_statement(), - vec![ - "use std::hash", - "use std", - "use foo::bar as hello", - "use bar as bar", - "use foo::{}", - "use foo::{bar,}", - "use foo::{bar, hello}", - "use foo::{bar as bar2, hello}", - "use foo::{bar as bar2, hello::{foo}, nested::{foo, bar}}", - "use dep::{std::println, bar::baz}", - ], - ); + let valid_use_statements = [ + "use std::hash", + "use std", + "use foo::bar as hello", + "use bar as bar", + "use foo::{}", + "use foo::{bar,}", + "use foo::{bar, hello}", + "use foo::{bar as bar2, hello}", + "use foo::{bar as bar2, hello::{foo}, nested::{foo, bar}}", + "use dep::{std::println, bar::baz}", + ]; - parse_all_failing( - use_statement(), - vec![ - "use std as ;", - "use foobar as as;", - "use hello:: as foo;", - "use foo bar::baz", - "use foo bar::{baz}", - "use foo::{,}", - ], - ); + let invalid_use_statements = [ + "use std as ;", + "use foobar as as;", + "use hello:: as foo;", + "use foo bar::baz", + "use foo bar::{baz}", + "use foo::{,}", + ]; + + let use_statements = valid_use_statements + .into_iter() + .map(|valid_str| 
(valid_str, true)) + .chain(invalid_use_statements.into_iter().map(|invalid_str| (invalid_str, false))); + + for (use_statement_str, expect_valid) in use_statements { + let mut use_statement_str = use_statement_str.to_string(); + let expected_use_statement = if expect_valid { + let (result_opt, _diagnostics) = + parse_recover(&use_statement(), &use_statement_str); + use_statement_str.push(';'); + match result_opt.unwrap() { + TopLevelStatement::Import(expected_use_statement) => { + Some(expected_use_statement) + } + _ => unreachable!(), + } + } else { + let result = parse_with(&use_statement(), &use_statement_str); + assert!(result.is_err()); + None + }; + + prototype_parse_use_tree(expected_use_statement.as_ref(), &use_statement_str); + } } #[test] diff --git a/compiler/noirc_frontend/src/parser/parser/attributes.rs b/compiler/noirc_frontend/src/parser/parser/attributes.rs index 4b256a95c8b..47add6f82e0 100644 --- a/compiler/noirc_frontend/src/parser/parser/attributes.rs +++ b/compiler/noirc_frontend/src/parser/parser/attributes.rs @@ -2,6 +2,7 @@ use chumsky::Parser; use noirc_errors::Span; use crate::{ + macros_api::SecondaryAttribute, parser::{NoirParser, ParserError, ParserErrorReason}, token::{Attribute, Attributes, Token, TokenKind}, }; @@ -44,3 +45,25 @@ pub(super) fn validate_attributes( Attributes { function: primary, secondary } } + +pub(super) fn validate_secondary_attributes( + attributes: Vec, + span: Span, + emit: &mut dyn FnMut(ParserError), +) -> Vec { + let mut struct_attributes = vec![]; + + for attribute in attributes { + match attribute { + Attribute::Function(..) => { + emit(ParserError::with_reason( + ParserErrorReason::NoFunctionAttributesAllowedOnStruct, + span, + )); + } + Attribute::Secondary(attr) => struct_attributes.push(attr), + } + } + + struct_attributes +} diff --git a/compiler/noirc_frontend/src/parser/parser/function.rs b/compiler/noirc_frontend/src/parser/parser/function.rs index 06e1a958eb1..18f17065038 100644 --- a/compiler/noirc_frontend/src/parser/parser/function.rs +++ b/compiler/noirc_frontend/src/parser/parser/function.rs @@ -36,6 +36,7 @@ pub(super) fn function_definition(allow_self: bool) -> impl NoirParser impl NoirParser { .then(function::generics()) .then(fields) .validate(|(((raw_attributes, name), generics), fields), span, emit| { - let attributes = validate_struct_attributes(raw_attributes, span, emit); + let attributes = validate_secondary_attributes(raw_attributes, span, emit); TopLevelStatement::Struct(NoirStruct { name, attributes, generics, fields, span }) }) } @@ -48,28 +46,6 @@ fn struct_fields() -> impl NoirParser> { .allow_trailing() } -fn validate_struct_attributes( - attributes: Vec, - span: Span, - emit: &mut dyn FnMut(ParserError), -) -> Vec { - let mut struct_attributes = vec![]; - - for attribute in attributes { - match attribute { - Attribute::Function(..) 
=> { - emit(ParserError::with_reason( - ParserErrorReason::NoFunctionAttributesAllowedOnStruct, - span, - )); - } - Attribute::Secondary(attr) => struct_attributes.push(attr), - } - } - - struct_attributes -} - #[cfg(test)] mod test { use super::*; diff --git a/compiler/wasm/package.json b/compiler/wasm/package.json index 6d11a5ba5c8..3bcf60afbe8 100644 --- a/compiler/wasm/package.json +++ b/compiler/wasm/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.26.0", + "version": "0.27.0", "license": "(MIT OR Apache-2.0)", "main": "dist/main.js", "types": "./dist/types/src/index.d.cts", diff --git a/compiler/wasm/src/compile.rs b/compiler/wasm/src/compile.rs index 9e6fca1126e..de157a1fe20 100644 --- a/compiler/wasm/src/compile.rs +++ b/compiler/wasm/src/compile.rs @@ -30,11 +30,16 @@ export type DependencyGraph = { library_dependencies: Readonly>; } +export type ContractOutputsArtifact = { + structs: Record>; + globals: Record>; +} + export type ContractArtifact = { noir_version: string; name: string; functions: Array; - events: Array; + outputs: ContractOutputsArtifact; file_map: Record; }; @@ -218,7 +223,7 @@ pub fn compile_contract( noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), name: optimized_contract.name, functions, - events: optimized_contract.events, + outputs: optimized_contract.outputs.into(), file_map: optimized_contract.file_map, }; diff --git a/compiler/wasm/src/compile_new.rs b/compiler/wasm/src/compile_new.rs index d6b382f669f..c187fe7f3de 100644 --- a/compiler/wasm/src/compile_new.rs +++ b/compiler/wasm/src/compile_new.rs @@ -146,7 +146,7 @@ impl CompilerContext { noir_version: String::from(NOIR_ARTIFACT_VERSION_STRING), name: optimized_contract.name, functions, - events: optimized_contract.events, + outputs: optimized_contract.outputs.into(), file_map: optimized_contract.file_map, }; diff --git a/compiler/wasm/src/types/noir_artifact.ts b/compiler/wasm/src/types/noir_artifact.ts index 935c99043da..6ecc3ccd56f 100644 --- a/compiler/wasm/src/types/noir_artifact.ts +++ b/compiler/wasm/src/types/noir_artifact.ts @@ -1,35 +1,55 @@ import { Abi, AbiType } from '@noir-lang/types'; /** - * A named type. + * A basic value. */ -export interface ABIVariable { +export interface BasicValue { /** - * The name of the variable. + * The kind of the value. */ - name: string; - /** - * The type of the variable. - */ - type: AbiType; + kind: T; + value: V; } /** - * A contract event. + * An exported value. */ -export interface EventAbi { +export type AbiValue = + | BasicValue<'boolean', boolean> + | BasicValue<'string', string> + | BasicValue<'array', AbiValue[]> + | TupleValue + | IntegerValue + | StructValue; + +export type TypedStructFieldValue = { name: string; value: T }; + +export interface StructValue { + kind: 'struct'; + fields: TypedStructFieldValue[]; +} + +export interface TupleValue { + kind: 'tuple'; + fields: AbiValue[]; +} + +export interface IntegerValue extends BasicValue<'integer', string> { + sign: boolean; +} + +/** + * A named type. + */ +export interface ABIVariable { /** - * The event name. + * The name of the variable. */ name: string; /** - * Fully qualified name of the event. - */ - path: string; - /** - * The fields of the event. + * The type of the variable. */ - fields: ABIVariable[]; + type: AbiType; } /** @@ -60,8 +80,11 @@ export interface ContractArtifact { noir_version: string; /** The functions of the contract. 
*/ functions: NoirFunctionEntry[]; - /** The events of the contract */ - events: EventAbi[]; + + outputs: { + structs: Record; + globals: Record; + }; /** The map of file ID to the source code and path of the file. */ file_map: DebugFileMap; } @@ -128,6 +151,16 @@ export interface DebugInfo { locations: Record; } +/** + * The debug information for a given program. + */ +export interface ProgramDebugInfo { + /** + * An array that maps to each function of a program. + */ + debug_infos: Array; +} + /** * Maps a file ID to its metadata for debugging purposes. */ diff --git a/compiler/wasm/test/compiler/shared/compile.test.ts b/compiler/wasm/test/compiler/shared/compile.test.ts index 52cef14968b..f9e37530cbc 100644 --- a/compiler/wasm/test/compiler/shared/compile.test.ts +++ b/compiler/wasm/test/compiler/shared/compile.test.ts @@ -5,6 +5,7 @@ import { ContractCompilationArtifacts, DebugFileMap, DebugInfo, + ProgramDebugInfo, NoirFunctionEntry, ProgramArtifact, ProgramCompilationArtifacts, @@ -15,7 +16,7 @@ export function shouldCompileProgramIdentically( expect: typeof Expect, timeout = 5000, ) { - it('both nargo and noir_wasm should compile identically', async () => { + it('both nargo and noir_wasm should compile program identically', async () => { // Compile! const { nargoArtifact, noirWasmArtifact } = await compileFn(); @@ -51,7 +52,7 @@ export function shouldCompileContractIdentically( expect: typeof Expect, timeout = 5000, ) { - it('both nargo and noir_wasm should compile identically', async () => { + it('both nargo and noir_wasm should compile contract identically', async () => { // Compile! const { nargoArtifact, noirWasmArtifact } = await compileFn(); @@ -90,7 +91,7 @@ function extractDebugInfos(fns: NoirFunctionEntry[]) { return fns.map((fn) => { const debugSymbols = inflateDebugSymbols(fn.debug_symbols); delete (fn as Partial).debug_symbols; - clearFileIdentifiers(debugSymbols); + clearFileIdentifiersProgram(debugSymbols); return debugSymbols; }); } @@ -113,6 +114,12 @@ function deleteContractDebugMetadata(contract: ContractArtifact) { return [extractDebugInfos(contract.functions), fileMap]; } +function clearFileIdentifiersProgram(debugSymbols: ProgramDebugInfo) { + debugSymbols.debug_infos.map((debug_info) => { + clearFileIdentifiers(debug_info); + }); +} + /** Clears file identifiers from a set of debug symbols. 
*/ function clearFileIdentifiers(debugSymbols: DebugInfo) { for (const loc of Object.values(debugSymbols.locations)) { diff --git a/cspell.json b/cspell.json index 16de9757fb8..bf3040265c2 100644 --- a/cspell.json +++ b/cspell.json @@ -113,6 +113,7 @@ "Maddiaa", "mathbb", "memfs", + "memset", "merkle", "metas", "minreq", diff --git a/deny.toml b/deny.toml index eff233687e8..db7e53cad24 100644 --- a/deny.toml +++ b/deny.toml @@ -58,7 +58,7 @@ allow = [ # bitmaps 2.1.0, im 15.1.0 "MPL-2.0", # Boost Software License - "BSL-1.0", + "BSL-1.0" ] # Allow 1 or more licenses on a per-crate basis, so that particular licenses @@ -70,6 +70,7 @@ exceptions = [ { allow = ["CC0-1.0"], name = "more-asserts" }, { allow = ["CC0-1.0"], name = "jsonrpc" }, { allow = ["CC0-1.0"], name = "notify" }, + { allow = ["CC0-1.0"], name = "tiny-keccak" }, { allow = ["MPL-2.0"], name = "sized-chunks" }, { allow = ["MPL-2.0"], name = "webpki-roots" }, diff --git a/docs/docs/how_to/debugger/_category_.json b/docs/docs/how_to/debugger/_category_.json new file mode 100644 index 00000000000..cc2cbb1c253 --- /dev/null +++ b/docs/docs/how_to/debugger/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Debugging", + "position": 5, + "collapsible": true, + "collapsed": true +} diff --git a/docs/docs/how_to/debugger/debugging_with_the_repl.md b/docs/docs/how_to/debugger/debugging_with_the_repl.md new file mode 100644 index 00000000000..09e5bae68ad --- /dev/null +++ b/docs/docs/how_to/debugger/debugging_with_the_repl.md @@ -0,0 +1,164 @@ +--- +title: Using the REPL Debugger +description: + Step by step guide on how to debug your Noir circuits with the REPL Debugger. +keywords: + [ + Nargo, + Noir CLI, + Noir Debugger, + REPL, + ] +sidebar_position: 1 +--- + +#### Pre-requisites + +In order to use the REPL debugger, first you need to install recent enough versions of Nargo and vscode-noir. + +## Debugging a simple circuit + +Let's debug a simple circuit: + +```rust +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` + +To start the REPL debugger, using a terminal, go to a Noir circuit's home directory. Then: + +`$ nargo debug` + +You should be seeing this in your terminal: + +``` +[main] Starting debugger +At ~/noir-examples/recursion/circuits/main/src/main.nr:1:9 + 1 -> fn main(x : Field, y : pub Field) { + 2 assert(x != y); + 3 } +> +``` + +The debugger displays the current Noir code location, and it is now waiting for us to drive it. + +Let's first take a look at the available commands. For that we'll use the `help` command. 
+ +``` +> help +Available commands: + + opcodes display ACIR opcodes + into step into to the next opcode + next step until a new source location is reached + out step until a new source location is reached + and the current stack frame is finished + break LOCATION:OpcodeLocation add a breakpoint at an opcode location + over step until a new source location is reached + without diving into function calls + restart restart the debugging session + delete LOCATION:OpcodeLocation delete breakpoint at an opcode location + witness show witness map + witness index:u32 display a single witness from the witness map + witness index:u32 value:String update a witness with the given value + memset index:usize value:String update a memory cell with the given + value + continue continue execution until the end of the + program + vars show variable values available at this point + in execution + stacktrace display the current stack trace + memory show memory (valid when executing unconstrained code) + step step to the next ACIR opcode + +Other commands: + + help Show this help message + quit Quit repl + +``` + +Some commands operate only for unconstrained functions, such as `memory` and `memset`. If you try to use them while execution is paused at an ACIR opcode, the debugger will simply inform you that you are not executing unconstrained code: + +``` +> memory +Unconstrained VM memory not available +> +``` + +Before continuing, we can take a look at the initial witness map: + +``` +> witness +_0 = 1 +_1 = 2 +> +``` + +Cool, since `x==1`, `y==2`, and we want to check that `x != y`, our circuit should succeed. At this point we could intervene and use the witness setter command to change one of the witnesses. Let's set `y=3`, then back to 2, so we don't affect the expected result: + +``` +> witness +_0 = 1 +_1 = 2 +> witness 1 3 +_1 = 3 +> witness +_0 = 1 +_1 = 3 +> witness 1 2 +_1 = 2 +> witness +_0 = 1 +_1 = 2 +> +``` + +Now we can inspect the current state of local variables. For that we use the `vars` command. + +``` +> vars +> +``` + +We currently have no vars in context, since we are at the entry point of the program. Let's use `next` to execute until the next point in the program. + +``` +> vars +> next +At ~/noir-examples/recursion/circuits/main/src/main.nr:1:20 + 1 -> fn main(x : Field, y : pub Field) { + 2 assert(x != y); + 3 } +> vars +x:Field = 0x01 +``` + +As a result of stepping, the variable `x`, whose initial value comes from the witness map, is now in context and returned by `vars`. + +``` +> next + 1 fn main(x : Field, y : pub Field) { + 2 -> assert(x != y); + 3 } +> vars +y:Field = 0x02 +x:Field = 0x01 +``` + +Stepping again we can finally see both variables and their values. And now we can see that the next assertion should succeed. + +Let's continue to the end: + +``` +> continue +(Continuing execution...) +Finished execution +> q +[main] Circuit witness successfully solved +``` + +Upon quitting the debugger after a solved circuit, the resulting circuit witness gets saved, equivalent to what would happen if we had run the same circuit with `nargo execute`. + +We just went through the basics of debugging using Noir REPL debugger. For a comprehensive reference, check out [the reference page](../../reference/debugger/debugger_repl.md). 
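The `memory` and `memset` commands listed in the help output above only operate while the debugger is executing unconstrained code; on the guide's two-line circuit they will always report `Unconstrained VM memory not available`. A minimal sketch — not part of this changeset, just an assumed extension of the same example — adds an unconstrained helper so those commands have state to inspect:

```rust
// Stepping `into` the BRILLIG opcode generated for this call enters
// unconstrained code, where `memory` and `memset` become available.
unconstrained fn sum(xs: [Field; 3]) -> Field {
    let mut total = 0;
    for i in 0..3 {
        total += xs[i];
    }
    total
}

fn main(x: Field, y: pub Field) {
    assert(x != y);
    assert(sum([x, y, 1]) != 0);
}
```

With the guide's witness values (`x = 1`, `y = 2`) the extra assertion still holds, and once execution is inside `sum` the `vars`, `memory`, and `memset` commands behave as described in the reference page linked above.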
diff --git a/docs/docs/how_to/debugger/debugging_with_vs_code.md b/docs/docs/how_to/debugger/debugging_with_vs_code.md new file mode 100644 index 00000000000..a5858c1a5eb --- /dev/null +++ b/docs/docs/how_to/debugger/debugging_with_vs_code.md @@ -0,0 +1,68 @@ +--- +title: Using the VS Code Debugger +description: + Step by step guide on how to debug your Noir circuits with the VS Code Debugger configuration and features. +keywords: + [ + Nargo, + Noir CLI, + Noir Debugger, + VS Code, + IDE, + ] +sidebar_position: 0 +--- + +This guide will show you how to use VS Code with the vscode-noir extension to debug a Noir project. + +#### Pre-requisites + +- Nargo +- vscode-noir +- A Noir project with a `Nargo.toml`, `Prover.toml` and at least one Noir (`.nr`) containing an entry point function (typically `main`). + +## Running the debugger + +The easiest way to start debugging is to open the file you want to debug, and press `F5`. This will cause the debugger to launch, using your `Prover.toml` file as input. + +You should see something like this: + +![Debugger launched](@site/static/img/debugger/1-started.png) + +Let's inspect the state of the program. For that, we open VS Code's _Debug pane_. Look for this icon: + +![Debug pane icon](@site/static/img/debugger/2-icon.png) + +You will now see two categories of variables: Locals and Witness Map. + +![Debug pane expanded](@site/static/img/debugger/3-debug-pane.png) + +1. **Locals**: variables of your program. At this point in execution this section is empty, but as we step through the code it will get populated by `x`, `result`, `digest`, etc. + +2. **Witness map**: these are initially populated from your project's `Prover.toml` file. In this example, they will be used to populate `x` and `result` at the beginning of the `main` function. + +Most of the time you will probably be focusing mostly on locals, as they represent the high level state of your program. + +You might be interested in inspecting the witness map in case you are trying to solve a really low level issue in the compiler or runtime itself, so this concerns mostly advanced or niche users. + +Let's step through the program, by using the debugger buttons or their corresponding keyboard shortcuts. + +![Debugger buttons](@site/static/img/debugger/4-debugger-buttons.png) + +Now we can see in the variables pane that there's values for `digest`, `result` and `x`. + +![Inspecting locals](@site/static/img/debugger/5-assert.png) + +We can also inspect the values of variables by directly hovering on them on the code. + +![Hover locals](@site/static/img/debugger/6-hover.png) + +Let's set a break point at the `keccak256` function, so we can continue execution up to the point when it's first invoked without having to go one step at a time. + +We just need to click the to the right of the line number 18. Once the breakpoint appears, we can click the `continue` button or use its corresponding keyboard shortcut (`F5` by default). + +![Breakpoint](@site/static/img/debugger/7-break.png) + +Now we are debugging the `keccak256` function, notice the _Call Stack pane_ at the lower right. This lets us inspect the current call stack of our process. + +That covers most of the current debugger functionalities. Check out [the reference](../../reference/debugger/debugger_vscode.md) for more details on how to configure the debugger. 
\ No newline at end of file diff --git a/docs/docs/how_to/merkle-proof.mdx b/docs/docs/how_to/merkle-proof.mdx index 003c7019a93..16c425bed76 100644 --- a/docs/docs/how_to/merkle-proof.mdx +++ b/docs/docs/how_to/merkle-proof.mdx @@ -5,6 +5,7 @@ description: merkle tree with a specified root, at a given index. keywords: [merkle proof, merkle membership proof, Noir, rust, hash function, Pedersen, sha256, merkle tree] +sidebar_position: 4 --- Let's walk through an example of a merkle membership proof in Noir that proves that a given leaf is diff --git a/docs/docs/noir/concepts/data_types/slices.mdx b/docs/docs/noir/concepts/data_types/slices.mdx index 828faf4a8f8..4eccc677b80 100644 --- a/docs/docs/noir/concepts/data_types/slices.mdx +++ b/docs/docs/noir/concepts/data_types/slices.mdx @@ -168,3 +168,28 @@ fn main() { assert(slice.len() == 2); } ``` + +### as_array + +Converts this slice into an array. + +Make sure to specify the size of the resulting array. +Panics if the resulting array length is different than the slice's length. + +```rust +fn as_array(self) -> [T; N] +``` + +Example: + +```rust +fn main() { + let slice = &[5, 6]; + + // Always specify the length of the resulting array! + let array: [Field; 2] = slice.as_array(); + + assert(array[0] == slice[0]); + assert(array[1] == slice[1]); +} +``` diff --git a/docs/docs/noir/concepts/functions.md b/docs/docs/noir/concepts/functions.md index 2c9bc33fdfc..f656cdfd97a 100644 --- a/docs/docs/noir/concepts/functions.md +++ b/docs/docs/noir/concepts/functions.md @@ -62,7 +62,7 @@ fn main(x : [Field]) // can't compile, has variable size fn main(....// i think you got it by now ``` -Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: +Keep in mind [tests](../../tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: ```rust fn main(x : [Field]) { @@ -190,7 +190,7 @@ Supported attributes include: - **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function` - **field**: Used to enable conditional compilation of code depending on the field size. See below for more details - **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details. -- **test**: mark the function as unit tests. See [Tests](../../getting_started/tooling/testing.md) for more details +- **test**: mark the function as unit tests. See [Tests](../../tooling/testing.md) for more details ### Field Attribute diff --git a/docs/docs/noir/concepts/oracles.md b/docs/docs/noir/concepts/oracles.md index 2e6a6818d48..aa380b5f7b8 100644 --- a/docs/docs/noir/concepts/oracles.md +++ b/docs/docs/noir/concepts/oracles.md @@ -11,6 +11,12 @@ keywords: sidebar_position: 6 --- +:::note + +This is an experimental feature that is not fully documented. If you notice any outdated information or potential improvements to this page, pull request contributions are very welcome: https://github.com/noir-lang/noir + +::: + Noir has support for Oracles via RPC calls. This means Noir will make an RPC call and use the return value for proof generation. 
Since Oracles are not resolved by Noir, they are [`unconstrained` functions](./unconstrained.md) @@ -21,3 +27,5 @@ You can declare an Oracle through the `#[oracle()]` flag. Example: #[oracle(get_number_sequence)] unconstrained fn get_number_sequence(_size: Field) -> [Field] {} ``` + +The timeout for when using an external RPC oracle resolver can be set with the `NARGO_FOREIGN_CALL_TIMEOUT` environment variable. This timeout is in units of milliseconds. diff --git a/docs/docs/noir/modules_packages_crates/workspaces.md b/docs/docs/noir/modules_packages_crates/workspaces.md index 67a1dafa372..513497f12bf 100644 --- a/docs/docs/noir/modules_packages_crates/workspaces.md +++ b/docs/docs/noir/modules_packages_crates/workspaces.md @@ -11,16 +11,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/docs/noir/standard_library/bigint.md b/docs/docs/noir/standard_library/bigint.md index 9aa4fb77112..54d791b82d3 100644 --- a/docs/docs/noir/standard_library/bigint.md +++ b/docs/docs/noir/standard_library/bigint.md @@ -48,7 +48,10 @@ The available operations for each big integer are: Construct a big integer from its little-endian bytes representation. Example: ```rust + // Construct a big integer from a slice of bytes let a = Secpk1Fq::from_le_bytes(&[x, y, 0, 45, 2]); + // Construct a big integer from an array of 32 bytes + let a = Secpk1Fq::from_le_bytes_32([1;32]); ``` Sure, here's the formatted version of the remaining methods: diff --git a/docs/docs/noir/standard_library/containers/hashmap.md b/docs/docs/noir/standard_library/containers/hashmap.md index 093b6d38d11..2b9f4895722 100644 --- a/docs/docs/noir/standard_library/containers/hashmap.md +++ b/docs/docs/noir/standard_library/containers/hashmap.md @@ -20,8 +20,9 @@ Example: ```rust // Create a mapping from Fields to u32s with a maximum length of 12 -// using a pedersen hash -let mut map: HashMap> = HashMap::default(); +// using a poseidon2 hasher +use dep::std::hash::poseidon2::Poseidon2Hasher; +let mut map: HashMap> = HashMap::default(); map.insert(1, 2); map.insert(3, 4); diff --git a/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx index f98c90a97c8..7329880c7a7 100644 --- a/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx +++ b/docs/docs/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -13,7 +13,6 @@ import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; ## sha256 Given an array of bytes, returns the resulting sha256 hash. -See sha256_slice for a version that works directly on slices. #include_code sha256 noir_stdlib/src/hash.nr rust @@ -28,18 +27,9 @@ fn main() { -## sha256_slice - -A version of sha256 specialized to slices: - -#include_code sha256_slice noir_stdlib/src/hash.nr rust - - - ## blake2s Given an array of bytes, returns an array with the Blake2 hash -See blake2s_slice for a version that works directly on slices. 
#include_code blake2s noir_stdlib/src/hash.nr rust @@ -54,18 +44,9 @@ fn main() { -## blake2s_slice - -A version of blake2s specialized to slices: - -#include_code blake2s_slice noir_stdlib/src/hash.nr rust - - - ## blake3 Given an array of bytes, returns an array with the Blake3 hash -See blake3_slice for a version that works directly on slices. #include_code blake3 noir_stdlib/src/hash.nr rust @@ -80,18 +61,9 @@ fn main() { -## blake3_slice - -A version of blake3 specialized to slices: - -#include_code blake3_slice noir_stdlib/src/hash.nr rust - - - ## pedersen_hash Given an array of Fields, returns the Pedersen hash. -See pedersen_hash_slice for a version that works directly on slices. #include_code pedersen_hash noir_stdlib/src/hash.nr rust @@ -101,18 +73,9 @@ example: -## pedersen_hash_slice - -Given a slice of Fields, returns the Pedersen hash. - -#include_code pedersen_hash_slice noir_stdlib/src/hash.nr rust - - - ## pedersen_commitment Given an array of Fields, returns the Pedersen commitment. -See pedersen_commitment_slice for a version that works directly on slices. #include_code pedersen_commitment noir_stdlib/src/hash.nr rust @@ -122,20 +85,11 @@ example: -## pedersen_commitment_slice - -Given a slice of Fields, returns the Pedersen commitment. - -#include_code pedersen_commitment_slice noir_stdlib/src/hash.nr rust - - - ## keccak256 Given an array of bytes (`u8`), returns the resulting keccak hash as an array of 32 bytes (`[u8; 32]`). Specify a message_size to hash only the first -`message_size` bytes of the input. See keccak256_slice for a version that works -directly on slices. +`message_size` bytes of the input. #include_code keccak256 noir_stdlib/src/hash.nr rust @@ -145,15 +99,6 @@ example: -## keccak256_slice - -Given a slice of bytes (`u8`), returns the resulting keccak hash as an array of -32 bytes (`[u8; 32]`). - -#include_code keccak256_slice noir_stdlib/src/hash.nr rust - - - ## poseidon Given an array of Fields, returns a new Field with the Poseidon Hash. Mind that you need to specify diff --git a/docs/docs/noir/standard_library/traits.md b/docs/docs/noir/standard_library/traits.md index e6e7e6d40cb..2536d9a943f 100644 --- a/docs/docs/noir/standard_library/traits.md +++ b/docs/docs/noir/standard_library/traits.md @@ -140,6 +140,8 @@ impl Eq for (A, B, C, D, E) Implementing this trait on a type allows `<`, `<=`, `>`, and `>=` to be used on values of the type. +`std::cmp` also provides `max` and `min` functions for any type which implements the `Ord` trait. + Implementations: ```rust diff --git a/docs/docs/reference/debugger/_category_.json b/docs/docs/reference/debugger/_category_.json new file mode 100644 index 00000000000..27869205ad3 --- /dev/null +++ b/docs/docs/reference/debugger/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Debugger", + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/docs/reference/debugger/debugger_known_limitations.md b/docs/docs/reference/debugger/debugger_known_limitations.md new file mode 100644 index 00000000000..936d416ac4b --- /dev/null +++ b/docs/docs/reference/debugger/debugger_known_limitations.md @@ -0,0 +1,59 @@ +--- +title: Known limitations +description: + An overview of known limitations of the current version of the Noir debugger +keywords: + [ + Nargo, + Noir Debugger, + VS Code, + ] +sidebar_position: 2 +--- + +# Debugger Known Limitations + +There are currently some limits to what the debugger can observe. 
+ +## Mutable references + +The debugger is currently blind to any state mutated via a mutable reference. For example, in: + +``` +let mut x = 1; +let y = &mut x; +*y = 2; +``` + +The update on `x` will not be observed by the debugger. That means, when running `vars` from the debugger REPL, or inspecting the _local variables_ pane in the VS Code debugger, `x` will appear with value 1 despite having executed `*y = 2;`. + +## Variables of type function or mutable references are opaque + +When inspecting variables, any variable of type `Function` or `MutableReference` will render its value as `<>` or `<>`. + +## Debugger instrumentation affects resulting ACIR + +In order to make the state of local variables observable, the debugger compiles Noir circuits interleaving foreign calls that track any mutations to them. While this works (except in the cases described above) and doesn't introduce any behavior changes, it does as a side effect produce bigger bytecode. In particular, when running the command `opcodes` on the REPL debugger, you will notice Unconstrained VM blocks that look like this: + +``` +... +5 BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [], q_c: 2 }), Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(2))], q_c: 0 })] + | outputs=[] + 5.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 5.1 | Mov { destination: RegisterIndex(3), source: RegisterIndex(1) } + 5.2 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 5.3 | Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 5.4 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 5.5 | Mov { destination: RegisterIndex(3), source: RegisterIndex(3) } + 5.6 | Call { location: 8 } + 5.7 | Stop + 5.8 | ForeignCall { function: "__debug_var_assign", destinations: [], inputs: [RegisterIndex(RegisterIndex(2)), RegisterIndex(RegisterIndex(3))] } +... +``` + +If you are interested in debugging/inspecting compiled ACIR without these synthetic changes, you can invoke the REPL debugger with the `--skip-instrumentation` flag or launch the VS Code debugger with the `skipConfiguration` property set to true in its launch configuration. You can find more details about those in the [Debugger REPL reference](debugger_repl.md) and the [VS Code Debugger reference](debugger_vscode.md). + +:::note +Skipping debugger instrumentation means you won't be able to inspect values of local variables. +::: + diff --git a/docs/docs/reference/debugger/debugger_repl.md b/docs/docs/reference/debugger/debugger_repl.md new file mode 100644 index 00000000000..46e2011304e --- /dev/null +++ b/docs/docs/reference/debugger/debugger_repl.md @@ -0,0 +1,360 @@ +--- +title: REPL Debugger +description: + Noir Debugger REPL options and commands. +keywords: + [ + Nargo, + Noir CLI, + Noir Debugger, + REPL, + ] +sidebar_position: 1 +--- + +## Running the REPL debugger + +`nargo debug [OPTIONS] [WITNESS_NAME]` + +Runs the Noir REPL debugger. If a `WITNESS_NAME` is provided the debugger writes the resulting execution witness to a `WITNESS_NAME` file. 
+ +### Options + +| Option | Description | +| --------------------- | ------------------------------------------------------------ | +| `-p, --prover-name ` | The name of the toml file which contains the inputs for the prover [default: Prover]| +| `--package ` | The name of the package to debug | +| `--print-acir` | Display the ACIR for compiled circuit | +| `--deny-warnings` | Treat all warnings as errors | +| `--silence-warnings` | Suppress warnings | +| `-h, --help` | Print help | + +None of these options are required. + +:::note +Since the debugger starts by compiling the target package, all Noir compiler options are also available. Check out the [compiler reference](../nargo_commands.md#nargo-compile) to learn more about the compiler options. +::: + +## REPL commands + +Once the debugger is running, it accepts the following commands. + +#### `help` (h) + +Displays the menu of available commands. + +``` +> help +Available commands: + + opcodes display ACIR opcodes + into step into to the next opcode + next step until a new source location is reached + out step until a new source location is reached + and the current stack frame is finished + break LOCATION:OpcodeLocation add a breakpoint at an opcode location + over step until a new source location is reached + without diving into function calls + restart restart the debugging session + delete LOCATION:OpcodeLocation delete breakpoint at an opcode location + witness show witness map + witness index:u32 display a single witness from the witness map + witness index:u32 value:String update a witness with the given value + memset index:usize value:String update a memory cell with the given + value + continue continue execution until the end of the + program + vars show variable values available at this point + in execution + stacktrace display the current stack trace + memory show memory (valid when executing unconstrained code) value + step step to the next ACIR opcode + +Other commands: + + help Show this help message + quit Quit repl + +``` + +### Stepping through programs + +#### `next` (n) + +Step until the next Noir source code location. While other commands, such as [`into`](#into-i) and [`step`](#step-s), allow for finer grained control of the program's execution at the opcode level, `next` is source code centric. For example: + +``` +3 ... +4 fn main(x: u32) { +5 assert(entry_point(x) == 2); +6 swap_entry_point(x, x + 1); +7 -> assert(deep_entry_point(x) == 4); +8 multiple_values_entry_point(x); +9 } +``` + + +Using `next` here would cause the debugger to jump to the definition of `deep_entry_point` (if available). + +If you want to step over `deep_entry_point` and go straight to line 8, use [the `over` command](#over) instead. + +#### `over` + +Step until the next source code location, without diving into function calls. For example: + +``` +3 ... +4 fn main(x: u32) { +5 assert(entry_point(x) == 2); +6 swap_entry_point(x, x + 1); +7 -> assert(deep_entry_point(x) == 4); +8 multiple_values_entry_point(x); +9 } +``` + + +Using `over` here would cause the debugger to execute until line 8 (`multiple_values_entry_point(x);`). + +If you want to step into `deep_entry_point` instead, use [the `next` command](#next-n). + +#### `out` + +Step until the end of the current function call. For example: + +``` + 3 ... 
+ 4 fn main(x: u32) { + 5 assert(entry_point(x) == 2); + 6 swap_entry_point(x, x + 1); + 7 -> assert(deep_entry_point(x) == 4); + 8 multiple_values_entry_point(x); + 9 } + 10 + 11 unconstrained fn returns_multiple_values(x: u32) -> (u32, u32, u32, u32) { + 12 ... + ... + 55 + 56 unconstrained fn deep_entry_point(x: u32) -> u32 { + 57 -> level_1(x + 1) + 58 } + +``` + +Running `out` here will resume execution until line 8. + +#### `step` (s) + +Skips to the next ACIR code. A compiled Noir program is a sequence of ACIR opcodes. However, an unconstrained VM opcode denotes the start of an unconstrained code block, to be executed by the unconstrained VM. For example (redacted for brevity): + +``` +0 BLACKBOX::RANGE [(_0, num_bits: 32)] [ ] +1 -> BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(0))], q_c: 0 })] outputs=[Simple(Witness(1))] + 1.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 1.1 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 1.2 | Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 1.3 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 1.4 | Call { location: 7 } + ... + 1.43 | Return +2 EXPR [ (1, _1) -2 ] +``` + +The `->` here shows the debugger paused at an ACIR opcode: `BRILLIG`, at index 1, which denotes an unconstrained code block is about to start. + +Using the `step` command at this point would result in the debugger stopping at ACIR opcode 2, `EXPR`, skipping unconstrained computation steps. + +Use [the `into` command](#into-i) instead if you want to follow unconstrained computation step by step. + +#### `into` (i) + +Steps into the next opcode. A compiled Noir program is a sequence of ACIR opcodes. However, a BRILLIG opcode denotes the start of an unconstrained code block, to be executed by the unconstrained VM. For example (redacted for brevity): + +``` +0 BLACKBOX::RANGE [(_0, num_bits: 32)] [ ] +1 -> BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(0))], q_c: 0 })] outputs=[Simple(Witness(1))] + 1.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 1.1 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 1.2 | Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 1.3 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 1.4 | Call { location: 7 } + ... + 1.43 | Return +2 EXPR [ (1, _1) -2 ] +``` + +The `->` here shows the debugger paused at an ACIR opcode: `BRILLIG`, at index 1, which denotes an unconstrained code block is about to start. + +Using the `into` command at this point would result in the debugger stopping at opcode 1.0, `Mov ...`, allowing the debugger user to follow unconstrained computation step by step. + +Use [the `step` command](#step-s) instead if you want to skip to the next ACIR code directly. + +#### `continue` (c) + +Continues execution until the next breakpoint, or the end of the program. + +#### `restart` (res) + +Interrupts execution, and restarts a new debugging session from scratch. + +#### `opcodes` (o) + +Display the program's ACIR opcode sequence. 
For example: + +``` +0 BLACKBOX::RANGE [(_0, num_bits: 32)] [ ] +1 -> BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(0))], q_c: 0 })] outputs=[Simple(Witness(1))] + 1.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 1.1 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 1.2 | Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 1.3 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 1.4 | Call { location: 7 } + ... + 1.43 | Return +2 EXPR [ (1, _1) -2 ] +``` + +### Breakpoints + +#### `break [Opcode]` (or shorthand `b [Opcode]`) + +Sets a breakpoint on the specified opcode index. To get a list of the program opcode numbers, see [the `opcode` command](#opcodes-o). For example: + +``` +0 BLACKBOX::RANGE [(_0, num_bits: 32)] [ ] +1 -> BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(0))], q_c: 0 })] outputs=[Simple(Witness(1))] + 1.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 1.1 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 1.2 | Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 1.3 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 1.4 | Call { location: 7 } + ... + 1.43 | Return +2 EXPR [ (1, _1) -2 ] +``` + +In this example, issuing a `break 1.2` command adds break on opcode 1.2, as denoted by the `*` character: + +``` +0 BLACKBOX::RANGE [(_0, num_bits: 32)] [ ] +1 -> BRILLIG inputs=[Single(Expression { mul_terms: [], linear_combinations: [(1, Witness(0))], q_c: 0 })] outputs=[Simple(Witness(1))] + 1.0 | Mov { destination: RegisterIndex(2), source: RegisterIndex(0) } + 1.1 | Const { destination: RegisterIndex(0), value: Value { inner: 0 } } + 1.2 | * Const { destination: RegisterIndex(1), value: Value { inner: 0 } } + 1.3 | Mov { destination: RegisterIndex(2), source: RegisterIndex(2) } + 1.4 | Call { location: 7 } + ... + 1.43 | Return +2 EXPR [ (1, _1) -2 ] +``` + +Running [the `continue` command](#continue-c) at this point would cause the debugger to execute the program until opcode 1.2. + +#### `delete [Opcode]` (or shorthand `d [Opcode]`) + +Deletes a breakpoint at an opcode location. Usage is analogous to [the `break` command](#). + +### Variable inspection + +#### vars + +Show variable values available at this point in execution. + +:::note +The ability to inspect variable values from the debugger depends on compilation to be run in a special debug instrumentation mode. This instrumentation weaves variable tracing code with the original source code. + +So variable value inspection comes at the expense of making the resulting ACIR bytecode bigger and harder to understand and optimize. + +If you find this compromise unacceptable, you can run the debugger with the flag `--skip-debug-instrumentation`. This will compile your circuit without any additional debug information, so the resulting ACIR bytecode will be identical to the one produced by standard Noir compilation. However, if you opt for this, the `vars` command will not be available while debugging. +::: + + +### Stacktrace + +#### `stacktrace` + +Displays the current stack trace. + + +### Witness map + +#### `witness` (w) + +Show witness map. For example: + +``` +_0 = 0 +_1 = 2 +_2 = 1 +``` + +#### `witness [Witness Index]` + +Display a single witness from the witness map. 
For example: + +``` +> witness 1 +_1 = 2 +``` + +#### `witness [Witness Index] [New value]` + +Overwrite the given index with a new value. For example: + +``` +> witness 1 3 +_1 = 3 +``` + + +### Unconstrained VM memory + +#### `memory` + +Show unconstrained VM memory state. For example: + +``` +> memory +At opcode 1.13: Store { destination_pointer: RegisterIndex(0), source: RegisterIndex(3) } +... +> registers +0 = 0 +1 = 10 +2 = 0 +3 = 1 +4 = 1 +5 = 2³² +6 = 1 +> into +At opcode 1.14: Const { destination: RegisterIndex(5), value: Value { inner: 1 } } +... +> memory +0 = 1 +> +``` + +In the example above: we start with clean memory, then step through a `Store` opcode which stores the value of register 3 (1) into the memory address stored in register 0 (0). Thus now `memory` shows memory address 0 contains value 1. + +:::note +This command is only functional while the debugger is executing unconstrained code. +::: + +#### `memset [Memory address] [New value]` + +Update a memory cell with the given value. For example: + +``` +> memory +0 = 1 +> memset 0 2 +> memory +0 = 2 +> memset 1 4 +> memory +0 = 2 +1 = 4 +> +``` + +:::note +This command is only functional while the debugger is executing unconstrained code. +::: \ No newline at end of file diff --git a/docs/docs/reference/debugger/debugger_vscode.md b/docs/docs/reference/debugger/debugger_vscode.md new file mode 100644 index 00000000000..c027332b3b0 --- /dev/null +++ b/docs/docs/reference/debugger/debugger_vscode.md @@ -0,0 +1,82 @@ +--- +title: VS Code Debugger +description: + VS Code Debugger configuration and features. +keywords: + [ + Nargo, + Noir CLI, + Noir Debugger, + VS Code, + IDE, + ] +sidebar_position: 0 +--- + +# VS Code Noir Debugger Reference + +The Noir debugger enabled by the vscode-noir extension ships with default settings such that the most common scenario should run without any additional configuration steps. + +These defaults can nevertheless be overridden by defining a launch configuration file. This page provides a reference for the properties you can override via a launch configuration file, as well as documenting the Nargo `dap` command, which is a dependency of the VS Code Noir debugger. + + +## Creating and editing launch configuration files + +To create a launch configuration file from VS Code, open the _debug pane_, and click on _create a launch.json file_. + +![Creating a launch configuration file](@site/static/img/debugger/ref1-create-launch.png) + +A `launch.json` file will be created, populated with basic defaults. + +### Noir Debugger launch.json properties + +#### projectFolder + +_String, optional._ + +Absolute path to the Nargo project to debug. By default, it is dynamically determined by looking for the nearest `Nargo.toml` file to the active file at the moment of launching the debugger. + +#### proverName + +_String, optional._ + +Name of the prover input to use. Defaults to `Prover`, which looks for a file named `Prover.toml` at the `projectFolder`. + +#### generateAcir + +_Boolean, optional._ + +If true, generate ACIR opcodes instead of unconstrained opcodes which will be closer to release binaries but less convenient for debugging. Defaults to `false`. + +#### skipInstrumentation + +_Boolean, optional._ + +Skips variables debugging instrumentation of code, making debugging less convenient but the resulting binary smaller and closer to production. Defaults to `false`. + +:::note +Skipping instrumentation causes the debugger to be unable to inspect local variables. 
+::: + +## `nargo dap [OPTIONS]` + +When run without any option flags, it starts the Nargo Debug Adapter Protocol server, which acts as the debugging backend for the VS Code Noir Debugger. + +All option flags are related to preflight checks. The Debug Adapter Protocol specifies how errors are to be informed from a running DAP server, but it doesn't specify mechanisms to communicate server initialization errors between the DAP server and its client IDE. + +Thus `nargo dap` ships with a _preflight check_ mode. If flag `--preflight-check` and the rest of the `--preflight-*` flags are provided, Nargo will run the same initialization routine except it will not start the DAP server. + +`vscode-noir` will then run `nargo dap` in preflight check mode first before a debugging session starts. If the preflight check ends in error, vscode-noir will present stderr and stdout output from this process through its own Output pane in VS Code. This makes it possible for users to diagnose what pieces of configuration might be wrong or missing in case of initialization errors. + +If the preflight check succeeds, `vscode-noir` proceeds to start the DAP server normally but running `nargo dap` without any additional flags. + +### Options + +| Option | Description | +| --------------------------------------------------------- | --------------------------------------------------------------------------------------------------------- | +| `--preflight-check` | If present, dap runs in preflight check mode. | +| `--preflight-project-folder ` | Absolute path to the project to debug for preflight check. | +| `--preflight-prover-name ` | Name of prover file to use for preflight check | +| `--preflight-generate-acir` | Optional. If present, compile in ACIR mode while running preflight check. | +| `--preflight-skip-instrumentation` | Optional. If present, compile without introducing debug instrumentation while running preflight check. | +| `-h, --help` | Print help. | diff --git a/docs/docs/tooling/debugger.md b/docs/docs/tooling/debugger.md new file mode 100644 index 00000000000..184c436068f --- /dev/null +++ b/docs/docs/tooling/debugger.md @@ -0,0 +1,27 @@ +--- +title: Debugger +description: Learn about the Noir Debugger, in its REPL or VS Code versions. +keywords: [Nargo, VSCode, Visual Studio Code, REPL, Debugger] +sidebar_position: 2 +--- + +# Noir Debugger + +There are currently two ways of debugging Noir programs: + +1. From VS Code, via the [vscode-noir](https://github.com/noir-lang/vscode-noir) extension. You can install it via the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). +2. Via the REPL debugger, which ships with Nargo. + +In order to use either version of the debugger, you will need to install recent enough versions of Noir, [Nargo](../getting_started/installation) and vscode-noir: + +- Noir 0.xx +- Nargo 0.xx +- vscode-noir 0.xx + +:::info +At the moment, the debugger supports debugging binary projects, but not contracts. +::: + +We cover the VS Code Noir debugger more in depth in [its VS Code debugger how-to guide](../how_to/debugger/debugging_with_vs_code.md) and [the reference](../reference/debugger/debugger_vscode.md). + +The REPL debugger is discussed at length in [the REPL debugger how-to guide](../how_to/debugger/debugging_with_the_repl.md) and [the reference](../reference/debugger/debugger_repl.md). 
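+ +If you just want something to try either debugger on, any small binary package works. For example, a `main.nr` along these lines (a toy example, not from any template) gives you a few opcodes to step over and an assertion to break on: + +```rust +fn main(x : Field, y : pub Field) { +    let sum = x + y; +    assert(sum != 0); +    assert(x != y); +} +``` + +Fill in `Prover.toml` with two distinct values whose sum is non-zero (for example `x = "1"` and `y = "2"`) and you are ready to start a session with either debugger.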
diff --git a/docs/docs/getting_started/tooling/language_server.md b/docs/docs/tooling/language_server.md similarity index 100% rename from docs/docs/getting_started/tooling/language_server.md rename to docs/docs/tooling/language_server.md diff --git a/docs/docs/getting_started/tooling/testing.md b/docs/docs/tooling/testing.md similarity index 100% rename from docs/docs/getting_started/tooling/testing.md rename to docs/docs/tooling/testing.md diff --git a/docs/sidebars.js b/docs/sidebars.js index f1e79ba9ebc..cf7e852fed5 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -65,6 +65,11 @@ export default { label: 'Reference', items: [{ type: 'autogenerated', dirName: 'reference' }], }, + { + type: 'category', + label: 'Tooling', + items: [{ type: 'autogenerated', dirName: 'tooling' }], + }, { type: 'html', value: '
', diff --git a/docs/static/img/debugger/1-started.png b/docs/static/img/debugger/1-started.png new file mode 100644 index 00000000000..6f764d4e601 Binary files /dev/null and b/docs/static/img/debugger/1-started.png differ diff --git a/docs/static/img/debugger/2-icon.png b/docs/static/img/debugger/2-icon.png new file mode 100644 index 00000000000..31706670ccb Binary files /dev/null and b/docs/static/img/debugger/2-icon.png differ diff --git a/docs/static/img/debugger/3-debug-pane.png b/docs/static/img/debugger/3-debug-pane.png new file mode 100644 index 00000000000..24c112da96f Binary files /dev/null and b/docs/static/img/debugger/3-debug-pane.png differ diff --git a/docs/static/img/debugger/4-debugger-buttons.png b/docs/static/img/debugger/4-debugger-buttons.png new file mode 100644 index 00000000000..64c1e05be8a Binary files /dev/null and b/docs/static/img/debugger/4-debugger-buttons.png differ diff --git a/docs/static/img/debugger/5-assert.png b/docs/static/img/debugger/5-assert.png new file mode 100644 index 00000000000..0bfed6562af Binary files /dev/null and b/docs/static/img/debugger/5-assert.png differ diff --git a/docs/static/img/debugger/6-hover.png b/docs/static/img/debugger/6-hover.png new file mode 100644 index 00000000000..20579ec461e Binary files /dev/null and b/docs/static/img/debugger/6-hover.png differ diff --git a/docs/static/img/debugger/7-break.png b/docs/static/img/debugger/7-break.png new file mode 100644 index 00000000000..aca5121d722 Binary files /dev/null and b/docs/static/img/debugger/7-break.png differ diff --git a/docs/static/img/debugger/debugger-intro.gif b/docs/static/img/debugger/debugger-intro.gif new file mode 100644 index 00000000000..06e3b853555 Binary files /dev/null and b/docs/static/img/debugger/debugger-intro.gif differ diff --git a/docs/static/img/debugger/ref1-create-launch.png b/docs/static/img/debugger/ref1-create-launch.png new file mode 100644 index 00000000000..0b6cb8b3ec6 Binary files /dev/null and b/docs/static/img/debugger/ref1-create-launch.png differ diff --git a/docs/versioned_docs/version-v0.17.0/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.17.0/modules_packages_crates/workspaces.md index d9ac92667c9..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.17.0/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.17.0/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.19.0/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.19.0/modules_packages_crates/workspaces.md index d9ac92667c9..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.19.0/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.19.0/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ 
└── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.19.1/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.19.1/modules_packages_crates/workspaces.md index d9ac92667c9..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.19.1/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.19.1/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.19.2/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.19.2/modules_packages_crates/workspaces.md index d9ac92667c9..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.19.2/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.19.2/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.19.3/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.19.3/modules_packages_crates/workspaces.md index a979ef9f0a5..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.19.3/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.19.3/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: @@ -36,4 +38,4 @@ default-member = "crates/a" Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. -Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. \ No newline at end of file +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. 
diff --git a/docs/versioned_docs/version-v0.19.4/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.19.4/modules_packages_crates/workspaces.md index a979ef9f0a5..8168793fc80 100644 --- a/docs/versioned_docs/version-v0.19.4/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.19.4/modules_packages_crates/workspaces.md @@ -10,16 +10,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: @@ -36,4 +38,4 @@ default-member = "crates/a" Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. -Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. \ No newline at end of file +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. diff --git a/docs/versioned_docs/version-v0.22.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.22.0/noir/modules_packages_crates/workspaces.md index 67a1dafa372..513497f12bf 100644 --- a/docs/versioned_docs/version-v0.22.0/noir/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.22.0/noir/modules_packages_crates/workspaces.md @@ -11,16 +11,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.23.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.23.0/noir/modules_packages_crates/workspaces.md index 67a1dafa372..513497f12bf 100644 --- a/docs/versioned_docs/version-v0.23.0/noir/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.23.0/noir/modules_packages_crates/workspaces.md @@ -11,16 +11,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md index 67a1dafa372..513497f12bf 100644 --- a/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.24.0/noir/modules_packages_crates/workspaces.md @@ -11,16 +11,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── 
Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.25.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.25.0/noir/modules_packages_crates/workspaces.md index 67a1dafa372..513497f12bf 100644 --- a/docs/versioned_docs/version-v0.25.0/noir/modules_packages_crates/workspaces.md +++ b/docs/versioned_docs/version-v0.25.0/noir/modules_packages_crates/workspaces.md @@ -11,16 +11,18 @@ For a project with the following structure: ```tree ├── crates -│   ├── a -│   │   ├── Nargo.toml -│   │   └── src -│   │   └── main.nr -│   └── b -│   ├── Nargo.toml -│   └── src -│   └── main.nr -├── Nargo.toml -└── Prover.toml +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml ``` You can define a workspace in Nargo.toml like so: diff --git a/docs/versioned_docs/version-v0.26.0/explainers/explainer-oracle.md b/docs/versioned_docs/version-v0.26.0/explainers/explainer-oracle.md new file mode 100644 index 00000000000..b84ca5dd986 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/explainers/explainer-oracle.md @@ -0,0 +1,57 @@ +--- +title: Oracles +description: This guide provides an in-depth understanding of how Oracles work in Noir programming. Learn how to use outside calculations in your programs, constrain oracles, and understand their uses and limitations. +keywords: + - Noir Programming + - Oracles + - JSON-RPC + - Foreign Call Handlers + - Constrained Functions + - Blockchain Programming +sidebar_position: 1 +--- + +If you've seen "The Matrix" you may recall "The Oracle" as Gloria Foster smoking cigarettes and baking cookies. While she appears to "know things", she is actually providing a calculation of a pre-determined future. Noir Oracles are similar, in a way. They don't calculate the future (yet), but they allow you to use outside calculations in your programs. + +![matrix oracle prediction](@site/static/img/memes/matrix_oracle.jpeg) + +A Noir program is usually self-contained. You can pass certain inputs to it, and it will generate a deterministic output for those inputs. But what if you wanted to defer some calculation to an outside process or source? + +Oracles are functions that provide this feature. + +## Use cases + +An example usage for Oracles is proving something on-chain. For example, proving that the ETH-USDC quote was below a certain target at a certain block time. Or even making more complex proofs like proving the ownership of an NFT as an anonymous login method. + +Another interesting use case is to defer expensive calculations to be made outside of the Noir program, and then constraining the result; similar to the use of [unconstrained functions](../noir/concepts//unconstrained.md). + +In short, anything that can be constrained in a Noir program but needs to be fetched from an external source is a great candidate to be used in oracles. + +## Constraining oracles + +Just like in The Matrix, Oracles are powerful. But with great power, comes great responsibility. Just because you're using them in a Noir program doesn't mean they're true. Noir has no superpowers. If you want to prove that Portugal won the Euro Cup 2016, you're still relying on potentially untrusted information. 
+ +To give a concrete example, Alice wants to login to the [NounsDAO](https://nouns.wtf/) forum with her username "noir_nouner" by proving she owns a noun without revealing her ethereum address. Her Noir program could have a oracle call like this: + +```rust +#[oracle(getNoun)] +unconstrained fn get_noun(address: Field) -> Field +``` + +This oracle could naively resolve with the number of Nouns she possesses. However, it is useless as a trusted source, as the oracle could resolve to anything Alice wants. In order to make this oracle call actually useful, Alice would need to constrain the response from the oracle, by proving her address and the noun count belongs to the state tree of the contract. + +In short, **Oracles don't prove anything. Your Noir program does.** + +:::danger + +If you don't constrain the return of your oracle, you could be clearly opening an attack vector on your Noir program. Make double-triple sure that the return of an oracle call is constrained! + +::: + +## How to use Oracles + +On CLI, Nargo resolves oracles by making JSON RPC calls, which means it would require an RPC node to be running. + +In JavaScript, NoirJS accepts and resolves arbitrary call handlers (that is, not limited to JSON) as long as they matches the expected types the developer defines. Refer to [Foreign Call Handler](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) to learn more about NoirJS's call handling. + +If you want to build using oracles, follow through to the [oracle guide](../how_to/how-to-oracles.md) for a simple example on how to do that. diff --git a/docs/versioned_docs/version-v0.26.0/explainers/explainer-recursion.md b/docs/versioned_docs/version-v0.26.0/explainers/explainer-recursion.md new file mode 100644 index 00000000000..18846176ca7 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/explainers/explainer-recursion.md @@ -0,0 +1,176 @@ +--- +title: Recursive proofs +description: Explore the concept of recursive proofs in Zero-Knowledge programming. Understand how recursion works in Noir, a language for writing smart contracts on the EVM blockchain. Learn through practical examples like Alice and Bob's guessing game, Charlie's recursive merkle tree, and Daniel's reusable components. Discover how to use recursive proofs to optimize computational resources and improve efficiency. + +keywords: + [ + "Recursive Proofs", + "Zero-Knowledge Programming", + "Noir", + "EVM Blockchain", + "Smart Contracts", + "Recursion in Noir", + "Alice and Bob Guessing Game", + "Recursive Merkle Tree", + "Reusable Components", + "Optimizing Computational Resources", + "Improving Efficiency", + "Verification Key", + "Aggregation", + "Recursive zkSNARK schemes", + "PLONK", + "Proving and Verification Keys" + ] +sidebar_position: 1 +pagination_next: how_to/how-to-recursion +--- + +In programming, we tend to think of recursion as something calling itself. A classic example would be the calculation of the factorial of a number: + +```js +function factorial(n) { + if (n === 0 || n === 1) { + return 1; + } else { + return n * factorial(n - 1); + } +} +``` + +In this case, while `n` is not `1`, this function will keep calling itself until it hits the base case, bubbling up the result on the call stack: + +```md + Is `n` 1? <--------- + /\ / + / \ n = n -1 + / \ / + Yes No -------- +``` + +In Zero-Knowledge, recursion has some similarities. + +It is not a Noir function calling itself, but a proof being used as an input to another circuit. 
In short, you verify one proof *inside* another proof, returning the proof that both proofs are valid. + +This means that, given enough computational resources, you can prove the correctness of any arbitrary number of proofs in a single proof. This could be useful to design state channels (for which a common example would be [Bitcoin's Lightning Network](https://en.wikipedia.org/wiki/Lightning_Network)), to save on gas costs by settling one proof on-chain, or simply to make business logic less dependent on a consensus mechanism. + +## Examples + +Let us look at some of these examples + +### Alice and Bob - Guessing game + +Alice and Bob are friends, and they like guessing games. They want to play a guessing game online, but for that, they need a trusted third-party that knows both of their secrets and finishes the game once someone wins. + +So, they use zero-knowledge proofs. Alice tries to guess Bob's number, and Bob will generate a ZK proof stating whether she succeeded or failed. + +This ZK proof can go on a smart contract, revealing the winner and even giving prizes. However, this means every turn needs to be verified on-chain. This incurs some cost and waiting time that may simply make the game too expensive or time-consuming to be worth it. + +As a solution, Alice proposes the following: "what if Bob generates his proof, and instead of sending it on-chain, I verify it *within* my own proof before playing my own turn?". + +She can then generate a proof that she verified his proof, and so on. + +```md + Did you fail? <-------------------------- + / \ / + / \ n = n -1 + / \ / + Yes No / + | | / + | | / + | You win / + | / + | / +Generate proof of that / + + / + my own guess ---------------- +``` + +### Charlie - Recursive merkle tree + +Charlie is a concerned citizen, and wants to be sure his vote in an election is accounted for. He votes with a ZK proof, but he has no way of knowing that his ZK proof was included in the total vote count! + +If the vote collector puts all of the votes into a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree), everyone can prove the verification of two proofs within one proof, as such: + +```md + abcd + __________|______________ + | | + ab cd + _____|_____ ______|______ + | | | | + alice bob charlie daniel +``` + +Doing this recursively allows us to arrive on a final proof `abcd` which if true, verifies the correctness of all the votes. + +### Daniel - Reusable components + +Daniel has a big circuit and a big headache. A part of his circuit is a setup phase that finishes with some assertions that need to be made. But that section alone takes most of the proving time, and is largely independent of the rest of the circuit. + +He might find it more efficient to generate a proof for that setup phase separately, and verify that proof recursively in the actual business logic section of his circuit. This will allow for parallelization of both proofs, which results in a considerable speedup. + +## What params do I need + +As you can see in the [recursion reference](noir/standard_library/recursion.md), a simple recursive proof requires: + +- The proof to verify +- The Verification Key of the circuit that generated the proof +- A hash of this verification key, as it's needed for some backends +- The public inputs for the proof + +:::info + +Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather an aggregation object is built over the public inputs. 
+ +So, taking the example of Alice and Bob and their guessing game: + +- Alice makes her guess. Her proof is *not* recursive: it doesn't verify any proof within it! It's just a standard `assert(x != y)` circuit. +- Bob verifies Alice's proof and makes his own guess. In this circuit, he doesn't exactly *prove* the verification of Alice's proof. Instead, he *aggregates* his proof with Alice's proof. The actual verification is done when the full proof is verified, for example when using `nargo verify` or through the verifier smart contract. + +We can imagine recursive proofs as a [relay race](https://en.wikipedia.org/wiki/Relay_race). The first runner doesn't have to receive the baton from anyone else, as he/she already starts with it. But when his/her turn is over, the next runner needs to receive it, run a bit more, and pass it along. Even though every runner could theoretically verify the baton mid-run (why not? 🏃🔍), only at the end of the race does the referee verify that the whole race is valid. + +::: + +## Some architecture + +As with everything in computer science, there's no one-size-fits-all. But there are some patterns that can help with understanding and implementing recursive proofs. To give three examples: + +### Adding some logic to a proof verification + +This would be an approach for something like our guessing game, where proofs are sent back and forth and are verified by each opponent. This circuit would be divided into two sections: + +- A `recursive verification` section, which would be just the call to `std::verify_proof`, and that would be skipped on the first move (since there's no proof to verify) +- A `guessing` section, which is basically the logic part where the actual guessing happens + +In such a situation, and assuming Alice is first, she would skip the first part and try to guess Bob's number. Bob would then verify her proof on the first section of his run, and try to guess Alice's number on the second part, and so on. + +### Aggregating proofs + +In some one-way interaction situations, recursion would allow for aggregation of simple proofs that don't need to be immediately verified on-chain or elsewhere. + +To give a practical example, a barman wouldn't need to verify a "proof-of-age" on-chain every time he serves alcohol to a customer. Instead, the architecture would comprise two circuits: + +- A `main`, non-recursive circuit with some logic +- A `recursive` circuit meant to verify two proofs in one proof + +The customer's proofs would be intermediate, and made on their phones, and the barman could just verify them locally. He would then aggregate them into a final proof sent on-chain (or elsewhere) at the end of the day. + +### Recursively verifying different circuits + +Nothing prevents you from verifying different circuits in a recursive proof, for example: + +- A `circuit1` circuit +- A `circuit2` circuit +- A `recursive` circuit + +In this example, a regulator could verify that taxes were paid for a specific purchase by aggregating both a `payer` circuit (proving that a purchase was made and taxes were paid), and a `receipt` circuit (proving that the payment was received). + +## How fast is it + +At the time of writing, verifying recursive proofs is surprisingly fast. This is because most of the time is spent on generating the verification key that will be used to generate the next proof. So you are able to cache the verification key and reuse it later.
+ +Currently, Noir JS packages don't expose the functionality of loading proving and verification keys, but that feature exists in the underlying `bb.js` package. + +## How can I try it + +Learn more about using recursion in Nargo and NoirJS in the [how-to guide](../how_to/how-to-recursion.md) and see a full example in [noir-examples](https://github.com/noir-lang/noir-examples). diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/_category_.json b/docs/versioned_docs/version-v0.26.0/getting_started/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/_category_.json b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/index.md b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/index.md new file mode 100644 index 00000000000..743c4d8d634 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/index.md @@ -0,0 +1,142 @@ +--- +title: Creating a Project +description: + Learn how to create and verify your first Noir program using Nargo, a programming language for + zero-knowledge proofs. +keywords: + [ + Nargo, + Noir, + zero-knowledge proofs, + programming language, + create Noir program, + verify Noir program, + step-by-step guide, + ] +sidebar_position: 1 + +--- + +Now that we have installed Nargo, it is time to make our first hello world program! + +## Create a Project Directory + +Noir code can live anywhere on your computer. Let us create a _projects_ folder in the home +directory to house our Noir programs. + +For Linux, macOS, and Windows PowerShell, create the directory and change directory into it by +running: + +```sh +mkdir ~/projects +cd ~/projects +``` + +## Create Our First Nargo Project + +Now that we are in the projects directory, create a new Nargo project by running: + +```sh +nargo new hello_world +``` + +> **Note:** `hello_world` can be any arbitrary project name, we are simply using `hello_world` for +> demonstration. +> +> In production, the common practice is to name the project folder as `circuits` for better +> identifiability when sitting alongside other folders in the codebase (e.g. `contracts`, `scripts`, +> `test`). + +A `hello_world` folder would be created. Similar to Rust, the folder houses _src/main.nr_ and +_Nargo.toml_ which contain the source code and environmental options of your Noir program +respectively. + +### Intro to Noir Syntax + +Let us take a closer look at _main.nr_. The default _main.nr_ generated should look like this: + +```rust +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` + +The first line of the program specifies the program's inputs: + +```rust +x : Field, y : pub Field +``` + +Program inputs in Noir are private by default (e.g. `x`), but can be labeled public using the +keyword `pub` (e.g. `y`). To learn more about private and public values, check the +[Data Types](../../noir/concepts/data_types/index.md) section. 
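+ +As a quick aside (an illustrative variant, not the generated template), the same `pub` keyword also works on the return value, so a program can make a computed result public even while `x` stays private: + +```rust +fn main(x : Field, y : pub Field) -> pub Field { +    assert(x != y); +    x + y +} +``` + +The rest of this walkthrough sticks with the default _main.nr_ shown above.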
+ +The next line of the program specifies its body: + +```rust +assert(x != y); +``` + +The Noir syntax `assert` can be interpreted as something similar to constraints in other zk-contract languages. + +For more Noir syntax, check the [Language Concepts](../../noir/concepts/comments.md) chapter. + +## Build In/Output Files + +Change directory into _hello_world_ and build in/output files for your Noir program by running: + +```sh +cd hello_world +nargo check +``` + +Two additional files would be generated in your project directory: + +_Prover.toml_ houses input values, and _Verifier.toml_ houses public values. + +## Prove Our Noir Program + +Now that the project is set up, we can create a proof of correct execution of our Noir program. + +Fill in input values for execution in the _Prover.toml_ file. For example: + +```toml +x = "1" +y = "2" +``` + +Prove the valid execution of your Noir program: + +```sh +nargo prove +``` + +A new folder _proofs_ would then be generated in your project directory, containing the proof file +`.proof`, where the project name is defined in Nargo.toml. + +The _Verifier.toml_ file would also be updated with the public values computed from program +execution (in this case the value of `y`): + +```toml +y = "0x0000000000000000000000000000000000000000000000000000000000000002" +``` + +> **Note:** Values in _Verifier.toml_ are computed as 32-byte hex values. + +## Verify Our Noir Program + +Once a proof is generated, we can verify correct execution of our Noir program by verifying the +proof file. + +Verify your proof by running: + +```sh +nargo verify +``` + +The verification will complete in silence if it is successful. If it fails, it will log the +corresponding error instead. + +Congratulations, you have now created and verified a proof for your very first Noir program! + +In the [next section](./project_breakdown.md), we will go into more detail on each step performed. diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/project_breakdown.md b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/project_breakdown.md new file mode 100644 index 00000000000..6160a102c6c --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/hello_noir/project_breakdown.md @@ -0,0 +1,199 @@ +--- +title: Project Breakdown +description: + Learn about the anatomy of a Nargo project, including the purpose of the Prover and Verifier TOML + files, and how to prove and verify your program. +keywords: + [Nargo, Nargo project, Prover.toml, Verifier.toml, proof verification, private asset transfer] +sidebar_position: 2 +--- + +This section breaks down our hello world program from the previous section. We elaborate on the project +structure and what the `prove` and `verify` commands did. + +## Anatomy of a Nargo Project + +Upon creating a new project with `nargo new` and building the in/output files with `nargo check` +commands, you would get a minimal Nargo project of the following structure: + + - src + - Prover.toml + - Verifier.toml + - Nargo.toml + +The source directory _src_ holds the source code for your Noir program. By default only a _main.nr_ +file will be generated within it. + +### Prover.toml + +_Prover.toml_ is used for specifying the input values for executing and proving the program. You can specify `toml` files with different names by using the `--prover-name` or `-p` flags, see the [Prover](#provertoml) section below. Optionally you may specify expected output values for prove-time checking as well. 
+ +### Verifier.toml + +_Verifier.toml_ contains public in/output values computed when executing the Noir program. + +### Nargo.toml + +_Nargo.toml_ contains the environmental options of your project. It contains a "package" section and a "dependencies" section. + +Example Nargo.toml: + +```toml +[package] +name = "noir_starter" +type = "bin" +authors = ["Alice"] +compiler_version = "0.9.0" +description = "Getting started with Noir" +entry = "circuit/main.nr" +license = "MIT" + +[dependencies] +ecrecover = {tag = "v0.9.0", git = "https://github.com/colinnielsen/ecrecover-noir.git"} +``` + +Nargo.toml for a [workspace](../../noir/modules_packages_crates/workspaces.md) will look a bit different. For example: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +#### Package section + +The package section defines a number of fields including: + +- `name` (**required**) - the name of the package +- `type` (**required**) - can be "bin", "lib", or "contract" to specify whether it's a binary, library, or Aztec contract +- `authors` (optional) - authors of the project +- `compiler_version` - specifies the version of the compiler to use. This is enforced by the compiler and follows [Rust's versioning](https://doc.rust-lang.org/cargo/reference/manifest.html#the-version-field), so a `compiler_version = 0.18.0` will enforce Nargo version 0.18.0, `compiler_version = ^0.18.0` will enforce anything above 0.18.0 but below 0.19.0, etc. For more information, see how [Rust handles these operators](https://docs.rs/semver/latest/semver/enum.Op.html) +- `description` (optional) +- `entry` (optional) - a relative filepath to use as the entry point into your package (overrides the default of `src/lib.nr` or `src/main.nr`) +- `backend` (optional) +- `license` (optional) + +#### Dependencies section + +This is where you will specify any dependencies for your project. See the [Dependencies page](../../noir/modules_packages_crates/dependencies.md) for more info. + +`./proofs/` and `./contract/` directories will not be immediately visible until you create a proof or +verifier contract respectively. + +### main.nr + +The _main.nr_ file contains a `main` method, which is the entry point into your Noir program. + +In our sample program, _main.nr_ looks like this: + +```rust +fn main(x : Field, y : Field) { + assert(x != y); +} +``` + +The parameters `x` and `y` can be seen as the API for the program and must be supplied by the prover. Since neither `x` nor `y` is marked as public, the verifier does not supply any inputs when verifying the proof. + +The prover supplies the values for `x` and `y` in the _Prover.toml_ file. + +As for the program body, `assert` ensures that the condition to be satisfied (e.g. `x != y`) is +constrained by the proof of the execution of said program (i.e. if the condition was not met, the +verifier would reject the proof as an invalid proof). + +### Prover.toml + +The _Prover.toml_ file is a file which the prover uses to supply their witness values (both private and +public). + +In our hello world program the _Prover.toml_ file looks like this: + +```toml +x = "1" +y = "2" +``` + +When the command `nargo prove` is executed, two processes happen: + +1. Noir creates a proof that `x`, which holds the value of `1`, and `y`, which holds the value of `2`, + are not equal. This inequality constraint is due to the line `assert(x != y)`. + +2. Noir creates and stores the proof of this statement in the _proofs_ directory in a file called your-project.proof.
So if your project is named "private_voting" (defined in the project Nargo.toml), the proof will be saved at `./proofs/private_voting.proof`. Opening this file will display the proof in hex format. + +#### Arrays of Structs + +The following code shows how to pass an array of structs to a Noir program to generate a proof. + +```rust +// main.nr +struct Foo { + bar: Field, + baz: Field, +} + +fn main(foos: [Foo; 3]) -> pub Field { + foos[2].bar + foos[2].baz +} +``` + +Prover.toml: + +```toml +[[foos]] # foos[0] +bar = 0 +baz = 0 + +[[foos]] # foos[1] +bar = 0 +baz = 0 + +[[foos]] # foos[2] +bar = 1 +baz = 2 +``` + +#### Custom toml files + +You can specify a `toml` file with a different name to use for proving by using the `--prover-name` or `-p` flags. + +This command looks for proof inputs in the default **Prover.toml** and generates the proof and saves it at `./proofs/.proof`: + +```bash +nargo prove +``` + +This command looks for proof inputs in the custom **OtherProver.toml** and generates proof and saves it at `./proofs/.proof`: + +```bash +nargo prove -p OtherProver +``` + +## Verifying a Proof + +When the command `nargo verify` is executed, two processes happen: + +1. Noir checks in the _proofs_ directory for a proof file with the project name (eg. test_project.proof) + +2. If that file is found, the proof's validity is checked + +> **Note:** The validity of the proof is linked to the current Noir program; if the program is +> changed and the verifier verifies the proof, it will fail because the proof is not valid for the +> _modified_ Noir program. + +In production, the prover and the verifier are usually two separate entities. A prover would +retrieve the necessary inputs, execute the Noir program, generate a proof and pass it to the +verifier. The verifier would then retrieve the public inputs, usually from external sources, and +verify the validity of the proof against it. + +Take a private asset transfer as an example: + +A person using a browser as the prover would retrieve private inputs locally (e.g. the user's private key) and +public inputs (e.g. the user's encrypted balance on-chain), compute the transfer, generate a proof +and submit it to the verifier smart contract. + +The verifier contract would then draw the user's encrypted balance directly from the blockchain and +verify the proof submitted against it. If the verification passes, additional functions in the +verifier contract could trigger (e.g. approve the asset transfer). + +Now that you understand the concepts, you'll probably want some editor feedback while you are writing more complex code. diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/installation/_category_.json b/docs/versioned_docs/version-v0.26.0/getting_started/installation/_category_.json new file mode 100644 index 00000000000..0c02fb5d4d7 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/installation/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 0, + "label": "Install Nargo", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/installation/index.md b/docs/versioned_docs/version-v0.26.0/getting_started/installation/index.md new file mode 100644 index 00000000000..4ef86aa5914 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/installation/index.md @@ -0,0 +1,48 @@ +--- +title: Nargo Installation +description: + nargo is a command line tool for interacting with Noir programs. 
This page is a quick guide on how to install Nargo through the most common and easy method, noirup +keywords: [ + Nargo + Noir + Rust + Cargo + Noirup + Installation + Terminal Commands + Version Check + Nightlies + Specific Versions + Branches + Noirup Repository +] +pagination_next: getting_started/hello_noir/index +--- + +`nargo` is the one-stop-shop for almost everything related with Noir. The name comes from our love for Rust and its package manager `cargo`. + +With `nargo`, you can start new projects, compile, execute, prove, verify, test, generate solidity contracts, and do pretty much all that is available in Noir. + +Similarly to `rustup`, we also maintain an easy installation method that covers most machines: `noirup`. + +## Installing Noirup + +Open a terminal on your machine, and write: + +```bash +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Close the terminal, open another one, and run + +```bash +noirup +``` + +Done. That's it. You should have the latest version working. You can check with `nargo --version`. + +You can also install nightlies, specific versions +or branches. Check out the [noirup repository](https://github.com/noir-lang/noirup) for more +information. + +Now we're ready to start working on [our first Noir program!](../hello_noir/index.md) diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/installation/other_install_methods.md b/docs/versioned_docs/version-v0.26.0/getting_started/installation/other_install_methods.md new file mode 100644 index 00000000000..3634723562b --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/installation/other_install_methods.md @@ -0,0 +1,102 @@ +--- +title: Alternative Installations +description: There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains how to specify which version to install when using noirup, and using WSL for windows. +keywords: [ + Installation + Nargo + Noirup + Binaries + Compiling from Source + WSL for Windows + macOS + Linux + Nix + Direnv + Uninstalling Nargo + ] +sidebar_position: 1 +--- + +## Encouraged Installation Method: Noirup + +Noirup is the endorsed method for installing Nargo, streamlining the process of fetching binaries or compiling from source. It supports a range of options to cater to your specific needs, from nightly builds and specific versions to compiling from various sources. + +### Installing Noirup + +First, ensure you have `noirup` installed: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +### Fetching Binaries + +With `noirup`, you can easily switch between different Nargo versions, including nightly builds: + +- **Nightly Version**: Install the latest nightly build. + + ```sh + noirup --version nightly + ``` + +- **Specific Version**: Install a specific version of Nargo. + ```sh + noirup --version + ``` + +### Compiling from Source + +`noirup` also enables compiling Nargo from various sources: + +- **From a Specific Branch**: Install from the latest commit on a branch. + + ```sh + noirup --branch + ``` + +- **From a Fork**: Install from the main branch of a fork. + + ```sh + noirup --repo + ``` + +- **From a Specific Branch in a Fork**: Install from a specific branch in a fork. + + ```sh + noirup --repo --branch + ``` + +- **From a Specific Pull Request**: Install from a specific PR. + + ```sh + noirup --pr + ``` + +- **From a Specific Commit**: Install from a specific commit. 
+ + ```sh + noirup -C + ``` + +- **From Local Source**: Compile and install from a local directory. + ```sh + noirup --path ./path/to/local/source + ``` + +## Installation on Windows + +The default backend for Noir (Barretenberg) doesn't provide Windows binaries at this time. For that reason, Noir cannot be installed natively. However, it is available by using Windows Subsystem for Linux (WSL). + +Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL. + +step 2: Follow the [Noirup instructions](#encouraged-installation-method-noirup). + +## Uninstalling Nargo + +If you installed Nargo with `noirup`, you can uninstall Nargo by removing the files in `~/.nargo`, `~/nargo`, and `~/noir_cache`. This ensures that all installed binaries, configurations, and cache related to Nargo are fully removed from your system. + +```bash +rm -r ~/.nargo +rm -r ~/nargo +rm -r ~/noir_cache +``` diff --git a/docs/docs/getting_started/tooling/_category_.json b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/_category_.json similarity index 100% rename from docs/docs/getting_started/tooling/_category_.json rename to docs/versioned_docs/version-v0.26.0/getting_started/tooling/_category_.json diff --git a/docs/docs/getting_started/tooling/index.mdx b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/index.mdx similarity index 100% rename from docs/docs/getting_started/tooling/index.mdx rename to docs/versioned_docs/version-v0.26.0/getting_started/tooling/index.mdx diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/tooling/language_server.md b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/language_server.md new file mode 100644 index 00000000000..81e0356ef8a --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/language_server.md @@ -0,0 +1,43 @@ +--- +title: Language Server +description: Learn about the Noir Language Server, how to install the components, and configuration that may be required. +keywords: [Nargo, Language Server, LSP, VSCode, Visual Studio Code] +sidebar_position: 0 +--- + +This section helps you install and configure the Noir Language Server. + +The Language Server Protocol (LSP) has two components, the [Server](#language-server) and the [Client](#language-client). Below we describe each in the context of Noir. + +## Language Server + +The Server component is provided by the Nargo command line tool that you installed at the beginning of this guide. +As long as Nargo is installed and you've used it to run other commands in this guide, it should be good to go! + +If you'd like to verify that the `nargo lsp` command is available, you can run `nargo --help` and look for `lsp` in the list of commands. If you see it, you're using a version of Noir with LSP support. + +## Language Client + +The Client component is usually an editor plugin that launches the Server. It communicates LSP messages between the editor and the Server. For example, when you save a file, the Client will alert the Server, so it can try to compile the project and report any errors. + +Currently, Noir provides a Language Client for Visual Studio Code via the [vscode-noir](https://github.com/noir-lang/vscode-noir) extension. You can install it via the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). 
+ +> **Note:** Noir's Language Server Protocol support currently assumes users' VSCode workspace root to be the same as users' Noir project root (i.e. where Nargo.toml lies). +> +> If LSP features seem to be missing / malfunctioning, make sure you are opening your Noir project directly (instead of as a sub-folder) in your VSCode instance. + +When your language server is running correctly and the VSCode plugin is installed, you should see handy codelens buttons for compilation, measuring circuit size, execution, and tests: + +![Compile and Execute](@site/static/img/codelens_compile_execute.png) +![Run test](@site/static/img/codelens_run_test.png) + +You should also see your tests in the `testing` panel: + +![Testing panel](@site/static/img/codelens_testing_panel.png) + +### Configuration + +- **Noir: Enable LSP** - If checked, the extension will launch the Language Server via `nargo lsp` and communicate with it. +- **Noir: Nargo Flags** - Additional flags may be specified if you require them to be added when the extension calls `nargo lsp`. +- **Noir: Nargo Path** - An absolute path to a Nargo binary with the `lsp` command. This may be useful if Nargo is not within the `PATH` of your editor. +- **Noir > Trace: Server** - Setting this to `"messages"` or `"verbose"` will log LSP messages between the Client and Server. Useful for debugging. diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/tooling/noir_codegen.md b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/noir_codegen.md new file mode 100644 index 00000000000..d65151da0ab --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/noir_codegen.md @@ -0,0 +1,113 @@ +--- +title: Noir Codegen for TypeScript +description: Learn how to use Noir codegen to generate TypeScript bindings +keywords: [Nargo, Noir, compile, TypeScript] +sidebar_position: 2 +--- + +When using TypeScript, it is extra work to interpret Noir program outputs in a type-safe way. Third party libraries may exist for popular Noir programs, but they are either hard to find or unmaintained. + +Now you can generate TypeScript bindings for your Noir programs in two steps: +1. Exporting Noir functions using `nargo export` +2. Using the TypeScript module `noir_codegen` to generate TypeScript binding + +**Note:** you can only export functions from a Noir *library* (not binary or contract program types). + +## Installation + +### Your TypeScript project + +If you don't already have a TypeScript project you can add the module with `yarn` (or `npm`), then initialize it: + +```bash +yarn add typescript -D +npx tsc --init +``` + +### Add TypeScript module - `noir_codegen` + +The following command will add the module to your project's devDependencies: + +```bash +yarn add @noir-lang/noir_codegen -D +``` + +### Nargo library +Make sure you have Nargo, v0.25.0 or greater, installed. If you don't, follow the [installation guide](../installation/index.md). + +If you're in a new project, make a `circuits` folder and create a new Noir library: + +```bash +mkdir circuits && cd circuits +nargo new --lib myNoirLib +``` + +## Usage + +### Export ABI of specified functions + +First go to the `.nr` files in your Noir library, and add the `#[export]` macro to each function that you want to use in TypeScript. + +```rust +#[export] +fn your_function(... +``` + +From your Noir library (where `Nargo.toml` is), run the following command: + +```bash +nargo export +``` + +You will now have an `export` directory with a .json file per exported function. 
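+ +As a reference point, an exported function is just an ordinary library function with the attribute attached. A library exposing two functions like these (hypothetical names) should therefore produce two JSON artifacts, one per function: + +```rust +#[export] +fn add(x: Field, y: Field) -> Field { +    x + y +} + +#[export] +fn double(x: Field) -> Field { +    x * 2 +} +```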
+ +You can also specify the directory of Noir programs using `--program-dir`, for example: + +```bash +nargo export --program-dir=./circuits/myNoirLib +``` + +### Generate TypeScript bindings from exported functions + +To use the `noir-codegen` package we added to the TypeScript project: + +```bash +yarn noir-codegen ./export/your_function.json +``` + +This creates an `exports` directory with an `index.ts` file containing all exported functions. + +**Note:** adding `--out-dir` allows you to specify an output dir for your TypeScript bindings to go. Eg: + +```bash +yarn noir-codegen ./export/*.json --out-dir ./path/to/output/dir +``` + +## Example .nr function to .ts output + +Consider a Noir library with this function: + +```rust +#[export] +fn not_equal(x: Field, y: Field) -> bool { + x != y +} +``` + +After the export and codegen steps, you should have an `index.ts` like: + +```typescript +export type Field = string; + + +export const is_equal_circuit: CompiledCircuit = {"abi":{"parameters":[{"name":"x","type":{"kind":"field"},"visibility":"private"},{"name":"y","type":{"kind":"field"},"visibility":"private"}],"param_witnesses":{"x":[{"start":0,"end":1}],"y":[{"start":1,"end":2}]},"return_type":{"abi_type":{"kind":"boolean"},"visibility":"private"},"return_witnesses":[4]},"bytecode":"H4sIAAAAAAAA/7WUMQ7DIAxFQ0Krrr2JjSGYLVcpKrn/CaqqDQN12WK+hPBgmWd/wEyHbF1SS923uhOs3pfoChI+wKXMAXzIKyNj4PB0TFTYc0w5RUjoqeAeEu1wqK0F54RGkWvW44LPzExnlkbMEs4JNZmN8PxS42uHv82T8a3Jeyn2Ks+VLPcO558HmyLMCDOXAXXtpPt4R/Rt9T36ss6dS9HGPx/eG17nGegKBQAA"}; + +export async function is_equal(x: Field, y: Field, foreignCallHandler?: ForeignCallHandler): Promise { + const program = new Noir(is_equal_circuit); + const args: InputMap = { x, y }; + const { returnValue } = await program.execute(args, foreignCallHandler); + return returnValue as boolean; +} +``` + +Now the `is_equal()` function and relevant types are readily available for use in TypeScript. diff --git a/docs/versioned_docs/version-v0.26.0/getting_started/tooling/testing.md b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/testing.md new file mode 100644 index 00000000000..d3e0c522473 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/getting_started/tooling/testing.md @@ -0,0 +1,62 @@ +--- +title: Testing in Noir +description: Learn how to use Nargo to test your Noir program in a quick and easy way +keywords: [Nargo, testing, Noir, compile, test] +sidebar_position: 1 +--- + +You can test your Noir programs using Noir circuits. + +Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if +you run `nargo test`. + +For example if you have a program like: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test] +fn test_add() { + assert(add(2,2) == 4); + assert(add(0,1) == 1); + assert(add(1,0) == 1); +} +``` + +Running `nargo test` will test that the `test_add` function can be executed while satisfying all +the constraints which allows you to test that add returns the expected values. Test functions can't +have any arguments currently. + +### Test fail + +You can write tests that are expected to fail by using the decorator `#[test(should_fail)]`. 
For example: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test(should_fail)] +fn test_add() { + assert(add(2,2) == 5); +} +``` + +You can be more specific and make it fail with a specific reason by using `should_fail_with = "`: + +```rust +fn main(african_swallow_avg_speed : Field) { + assert(african_swallow_avg_speed == 65, "What is the airspeed velocity of an unladen swallow"); +} + +#[test] +fn test_king_arthur() { + main(65); +} + +#[test(should_fail_with = "What is the airspeed velocity of an unladen swallow")] +fn test_bridgekeeper() { + main(32); +} + +``` diff --git a/docs/versioned_docs/version-v0.26.0/how_to/_category_.json b/docs/versioned_docs/version-v0.26.0/how_to/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/how_to/how-to-oracles.md b/docs/versioned_docs/version-v0.26.0/how_to/how-to-oracles.md new file mode 100644 index 00000000000..8cf8035a5c4 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/how-to-oracles.md @@ -0,0 +1,276 @@ +--- +title: How to use Oracles +description: Learn how to use oracles in your Noir program with examples in both Nargo and NoirJS. This guide also covers writing a JSON RPC server and providing custom foreign call handlers for NoirJS. +keywords: + - Noir Programming + - Oracles + - Nargo + - NoirJS + - JSON RPC Server + - Foreign Call Handlers +sidebar_position: 1 +--- + +This guide shows you how to use oracles in your Noir program. For the sake of clarity, it assumes that: + +- You have read the [explainer on Oracles](../explainers/explainer-oracle.md) and are comfortable with the concept. +- You have a Noir program to add oracles to. You can create one using the [vite-hardhat starter](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) as a boilerplate. +- You understand the concept of a JSON-RPC server. Visit the [JSON-RPC website](https://www.jsonrpc.org/) if you need a refresher. +- You are comfortable with server-side JavaScript (e.g. Node.js, managing packages, etc.). + +For reference, you can find the snippets used in this tutorial on the [Aztec DevRel Repository](https://github.com/AztecProtocol/dev-rel/tree/main/code-snippets/how-to-oracles). + +## Rundown + +This guide has 3 major steps: + +1. How to modify our Noir program to make use of oracle calls as unconstrained functions +2. How to write a JSON RPC Server to resolve these oracle calls with Nargo +3. How to use them in Nargo and how to provide a custom resolver in NoirJS + +## Step 1 - Modify your Noir program + +An oracle is defined in a Noir program by defining two methods: + +- An unconstrained method - This tells the compiler that it is executing an [unconstrained functions](../noir/concepts//unconstrained.md). +- A decorated oracle method - This tells the compiler that this method is an RPC call. + +An example of an oracle that returns a `Field` would be: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(number: Field) -> Field { } + +unconstrained fn get_sqrt(number: Field) -> Field { + sqrt(number) +} +``` + +In this example, we're wrapping our oracle function in a unconstrained method, and decorating it with `oracle(getSqrt)`. 
We can then call the unconstrained function as we would call any other function: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); +} +``` + +In the next section, we will make this `getSqrt` (defined on the `sqrt` decorator) be a method of the RPC server Noir will use. + +:::danger + +As explained in the [Oracle Explainer](../explainers/explainer-oracle.md), this `main` function is unsafe unless you constrain its return value. For example: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); + assert(sqrt.pow_32(2) as u64 == input as u64); // <---- constrain the return of an oracle! +} +``` + +::: + +:::info + +Currently, oracles only work with single params or array params. For example: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt([Field; 2]) -> [Field; 2] { } +``` + +::: + +## Step 2 - Write an RPC server + +Brillig will call *one* RPC server. Most likely you will have to write your own, and you can do it in whatever language you prefer. In this guide, we will do it in Javascript. + +Let's use the above example of an oracle that consumes an array with two `Field` and returns their square roots: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(input: [Field; 2]) -> [Field; 2] { } + +unconstrained fn get_sqrt(input: [Field; 2]) -> [Field; 2] { + sqrt(input) +} + +fn main(input: [Field; 2]) { + let sqrt = get_sqrt(input); + assert(sqrt[0].pow_32(2) as u64 == input[0] as u64); + assert(sqrt[1].pow_32(2) as u64 == input[1] as u64); +} +``` + +:::info + +Why square root? + +In general, computing square roots is computationally more expensive than multiplications, which takes a toll when speaking about ZK applications. In this case, instead of calculating the square root in Noir, we are using our oracle to offload that computation to be made in plain. In our circuit we can simply multiply the two values. + +::: + +Now, we should write the correspondent RPC server, starting with the [default JSON-RPC 2.0 boilerplate](https://www.npmjs.com/package/json-rpc-2.0#example): + +```js +import { JSONRPCServer } from "json-rpc-2.0"; +import express from "express"; +import bodyParser from "body-parser"; + +const app = express(); +app.use(bodyParser.json()); + +const server = new JSONRPCServer(); +app.post("/", (req, res) => { + const jsonRPCRequest = req.body; + server.receive(jsonRPCRequest).then((jsonRPCResponse) => { + if (jsonRPCResponse) { + res.json(jsonRPCResponse); + } else { + res.sendStatus(204); + } + }); +}); + +app.listen(5555); +``` + +Now, we will add our `getSqrt` method, as expected by the `#[oracle(getSqrt)]` decorator in our Noir code. It maps through the params array and returns their square roots: + +```js +server.addMethod("getSqrt", async (params) => { + const values = params[0].Array.map((field) => { + return `${Math.sqrt(parseInt(field, 16))}`; + }); + return { values: [{ Array: values }] }; +}); +``` + +:::tip + +Brillig expects an object with an array of values. Each value is an object declaring to be `Single` or `Array` and returning a field element *as a string*. 
For example: + +```json +{ "values": [{ "Array": ["1", "2"] }]} +{ "values": [{ "Single": "1" }]} +{ "values": [{ "Single": "1" }, { "Array": ["1", "2"] }]} +``` + +If you're using Typescript, the following types may be helpful in understanding the expected return value and making sure they're easy to follow: + +```js +interface SingleForeignCallParam { + Single: string, +} + +interface ArrayForeignCallParam { + Array: string[], +} + +type ForeignCallParam = SingleForeignCallParam | ArrayForeignCallParam; + +interface ForeignCallResult { + values: ForeignCallParam[], +} +``` + +::: + +## Step 3 - Usage with Nargo + +Using the [`nargo` CLI tool](../getting_started/installation/index.md), you can use oracles in the `nargo test`, `nargo execute` and `nargo prove` commands by passing a value to `--oracle-resolver`. For example: + +```bash +nargo test --oracle-resolver http://localhost:5555 +``` + +This tells `nargo` to use your RPC Server URL whenever it finds an oracle decorator. + +## Step 4 - Usage with NoirJS + +In a JS environment, an RPC server is not strictly necessary, as you may want to resolve your oracles without needing any JSON call at all. NoirJS simply expects that you pass a callback function when you generate proofs, and that callback function can be anything. + +For example, if your Noir program expects the host machine to provide CPU pseudo-randomness, you could simply pass it as the `foreignCallHandler`. You don't strictly need to create an RPC server to serve pseudo-randomness, as you may as well get it directly in your app: + +```js +const foreignCallHandler = (name, inputs) => crypto.randomBytes(16) // etc + +await noir.generateProof(inputs, foreignCallHandler) +``` + +As one can see, in NoirJS, the [`foreignCallHandler`](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) function simply means "a callback function that returns a value of type [`ForeignCallOutput`](../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md). It doesn't have to be an RPC call like in the case for Nargo. + +:::tip + +Does this mean you don't have to write an RPC server like in [Step #2](#step-2---write-an-rpc-server)? + +You don't technically have to, but then how would you run `nargo test` or `nargo prove`? To use both `Nargo` and `NoirJS` in your development flow, you will have to write a JSON RPC server. + +::: + +In this case, let's make `foreignCallHandler` call the JSON RPC Server we created in [Step #2](#step-2---write-an-rpc-server), by making it a JSON RPC Client. 
+ +For example, using the same `getSqrt` program in [Step #1](#step-1---modify-your-noir-program) (comments in the code): + +```js +import { JSONRPCClient } from "json-rpc-2.0"; + +// declaring the JSONRPCClient +const client = new JSONRPCClient((jsonRPCRequest) => { +// hitting the same JSON RPC Server we coded above + return fetch("http://localhost:5555", { + method: "POST", + headers: { + "content-type": "application/json", + }, + body: JSON.stringify(jsonRPCRequest), + }).then((response) => { + if (response.status === 200) { + return response + .json() + .then((jsonRPCResponse) => client.receive(jsonRPCResponse)); + } else if (jsonRPCRequest.id !== undefined) { + return Promise.reject(new Error(response.statusText)); + } + }); +}); + +// declaring a function that takes the name of the foreign call (getSqrt) and the inputs +const foreignCallHandler = async (name, input) => { + // notice that the "inputs" parameter contains *all* the inputs + // in this case we to make the RPC request with the first parameter "numbers", which would be input[0] + const oracleReturn = await client.request(name, [ + { Array: input[0].map((i) => i.toString("hex")) }, + ]); + return [oracleReturn.values[0].Array]; +}; + +// the rest of your NoirJS code +const input = { input: [4, 16] }; +const { witness } = await noir.execute(numbers, foreignCallHandler); +``` + +:::tip + +If you're in a NoirJS environment running your RPC server together with a frontend app, you'll probably hit a familiar problem in full-stack development: requests being blocked by [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) policy. For development only, you can simply install and use the [`cors` npm package](https://www.npmjs.com/package/cors) to get around the problem: + +```bash +yarn add cors +``` + +and use it as a middleware: + +```js +import cors from "cors"; + +const app = express(); +app.use(cors()) +``` + +::: + +## Conclusion + +Hopefully by the end of this guide, you should be able to: + +- Write your own logic around Oracles and how to write a JSON RPC server to make them work with your Nargo commands. +- Provide custom foreign call handlers for NoirJS. diff --git a/docs/versioned_docs/version-v0.26.0/how_to/how-to-recursion.md b/docs/versioned_docs/version-v0.26.0/how_to/how-to-recursion.md new file mode 100644 index 00000000000..4c45bb87ae2 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/how-to-recursion.md @@ -0,0 +1,179 @@ +--- +title: How to use recursion on NoirJS +description: Learn how to implement recursion with NoirJS, a powerful tool for creating smart contracts on the EVM blockchain. This guide assumes familiarity with NoirJS, solidity verifiers, and the Barretenberg proving backend. Discover how to generate both final and intermediate proofs using `noir_js` and `backend_barretenberg`. +keywords: + [ + "NoirJS", + "EVM blockchain", + "smart contracts", + "recursion", + "solidity verifiers", + "Barretenberg backend", + "noir_js", + "backend_barretenberg", + "intermediate proofs", + "final proofs", + "nargo compile", + "json import", + "recursive circuit", + "recursive app" + ] +sidebar_position: 1 +--- + +This guide shows you how to use recursive proofs in your NoirJS app. For the sake of clarity, it is assumed that: + +- You already have a NoirJS app. If you don't, please visit the [NoirJS tutorial](../tutorials/noirjs_app.md) and the [reference](../reference/NoirJS/noir_js/index.md). 
+- You are familiar with what are recursive proofs and you have read the [recursion explainer](../explainers/explainer-recursion.md) +- You already built a recursive circuit following [the reference](../noir/standard_library/recursion.md), and understand how it works. + +It is also assumed that you're not using `noir_wasm` for compilation, and instead you've used [`nargo compile`](../reference/nargo_commands.md) to generate the `json` you're now importing into your project. However, the guide should work just the same if you're using `noir_wasm`. + +:::info + +As you've read in the [explainer](../explainers/explainer-recursion.md), a recursive proof is an intermediate proof. This means that it doesn't necessarily generate the final step that makes it verifiable in a smart contract. However, it is easy to verify within another circuit. + +While "standard" usage of NoirJS packages abstracts final proofs, it currently lacks the necessary interface to abstract away intermediate proofs. This means that these proofs need to be created by using the backend directly. + +In short: + +- `noir_js` generates *only* final proofs +- `backend_barretenberg` generates both types of proofs + +::: + +In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume the following: + +- `main`: a circuit of type `assert(x != y)`, where `main` is marked with a `#[recursive]` attribute. This attribute states that the backend should generate proofs that are friendly for verification within another circuit. +- `recursive`: a circuit that verifies `main` + +For a full example on how recursive proofs work, please refer to the [noir-examples](https://github.com/noir-lang/noir-examples) repository. We will *not* be using it as a reference for this guide. + +## Step 1: Setup + +In a common NoirJS app, you need to instantiate a backend with something like `const backend = new Backend(circuit)`. Then you feed it to the `noir_js` interface. + +For recursion, this doesn't happen, and the only need for `noir_js` is only to `execute` a circuit and get its witness and return value. Everything else is not interfaced, so it needs to happen on the `backend` object. + +It is also recommended that you instantiate the backend with as many threads as possible, to allow for maximum concurrency: + +```js +const backend = new Backend(circuit, { threads: 8 }) +``` + +:::tip +You can use the [`os.cpus()`](https://nodejs.org/api/os.html#oscpus) object in `nodejs` or [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) on the browser to make the most out of those glorious cpu cores +::: + +## Step 2: Generating the witness and the proof for `main` + +After instantiating the backend, you should also instantiate `noir_js`. We will use it to execute the circuit and get the witness. + +```js +const noir = new Noir(circuit, backend) +const { witness } = noir.execute(input) +``` + +With this witness, you are now able to generate the intermediate proof for the main circuit: + +```js +const { proof, publicInputs } = await backend.generateProof(witness) +``` + +:::warning + +Always keep in mind what is actually happening on your development process, otherwise you'll quickly become confused about what circuit we are actually running and why! + +In this case, you can imagine that Alice (running the `main` circuit) is proving something to Bob (running the `recursive` circuit), and Bob is verifying her proof within his proof. 
+
With this in mind, it becomes clear that our intermediate proof is the one *meant to be verified within another circuit*, so it must be Alice's. Actually, the only final proof in this theoretical scenario would be the last one, sent on-chain.

:::

## Step 3 - Verification and proof artifacts

Optionally, you are able to verify the intermediate proof:

```js
const verified = await backend.verifyProof({ proof, publicInputs })
```

This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate recursive proof artifacts that will be passed to the circuit that is verifying the proof we just generated. Instead of passing the proof and verification key as a byte array, we pass them as fields, which makes it cheaper to verify in a circuit:

```js
const { proofAsFields, vkAsFields, vkHash } = await backend.generateRecursiveProofArtifacts({ publicInputs, proof }, publicInputsCount)
```

This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away.

:::info

The `proofAsFields` has a constant size `[Field; 93]` and verification keys in Barretenberg are always `[Field; 114]`.

:::

:::warning

One common mistake is to forget *who* makes this call.

In a situation where Alice is generating the `main` proof, if she generates the proof artifacts and sends them to Bob, who gladly takes them at face value, this would mean Alice could prove anything!

Instead, Bob needs to make sure *he* extracts the proof artifacts, using his own instance of the `main` circuit backend. This way, Alice has to provide a valid proof for the correct `main` circuit.

:::

## Step 4 - Recursive proof generation

With the artifacts, generating a recursive proof is no different from a normal proof. You simply use the `backend` (with the recursive circuit) to generate it:

```js
const recursiveInputs = {
  verification_key: vkAsFields, // array of length 114
  proof: proofAsFields, // array of length 93 + size of public inputs
  publicInputs: [mainInput.y], // using the example above, where `y` is the only public input
  key_hash: vkHash,
}

const { witness, returnValue } = await noir.execute(recursiveInputs) // we're executing the recursive circuit now!
const { proof, publicInputs } = await backend.generateProof(witness)
const verified = await backend.verifyProof({ proof, publicInputs })
```

You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested in using them this way!

:::tip

Managing circuits and "who does what" can be confusing. To make sure your naming is consistent, you can keep them in an object.
For example: + +```js +const circuits = { + main: mainJSON, + recursive: recursiveJSON +} +const backends = { + main: new BarretenbergBackend(circuits.main), + recursive: new BarretenbergBackend(circuits.recursive) +} +const noir_programs = { + main: new Noir(circuits.main, backends.main), + recursive: new Noir(circuits.recursive, backends.recursive) +} +``` + +This allows you to neatly call exactly the method you want without conflicting names: + +```js +// Alice runs this 👇 +const { witness: mainWitness } = await noir_programs.main.execute(input) +const proof = await backends.main.generateProof(mainWitness) + +// Bob runs this 👇 +const verified = await backends.main.verifyProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateRecursiveProofArtifacts( + proof, + numPublicInputs, +); +const recursiveProof = await noir_programs.recursive.generateProof(recursiveInputs) +``` + +::: diff --git a/docs/versioned_docs/version-v0.26.0/how_to/how-to-solidity-verifier.md b/docs/versioned_docs/version-v0.26.0/how_to/how-to-solidity-verifier.md new file mode 100644 index 00000000000..e3c7c1065da --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/how-to-solidity-verifier.md @@ -0,0 +1,231 @@ +--- +title: Generate a Solidity Verifier +description: + Learn how to run the verifier as a smart contract on the blockchain. Compile a Solidity verifier + contract for your Noir program and deploy it on any EVM blockchain acting as a verifier smart + contract. Read more to find out +keywords: + [ + solidity verifier, + smart contract, + blockchain, + compiler, + plonk_vk.sol, + EVM blockchain, + verifying Noir programs, + proving backend, + Barretenberg, + ] +sidebar_position: 0 +pagination_next: tutorials/noirjs_app +--- + +Noir has the ability to generate a verifier contract in Solidity, which can be deployed in many EVM-compatible blockchains such as Ethereum. + +This allows for a powerful feature set, as one can make use of the conciseness and the privacy provided by Noir in an immutable ledger. Applications can range from simple P2P guessing games, to complex private DeFi interactions. + +This guide shows you how to generate a Solidity Verifier and deploy it on the [Remix IDE](https://remix.ethereum.org/). It is assumed that: + +- You are comfortable with the Solidity programming language and understand how contracts are deployed on the Ethereum network +- You have Noir installed and you have a Noir program. If you don't, [get started](../getting_started/installation/index.md) with Nargo and the example Hello Noir circuit +- You are comfortable navigating RemixIDE. If you aren't or you need a refresher, you can find some video tutorials [here](https://www.youtube.com/channel/UCjTUPyFEr2xDGN6Cg8nKDaA) that could help you. + +## Rundown + +Generating a Solidity Verifier contract is actually a one-command process. However, compiling it and deploying it can have some caveats. Here's the rundown of this guide: + +1. How to generate a solidity smart contract +2. How to compile the smart contract in the RemixIDE +3. How to deploy it to a testnet + +## Step 1 - Generate a contract + +This is by far the most straight-forward step. Just run: + +```sh +nargo codegen-verifier +``` + +A new `contract` folder would then be generated in your project directory, containing the Solidity +file `plonk_vk.sol`. It can be deployed to any EVM blockchain acting as a verifier smart contract. 
+
:::info

It is possible to generate verifier contracts of Noir programs for other smart contract platforms as long as the proving backend supplies an implementation.

Barretenberg, the default proving backend for Nargo, supports generation of verifier contracts; for the time being, these are only in Solidity.
:::

## Step 2 - Compiling

We will mostly skip the details of RemixIDE, as the UI can change from version to version. For now, we can just open Remix and create a blank workspace.

![Create Workspace](@site/static/img/how-tos/solidity_verifier_1.png)

We will create a new file to contain the contract Nargo generated, and copy-paste its content.

:::warning

You'll likely see a warning advising you to not trust pasted code. While it is an important warning, it is irrelevant in the context of this guide and can be ignored. We will not be deploying anywhere near a mainnet.

:::

To compile the verifier, we can navigate to the compilation tab:

![Compilation Tab](@site/static/img/how-tos/solidity_verifier_2.png)

Remix should automatically match a suitable compiler version. However, hitting the "Compile" button will most likely generate a "Stack too deep" error:

![Stack too deep](@site/static/img/how-tos/solidity_verifier_3.png)

This is due to the verify function needing to put many variables on the stack, but enabling the optimizer resolves the issue. To do this, let's open the "Advanced Configurations" tab and enable optimization. The default 200 runs will suffice.

:::info

This time we will see a warning about an unused function parameter. This is expected, as the `verify` function doesn't use the `_proof` parameter inside a Solidity block; it is loaded from calldata and used in assembly.

:::

![Compilation success](@site/static/img/how-tos/solidity_verifier_4.png)

## Step 3 - Deploying

At this point we should have a compiled contract ready to deploy. If we navigate to the deploy section in Remix, we will see many different environments we can deploy to. The steps to deploy on each environment would be out-of-scope for this guide, so we will just use the default Remix VM.

Looking closely, we will notice that our "Solidity Verifier" is actually three contracts working together:

- An `UltraVerificationKey` library which simply stores the verification key for our circuit.
- An abstract contract `BaseUltraVerifier` containing most of the verifying logic.
- A main `UltraVerifier` contract that inherits from the Base and uses the Key contract.

Remix will take care of the dependencies for us so we can simply deploy the UltraVerifier contract by selecting it and hitting "deploy":

![Deploying UltraVerifier](@site/static/img/how-tos/solidity_verifier_5.png)

A contract will show up in the "Deployed Contracts" section, where we can retrieve the Verification Key Hash. This is particularly useful for double-checking that the deployed contract is the correct one.

:::note

Why "UltraVerifier"?

To be precise, the Noir compiler (`nargo`) doesn't generate the verifier contract directly. It compiles the Noir code into an intermediate language (ACIR), which is then executed by the backend. So it is the backend that returns the verifier smart contract, not Noir.

In this case, the Barretenberg Backend uses the UltraPlonk proving system, hence the "UltraVerifier" name.
+ +::: + +## Step 4 - Verifying + +To verify a proof using the Solidity verifier contract, we call the `verify` function in this extended contract: + +```solidity +function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool) +``` + +When using the default example in the [Hello Noir](../getting_started/hello_noir/index.md) guide, the easiest way to confirm that the verifier contract is doing its job is by calling the `verify` function via remix with the required parameters. For `_proof`, run `nargo prove` and use the string in `proof/.proof` (adding the hex `0x` prefix). We can also copy the public input from `Verifier.toml`, as it will be properly formatted as 32-byte strings: + +``` +0x...... , [0x0000.....02] +``` + +A programmatic example of how the `verify` function is called can be seen in the example zk voting application [here](https://github.com/noir-lang/noir-examples/blob/33e598c257e2402ea3a6b68dd4c5ad492bce1b0a/foundry-voting/src/zkVote.sol#L35): + +```solidity +function castVote(bytes calldata proof, uint proposalId, uint vote, bytes32 nullifierHash) public returns (bool) { + // ... + bytes32[] memory publicInputs = new bytes32[](4); + publicInputs[0] = merkleRoot; + publicInputs[1] = bytes32(proposalId); + publicInputs[2] = bytes32(vote); + publicInputs[3] = nullifierHash; + require(verifier.verify(proof, publicInputs), "Invalid proof"); +``` + +:::info[Return Values] + +A circuit doesn't have the concept of a return value. Return values are just syntactic sugar in +Noir. + +Under the hood, the return value is passed as an input to the circuit and is checked at the end of +the circuit program. + +For example, if you have Noir program like this: + +```rust +fn main( + // Public inputs + pubkey_x: pub Field, + pubkey_y: pub Field, + // Private inputs + priv_key: Field, +) -> pub Field +``` + +the `verify` function will expect the public inputs array (second function parameter) to be of length 3, the two inputs and the return value. Like before, these values are populated in Verifier.toml after running `nargo prove`. + +Passing only two inputs will result in an error such as `PUBLIC_INPUT_COUNT_INVALID(3, 2)`. + +In this case, the inputs parameter to `verify` would be an array ordered as `[pubkey_x, pubkey_y, return]`. + +::: + +:::tip[Structs] + +You can pass structs to the verifier contract. They will be flattened so that the array of inputs is 1-dimensional array. + +For example, consider the following program: + +```rust +struct Type1 { + val1: Field, + val2: Field, +} + +struct Nested { + t1: Type1, + is_true: bool, +} + +fn main(x: pub Field, nested: pub Nested, y: pub Field) { + //... +} +``` + +The order of these inputs would be flattened to: `[x, nested.t1.val1, nested.t1.val2, nested.is_true, y]` + +::: + +The other function you can call is our entrypoint `verify` function, as defined above. + +:::tip + +It's worth noticing that the `verify` function is actually a `view` function. A `view` function does not alter the blockchain state, so it doesn't need to be distributed (i.e. it will run only on the executing node), and therefore doesn't cost any gas. + +This can be particularly useful in some situations. If Alice generated a proof and wants Bob to verify its correctness, Bob doesn't need to run Nargo, NoirJS, or any Noir specific infrastructure. He can simply make a call to the blockchain with the proof and verify it is correct without paying any gas. 
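For illustration, here is a minimal sketch of that gas-free check made from a script. It assumes ethers v6; the RPC URL, the verifier address, and the `proofBytes` / `publicInputs` variables are placeholders standing in for values produced in the previous steps:

```js
import { Contract, JsonRpcProvider } from "ethers";

const provider = new JsonRpcProvider("https://rpc.example.org"); // any node on the target chain
const abi = ["function verify(bytes _proof, bytes32[] _publicInputs) view returns (bool)"];
const verifier = new Contract("0xYourUltraVerifierAddress", abi, provider);

// `verify` is a view function, so this is a plain read-only call: no transaction, no gas spent
const ok = await verifier.verify(proofBytes, publicInputs);
console.log(ok); // true for a valid proof
```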
+ +It would be incorrect to say that a Noir proof verification costs any gas at all. However, most of the time the result of `verify` is used to modify state (for example, to update a balance, a game state, etc). In that case the whole network needs to execute it, which does incur gas costs (calldata and execution, but not storage). + +::: + +## A Note on EVM chains + +ZK-SNARK verification depends on some precompiled cryptographic primitives such as Elliptic Curve Pairings (if you like complex math, you can read about EC Pairings [here](https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627)). Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. + +For example, chains like `zkSync ERA` and `Polygon zkEVM` do not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. Here's a quick list of EVM chains that have been tested and are known to work: + +- Optimism +- Arbitrum +- Polygon PoS +- Scroll +- Celo + +If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. + +## What's next + +Now that you know how to call a Noir Solidity Verifier on a smart contract using Remix, you should be comfortable with using it with some programmatic frameworks, such as [hardhat](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) and [foundry](https://github.com/noir-lang/noir-starter/tree/main/with-foundry). + +You can find other tools, examples, boilerplates and libraries in the [awesome-noir](https://github.com/noir-lang/awesome-noir) repository. + +You should also be ready to write and deploy your first NoirJS app and start generating proofs on websites, phones, and NodeJS environments! Head on to the [NoirJS tutorial](../tutorials/noirjs_app.md) to learn how to do that. diff --git a/docs/versioned_docs/version-v0.26.0/how_to/merkle-proof.mdx b/docs/versioned_docs/version-v0.26.0/how_to/merkle-proof.mdx new file mode 100644 index 00000000000..003c7019a93 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/merkle-proof.mdx @@ -0,0 +1,48 @@ +--- +title: Prove Merkle Tree Membership +description: + Learn how to use merkle membership proof in Noir to prove that a given leaf is a member of a + merkle tree with a specified root, at a given index. +keywords: + [merkle proof, merkle membership proof, Noir, rust, hash function, Pedersen, sha256, merkle tree] +--- + +Let's walk through an example of a merkle membership proof in Noir that proves that a given leaf is +in a merkle tree. + +```rust +use dep::std; + +fn main(message : [Field; 62], index : Field, hashpath : [Field; 40], root : Field) { + let leaf = std::hash::hash_to_field(message.as_slice()); + let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); + assert(merkle_root == root); +} + +``` + +The message is hashed using `hash_to_field`. The specific hash function that is being used is chosen +by the backend. The only requirement is that this hash function can heuristically be used as a +random oracle. If only collision resistance is needed, then one can call `std::hash::pedersen_hash` +instead. 
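As a rough sketch (not part of the original example), that swap only changes the hashing line, since `std::hash::pedersen_hash` accepts the `[Field; 62]` message array directly; the walkthrough below continues with the original `hash_to_field` call:

```rust
// collision-resistance-only variant: hash the message with Pedersen instead
let leaf = std::hash::pedersen_hash(message);
```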
+ +```rust +let leaf = std::hash::hash_to_field(message.as_slice()); +``` + +The leaf is then passed to a compute_merkle_root function with the root, index and hashpath. The returned root can then be asserted to be the same as the provided root. + +```rust +let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); +assert (merkle_root == root); +``` + +> **Note:** It is possible to re-implement the merkle tree implementation without standard library. +> However, for most usecases, it is enough. In general, the standard library will always opt to be +> as conservative as possible, while striking a balance with efficiency. + +An example, the merkle membership proof, only requires a hash function that has collision +resistance, hence a hash function like Pedersen is allowed, which in most cases is more efficient +than the even more conservative sha256. + +[View an example on the starter repo](https://github.com/noir-lang/noir-examples/blob/3ea09545cabfa464124ec2f3ea8e60c608abe6df/stealthdrop/circuits/src/main.nr#L20) diff --git a/docs/versioned_docs/version-v0.26.0/how_to/using-devcontainers.mdx b/docs/versioned_docs/version-v0.26.0/how_to/using-devcontainers.mdx new file mode 100644 index 00000000000..727ec6ca667 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/how_to/using-devcontainers.mdx @@ -0,0 +1,110 @@ +--- +title: Developer Containers and Codespaces +description: "Learn how to set up a devcontainer in your GitHub repository for a seamless coding experience with Codespaces. Follow our easy 8-step guide to create your own Noir environment without installing Nargo locally." +keywords: ["Devcontainer", "Codespaces", "GitHub", "Noir Environment", "Docker Image", "Development Environment", "Remote Coding", "GitHub Codespaces", "Noir Programming", "Nargo", "VSCode Extensions", "Noirup"] +sidebar_position: 1 +--- + +Adding a developer container configuration file to your Noir project is one of the easiest way to unlock coding in browser. + +## What's a devcontainer after all? + +A [Developer Container](https://containers.dev/) (devcontainer for short) is a Docker image that comes preloaded with tools, extensions, and other tools you need to quickly get started or continue a project, without having to install Nargo locally. Think of it as a development environment in a box. + +There are many advantages to this: + +- It's platform and architecture agnostic +- You don't need to have an IDE installed, or Nargo, or use a terminal at all +- It's safer for using on a public machine or public network + +One of the best ways of using devcontainers is... not using your machine at all, for maximum control, performance, and ease of use. +Enter Codespaces. + +## Codespaces + +If a devcontainer is just a Docker image, then what stops you from provisioning a `p3dn.24xlarge` AWS EC2 instance with 92 vCPUs and 768 GiB RAM and using it to prove your 10-gate SNARK proof? + +Nothing! Except perhaps the 30-40$ per hour it will cost you. + +The problem is that provisioning takes time, and I bet you don't want to see the AWS console every time you want to code something real quick. + +Fortunately, there's an easy and free way to get a decent remote machine ready and loaded in less than 2 minutes: Codespaces. 
[Codespaces is a Github feature](https://github.com/features/codespaces) that allows you to code in a remote machine by using devcontainers, and it's pretty cool: + +- You can start coding Noir in less than a minute +- It uses the resources of a remote machine, so you can code on your grandma's phone if needed be +- It makes it easy to share work with your frens +- It's fully reusable, you can stop and restart whenever you need to + +:::info + +Don't take out your wallet just yet. Free GitHub accounts get about [15-60 hours of coding](https://github.com/features/codespaces) for free per month, depending on the size of your provisioned machine. + +::: + +## Tell me it's _actually_ easy + +It is! + +Github comes with a default codespace and you can use it to code your own devcontainer. That's exactly what we will be doing in this guide. + + + +8 simple steps: + +#### 1. Create a new repository on GitHub. + +#### 2. Click "Start coding with Codespaces". This will use the default image. + +#### 3. Create a folder called `.devcontainer` in the root of your repository. + +#### 4. Create a Dockerfile in that folder, and paste the following code: + +```docker +FROM --platform=linux/amd64 node:lts-bookworm-slim +SHELL ["/bin/bash", "-c"] +RUN apt update && apt install -y curl bash git tar gzip libc++-dev +RUN curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +ENV PATH="/root/.nargo/bin:$PATH" +RUN noirup +ENTRYPOINT ["nargo"] +``` +#### 5. Create a file called `devcontainer.json` in the same folder, and paste the following code: + +```json +{ + "name": "Noir on Codespaces", + "build": { + "context": ".", + "dockerfile": "Dockerfile" + }, + "customizations": { + "vscode": { + "extensions": ["noir-lang.vscode-noir"] + } + } +} +``` +#### 6. Commit and push your changes + +This will pull the new image and build it, so it could take a minute or so + +#### 8. Done! +Just wait for the build to finish, and there's your easy Noir environment. + + +Refer to [noir-starter](https://github.com/noir-lang/noir-starter/) as an example of how devcontainers can be used together with codespaces. + + + +## How do I use it? + +Using the codespace is obviously much easier than setting it up. +Just navigate to your repository and click "Code" -> "Open with Codespaces". It should take a few seconds to load, and you're ready to go. + +:::info + +If you really like the experience, you can add a badge to your readme, links to existing codespaces, and more. +Check out the [official docs](https://docs.github.com/en/codespaces/setting-up-your-project-for-codespaces/setting-up-your-repository/facilitating-quick-creation-and-resumption-of-codespaces) for more info. diff --git a/docs/versioned_docs/version-v0.26.0/index.mdx b/docs/versioned_docs/version-v0.26.0/index.mdx new file mode 100644 index 00000000000..75086ddcdde --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/index.mdx @@ -0,0 +1,67 @@ +--- +title: Noir Lang +hide_title: true +description: + Learn about the public alpha release of Noir, a domain specific language heavily influenced by Rust that compiles to + an intermediate language which can be compiled to an arithmetic circuit or a rank-1 constraint system. 
+keywords: + [Noir, + Domain Specific Language, + Rust, + Intermediate Language, + Arithmetic Circuit, + Rank-1 Constraint System, + Ethereum Developers, + Protocol Developers, + Blockchain Developers, + Proving System, + Smart Contract Language] +sidebar_position: 0 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Noir Logo + +Noir is a Domain-Specific Language for SNARK proving systems developed by [Aztec Labs](https://aztec.network/). It allows you to generate complex Zero-Knowledge Programs (ZKP) by using simple and flexible syntax, requiring no previous knowledge on the underlying mathematics or cryptography. + +ZK programs are programs that can generate short proofs of a certain statement without revealing some details about it. You can read more about ZKPs [here](https://dev.to/spalladino/a-beginners-intro-to-coding-zero-knowledge-proofs-c56). + +## What's new about Noir? + +Noir works differently from most ZK languages by taking a two-pronged path. First, it compiles the program to an adaptable intermediate language known as ACIR. From there, depending on a given project's needs, ACIR can be further compiled into an arithmetic circuit for integration with the proving backend. + +:::info + +Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/developers/contracts/main), it defaults to Aztec's Barretenberg proving backend. + +However, the ACIR output can be transformed to be compatible with other PLONK-based backends, or into a [rank-1 constraint system](https://www.rareskills.io/post/rank-1-constraint-system) suitable for backends such as Arkwork's Marlin. + +::: + +## Who is Noir for? + +Noir can be used both in complex cloud-based backends and in user's smartphones, requiring no knowledge on the underlying math or cryptography. From authorization systems that keep a password in the user's device, to complex on-chain verification of recursive proofs, Noir is designed to abstract away complexity without any significant overhead. Here are some examples of situations where Noir can be used: + + + + Noir Logo + + Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/developers/contracts/main) library. + + + Soliditry Verifier Example + Noir can auto-generate Solidity verifier contracts that verify Noir proofs. This allows for non-interactive verification of proofs containing private information in an immutable system. This feature powers a multitude of use-case scenarios, from P2P chess tournaments, to [Aztec Layer-2 Blockchain](https://docs.aztec.network/) + + + Aztec Labs developed NoirJS, an easy interface to generate and verify Noir proofs in a Javascript environment. This allows for Noir to be used in webpages, mobile apps, games, and any other environment supporting JS execution in a standalone manner. + + + + +## Libraries + +Noir is meant to be easy to extend by simply importing Noir libraries just like in Rust. +The [awesome-noir repo](https://github.com/noir-lang/awesome-noir#libraries) is a collection of libraries developed by the Noir community. +Writing a new library is easy and makes code be composable and easy to reuse. 
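As a quick illustration, pulling a library into a project is a one-line entry in `Nargo.toml` (the names and tag below are hypothetical placeholders, not a real package):

```toml
# Hypothetical dependency; point this at a real repository and tag
[dependencies]
my_noir_lib = { tag = "v0.1.0", git = "https://github.com/your-org/my_noir_lib" }
```

Once declared, the library's items become available in your code under the `dep::my_noir_lib` namespace.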
See the section on [dependencies](noir/modules_packages_crates/dependencies.md) for more information. diff --git a/docs/versioned_docs/version-v0.26.0/migration_notes.md b/docs/versioned_docs/version-v0.26.0/migration_notes.md new file mode 100644 index 00000000000..6bd740024e5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/migration_notes.md @@ -0,0 +1,105 @@ +--- +title: Migration notes +description: Read about migration notes from previous versions, which could solve problems while updating +keywords: [Noir, notes, migration, updating, upgrading] +--- + +Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. + +### `backend encountered an error: libc++.so.1` + +Depending on your OS, you may encounter the following error when running `nargo prove` for the first time: + +```text +The backend encountered an error: "/home/codespace/.nargo/backends/acvm-backend-barretenberg/backend_binary: error while loading shared libraries: libc++.so.1: cannot open shared object file: No such file or directory\n" +``` + +Install the `libc++-dev` library with: + +```bash +sudo apt install libc++-dev +``` + +## ≥0.19 + +### Enforcing `compiler_version` + +From this version on, the compiler will check for the `compiler_version` field in `Nargo.toml`, and will error if it doesn't match the current Nargo version in use. + +To update, please make sure this field in `Nargo.toml` matches the output of `nargo --version`. + +## ≥0.14 + +The index of the [for loops](noir/concepts/control_flow.md#loops) is now of type `u64` instead of `Field`. An example refactor would be: + +```rust +for i in 0..10 { + let i = i as Field; +} +``` + +## ≥v0.11.0 and Nargo backend + +From this version onwards, Nargo starts managing backends through the `nargo backend` command. Upgrading to the versions per usual steps might lead to: + +### `backend encountered an error` + +This is likely due to the existing locally installed version of proving backend (e.g. barretenberg) is incompatible with the version of Nargo in use. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. + +If you are using the default barretenberg backend, simply run: + +``` +nargo prove +``` + +with your Noir program. + +This will trigger the download and installation of the latest version of barretenberg compatible with your Nargo in use. + +### `backend encountered an error: illegal instruction` + +On certain Intel-based systems, an `illegal instruction` error may arise due to incompatibility of barretenberg with certain CPU instructions. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. 
+ +If you are using the default barretenberg backend, simply run: + +``` +nargo backend install acvm-backend-barretenberg https://github.com/noir-lang/barretenberg-js-binary/raw/master/run-bb.tar.gz +``` + +This downloads and installs a specific bb.js based version of barretenberg binary from GitHub. + +The gzipped file is running [this bash script](https://github.com/noir-lang/barretenberg-js-binary/blob/master/run-bb-js.sh), where we need to gzip it as the Nargo currently expect the backend to be zipped up. + +Then run: + +``` +DESIRED_BINARY_VERSION=0.8.1 nargo info +``` + +This overrides the bb native binary with a bb.js node application instead, which should be compatible with most if not all hardware. This does come with the drawback of being generally slower than native binary. + +0.8.1 indicates bb.js version 0.8.1, so if you change that it will update to a different version or the default version in the script if none was supplied. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/_category_.json b/docs/versioned_docs/version-v0.26.0/noir/concepts/_category_.json new file mode 100644 index 00000000000..7da08f8a8c5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Concepts", + "position": 0, + "collapsible": true, + "collapsed": true +} \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/assert.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/assert.md new file mode 100644 index 00000000000..bcff613a695 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/assert.md @@ -0,0 +1,45 @@ +--- +title: Assert Function +description: + Learn about the assert function in Noir, which can be used to explicitly constrain the predicate or + comparison expression that follows to be true, and what happens if the expression is false at + runtime. +keywords: [Noir programming language, assert statement, predicate expression, comparison expression] +sidebar_position: 4 +--- + +Noir includes a special `assert` function which will explicitly constrain the predicate/comparison +expression that follows to be true. If this expression is false at runtime, the program will fail to +be proven. Example: + +```rust +fn main(x : Field, y : Field) { + assert(x == y); +} +``` + +> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. + +You can optionally provide a message to be logged when the assertion fails: + +```rust +assert(x == y, "x and y are not equal"); +``` + +Aside string literals, the optional message can be a format string or any other type supported as input for Noir's [print](../standard_library/logging.md) functions. 
This feature lets you incorporate runtime variables into your failed assertion logs: + +```rust +assert(x == y, f"Expected x == y, but got {x} == {y}"); +``` + +Using a variable as an assertion message directly: + +```rust +struct myStruct { + myField: Field +} + +let s = myStruct { myField: y }; +assert(s.myField == x, s); +``` + diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/comments.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/comments.md new file mode 100644 index 00000000000..b51a85f5c94 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/comments.md @@ -0,0 +1,33 @@ +--- +title: Comments +description: + Learn how to write comments in Noir programming language. A comment is a line of code that is + ignored by the compiler, but it can be read by programmers. Single-line and multi-line comments + are supported in Noir. +keywords: [Noir programming language, comments, single-line comments, multi-line comments] +sidebar_position: 10 +--- + +A comment is a line in your codebase which the compiler ignores, however it can be read by +programmers. + +Here is a single line comment: + +```rust +// This is a comment and is ignored +``` + +`//` is used to tell the compiler to ignore the rest of the line. + +Noir also supports multi-line block comments. Start a block comment with `/*` and end the block with `*/`. + +Noir does not natively support doc comments. You may be able to use [Rust doc comments](https://doc.rust-lang.org/reference/comments.html) in your code to leverage some Rust documentation build tools with Noir code. + +```rust +/* + This is a block comment describing a complex function. +*/ +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/control_flow.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/control_flow.md new file mode 100644 index 00000000000..045d3c3a5f5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/control_flow.md @@ -0,0 +1,77 @@ +--- +title: Control Flow +description: + Learn how to use loops and if expressions in the Noir programming language. Discover the syntax + and examples for for loops and if-else statements. +keywords: [Noir programming language, loops, for loop, if-else statements, Rust syntax] +sidebar_position: 2 +--- + +## If Expressions + +Noir supports `if-else` statements. The syntax is most similar to Rust's where it is not required +for the statement's conditional to be surrounded by parentheses. + +```rust +let a = 0; +let mut x: u32 = 0; + +if a == 0 { + if a != 0 { + x = 6; + } else { + x = 2; + } +} else { + x = 5; + assert(x == 5); +} +assert(x == 2); +``` + +## Loops + +Noir has one kind of loop: the `for` loop. `for` loops allow you to repeat a block of code multiple +times. + +The following block of code between the braces is run 10 times. + +```rust +for i in 0..10 { + // do something +} +``` + +The index for loops is of type `u64`. + +### Break and Continue + +In unconstrained code, `break` and `continue` are also allowed in `for` loops. These are only allowed +in unconstrained code since normal constrained code requires that Noir knows exactly how many iterations +a loop may have. `break` and `continue` can be used like so: + +```rust +for i in 0 .. 10 { + println("Iteration start") + + if i == 2 { + continue; + } + + if i == 5 { + break; + } + + println(i); +} +println("Loop end") +``` + +When used, `break` will end the current loop early and jump to the statement after the for loop. 
In the example +above, the `break` will stop the loop and jump to the `println("Loop end")`. + +`continue` will stop the current iteration of the loop, and jump to the start of the next iteration. In the example +above, `continue` will jump to `println("Iteration start")` when used. Note that the loop continues as normal after this. +The iteration variable `i` is still increased by one as normal when `continue` is used. + +`break` and `continue` cannot currently be used to jump out of more than a single loop at a time. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_bus.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_bus.md new file mode 100644 index 00000000000..e54fc861257 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_bus.md @@ -0,0 +1,21 @@ +--- +title: Data Bus +sidebar_position: 13 +--- +**Disclaimer** this feature is experimental, do not use it! + +The data bus is an optimization that the backend can use to make recursion more efficient. +In order to use it, you must define some inputs of the program entry points (usually the `main()` +function) with the `call_data` modifier, and the return values with the `return_data` modifier. +These modifiers are incompatible with `pub` and `mut` modifiers. + +## Example + +```rust +fn main(mut x: u32, y: call_data u32, z: call_data [u32;4] ) -> return_data u32 { + let a = z[x]; + a+y +} +``` + +As a result, both call_data and return_data will be treated as private inputs and encapsulated into a read-only array each, for the backend to process. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/_category_.json b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/arrays.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/arrays.md new file mode 100644 index 00000000000..efce3e95d32 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/arrays.md @@ -0,0 +1,251 @@ +--- +title: Arrays +description: + Dive into the Array data type in Noir. Grasp its methods, practical examples, and best practices for efficiently using Arrays in your Noir code. +keywords: + [ + noir, + array type, + methods, + examples, + indexing, + ] +sidebar_position: 4 +--- + +An array is one way of grouping together values into one compound type. Array types can be inferred +or explicitly specified via the syntax `[; ]`: + +```rust +fn main(x : Field, y : Field) { + let my_arr = [x, y]; + let your_arr: [Field; 2] = [x, y]; +} +``` + +Here, both `my_arr` and `your_arr` are instantiated as an array containing two `Field` elements. + +Array elements can be accessed using indexing: + +```rust +fn main() { + let a = [1, 2, 3, 4, 5]; + + let first = a[0]; + let second = a[1]; +} +``` + +All elements in an array must be of the same type (i.e. homogeneous). That is, an array cannot group +a `Field` value and a `u8` value together for example. + +You can write mutable arrays, like: + +```rust +fn main() { + let mut arr = [1, 2, 3, 4, 5]; + assert(arr[0] == 1); + + arr[0] = 42; + assert(arr[0] == 42); +} +``` + +You can instantiate a new array of a fixed size with the same value repeated for each element. 
The following example instantiates an array of length 32 where each element is of type Field and has the value 0. + +```rust +let array: [Field; 32] = [0; 32]; +``` + +Like in Rust, arrays in Noir are a fixed size. However, if you wish to convert an array to a [slice](./slices), you can just call `as_slice` on your array: + +```rust +let array: [Field; 32] = [0; 32]; +let sl = array.as_slice() +``` + +You can define multidimensional arrays: + +```rust +let array : [[Field; 2]; 2]; +let element = array[0][0]; +``` +However, multidimensional slices are not supported. For example, the following code will error at compile time: +```rust +let slice : [[Field]] = &[]; +``` + +## Types + +You can create arrays of primitive types or structs. There is not yet support for nested arrays +(arrays of arrays) or arrays of structs that contain arrays. + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for arrays. +Each of these functions are located within the generic impl `impl [T; N] {`. +So anywhere `self` appears, it refers to the variable `self: [T; N]`. + +### len + +Returns the length of an array + +```rust +fn len(self) -> Field +``` + +example + +```rust +fn main() { + let array = [42, 42]; + assert(array.len() == 2); +} +``` + +### sort + +Returns a new sorted array. The original array remains untouched. Notice that this function will +only work for arrays of fields or integers, not for any arbitrary type. This is because the sorting +logic it uses internally is optimized specifically for these values. If you need a sort function to +sort any type, you should use the function `sort_via` described below. + +```rust +fn sort(self) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32]; + let sorted = arr.sort(); + assert(sorted == [32, 42]); +} +``` + +### sort_via + +Sorts the array with a custom comparison function + +```rust +fn sort_via(self, ordering: fn(T, T) -> bool) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32] + let sorted_ascending = arr.sort_via(|a, b| a < b); + assert(sorted_ascending == [32, 42]); // verifies + + let sorted_descending = arr.sort_via(|a, b| a > b); + assert(sorted_descending == [32, 42]); // does not verify +} +``` + +### map + +Applies a function to each element of the array, returning a new array containing the mapped elements. + +```rust +fn map(self, f: fn(T) -> U) -> [U; N] +``` + +example + +```rust +let a = [1, 2, 3]; +let b = a.map(|a| a * 2); // b is now [2, 4, 6] +``` + +### fold + +Applies a function to each element of the array, returning the final accumulated value. The first +parameter is the initial value. + +```rust +fn fold(self, mut accumulator: U, f: fn(U, T) -> U) -> U +``` + +This is a left fold, so the given function will be applied to the accumulator and first element of +the array, then the second, and so on. For a given call the expected result would be equivalent to: + +```rust +let a1 = [1]; +let a2 = [1, 2]; +let a3 = [1, 2, 3]; + +let f = |a, b| a - b; +a1.fold(10, f) //=> f(10, 1) +a2.fold(10, f) //=> f(f(10, 1), 2) +a3.fold(10, f) //=> f(f(f(10, 1), 2), 3) +``` + +example: + +```rust + +fn main() { + let arr = [2, 2, 2, 2, 2]; + let folded = arr.fold(0, |a, b| a + b); + assert(folded == 10); +} + +``` + +### reduce + +Same as fold, but uses the first element as starting element. 
+ +```rust +fn reduce(self, f: fn(T, T) -> T) -> T +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let reduced = arr.reduce(|a, b| a + b); + assert(reduced == 10); +} +``` + +### all + +Returns true if all the elements satisfy the given predicate + +```rust +fn all(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let all = arr.all(|a| a == 2); + assert(all); +} +``` + +### any + +Returns true if any of the elements satisfy the given predicate + +```rust +fn any(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 5]; + let any = arr.any(|a| a == 5); + assert(any); +} + +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/booleans.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/booleans.md new file mode 100644 index 00000000000..69826fcd724 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/booleans.md @@ -0,0 +1,31 @@ +--- +title: Booleans +description: + Delve into the Boolean data type in Noir. Understand its methods, practical examples, and best practices for using Booleans in your Noir programs. +keywords: + [ + noir, + boolean type, + methods, + examples, + logical operations, + ] +sidebar_position: 2 +--- + + +The `bool` type in Noir has two possible values: `true` and `false`: + +```rust +fn main() { + let t = true; + let f: bool = false; +} +``` + +> **Note:** When returning a boolean value, it will show up as a value of 1 for `true` and 0 for +> `false` in _Verifier.toml_. + +The boolean type is most commonly used in conditionals like `if` expressions and `assert` +statements. More about conditionals is covered in the [Control Flow](../control_flow) and +[Assert Function](../assert) sections. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/fields.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/fields.md new file mode 100644 index 00000000000..a10a4810788 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/fields.md @@ -0,0 +1,192 @@ +--- +title: Fields +description: + Dive deep into the Field data type in Noir. Understand its methods, practical examples, and best practices to effectively use Fields in your Noir programs. +keywords: + [ + noir, + field type, + methods, + examples, + best practices, + ] +sidebar_position: 0 +--- + +The field type corresponds to the native field type of the proving backend. + +The size of a Noir field depends on the elliptic curve's finite field for the proving backend +adopted. For example, a field would be a 254-bit integer when paired with the default backend that +spans the Grumpkin curve. + +Fields support integer arithmetic and are often used as the default numeric type in Noir: + +```rust +fn main(x : Field, y : Field) { + let z = x + y; +} +``` + +`x`, `y` and `z` are all private fields in this example. Using the `let` keyword we defined a new +private value `z` constrained to be equal to `x + y`. + +If proving efficiency is of priority, fields should be used as a default for solving problems. +Smaller integer types (e.g. `u64`) incur extra range constraints. + +## Methods + +After declaring a Field, you can use these common methods on it: + +### to_le_bits + +Transforms the field into an array of bits, Little Endian. 
+ +```rust +fn to_le_bits(_x : Field, _bit_size: u32) -> [u1] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_le_bits(32); +} +``` + +### to_be_bits + +Transforms the field into an array of bits, Big Endian. + +```rust +fn to_be_bits(_x : Field, _bit_size: u32) -> [u1] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_be_bits(32); +} +``` + +### to_le_bytes + +Transforms into an array of bytes, Little Endian + +```rust +fn to_le_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_le_bytes(4); +} +``` + +### to_be_bytes + +Transforms into an array of bytes, Big Endian + +```rust +fn to_be_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_be_bytes(4); +} +``` + +### to_le_radix + +Decomposes into a vector over the specified base, Little Endian + +```rust +fn to_le_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_le_radix(256, 4); +} +``` + +### to_be_radix + +Decomposes into a vector over the specified base, Big Endian + +```rust +fn to_be_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_be_radix(256, 4); +} +``` + +### pow_32 + +Returns the value to the power of the specified exponent + +```rust +fn pow_32(self, exponent: Field) -> Field +``` + +example: + +```rust +fn main() { + let field = 2 + let pow = field.pow_32(4); + assert(pow == 16); +} +``` + +### assert_max_bit_size + +Adds a constraint to specify that the field can be represented with `bit_size` number of bits + +```rust +fn assert_max_bit_size(self, bit_size: u32) +``` + +example: + +```rust +fn main() { + let field = 2 + field.assert_max_bit_size(32); +} +``` + +### sgn0 + +Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} is even, otherwise sgn0(x mod p) = 1. + +```rust +fn sgn0(self) -> u1 +``` + + +### lt + +Returns true if the field is less than the other field + +```rust +pub fn lt(self, another: Field) -> bool +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/function_types.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/function_types.md new file mode 100644 index 00000000000..f6121af17e2 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/function_types.md @@ -0,0 +1,26 @@ +--- +title: Function types +sidebar_position: 10 +--- + +Noir supports higher-order functions. The syntax for a function type is as follows: + +```rust +fn(arg1_type, arg2_type, ...) -> return_type +``` + +Example: + +```rust +fn assert_returns_100(f: fn() -> Field) { // f takes no args and returns a Field + assert(f() == 100); +} + +fn main() { + assert_returns_100(|| 100); // ok + assert_returns_100(|| 150); // fails +} +``` + +A function type also has an optional capture environment - this is necessary to support closures. +See [Lambdas](../lambdas.md) for more details. 
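+
+Named functions can be passed wherever a matching function type is expected, just like non-capturing lambdas. A small sketch (the `double` and `apply` helpers here are illustrative, not part of the standard library):
+
+```rust
+fn double(x: Field) -> Field {
+    x * 2
+}
+
+// `apply` accepts any value of the function type `fn(Field) -> Field`.
+fn apply(f: fn(Field) -> Field, x: Field) -> Field {
+    f(x)
+}
+
+fn main() {
+    assert(apply(double, 5) == 10);   // a named function
+    assert(apply(|x| x + 1, 5) == 6); // a non-capturing lambda
+}
+```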
diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/index.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/index.md new file mode 100644 index 00000000000..357813c147a --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/index.md @@ -0,0 +1,110 @@ +--- +title: Data Types +description: + Get a clear understanding of the two categories of Noir data types - primitive types and compound + types. Learn about their characteristics, differences, and how to use them in your Noir + programming. +keywords: + [ + noir, + data types, + primitive types, + compound types, + private types, + public types, + ] +--- + +Every value in Noir has a type, which determines which operations are valid for it. + +All values in Noir are fundamentally composed of `Field` elements. For a more approachable +developing experience, abstractions are added on top to introduce different data types in Noir. + +Noir has two category of data types: primitive types (e.g. `Field`, integers, `bool`) and compound +types that group primitive types (e.g. arrays, tuples, structs). Each value can either be private or +public. + +## Private & Public Types + +A **private value** is known only to the Prover, while a **public value** is known by both the +Prover and Verifier. Mark values as `private` when the value should only be known to the prover. All +primitive types (including individual fields of compound types) in Noir are private by default, and +can be marked public when certain values are intended to be revealed to the Verifier. + +> **Note:** For public values defined in Noir programs paired with smart contract verifiers, once +> the proofs are verified on-chain the values can be considered known to everyone that has access to +> that blockchain. + +Public data types are treated no differently to private types apart from the fact that their values +will be revealed in proofs generated. Simply changing the value of a public type will not change the +circuit (where the same goes for changing values of private types as well). + +_Private values_ are also referred to as _witnesses_ sometimes. + +> **Note:** The terms private and public when applied to a type (e.g. `pub Field`) have a different +> meaning than when applied to a function (e.g. `pub fn foo() {}`). +> +> The former is a visibility modifier for the Prover to interpret if a value should be made known to +> the Verifier, while the latter is a visibility modifier for the compiler to interpret if a +> function should be made accessible to external Noir programs like in other languages. + +### pub Modifier + +All data types in Noir are private by default. Types are explicitly declared as public using the +`pub` modifier: + +```rust +fn main(x : Field, y : pub Field) -> pub Field { + x + y +} +``` + +In this example, `x` is **private** while `y` and `x + y` (the return value) are **public**. Note +that visibility is handled **per variable**, so it is perfectly valid to have one input that is +private and another that is public. + +> **Note:** Public types can only be declared through parameters on `main`. + +## Type Aliases + +A type alias is a new name for an existing type. 
Type aliases are declared with the keyword `type`: + +```rust +type Id = u8; + +fn main() { + let id: Id = 1; + let zero: u8 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can also be used with [generics](../generics.md): + +```rust +type Id = Size; + +fn main() { + let id: Id = 1; + let zero: u32 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can even refer to other aliases. An error will be issued if they form a cycle: + +```rust +// Ok! +type A = B; +type B = Field; + +type Bad1 = Bad2; + +// error: Dependency cycle found +type Bad2 = Bad1; +// ^^^^^^^^^^^ 'Bad2' recursively depends on itself: Bad2 -> Bad1 -> Bad2 +``` + +### BigInt + +You can achieve BigInt functionality using the [Noir BigInt](https://github.com/shuklaayush/noir-bigint) library. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/integers.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/integers.md new file mode 100644 index 00000000000..1c6b375db49 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/integers.md @@ -0,0 +1,155 @@ +--- +title: Integers +description: Explore the Integer data type in Noir. Learn about its methods, see real-world examples, and grasp how to efficiently use Integers in your Noir code. +keywords: [noir, integer types, methods, examples, arithmetic] +sidebar_position: 1 +--- + +An integer type is a range constrained field type. The Noir frontend supports both unsigned and signed integer types. The allowed sizes are 1, 8, 32 and 64 bits. + +:::info + +When an integer is defined in Noir without a specific type, it will default to `Field`. + +The one exception is for loop indices which default to `u64` since comparisons on `Field`s are not possible. + +::: + +## Unsigned Integers + +An unsigned integer type is specified first with the letter `u` (indicating its unsigned nature) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: u8 = 1; + let y: u8 = 1; + let z = x + y; + assert (z == 2); +} +``` + +The bit size determines the maximum value the integer type can store. For example, a `u8` variable can store a value in the range of 0 to 255 (i.e. $\\2^{8}-1\\$). + +## Signed Integers + +A signed integer type is specified first with the letter `i` (which stands for integer) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: i8 = -1; + let y: i8 = -1; + let z = x + y; + assert (z == -2); +} +``` + +The bit size determines the maximum and minimum range of value the integer type can store. For example, an `i8` variable can store a value in the range of -128 to 127 (i.e. $\\-2^{7}\\$ to $\\2^{7}-1\\$). + +## 128 bits Unsigned Integers + +The built-in structure `U128` allows you to use 128-bit unsigned integers almost like a native integer type. However, there are some differences to keep in mind: +- You cannot cast between a native integer and `U128` +- There is a higher performance cost when using `U128`, compared to a native type. + +Conversion between unsigned integer types and U128 are done through the use of `from_integer` and `to_integer` functions. `from_integer` also accepts the `Field` type as input. + +```rust +fn main() { + let x = U128::from_integer(23); + let y = U128::from_hex("0x7"); + let z = x + y; + assert(z.to_integer() == 30); +} +``` + +`U128` is implemented with two 64 bits limbs, representing the low and high bits, which explains the performance cost. You should expect `U128` to be twice more costly for addition and four times more costly for multiplication. 
+You can construct a U128 from its limbs: +```rust +fn main(x: u64, y: u64) { + let x = U128::from_u64s_be(x,y); + assert(z.hi == x as Field); + assert(z.lo == y as Field); +} +``` + +Note that the limbs are stored as Field elements in order to avoid unnecessary conversions. +Apart from this, most operations will work as usual: + +```rust +fn main(x: U128, y: U128) { + // multiplication + let c = x * y; + // addition and subtraction + let c = c - x + y; + // division + let c = x / y; + // bit operation; + let c = x & y | y; + // bit shift + let c = x << y; + // comparisons; + let c = x < y; + let c = x == y; +} +``` + +## Overflows + +Computations that exceed the type boundaries will result in overflow errors. This happens with both signed and unsigned integers. For example, attempting to prove: + +```rust +fn main(x: u8, y: u8) { + let z = x + y; +} +``` + +With: + +```toml +x = "255" +y = "1" +``` + +Would result in: + +``` +$ nargo prove +error: Assertion failed: 'attempt to add with overflow' +┌─ ~/src/main.nr:9:13 +│ +│ let z = x + y; +│ ----- +│ += Call stack: + ... +``` + +A similar error would happen with signed integers: + +```rust +fn main() { + let x: i8 = -118; + let y: i8 = -11; + let z = x + y; +} +``` + +### Wrapping methods + +Although integer overflow is expected to error, some use-cases rely on wrapping. For these use-cases, the standard library provides `wrapping` variants of certain common operations: + +```rust +fn wrapping_add(x: T, y: T) -> T; +fn wrapping_sub(x: T, y: T) -> T; +fn wrapping_mul(x: T, y: T) -> T; +``` + +Example of how it is used: + +```rust +use dep::std; + +fn main(x: u8, y: u8) -> pub u8 { + std::wrapping_add(x, y) +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/references.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/references.md new file mode 100644 index 00000000000..a5293d11cfb --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/references.md @@ -0,0 +1,23 @@ +--- +title: References +sidebar_position: 9 +--- + +Noir supports first-class references. References are a bit like pointers: they point to a specific address that can be followed to access the data stored at that address. You can use Rust-like syntax to use pointers in Noir: the `&` operator references the variable, the `*` operator dereferences it. + +Example: + +```rust +fn main() { + let mut x = 2; + + // you can reference x as &mut and pass it to multiplyBy2 + multiplyBy2(&mut x); +} + +// you can access &mut here +fn multiplyBy2(x: &mut Field) { + // and dereference it with * + *x = *x * 2; +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/slices.mdx b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/slices.mdx new file mode 100644 index 00000000000..828faf4a8f8 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/slices.mdx @@ -0,0 +1,170 @@ +--- +title: Slices +description: Explore the Slice data type in Noir. Understand its methods, see real-world examples, and learn how to effectively use Slices in your Noir programs. +keywords: [noir, slice type, methods, examples, subarrays] +sidebar_position: 5 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A slice is a dynamically-sized view into a sequence of elements. They can be resized at runtime, but because they don't own the data, they cannot be returned from a circuit. You can treat slices as arrays without a constrained size. 
+ +```rust +use dep::std::slice; + +fn main() -> pub Field { + let mut slice: [Field] = &[0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +To write a slice literal, use a preceeding ampersand as in: `&[0; 2]` or +`&[1, 2, 3]`. + +It is important to note that slices are not references to arrays. In Noir, +`&[..]` is more similar to an immutable, growable vector. + +View the corresponding test file [here][test-file]. + +[test-file]: https://github.com/noir-lang/noir/blob/f387ec1475129732f72ba294877efdf6857135ac/crates/nargo_cli/tests/test_data_ssa_refactor/slices/src/main.nr + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for slices: + +### push_back + +Pushes a new element to the end of the slice, returning a new slice with a length one greater than the original unmodified slice. + +```rust +fn push_back(_self: [T], _elem: T) -> [T] +``` + +example: + +```rust +fn main() -> pub Field { + let mut slice: [Field] = &[0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +### push_front + +Returns a new array with the specified element inserted at index 0. The existing elements indexes are incremented by 1. + +```rust +fn push_front(_self: Self, _elem: T) -> Self +``` + +Example: + +```rust +let mut new_slice: [Field] = &[]; +new_slice = new_slice.push_front(20); +assert(new_slice[0] == 20); // returns true +``` + +View the corresponding test file [here][test-file]. + +### pop_front + +Returns a tuple of two items, the first element of the array and the rest of the array. + +```rust +fn pop_front(_self: Self) -> (T, Self) +``` + +Example: + +```rust +let (first_elem, rest_of_slice) = slice.pop_front(); +``` + +View the corresponding test file [here][test-file]. + +### pop_back + +Returns a tuple of two items, the beginning of the array with the last element omitted and the last element. + +```rust +fn pop_back(_self: Self) -> (Self, T) +``` + +Example: + +```rust +let (popped_slice, last_elem) = slice.pop_back(); +``` + +View the corresponding test file [here][test-file]. + +### append + +Loops over a slice and adds it to the end of another. + +```rust +fn append(mut self, other: Self) -> Self +``` + +Example: + +```rust +let append = &[1, 2].append(&[3, 4, 5]); +``` + +### insert + +Inserts an element at a specified index and shifts all following elements by 1. + +```rust +fn insert(_self: Self, _index: Field, _elem: T) -> Self +``` + +Example: + +```rust +new_slice = rest_of_slice.insert(2, 100); +assert(new_slice[2] == 100); +``` + +View the corresponding test file [here][test-file]. + +### remove + +Remove an element at a specified index, shifting all elements after it to the left, returning the altered slice and the removed element. + +```rust +fn remove(_self: Self, _index: Field) -> (Self, T) +``` + +Example: + +```rust +let (remove_slice, removed_elem) = slice.remove(3); +``` + +### len + +Returns the length of a slice + +```rust +fn len(self) -> Field +``` + +Example: + +```rust +fn main() { + let slice = &[42, 42]; + assert(slice.len() == 2); +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/strings.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/strings.md new file mode 100644 index 00000000000..311dfd64416 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/strings.md @@ -0,0 +1,80 @@ +--- +title: Strings +description: + Discover the String data type in Noir. 
Learn about its methods, see real-world examples, and understand how to effectively manipulate and use Strings in Noir. +keywords: + [ + noir, + string type, + methods, + examples, + concatenation, + ] +sidebar_position: 3 +--- + + +The string type is a fixed length value defined with `str`. + +You can use strings in `assert()` functions or print them with +`println()`. See more about [Logging](../../standard_library/logging). + +```rust +use dep::std; + +fn main(message : pub str<11>, hex_as_string : str<4>) { + println(message); + assert(message == "hello world"); + assert(hex_as_string == "0x41"); +} +``` + +You can convert a `str` to a byte array by calling `as_bytes()` +or a vector by calling `as_bytes_vec()`. + +```rust +fn main() { + let message = "hello world"; + let message_bytes = message.as_bytes(); + let mut message_vec = message.as_bytes_vec(); + assert(message_bytes.len() == 11); + assert(message_bytes[0] == 104); + assert(message_bytes[0] == message_vec.get(0)); +} +``` + +## Escape characters + +You can use escape characters for your strings: + +| Escape Sequence | Description | +|-----------------|-----------------| +| `\r` | Carriage Return | +| `\n` | Newline | +| `\t` | Tab | +| `\0` | Null Character | +| `\"` | Double Quote | +| `\\` | Backslash | + +Example: + +```rust +let s = "Hello \"world" // prints "Hello "world" +let s = "hey \tyou"; // prints "hey you" +``` + +## Raw strings + +A raw string begins with the letter `r` and is optionally delimited by a number of hashes `#`. + +Escape characters are *not* processed within raw strings. All contents are interpreted literally. + +Example: + +```rust +let s = r"Hello world"; +let s = r#"Simon says "hello world""#; + +// Any number of hashes may be used (>= 1) as long as the string also terminates with the same number of hashes +let s = r#####"One "#, Two "##, Three "###, Four "####, Five will end the string."#####; +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/structs.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/structs.md new file mode 100644 index 00000000000..dbf68c99813 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/structs.md @@ -0,0 +1,70 @@ +--- +title: Structs +description: + Explore the Struct data type in Noir. Learn about its methods, see real-world examples, and grasp how to effectively define and use Structs in your Noir programs. +keywords: + [ + noir, + struct type, + methods, + examples, + data structures, + ] +sidebar_position: 8 +--- + +A struct also allows for grouping multiple values of different types. Unlike tuples, we can also +name each field. + +> **Note:** The usage of _field_ here refers to each element of the struct and is unrelated to the +> field type of Noir. + +Defining a struct requires giving it a name and listing each field within as `: ` pairs: + +```rust +struct Animal { + hands: Field, + legs: Field, + eyes: u8, +} +``` + +An instance of a struct can then be created with actual values in `: ` pairs in any +order. 
Struct fields are accessible using their given names: + +```rust +fn main() { + let legs = 4; + + let dog = Animal { + eyes: 2, + hands: 0, + legs, + }; + + let zero = dog.hands; +} +``` + +Structs can also be destructured in a pattern, binding each field to a new variable: + +```rust +fn main() { + let Animal { hands, legs: feet, eyes } = get_octopus(); + + let ten = hands + feet + eyes as u8; +} + +fn get_octopus() -> Animal { + let octopus = Animal { + hands: 0, + legs: 8, + eyes: 2, + }; + + octopus +} +``` + +The new variables can be bound with names different from the original struct field names, as +showcased in the `legs --> feet` binding in the example above. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/tuples.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/tuples.md new file mode 100644 index 00000000000..2ec5c9c4113 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/data_types/tuples.md @@ -0,0 +1,48 @@ +--- +title: Tuples +description: + Dive into the Tuple data type in Noir. Understand its methods, practical examples, and best practices for efficiently using Tuples in your Noir code. +keywords: + [ + noir, + tuple type, + methods, + examples, + multi-value containers, + ] +sidebar_position: 7 +--- + +A tuple collects multiple values like an array, but with the added ability to collect values of +different types: + +```rust +fn main() { + let tup: (u8, u64, Field) = (255, 500, 1000); +} +``` + +One way to access tuple elements is via destructuring using pattern matching: + +```rust +fn main() { + let tup = (1, 2); + + let (one, two) = tup; + + let three = one + two; +} +``` + +Another way to access tuple elements is via direct member access, using a period (`.`) followed by +the index of the element we want to access. Index `0` corresponds to the first tuple element, `1` to +the second and so on: + +```rust +fn main() { + let tup = (5, 6, 7, 8); + + let five = tup.0; + let eight = tup.3; +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/distinct.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/distinct.md new file mode 100644 index 00000000000..6c993b8b5e0 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/distinct.md @@ -0,0 +1,64 @@ +--- +title: Distinct Witnesses +sidebar_position: 11 +--- + +The `distinct` keyword prevents repetitions of witness indices in the program's ABI. This ensures +that the witnesses being returned as public inputs are all unique. + +The `distinct` keyword is only used for return values on program entry points (usually the `main()` +function). + +When using `distinct` and `pub` simultaneously, `distinct` comes first. See the example below. + +You can read more about the problem this solves +[here](https://github.com/noir-lang/noir/issues/1183). + +## Example + +Without the `distinct` keyword, the following program + +```rust +fn main(x : pub Field, y : pub Field) -> pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + "return_witnesses": [3, 2, 4, 4] + } +} +``` + +Whereas (with the `distinct` keyword) + +```rust +fn main(x : pub Field, y : pub Field) -> distinct pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + //... 
+ "return_witnesses": [3, 4, 5, 6] + } +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/functions.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/functions.md new file mode 100644 index 00000000000..2c9bc33fdfc --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/functions.md @@ -0,0 +1,226 @@ +--- +title: Functions +description: + Learn how to declare functions and methods in Noir, a programming language with Rust semantics. + This guide covers parameter declaration, return types, call expressions, and more. +keywords: [Noir, Rust, functions, methods, parameter declaration, return types, call expressions] +sidebar_position: 1 +--- + +Functions in Noir follow the same semantics of Rust, though Noir does not support early returns. + +To declare a function the `fn` keyword is used. + +```rust +fn foo() {} +``` + +By default, functions are visible only within the package they are defined. To make them visible outside of that package (for example, as part of a [library](../modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: + +```rust +pub fn foo() {} +``` + +You can also restrict the visibility of the function to only the crate it was defined in, by specifying `pub(crate)`: + +```rust +pub(crate) fn foo() {} //foo can only be called within its crate +``` + +All parameters in a function must have a type and all types are known at compile time. The parameter +is pre-pended with a colon and the parameter type. Multiple parameters are separated using a comma. + +```rust +fn foo(x : Field, y : Field){} +``` + +The return type of a function can be stated by using the `->` arrow notation. The function below +states that the foo function must return a `Field`. If the function returns no value, then the arrow +is omitted. + +```rust +fn foo(x : Field, y : Field) -> Field { + x + y +} +``` + +Note that a `return` keyword is unneeded in this case - the last expression in a function's body is +returned. + +## Main function + +If you're writing a binary, the `main` function is the starting point of your program. You can pass all types of expressions to it, as long as they have a fixed size at compile time: + +```rust +fn main(x : Field) // this is fine: passing a Field +fn main(x : [Field; 2]) // this is also fine: passing a Field with known size at compile-time +fn main(x : (Field, bool)) // 👌: passing a (Field, bool) tuple means size 2 +fn main(x : str<5>) // this is fine, as long as you pass a string of size 5 + +fn main(x : Vec) // can't compile, has variable size +fn main(x : [Field]) // can't compile, has variable size +fn main(....// i think you got it by now +``` + +Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: + +```rust +fn main(x : [Field]) { + assert(x[0] == 1); +} + +#[test] +fn test_one() { + main(&[1, 2]); +} +``` + +```bash +$ nargo test +[testing] Running 1 test functions +[testing] Testing test_one... ok +[testing] All tests passed + +$ nargo check +The application panicked (crashed). +Message: Cannot have variable sized arrays as a parameter to main +``` + +## Call Expressions + +Calling a function in Noir is executed by using the function name and passing in the necessary +arguments. 
+ +Below we show how to call the `foo` function from the `main` function using a call expression: + +```rust +fn main(x : Field, y : Field) { + let z = foo(x); +} + +fn foo(x : Field) -> Field { + x + x +} +``` + +## Methods + +You can define methods in Noir on any struct type in scope. + +```rust +struct MyStruct { + foo: Field, + bar: Field, +} + +impl MyStruct { + fn new(foo: Field) -> MyStruct { + MyStruct { + foo, + bar: 2, + } + } + + fn sum(self) -> Field { + self.foo + self.bar + } +} + +fn main() { + let s = MyStruct::new(40); + assert(s.sum() == 42); +} +``` + +Methods are just syntactic sugar for functions, so if we wanted to we could also call `sum` as +follows: + +```rust +assert(MyStruct::sum(s) == 42); +``` + +It is also possible to specialize which method is chosen depending on the [generic](./generics.md) type that is used. In this example, the `foo` function returns different values depending on its type: + +```rust +struct Foo {} + +impl Foo { + fn foo(self) -> Field { 1 } +} + +impl Foo { + fn foo(self) -> Field { 2 } +} + +fn main() { + let f1: Foo = Foo{}; + let f2: Foo = Foo{}; + assert(f1.foo() + f2.foo() == 3); +} +``` + +Also note that impls with the same method name defined in them cannot overlap. For example, if we already have `foo` defined for `Foo` and `Foo` like we do above, we cannot also define `foo` in an `impl Foo` since it would be ambiguous which version of `foo` to choose. + +```rust +// Including this impl in the same project as the above snippet would +// cause an overlapping impls error +impl Foo { + fn foo(self) -> Field { 3 } +} +``` + +## Lambdas + +Lambdas are anonymous functions. They follow the syntax of Rust - `|arg1, arg2, ..., argN| return_expression`. + +```rust +let add_50 = |val| val + 50; +assert(add_50(100) == 150); +``` + +See [Lambdas](./lambdas.md) for more details. + +## Attributes + +Attributes are metadata that can be applied to a function, using the following syntax: `#[attribute(value)]`. + +Supported attributes include: + +- **builtin**: the function is implemented by the compiler, for efficiency purposes. +- **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function` +- **field**: Used to enable conditional compilation of code depending on the field size. See below for more details +- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details. +- **test**: mark the function as unit tests. See [Tests](../../getting_started/tooling/testing.md) for more details + +### Field Attribute + +The field attribute defines which field the function is compatible for. The function is conditionally compiled, under the condition that the field attribute matches the Noir native field. +The field can be defined implicitly, by using the name of the elliptic curve usually associated to it - for instance bn254, bls12_381 - or explicitly by using the field (prime) order, in decimal or hexadecimal form. +As a result, it is possible to define multiple versions of a function with each version specialized for a different field attribute. This can be useful when a function requires different parameters depending on the underlying elliptic curve. + +Example: we define the function `foo()` three times below. 
Once for the default Noir bn254 curve, once for the field $\mathbb F_{23}$, which will normally never be used by Noir, and once again for the bls12_381 curve.
+
+```rust
+#[field(bn254)]
+fn foo() -> u32 {
+    1
+}
+
+#[field(23)]
+fn foo() -> u32 {
+    2
+}
+
+// This commented code would not compile as foo would be defined twice because it is the same field as bn254
+// #[field(21888242871839275222246405745257275088548364400416034343698204186575808495617)]
+// fn foo() -> u32 {
+//     2
+// }
+
+#[field(bls12_381)]
+fn foo() -> u32 {
+    3
+}
+```
+
+If the field name is not known to Noir, it will discard the function. Field names are case insensitive.
diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/generics.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/generics.md
new file mode 100644
index 00000000000..ddd42bf1f9b
--- /dev/null
+++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/generics.md
@@ -0,0 +1,106 @@
+---
+title: Generics
+description: Learn how to use Generics in Noir
+keywords: [Noir, Rust, generics, functions, structs]
+sidebar_position: 7
+---
+
+Generics allow you to use the same functions with multiple different concrete data types. You can
+read more about the concept of generics in the Rust documentation
+[here](https://doc.rust-lang.org/book/ch10-01-syntax.html).
+
+Here is a trivial example showing the identity function that supports any type. In Rust, it is
+common to refer to the most general type as `T`. We follow the same convention in Noir.
+
+```rust
+fn id<T>(x: T) -> T {
+    x
+}
+```
+
+## In Structs
+
+Generics are useful for specifying types in structs. For example, we can specify that a field in a
+struct will be of a certain generic type. In this case `value` is of type `T`.
+
+```rust
+struct RepeatedValue<T> {
+    value: T,
+    count: Field,
+}
+
+impl<T> RepeatedValue<T> {
+    fn print(self) {
+        for _i in 0 .. self.count {
+            println(self.value);
+        }
+    }
+}
+
+fn main() {
+    let repeated = RepeatedValue { value: "Hello!", count: 2 };
+    repeated.print();
+}
+```
+
+The `print` function will print `Hello!` an arbitrary number of times, twice in this case.
+
+If we want to be generic over array lengths (which are type-level integers), we can use numeric
+generics. Using these looks just like using regular generics, but these generics can resolve to
+integers at compile-time, rather than resolving to types. Here's an example of a struct that is
+generic over the size of the array it contains internally:
+
+```rust
+struct BigInt<N> {
+    limbs: [u32; N],
+}
+
+impl<N> BigInt<N> {
+    // `N` is in scope of all methods in the impl
+    fn first(first: BigInt<N>, second: BigInt<N>) -> Self {
+        assert(first.limbs != second.limbs);
+        first
+    }
+
+    fn second(first: BigInt<N>, second: Self) -> Self {
+        assert(first.limbs != second.limbs);
+        second
+    }
+}
+```
+
+## Calling functions on generic parameters
+
+Since a generic type `T` can represent any type, how can we call functions on the underlying type?
+In other words, how can we go from "any type `T`" to "any type `T` that has certain methods available?"
+
+This is what [traits](../concepts/traits) are for in Noir.
Here's an example of a function generic over +any type `T` that implements the `Eq` trait for equality: + +```rust +fn first_element_is_equal(array1: [T; N], array2: [T; N]) -> bool + where T: Eq +{ + if (array1.len() == 0) | (array2.len() == 0) { + true + } else { + array1[0] == array2[0] + } +} + +fn main() { + assert(first_element_is_equal([1, 2, 3], [1, 5, 6])); + + // We can use first_element_is_equal for arrays of any type + // as long as we have an Eq impl for the types we pass in + let array = [MyStruct::new(), MyStruct::new()]; + assert(array_eq(array, array, MyStruct::eq)); +} + +impl Eq for MyStruct { + fn eq(self, other: MyStruct) -> bool { + self.foo == other.foo + } +} +``` + +You can find more details on traits and trait implementations on the [traits page](../concepts/traits). diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/globals.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/globals.md new file mode 100644 index 00000000000..063a3d89248 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/globals.md @@ -0,0 +1,72 @@ +--- +title: Global Variables +description: + Learn about global variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, globals, global variables, constants] +sidebar_position: 8 +--- + +## Globals + + +Noir supports global variables. The global's type can be inferred by the compiler entirely: + +```rust +global N = 5; // Same as `global N: Field = 5` + +global TUPLE = (3, 2); + +fn main() { + assert(N == 5); + assert(N == TUPLE.0 + TUPLE.1); +} +``` + +:::info + +Globals can be defined as any expression, so long as they don't depend on themselves - otherwise there would be a dependency cycle! For example: + +```rust +global T = foo(T); // dependency error +``` + +::: + + +If they are initialized to a literal integer, globals can be used to specify an array's length: + +```rust +global N: Field = 2; + +fn main(y : [Field; N]) { + assert(y[0] == y[1]) +} +``` + +A global from another module can be imported or referenced externally like any other name: + +```rust +global N = 20; + +fn main() { + assert(my_submodule::N != N); +} + +mod my_submodule { + global N: Field = 10; +} +``` + +When a global is used, Noir replaces the name with its definition on each occurrence. +This means globals defined using function calls will repeat the call each time they're used: + +```rust +global RESULT = foo(); + +fn foo() -> [Field; 100] { ... } +``` + +This is usually fine since Noir will generally optimize any function call that does not +refer to a program input into a constant. It should be kept in mind however, if the called +function performs side-effects like `println`, as these will still occur on each use. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/lambdas.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/lambdas.md new file mode 100644 index 00000000000..be3c7e0b5ca --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/lambdas.md @@ -0,0 +1,81 @@ +--- +title: Lambdas +description: Learn how to use anonymous functions in Noir programming language. +keywords: [Noir programming language, lambda, closure, function, anonymous function] +sidebar_position: 9 +--- + +## Introduction + +Lambdas are anonymous functions. The syntax is `|arg1, arg2, ..., argN| return_expression`. 
+ +```rust +let add_50 = |val| val + 50; +assert(add_50(100) == 150); +``` + +A block can be used as the body of a lambda, allowing you to declare local variables inside it: + +```rust +let cool = || { + let x = 100; + let y = 100; + x + y +} + +assert(cool() == 200); +``` + +## Closures + +Inside the body of a lambda, you can use variables defined in the enclosing function. Such lambdas are called **closures**. In this example `x` is defined inside `main` and is accessed from within the lambda: + +```rust +fn main() { + let x = 100; + let closure = || x + 150; + assert(closure() == 250); +} +``` + +## Passing closures to higher-order functions + +It may catch you by surprise that the following code fails to compile: + +```rust +fn foo(f: fn () -> Field) -> Field { + f() +} + +fn main() { + let (x, y) = (50, 50); + assert(foo(|| x + y) == 100); // error :( +} +``` + +The reason is that the closure's capture environment affects its type - we have a closure that captures two Fields and `foo` +expects a regular function as an argument - those are incompatible. +:::note + +Variables contained within the `||` are the closure's parameters, and the expression that follows it is the closure's body. The capture environment is comprised of any variables used in the closure's body that are not parameters. + +E.g. in |x| x + y, y would be a captured variable, but x would not be, since it is a parameter of the closure. + +::: +The syntax for the type of a closure is `fn[env](args) -> ret_type`, where `env` is the capture environment of the closure - +in this example that's `(Field, Field)`. + +The best solution in our case is to make `foo` generic over the environment type of its parameter, so that it can be called +with closures with any environment, as well as with regular functions: + +```rust +fn foo(f: fn[Env]() -> Field) -> Field { + f() +} + +fn main() { + let (x, y) = (50, 50); + assert(foo(|| x + y) == 100); // compiles fine + assert(foo(|| 60) == 60); // compiles fine +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/mutability.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/mutability.md new file mode 100644 index 00000000000..fdeef6a87c5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/mutability.md @@ -0,0 +1,121 @@ +--- +title: Mutability +description: + Learn about mutable variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, mutability in noir, mutable variables] +sidebar_position: 8 +--- + +Variables in noir can be declared mutable via the `mut` keyword. Mutable variables can be reassigned +to via an assignment expression. + +```rust +let x = 2; +x = 3; // error: x must be mutable to be assigned to + +let mut y = 3; +let y = 4; // OK +``` + +The `mut` modifier can also apply to patterns: + +```rust +let (a, mut b) = (1, 2); +a = 11; // error: a must be mutable to be assigned to +b = 12; // OK + +let mut (c, d) = (3, 4); +c = 13; // OK +d = 14; // OK + +// etc. +let MyStruct { x: mut y } = MyStruct { x: a }; +// y is now in scope +``` + +Note that mutability in noir is local and everything is passed by value, so if a called function +mutates its parameters then the parent function will keep the old value of the parameters. 
+ +```rust +fn main() -> pub Field { + let x = 3; + helper(x); + x // x is still 3 +} + +fn helper(mut x: i32) { + x = 4; +} +``` + +## Non-local mutability + +Non-local mutability can be achieved through the mutable reference type `&mut T`: + +```rust +fn set_to_zero(x: &mut Field) { + *x = 0; +} + +fn main() { + let mut y = 42; + set_to_zero(&mut y); + assert(*y == 0); +} +``` + +When creating a mutable reference, the original variable being referred to (`y` in this +example) must also be mutable. Since mutable references are a reference type, they must +be explicitly dereferenced via `*` to retrieve the underlying value. Note that this yields +a copy of the value, so mutating this copy will not change the original value behind the +reference: + +```rust +fn main() { + let mut x = 1; + let x_ref = &mut x; + + let mut y = *x_ref; + let y_ref = &mut y; + + x = 2; + *x_ref = 3; + + y = 4; + *y_ref = 5; + + assert(x == 3); + assert(*x_ref == 3); + assert(y == 5); + assert(*y_ref == 5); +} +``` + +Note that types in Noir are actually deeply immutable so the copy that occurs when +dereferencing is only a conceptual copy - no additional constraints will occur. + +Mutable references can also be stored within structs. Note that there is also +no lifetime parameter on these unlike rust. This is because the allocated memory +always lasts the entire program - as if it were an array of one element. + +```rust +struct Foo { + x: &mut Field +} + +impl Foo { + fn incr(mut self) { + *self.x += 1; + } +} + +fn main() { + let foo = Foo { x: &mut 0 }; + foo.incr(); + assert(*foo.x == 1); +} +``` + +In general, you should avoid non-local & shared mutability unless it is needed. Sticking +to only local mutability will improve readability and potentially improve compiler optimizations as well. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/ops.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/ops.md new file mode 100644 index 00000000000..60425cb8994 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/ops.md @@ -0,0 +1,98 @@ +--- +title: Logical Operations +description: + Learn about the supported arithmetic and logical operations in the Noir programming language. + Discover how to perform operations on private input types, integers, and booleans. +keywords: + [ + Noir programming language, + supported operations, + arithmetic operations, + logical operations, + predicate operators, + bitwise operations, + short-circuiting, + backend, + ] +sidebar_position: 3 +--- + +# Operations + +## Table of Supported Operations + +| Operation | Description | Requirements | +| :-------- | :------------------------------------------------------------: | -------------------------------------: | +| + | Adds two private input types together | Types must be private input | +| - | Subtracts two private input types together | Types must be private input | +| \* | Multiplies two private input types together | Types must be private input | +| / | Divides two private input types together | Types must be private input | +| ^ | XOR two private input types together | Types must be integer | +| & | AND two private input types together | Types must be integer | +| \| | OR two private input types together | Types must be integer | +| \<\< | Left shift an integer by another integer amount | Types must be integer | +| >> | Right shift an integer by another integer amount | Types must be integer | +| ! 
| Bitwise not of a value | Type must be integer or boolean | +| \< | returns a bool if one value is less than the other | Upper bound must have a known bit size | +| \<= | returns a bool if one value is less than or equal to the other | Upper bound must have a known bit size | +| > | returns a bool if one value is more than the other | Upper bound must have a known bit size | +| >= | returns a bool if one value is more than or equal to the other | Upper bound must have a known bit size | +| == | returns a bool if one value is equal to the other | Both types must not be constants | +| != | returns a bool if one value is not equal to the other | Both types must not be constants | + +### Predicate Operators + +`<,<=, !=, == , >, >=` are known as predicate/comparison operations because they compare two values. +This differs from the operations such as `+` where the operands are used in _computation_. + +### Bitwise Operations Example + +```rust +fn main(x : Field) { + let y = x as u32; + let z = y & y; +} +``` + +`z` is implicitly constrained to be the result of `y & y`. The `&` operand is used to denote bitwise +`&`. + +> `x & x` would not compile as `x` is a `Field` and not an integer type. + +### Logical Operators + +Noir has no support for the logical operators `||` and `&&`. This is because encoding the +short-circuiting that these operators require can be inefficient for Noir's backend. Instead you can +use the bitwise operators `|` and `&` which operate identically for booleans, just without the +short-circuiting. + +```rust +let my_val = 5; + +let mut flag = 1; +if (my_val > 6) | (my_val == 0) { + flag = 0; +} +assert(flag == 1); + +if (my_val != 10) & (my_val < 50) { + flag = 0; +} +assert(flag == 0); +``` + +### Shorthand operators + +Noir shorthand operators for most of the above operators, namely `+=, -=, *=, /=, %=, &=, |=, ^=, <<=`, and `>>=`. These allow for more concise syntax. For example: + +```rust +let mut i = 0; +i = i + 1; +``` + +could be written as: + +```rust +let mut i = 0; +i += 1; +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/oracles.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/oracles.md new file mode 100644 index 00000000000..2e6a6818d48 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/oracles.md @@ -0,0 +1,23 @@ +--- +title: Oracles +description: Dive into how Noir supports Oracles via RPC calls, and learn how to declare an Oracle in Noir with our comprehensive guide. +keywords: + - Noir + - Oracles + - RPC Calls + - Unconstrained Functions + - Programming + - Blockchain +sidebar_position: 6 +--- + +Noir has support for Oracles via RPC calls. This means Noir will make an RPC call and use the return value for proof generation. + +Since Oracles are not resolved by Noir, they are [`unconstrained` functions](./unconstrained.md) + +You can declare an Oracle through the `#[oracle()]` flag. Example: + +```rust +#[oracle(get_number_sequence)] +unconstrained fn get_number_sequence(_size: Field) -> [Field] {} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/shadowing.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/shadowing.md new file mode 100644 index 00000000000..5ce6130d201 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/shadowing.md @@ -0,0 +1,44 @@ +--- +title: Shadowing +sidebar_position: 12 +--- + +Noir allows for inheriting variables' values and re-declaring them with the same name similar to Rust, known as shadowing. 
+ +For example, the following function is valid in Noir: + +```rust +fn main() { + let x = 5; + + { + let x = x * 2; + assert (x == 10); + } + + assert (x == 5); +} +``` + +In this example, a variable x is first defined with the value 5. + +The local scope that follows shadows the original x, i.e. creates a local mutable x based on the value of the original x. It is given a value of 2 times the original x. + +When we return to the main scope, x once again refers to just the original x, which stays at the value of 5. + +## Temporal mutability + +One way that shadowing is useful, in addition to ergonomics across scopes, is for temporarily mutating variables. + +```rust +fn main() { + let age = 30; + // age = age + 5; // Would error as `age` is immutable by default. + + let mut age = age + 5; // Temporarily mutates `age` with a new value. + + let age = age; // Locks `age`'s mutability again. + + assert (age == 35); +} +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/traits.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/traits.md new file mode 100644 index 00000000000..ef1445a5907 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/traits.md @@ -0,0 +1,389 @@ +--- +title: Traits +description: + Traits in Noir can be used to abstract out a common interface for functions across + several data types. +keywords: [noir programming language, traits, interfaces, generic, protocol] +sidebar_position: 14 +--- + +## Overview + +Traits in Noir are a useful abstraction similar to interfaces or protocols in other languages. Each trait defines +the interface of several methods contained within the trait. Types can then implement this trait by providing +implementations for these methods. For example in the program: + +```rust +struct Rectangle { + width: Field, + height: Field, +} + +impl Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +fn log_area(r: Rectangle) { + println(r.area()); +} +``` + +We have a function `log_area` to log the area of a `Rectangle`. Now how should we change the program if we want this +function to work on `Triangle`s as well?: + +```rust +struct Triangle { + width: Field, + height: Field, +} + +impl Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Making `log_area` generic over all types `T` would be invalid since not all types have an `area` method. Instead, we can +introduce a new `Area` trait and make `log_area` generic over all types `T` that implement `Area`: + +```rust +trait Area { + fn area(self) -> Field; +} + +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +We also need to explicitly implement `Area` for `Rectangle` and `Triangle`. We can do that by changing their existing +impls slightly. Note that the parameter types and return type of each of our `area` methods must match those defined +by the `Area` trait. + +```rust +impl Area for Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +impl Area for Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Now we have a working program that is generic over any type of Shape that is used! Others can even use this program +as a library with their own types - such as `Circle` - as long as they also implement `Area` for these types. + +## Where Clauses + +As seen in `log_area` above, when we want to create a function or method that is generic over any type that implements +a trait, we can add a where clause to the generic function. 
+ +```rust +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +It is also possible to apply multiple trait constraints on the same variable at once by combining traits with the `+` +operator. Similarly, we can have multiple trait constraints by separating each with a comma: + +```rust +fn foo(elements: [T], thing: U) where + T: Default + Add + Eq, + U: Bar, +{ + let mut sum = T::default(); + + for element in elements { + sum += element; + } + + if sum == T::default() { + thing.bar(); + } +} +``` + +## Generic Implementations + +You can add generics to a trait implementation by adding the generic list after the `impl` keyword: + +```rust +trait Second { + fn second(self) -> Field; +} + +impl Second for (T, Field) { + fn second(self) -> Field { + self.1 + } +} +``` + +You can also implement a trait for every type this way: + +```rust +trait Debug { + fn debug(self); +} + +impl Debug for T { + fn debug(self) { + println(self); + } +} + +fn main() { + 1.debug(); +} +``` + +### Generic Trait Implementations With Where Clauses + +Where clauses can also be placed on trait implementations themselves to restrict generics in a similar way. +For example, while `impl Foo for T` implements the trait `Foo` for every type, `impl Foo for T where T: Bar` +will implement `Foo` only for types that also implement `Bar`. This is often used for implementing generic types. +For example, here is the implementation for array equality: + +```rust +impl Eq for [T; N] where T: Eq { + // Test if two arrays have the same elements. + // Because both arrays must have length N, we know their lengths already match. + fn eq(self, other: Self) -> bool { + let mut result = true; + + for i in 0 .. self.len() { + // The T: Eq constraint is needed to call == on the array elements here + result &= self[i] == other[i]; + } + + result + } +} +``` + +## Generic Traits + +Traits themselves can also be generic by placing the generic arguments after the trait name. These generics are in +scope of every item within the trait. + +```rust +trait Into { + // Convert `self` to type `T` + fn into(self) -> T; +} +``` + +When implementing generic traits the generic arguments of the trait must be specified. This is also true anytime +when referencing a generic trait (e.g. in a `where` clause). + +```rust +struct MyStruct { + array: [Field; 2], +} + +impl Into<[Field; 2]> for MyStruct { + fn into(self) -> [Field; 2] { + self.array + } +} + +fn as_array(x: T) -> [Field; 2] + where T: Into<[Field; 2]> +{ + x.into() +} + +fn main() { + let array = [1, 2]; + let my_struct = MyStruct { array }; + + assert_eq(as_array(my_struct), array); +} +``` + +## Trait Methods With No `self` + +A trait can contain any number of methods, each of which have access to the `Self` type which represents each type +that eventually implements the trait. Similarly, the `self` variable is available as well but is not required to be used. +For example, we can define a trait to create a default value for a type. This trait will need to return the `Self` type +but doesn't need to take any parameters: + +```rust +trait Default { + fn default() -> Self; +} +``` + +Implementing this trait can be done similarly to any other trait: + +```rust +impl Default for Field { + fn default() -> Field { + 0 + } +} + +struct MyType {} + +impl Default for MyType { + fn default() -> Field { + MyType {} + } +} +``` + +However, since there is no `self` parameter, we cannot call it via the method call syntax `object.method()`. 
+Instead, we'll need to refer to the function directly. This can be done either by referring to the +specific impl `MyType::default()` or referring to the trait itself `Default::default()`. In the later +case, type inference determines the impl that is selected. + +```rust +let my_struct = MyStruct::default(); + +let x: Field = Default::default(); +let result = x + Default::default(); +``` + +:::warning + +```rust +let _ = Default::default(); +``` + +If type inference cannot select which impl to use because of an ambiguous `Self` type, an impl will be +arbitrarily selected. This occurs most often when the result of a trait function call with no parameters +is unused. To avoid this, when calling a trait function with no `self` or `Self` parameters or return type, +always refer to it via the implementation type's namespace - e.g. `MyType::default()`. +This is set to change to an error in future Noir versions. + +::: + +## Default Method Implementations + +A trait can also have default implementations of its methods by giving a body to the desired functions. +Note that this body must be valid for all types that may implement the trait. As a result, the only +valid operations on `self` will be operations valid for any type or other operations on the trait itself. + +```rust +trait Numeric { + fn add(self, other: Self) -> Self; + + // Default implementation of double is (self + self) + fn double(self) -> Self { + self.add(self) + } +} +``` + +When implementing a trait with default functions, a type may choose to implement only the required functions: + +```rust +impl Numeric for Field { + fn add(self, other: Field) -> Field { + self + other + } +} +``` + +Or it may implement the optional methods as well: + +```rust +impl Numeric for u32 { + fn add(self, other: u32) -> u32 { + self + other + } + + fn double(self) -> u32 { + self * 2 + } +} +``` + +## Impl Specialization + +When implementing traits for a generic type it is possible to implement the trait for only a certain combination +of generics. This can be either as an optimization or because those specific generics are required to implement the trait. + +```rust +trait Sub { + fn sub(self, other: Self) -> Self; +} + +struct NonZero { + value: T, +} + +impl Sub for NonZero { + fn sub(self, other: Self) -> Self { + let value = self.value - other.value; + assert(value != 0); + NonZero { value } + } +} +``` + +## Overlapping Implementations + +Overlapping implementations are disallowed by Noir to ensure Noir's decision on which impl to select is never ambiguous. +This means if a trait `Foo` is already implemented +by a type `Bar` for all `T`, then we cannot also have a separate impl for `Bar` (or any other +type argument). Similarly, if there is an impl for all `T` such as `impl Debug for T`, we cannot create +any more impls to `Debug` for other types since it would be ambiguous which impl to choose for any given +method call. + +```rust +trait Trait {} + +// Previous impl defined here +impl Trait for (A, B) {} + +// error: Impl for type `(Field, Field)` overlaps with existing impl +impl Trait for (Field, Field) {} +``` + +## Trait Coherence + +Another restriction on trait implementations is coherence. This restriction ensures other crates cannot create +impls that may overlap with other impls, even if several unrelated crates are used as dependencies in the same +program. + +The coherence restriction is: to implement a trait, either the trait itself or the object type must be declared +in the crate the impl is in. 
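+
+As a rough sketch of what this rules out (reusing the hypothetical `some_library` dependency from the newtype example below), an impl where both the trait and the type come from other crates is rejected:
+
+```rust
+// Neither `Default` (standard library) nor `Foo` (some_library) is declared in
+// this crate, so this impl violates trait coherence and will not compile.
+impl Default for dep::some_library::Foo {
+    fn default() -> Self {
+        dep::some_library::Foo::new()
+    }
+}
+```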
+ +In practice this often comes up when using types provided by libraries. If a library provides a type `Foo` that does +not implement a trait in the standard library such as `Default`, you may not `impl Default for Foo` in your own crate. +While restrictive, this prevents later issues or silent changes in the program if the `Foo` library later added its +own impl for `Default`. If you are a user of the `Foo` library in this scenario and need a trait not implemented by the +library your choices are to either submit a patch to the library or use the newtype pattern. + +### The Newtype Pattern + +The newtype pattern gets around the coherence restriction by creating a new wrapper type around the library type +that we cannot create `impl`s for. Since the new wrapper type is defined in our current crate, we can create +impls for any trait we need on it. + +```rust +struct Wrapper { + foo: dep::some_library::Foo, +} + +impl Default for Wrapper { + fn default() -> Wrapper { + Wrapper { + foo: dep::some_library::Foo::new(), + } + } +} +``` + +Since we have an impl for our own type, the behavior of this code will not change even if `some_library` is updated +to provide its own `impl Default for Foo`. The downside of this pattern is that it requires extra wrapping and +unwrapping of values when converting to and from the `Wrapper` and `Foo` types. diff --git a/docs/versioned_docs/version-v0.26.0/noir/concepts/unconstrained.md b/docs/versioned_docs/version-v0.26.0/noir/concepts/unconstrained.md new file mode 100644 index 00000000000..b8e71fe65f0 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/concepts/unconstrained.md @@ -0,0 +1,99 @@ +--- +title: Unconstrained Functions +description: "Learn about what unconstrained functions in Noir are, how to use them and when you'd want to." + +keywords: [Noir programming language, unconstrained, open] +sidebar_position: 5 +--- + +Unconstrained functions are functions which do not constrain any of the included computation and allow for non-deterministic computation. + +## Why? + +Zero-knowledge (ZK) domain-specific languages (DSL) enable developers to generate ZK proofs from their programs by compiling code down to the constraints of an NP complete language (such as R1CS or PLONKish languages). However, the hard bounds of a constraint system can be very limiting to the functionality of a ZK DSL. + +Enabling a circuit language to perform unconstrained execution is a powerful tool. Said another way, unconstrained execution lets developers generate witnesses from code that does not generate any constraints. Being able to execute logic outside of a circuit is critical for both circuit performance and constructing proofs on information that is external to a circuit. + +Fetching information from somewhere external to a circuit can also be used to enable developers to improve circuit efficiency. + +A ZK DSL does not just prove computation, but proves that some computation was handled correctly. Thus, it is necessary that when we switch from performing some operation directly inside of a circuit to inside of an unconstrained environment that the appropriate constraints are still laid down elsewhere in the circuit. + +## Example + +An in depth example might help drive the point home. This example comes from the excellent [post](https://discord.com/channels/1113924620781883405/1124022445054111926/1128747641853972590) by Tom in the Noir Discord. + +Let's look at how we can optimize a function to turn a `u72` into an array of `u8`s. 
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8)) as u72 & 0xff) as u8;
+    }
+
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 91
+Backend circuit size: 3619
+```
+
+A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However, we can save a bunch of gates by casting to `u8` a bit earlier. This automatically truncates the bit-shifted value to fit in a `u8`, which allows us to remove the AND against `0xff`. This saves us ~480 gates in total.
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8))) as u8;
+    }
+
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 75
+Backend circuit size: 3143
+```
+
+Those are some nice savings already, but we can do better. This code is all constrained, so we're proving every step of calculating `out` from `num`, but we don't actually care about how we calculate this, just that it's correct. This is where brillig comes in.
+
+It turns out that truncating a `u72` into a `u8` is hard to do inside a snark: each time we do `as u8` we lay down 4 ACIR opcodes, which get converted into multiple gates. It's actually much easier to calculate `num` from `out` than the other way around. All we need to do is multiply each element of `out` by a constant and add them all together, both relatively easy operations inside a snark.
+
+We can then run `u72_to_u8` as unconstrained brillig code in order to calculate `out`, then use that result in our constrained function and assert that if we were to do the reverse calculation we'd get back `num`. This looks a little like the below:
+
+```rust
+fn main(num: u72) -> pub [u8; 8] {
+    let out = u72_to_u8(num);
+
+    let mut reconstructed_num: u72 = 0;
+    for i in 0..8 {
+        reconstructed_num += (out[i] as u72 << (56 - (8 * i)));
+    }
+    assert(num == reconstructed_num);
+    out
+}
+
+unconstrained fn u72_to_u8(num: u72) -> [u8; 8] {
+    let mut out: [u8; 8] = [0; 8];
+    for i in 0..8 {
+        out[i] = (num >> (56 - (i * 8))) as u8;
+    }
+    out
+}
+```
+
+```
+Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 78
+Backend circuit size: 2902
+```
+
+This ends up taking off another ~250 gates from our circuit! We've ended up with more ACIR opcodes than before, but they're easier for the backend to prove (resulting in fewer gates).
+
+Generally we want to use brillig whenever there's something that's easy to verify but hard to compute within the circuit. For example, if you wanted to calculate the square root of a number, it's a much better idea to calculate this in brillig and then assert that if you square the result you get back your number.
+
+## Break and Continue
+
+In addition to loops over runtime bounds, `break` and `continue` are also available in unconstrained code. 
See [break and continue](../concepts/control_flow/#break-and-continue) diff --git a/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/_category_.json b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/_category_.json new file mode 100644 index 00000000000..1debcfe7675 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Modules, Packages and Crates", + "position": 2, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/crates_and_packages.md b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/crates_and_packages.md new file mode 100644 index 00000000000..95ee9f52ab2 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/crates_and_packages.md @@ -0,0 +1,43 @@ +--- +title: Crates and Packages +description: Learn how to use Crates and Packages in your Noir project +keywords: [Nargo, dependencies, package management, crates, package] +sidebar_position: 0 +--- + +## Crates + +A crate is the smallest amount of code that the Noir compiler considers at a time. +Crates can contain modules, and the modules may be defined in other files that get compiled with the crate, as we’ll see in the coming sections. + +### Crate Types + +A Noir crate can come in several forms: binaries, libraries or contracts. + +#### Binaries + +_Binary crates_ are programs which you can compile to an ACIR circuit which you can then create proofs against. Each must have a function called `main` that defines the ACIR circuit which is to be proved. + +#### Libraries + +_Library crates_ don't have a `main` function and they don't compile down to ACIR. Instead they define functionality intended to be shared with multiple projects, and eventually included in a binary crate. + +#### Contracts + +Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against. They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/noir-projects/noir-contracts/contracts). + +### Crate Root + +Every crate has a root, which is the source file that the compiler starts, this is also known as the root module. The Noir compiler does not enforce any conditions on the name of the file which is the crate root, however if you are compiling via Nargo the crate root must be called `lib.nr` or `main.nr` for library or binary crates respectively. + +## Packages + +A Nargo _package_ is a collection of one of more crates that provides a set of functionality. A package must include a Nargo.toml file. + +A package _must_ contain either a library or a binary crate, but not both. + +### Differences from Cargo Packages + +One notable difference between Rust's Cargo and Noir's Nargo is that while Cargo allows a package to contain an unlimited number of binary crates and a single library crate, Nargo currently only allows a package to contain a single crate. + +In future this restriction may be lifted to allow a Nargo package to contain both a binary and library crate or multiple binary crates. 
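+
+To make the package layout concrete, a minimal binary package is described by a `Nargo.toml` along the lines of the following sketch (the name and version values here are illustrative, not prescriptive):
+
+```toml
+[package]
+name = "my_binary_crate"
+type = "bin"
+authors = [""]
+compiler_version = ">=0.26.0"
+
+[dependencies]
+```
+
+A library package would instead use `type = "lib"`.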
diff --git a/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/dependencies.md b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/dependencies.md new file mode 100644 index 00000000000..04c1703d929 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/dependencies.md @@ -0,0 +1,124 @@ +--- +title: Dependencies +description: + Learn how to specify and manage dependencies in Nargo, allowing you to upload packages to GitHub + and use them easily in your project. +keywords: [Nargo, dependencies, GitHub, package management, versioning] +sidebar_position: 1 +--- + +Nargo allows you to upload packages to GitHub and use them as dependencies. + +## Specifying a dependency + +Specifying a dependency requires a tag to a specific commit and the git url to the url containing +the package. + +Currently, there are no requirements on the tag contents. If requirements are added, it would follow +semver 2.0 guidelines. + +> Note: Without a `tag` , there would be no versioning and dependencies would change each time you +> compile your project. + +For example, to add the [ecrecover-noir library](https://github.com/colinnielsen/ecrecover-noir) to your project, add it to `Nargo.toml`: + +```toml +# Nargo.toml + +[dependencies] +ecrecover = {tag = "v0.8.0", git = "https://github.com/colinnielsen/ecrecover-noir"} +``` + +If the module is in a subdirectory, you can define a subdirectory in your git repository, for example: + +```toml +# Nargo.toml + +[dependencies] +easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "noir-contracts/contracts/easy_private_token_contract"} +``` + +## Specifying a local dependency + +You can also specify dependencies that are local to your machine. + +For example, this file structure has a library and binary crate + +```tree +├── binary_crate +│   ├── Nargo.toml +│   └── src +│   └── main.nr +└── lib_a + ├── Nargo.toml + └── src + └── lib.nr +``` + +Inside of the binary crate, you can specify: + +```toml +# Nargo.toml + +[dependencies] +lib_a = { path = "../lib_a" } +``` + +## Importing dependencies + +You can import a dependency to a Noir file using the following syntax. For example, to import the +ecrecover-noir library and local lib_a referenced above: + +```rust +use dep::ecrecover; +use dep::lib_a; +``` + +You can also import only the specific parts of dependency that you want to use, like so: + +```rust +use dep::std::hash::sha256; +use dep::std::scalar_mul::fixed_base_embedded_curve; +``` + +Lastly, as demonstrated in the +[elliptic curve example](../standard_library/cryptographic_primitives/ec_primitives#examples), you +can import multiple items in the same line by enclosing them in curly braces: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; +``` + +We don't have a way to consume libraries from inside a [workspace](./workspaces) as external dependencies right now. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +## Dependencies of Dependencies + +Note that when you import a dependency, you also get access to all of the dependencies of that package. + +For example, the [phy_vector](https://github.com/resurgencelabs/phy_vector) library imports an [fraction](https://github.com/resurgencelabs/fraction) library. 
If you're importing the phy_vector library, then you can access the functions in fractions library like so: + +```rust +use dep::phy_vector; + +fn main(x : Field, y : pub Field) { + //... + let f = phy_vector::fraction::toFraction(true, 2, 1); + //... +} +``` + +## Available Libraries + +Noir does not currently have an official package manager. You can find a list of available Noir libraries in the [awesome-noir repo here](https://github.com/noir-lang/awesome-noir#libraries). + +Some libraries that are available today include: + +- [Standard Library](https://github.com/noir-lang/noir/tree/master/noir_stdlib) - the Noir Standard Library +- [Ethereum Storage Proof Verification](https://github.com/aragonzkresearch/noir-trie-proofs) - a library that contains the primitives necessary for RLP decoding (in the form of look-up table construction) and Ethereum state and storage proof verification (or verification of any trie proof involving 32-byte long keys) +- [BigInt](https://github.com/shuklaayush/noir-bigint) - a library that provides a custom BigUint56 data type, allowing for computations on large unsigned integers +- [ECrecover](https://github.com/colinnielsen/ecrecover-noir/tree/main) - a library to verify an ECDSA signature and return the source Ethereum address +- [Sparse Merkle Tree Verifier](https://github.com/vocdoni/smtverifier-noir/tree/main) - a library for verification of sparse Merkle trees +- [Signed Int](https://github.com/resurgencelabs/signed_int) - a library for accessing a custom Signed Integer data type, allowing access to negative numbers on Noir +- [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, allowing results that aren't whole numbers diff --git a/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/modules.md b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/modules.md new file mode 100644 index 00000000000..ae822a1cff4 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/modules.md @@ -0,0 +1,105 @@ +--- +title: Modules +description: + Learn how to organize your files using modules in Noir, following the same convention as Rust's + module system. Examples included. +keywords: [Noir, Rust, modules, organizing files, sub-modules] +sidebar_position: 2 +--- + +Noir's module system follows the same convention as the _newer_ version of Rust's module system. + +## Purpose of Modules + +Modules are used to organize files. Without modules all of your code would need to live in a single +file. In Noir, the compiler does not automatically scan all of your files to detect modules. This +must be done explicitly by the developer. + +## Examples + +### Importing a module in the crate root + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::hello_world(); +} +``` + +Filename : `src/foo.nr` + +```rust +fn from_foo() {} +``` + +In the above snippet, the crate root is the `src/main.nr` file. The compiler sees the module +declaration `mod foo` which prompts it to look for a foo.nr file. + +Visually this module hierarchy looks like the following : + +``` +crate + ├── main + │ + └── foo + └── from_foo + +``` + +### Importing a module throughout the tree + +All modules are accessible from the `crate::` namespace. + +``` +crate + ├── bar + ├── foo + └── main + +``` + +In the above snippet, if `bar` would like to use functions in `foo`, it can do so by `use crate::foo::function_name`. 
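+
+For instance, assuming the crate root declares both `mod foo;` and `mod bar;`, a sketch of `src/bar.nr` that reuses `from_foo` from the earlier example could look like this:
+
+```rust
+// src/bar.nr (sketch)
+use crate::foo::from_foo;
+
+fn bar_calls_foo() {
+    // `from_foo` is defined in the sibling `foo` module.
+    from_foo();
+}
+```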
+ +### Sub-modules + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::from_foo(); +} +``` + +Filename : `src/foo.nr` + +```rust +mod bar; +fn from_foo() {} +``` + +Filename : `src/foo/bar.nr` + +```rust +fn from_bar() {} +``` + +In the above snippet, we have added an extra module to the module tree; `bar`. `bar` is a submodule +of `foo` hence we declare bar in `foo.nr` with `mod bar`. Since `foo` is not the crate root, the +compiler looks for the file associated with the `bar` module in `src/foo/bar.nr` + +Visually the module hierarchy looks as follows: + +``` +crate + ├── main + │ + └── foo + ├── from_foo + └── bar + └── from_bar +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/workspaces.md new file mode 100644 index 00000000000..513497f12bf --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/modules_packages_crates/workspaces.md @@ -0,0 +1,42 @@ +--- +title: Workspaces +sidebar_position: 3 +--- + +Workspaces are a feature of nargo that allow you to manage multiple related Noir packages in a single repository. A workspace is essentially a group of related projects that share common build output directories and configurations. + +Each Noir project (with it's own Nargo.toml file) can be thought of as a package. Each package is expected to contain exactly one "named circuit", being the "name" defined in Nargo.toml with the program logic defined in `./src/main.nr`. + +For a project with the following structure: + +```tree +├── crates +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml +``` + +You can define a workspace in Nargo.toml like so: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +`members` indicates which packages are included in the workspace. As such, all member packages of a workspace will be processed when the `--workspace` flag is used with various commands or if a `default-member` is not specified. + +`default-member` indicates which package various commands process by default. + +Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/_category_.json b/docs/versioned_docs/version-v0.26.0/noir/standard_library/_category_.json new file mode 100644 index 00000000000..af04c0933fd --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Standard Library", + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/bigint.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/bigint.md new file mode 100644 index 00000000000..da6a7cdfd81 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/bigint.md @@ -0,0 +1,119 @@ +--- +title: Big Integers +description: How to use big integers from Noir standard library +keywords: + [ + Big Integer, + Noir programming language, + Noir libraries, + ] +--- + +The BigInt module in the standard library exposes some class of integers which do not fit (well) into a Noir native field. 
It implements modular arithmetic modulo a 'big' prime number.
+
+:::note
+
+The module can currently be considered as `Field`s with fixed modulo sizes used by a set of elliptic curves, in addition to just the native curve. [More work](https://github.com/noir-lang/noir/issues/510) is needed to achieve arbitrarily sized big integers.
+
+:::
+
+Currently 6 classes of integers (i.e. 'big' prime numbers) are available in the module, namely:
+
+- BN254 Fq: Bn254Fq
+- BN254 Fr: Bn254Fr
+- Secp256k1 Fq: Secpk1Fq
+- Secp256k1 Fr: Secpk1Fr
+- Secp256r1 Fr: Secpr1Fr
+- Secp256r1 Fq: Secpr1Fq
+
+Here, XXX Fq and XXX Fr denote respectively the orders of the base and scalar fields of the (usual) elliptic curve XXX.
+For instance, the big integer 'Secpk1Fq' in the standard library refers to integers modulo $2^{256}-2^{32}-977$.
+
+Feel free to explore the source code for the other primes:
+
+```rust title="big_int_definition" showLineNumbers
+struct BigInt {
+    pointer: u32,
+    modulus: u32,
+}
+```
+> Source code: noir_stdlib/src/bigint.nr#L16-L21
+
+
+## Example usage
+
+A common use case is constructing a big integer from its byte representation and performing arithmetic operations on it:
+
+```rust title="big_int_example" showLineNumbers
+fn big_int_example(x: u8, y: u8) {
+    let a = Secpk1Fq::from_le_bytes(&[x, y, 0, 45, 2]);
+    let b = Secpk1Fq::from_le_bytes(&[y, x, 9]);
+    let c = (a + b) * b / a;
+    let d = c.to_le_bytes();
+    println(d[0]);
+}
+```
+> Source code: test_programs/execution_success/bigint/src/main.nr#L20-L28
+
+
+## Methods
+
+The available operations for each big integer are:
+
+### from_le_bytes
+
+Construct a big integer from its little-endian bytes representation. Example:
+
+```rust
+let a = Secpk1Fq::from_le_bytes(&[x, y, 0, 45, 2]);
+```
+
+### to_le_bytes
+
+Return the little-endian bytes representation of a big integer. Example:
+
+```rust
+let bytes = a.to_le_bytes();
+```
+
+### add
+
+Add two big integers. Example:
+
+```rust
+let sum = a + b;
+```
+
+### sub
+
+Subtract two big integers. Example:
+
+```rust
+let difference = a - b;
+```
+
+### mul
+
+Multiply two big integers. Example:
+
+```rust
+let product = a * b;
+```
+
+### div
+
+Divide two big integers. Note that division is field division and not Euclidean division. Example:
+
+```rust
+let quotient = a / b;
+```
+
+### eq
+
+Compare two big integers. Example:
+
+```rust
+let are_equal = a == b;
+```
diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/black_box_fns.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/black_box_fns.md
new file mode 100644
index 00000000000..be8c65679c3
--- /dev/null
+++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/black_box_fns.md
@@ -0,0 +1,31 @@
+---
+title: Black Box Functions
+description: Black box functions are functions in Noir that rely on backends implementing support for specialized constraints.
+keywords: [noir, black box functions]
+---
+
+Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. This makes certain zk-snark-unfriendly computations cheaper than if they were implemented in Noir.
+
+The ACVM spec defines a set of black box functions which backends are expected to implement. This allows backends to use optimized implementations of these constraints if they have them; however, they may also fall back to less efficient naive implementations if not. 
+ +## Function list + +Here is a list of the current black box functions: + +- [SHA256](./cryptographic_primitives/hashes.mdx#sha256) +- [Schnorr signature verification](./cryptographic_primitives/schnorr.mdx) +- [Blake2s](./cryptographic_primitives/hashes.mdx#blake2s) +- [Blake3](./cryptographic_primitives/hashes.mdx#blake3) +- [Pedersen Hash](./cryptographic_primitives/hashes.mdx#pedersen_hash) +- [Pedersen Commitment](./cryptographic_primitives/hashes.mdx#pedersen_commitment) +- [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification.mdx) +- [Fixed base scalar multiplication](./cryptographic_primitives/scalar.mdx) +- AND +- XOR +- RANGE +- [Keccak256](./cryptographic_primitives/hashes.mdx#keccak256) +- [Recursive proof verification](./recursion) + +Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. + +You can view the black box functions defined in the ACVM code [here](https://github.com/noir-lang/noir/blob/master/acvm-repo/acir/src/circuit/black_box_functions.rs). diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/bn254.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/bn254.md new file mode 100644 index 00000000000..3294f005dbb --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/bn254.md @@ -0,0 +1,46 @@ +--- +title: Bn254 Field Library +--- + +Noir provides a module in standard library with some optimized functions for bn254 Fr in `std::field::bn254`. + +## decompose + +```rust +fn decompose(x: Field) -> (Field, Field) {} +``` + +Decomposes a single field into two fields, low and high. The low field contains the lower 16 bytes of the input field and the high field contains the upper 16 bytes of the input field. Both field results are range checked to 128 bits. + + +## assert_gt + +```rust +fn assert_gt(a: Field, b: Field) {} +``` + +Asserts that a > b. This will generate less constraints than using `assert(gt(a, b))`. + +## assert_lt + +```rust +fn assert_lt(a: Field, b: Field) {} +``` + +Asserts that a < b. This will generate less constraints than using `assert(lt(a, b))`. + +## gt + +```rust +fn gt(a: Field, b: Field) -> bool {} +``` + +Returns true if a > b. + +## lt + +```rust +fn lt(a: Field, b: Field) -> bool {} +``` + +Returns true if a < b. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/boundedvec.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/boundedvec.md new file mode 100644 index 00000000000..ce4529f6e57 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/boundedvec.md @@ -0,0 +1,326 @@ +--- +title: Bounded Vectors +keywords: [noir, vector, bounded vector, slice] +sidebar_position: 1 +--- + +A `BoundedVec` is a growable storage similar to a `Vec` except that it +is bounded with a maximum possible length. Unlike `Vec`, `BoundedVec` is not implemented +via slices and thus is not subject to the same restrictions slices are (notably, nested +slices - and thus nested vectors as well - are disallowed). + +Since a BoundedVec is backed by a normal array under the hood, growing the BoundedVec by +pushing an additional element is also more efficient - the length only needs to be increased +by one. 
+ +For these reasons `BoundedVec` should generally be preferred over `Vec` when there +is a reasonable maximum bound that can be placed on the vector. + +Example: + +```rust +let mut vector: BoundedVec = BoundedVec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +assert(vector.max_len() == 10); +``` + +## Methods + +### new + +```rust +pub fn new() -> Self +``` + +Creates a new, empty vector of length zero. + +Since this container is backed by an array internally, it still needs an initial value +to give each element. To resolve this, each element is zeroed internally. This value +is guaranteed to be inaccessible unless `get_unchecked` is used. + +Example: + +```rust +let empty_vector: BoundedVec = BoundedVec::new(); +assert(empty_vector.len() == 0); +``` + +Note that whenever calling `new` the maximum length of the vector should always be specified +via a type signature: + +```rust title="new_example" showLineNumbers +fn foo() -> BoundedVec { + // Ok! MaxLen is specified with a type annotation + let v1: BoundedVec = BoundedVec::new(); + let v2 = BoundedVec::new(); + + // Ok! MaxLen is known from the type of foo's return value + v2 +} + +fn bad() { + let mut v3 = BoundedVec::new(); + + // Not Ok! We don't know if v3's MaxLen is at least 1, and the compiler often infers 0 by default. + v3.push(5); +} +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L11-L27 + + +This defaulting of `MaxLen` (and numeric generics in general) to zero may change in future noir versions +but for now make sure to use type annotations when using bounded vectors. Otherwise, you will receive a constraint failure at runtime when the vec is pushed to. + +### get + +```rust +pub fn get(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero. + +If the given index is equal to or greater than the length of the vector, this +will issue a constraint failure. + +Example: + +```rust +fn foo(v: BoundedVec) { + let first = v.get(0); + let last = v.get(v.len() - 1); + assert(first != last); +} +``` + +### get_unchecked + +```rust +pub fn get_unchecked(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero, without +performing a bounds check. + +Since this function does not perform a bounds check on length before accessing the element, +it is unsafe! Use at your own risk! + +Example: + +```rust title="get_unchecked_example" showLineNumbers +fn sum_of_first_three(v: BoundedVec) -> u32 { + // Always ensure the length is larger than the largest + // index passed to get_unchecked + assert(v.len() > 2); + let first = v.get_unchecked(0); + let second = v.get_unchecked(1); + let third = v.get_unchecked(2); + first + second + third +} +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L54-L64 + + + +### push + +```rust +pub fn push(&mut self, elem: T) { +``` + +Pushes an element to the end of the vector. This increases the length +of the vector by one. + +Panics if the new length of the vector will be greater than the max length. + +Example: + +```rust title="bounded-vec-push-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + v.push(1); + v.push(2); + + // Panics with failed assertion "push out of bounds" + v.push(3); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L68-L76 + + +### pop + +```rust +pub fn pop(&mut self) -> T +``` + +Pops the element at the end of the vector. 
This will decrease the length +of the vector by one. + +Panics if the vector is empty. + +Example: + +```rust title="bounded-vec-pop-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + v.push(1); + v.push(2); + + let two = v.pop(); + let one = v.pop(); + + assert(two == 2); + assert(one == 1); + // error: cannot pop from an empty vector + // let _ = v.pop(); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L81-L93 + + +### len + +```rust +pub fn len(self) -> u64 { +``` + +Returns the current length of this vector + +Example: + +```rust title="bounded-vec-len-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + assert(v.len() == 0); + + v.push(100); + assert(v.len() == 1); + + v.push(200); + v.push(300); + v.push(400); + assert(v.len() == 4); + + let _ = v.pop(); + let _ = v.pop(); + assert(v.len() == 2); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L98-L113 + + +### max_len + +```rust +pub fn max_len(_self: BoundedVec) -> u64 { +``` + +Returns the maximum length of this vector. This is always +equal to the `MaxLen` parameter this vector was initialized with. + +Example: + +```rust title="bounded-vec-max-len-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + assert(v.max_len() == 5); + v.push(10); + assert(v.max_len() == 5); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L118-L124 + + +### storage + +```rust +pub fn storage(self) -> [T; MaxLen] { +``` + +Returns the internal array within this vector. +Since arrays in Noir are immutable, mutating the returned storage array will not mutate +the storage held internally by this vector. + +Note that uninitialized elements may be zeroed out! + +Example: + +```rust title="bounded-vec-storage-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + assert(v.storage() == [0, 0, 0, 0, 0]); + + v.push(57); + assert(v.storage() == [57, 0, 0, 0, 0]); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L129-L136 + + +### extend_from_array + +```rust +pub fn extend_from_array(&mut self, array: [T; Len]) +``` + +Pushes each element from the given array to this vector. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. + +Example: + +```rust title="bounded-vec-extend-from-array-example" showLineNumbers +let mut vec: BoundedVec = BoundedVec::new(); + vec.extend_from_array([2, 4]); + + assert(vec.len == 2); + assert(vec.get(0) == 2); + assert(vec.get(1) == 4); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L141-L148 + + +### extend_from_bounded_vec + +```rust +pub fn extend_from_bounded_vec(&mut self, vec: BoundedVec) +``` + +Pushes each element from the other vector to this vector. The length of +the other vector is left unchanged. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. 
+ +Example: + +```rust title="bounded-vec-extend-from-bounded-vec-example" showLineNumbers +let mut v1: BoundedVec = BoundedVec::new(); + let mut v2: BoundedVec = BoundedVec::new(); + + v2.extend_from_array([1, 2, 3]); + v1.extend_from_bounded_vec(v2); + + assert(v1.storage() == [1, 2, 3, 0, 0]); + assert(v2.storage() == [1, 2, 3, 0, 0, 0, 0]); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L153-L162 + + +### any + +```rust +pub fn any(self, predicate: fn[Env](T) -> bool) -> bool +``` + +Returns true if the given predicate returns true for any element +in this vector. + +Example: + +```rust title="bounded-vec-any-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + v.extend_from_array([2, 4, 6]); + + let all_even = !v.any(|elem: u32| elem % 2 != 0); + assert(all_even); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L229-L235 + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/hashmap.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/hashmap.md new file mode 100644 index 00000000000..91604af765d --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/hashmap.md @@ -0,0 +1,569 @@ +--- +title: HashMap +keywords: [noir, map, hash, hashmap] +sidebar_position: 1 +--- + +`HashMap` is used to efficiently store and look up key-value pairs. + +`HashMap` is a bounded type which can store anywhere from zero to `MaxLen` total elements. +Note that due to hash collisions, the actual maximum number of elements stored by any particular +hashmap is likely lower than `MaxLen`. This is true even with cryptographic hash functions since +every hash value will be performed modulo `MaxLen`. + +When creating `HashMap`s, the `MaxLen` generic should always be specified if it is not already +known. Otherwise, the compiler may infer a different value for `MaxLen` (such as zero), which +will likely change the result of the program. This behavior is set to become an error in future +versions instead. + +Example: + +```rust +// Create a mapping from Fields to u32s with a maximum length of 12 +// using a pedersen hash +let mut map: HashMap> = HashMap::default(); + +map.insert(1, 2); +map.insert(3, 4); + +let two = map.get(1).unwrap(); +``` + +## Methods + +### default + +```rust title="default" showLineNumbers +impl Default for HashMap +where + B: BuildHasher + Default, + H: Hasher + Default +{ + fn default() -> Self { +``` +> Source code: noir_stdlib/src/collections/map.nr#L462-L469 + + +Creates a fresh, empty HashMap. + +When using this function, always make sure to specify the maximum size of the hash map. + +This is the same `default` from the `Default` implementation given further below. It is +repeated here for convenience since it is the recommended way to create a hashmap. 
+ +Example: + +```rust title="default_example" showLineNumbers +let hashmap: HashMap> = HashMap::default(); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L202-L205 + + +Because `HashMap` has so many generic arguments that are likely to be the same throughout +your program, it may be helpful to create a type alias: + +```rust title="type_alias" showLineNumbers +type MyMap = HashMap>; +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L196-L198 + + +### with_hasher + +```rust title="with_hasher" showLineNumbers +pub fn with_hasher(_build_hasher: B) -> Self + where + B: BuildHasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L82-L86 + + +Creates a hashmap with an existing `BuildHasher`. This can be used to ensure multiple +hashmaps are created with the same hasher instance. + +Example: + +```rust title="with_hasher_example" showLineNumbers +let my_hasher: BuildHasherDefault = Default::default(); + let hashmap: HashMap> = HashMap::with_hasher(my_hasher); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L207-L211 + + +### get + +```rust title="get" showLineNumbers +pub fn get( + self, + key: K + ) -> Option + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L278-L287 + + +Retrieves a value from the hashmap, returning `Option::none()` if it was not found. + +Example: + +```rust title="get_example" showLineNumbers +fn get_example(map: HashMap>) { + let x = map.get(12); + + if x.is_some() { + assert(x.unwrap() == 42); + } +} +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L299-L307 + + +### insert + +```rust title="insert" showLineNumbers +pub fn insert( + &mut self, + key: K, + value: V + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L313-L323 + + +Inserts a new key-value pair into the map. If the key was already in the map, its +previous value will be overridden with the newly provided one. + +Example: + +```rust title="insert_example" showLineNumbers +let mut map: HashMap> = HashMap::default(); + map.insert(12, 42); + assert(map.len() == 1); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L213-L217 + + +### remove + +```rust title="remove" showLineNumbers +pub fn remove( + &mut self, + key: K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L356-L365 + + +Removes the given key-value pair from the map. If the key was not already present +in the map, this does nothing. + +Example: + +```rust title="remove_example" showLineNumbers +map.remove(12); + assert(map.is_empty()); + + // If a key was not present in the map, remove does nothing + map.remove(12); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L221-L228 + + +### is_empty + +```rust title="is_empty" showLineNumbers +pub fn is_empty(self) -> bool { +``` +> Source code: noir_stdlib/src/collections/map.nr#L115-L117 + + +True if the length of the hash map is empty. 
+ +Example: + +```rust title="is_empty_example" showLineNumbers +assert(map.is_empty()); + + map.insert(1, 2); + assert(!map.is_empty()); + + map.remove(1); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L230-L238 + + +### len + +```rust title="len" showLineNumbers +pub fn len(self) -> u64 { +``` +> Source code: noir_stdlib/src/collections/map.nr#L264-L266 + + +Returns the current length of this hash map. + +Example: + +```rust title="len_example" showLineNumbers +// This is equivalent to checking map.is_empty() + assert(map.len() == 0); + + map.insert(1, 2); + map.insert(3, 4); + map.insert(5, 6); + assert(map.len() == 3); + + // 3 was already present as a key in the hash map, so the length is unchanged + map.insert(3, 7); + assert(map.len() == 3); + + map.remove(1); + assert(map.len() == 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L240-L255 + + +### capacity + +```rust title="capacity" showLineNumbers +pub fn capacity(_self: Self) -> u64 { +``` +> Source code: noir_stdlib/src/collections/map.nr#L271-L273 + + +Returns the maximum capacity of this hashmap. This is always equal to the capacity +specified in the hashmap's type. + +Unlike hashmaps in general purpose programming languages, hashmaps in Noir have a +static capacity that does not increase as the map grows larger. Thus, this capacity +is also the maximum possible element count that can be inserted into the hashmap. +Due to hash collisions (modulo the hashmap length), it is likely the actual maximum +element count will be lower than the full capacity. + +Example: + +```rust title="capacity_example" showLineNumbers +let empty_map: HashMap> = HashMap::default(); + assert(empty_map.len() == 0); + assert(empty_map.capacity() == 42); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L257-L261 + + +### clear + +```rust title="clear" showLineNumbers +pub fn clear(&mut self) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L93-L95 + + +Clears the hashmap, removing all key-value pairs from it. + +Example: + +```rust title="clear_example" showLineNumbers +assert(!map.is_empty()); + map.clear(); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L263-L267 + + +### contains_key + +```rust title="contains_key" showLineNumbers +pub fn contains_key( + self, + key: K + ) -> bool + where + K: Hash + Eq, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L101-L110 + + +True if the hashmap contains the given key. Unlike `get`, this will not also return +the value associated with the key. + +Example: + +```rust title="contains_key_example" showLineNumbers +if map.contains_key(7) { + let value = map.get(7); + assert(value.is_some()); + } else { + println("No value for key 7!"); + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L269-L276 + + +### entries + +```rust title="entries" showLineNumbers +pub fn entries(self) -> BoundedVec<(K, V), N> { +``` +> Source code: noir_stdlib/src/collections/map.nr#L123-L125 + + +Returns a vector of each key-value pair present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. 
+ +Example: + +```rust title="entries_example" showLineNumbers +let entries = map.entries(); + + // The length of a hashmap may not be compile-time known, so we + // need to loop over its capacity instead + for i in 0..map.capacity() { + if i < entries.len() { + let (key, value) = entries.get(i); + println(f"{key} -> {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L310-L321 + + +### keys + +```rust title="keys" showLineNumbers +pub fn keys(self) -> BoundedVec { +``` +> Source code: noir_stdlib/src/collections/map.nr#L144-L146 + + +Returns a vector of each key present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. + +Example: + +```rust title="keys_example" showLineNumbers +let keys = map.keys(); + + for i in 0..keys.max_len() { + if i < keys.len() { + let key = keys.get_unchecked(i); + let value = map.get(key).unwrap_unchecked(); + println(f"{key} -> {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L323-L333 + + +### values + +```rust title="values" showLineNumbers +pub fn values(self) -> BoundedVec { +``` +> Source code: noir_stdlib/src/collections/map.nr#L164-L166 + + +Returns a vector of each value present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. + +Example: + +```rust title="values_example" showLineNumbers +let values = map.values(); + + for i in 0..values.max_len() { + if i < values.len() { + let value = values.get_unchecked(i); + println(f"Found value {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L335-L344 + + +### iter_mut + +```rust title="iter_mut" showLineNumbers +pub fn iter_mut( + &mut self, + f: fn(K, V) -> (K, V) + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L183-L192 + + +Iterates through each key-value pair of the HashMap, setting each key-value pair to the +result returned from the given function. + +Note that since keys can be mutated, the HashMap needs to be rebuilt as it is iterated +through. If this is not desired, use `iter_values_mut` if only values need to be mutated, +or `entries` if neither keys nor values need to be mutated. + +The iteration order is left unspecified. As a result, if two keys are mutated to become +equal, which of the two values that will be present for the key in the resulting map is also unspecified. + +Example: + +```rust title="iter_mut_example" showLineNumbers +// Add 1 to each key in the map, and double the value associated with that key. + map.iter_mut(|k, v| (k + 1, v * 2)); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L348-L351 + + +### iter_keys_mut + +```rust title="iter_keys_mut" showLineNumbers +pub fn iter_keys_mut( + &mut self, + f: fn(K) -> K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L208-L217 + + +Iterates through the HashMap, mutating each key to the result returned from +the given function. + +Note that since keys can be mutated, the HashMap needs to be rebuilt as it is iterated +through. If only iteration is desired and the keys are not intended to be mutated, +prefer using `entries` instead. + +The iteration order is left unspecified. As a result, if two keys are mutated to become +equal, which of the two values that will be present for the key in the resulting map is also unspecified. 
+ +Example: + +```rust title="iter_keys_mut_example" showLineNumbers +// Double each key, leaving the value associated with that key untouched + map.iter_keys_mut(|k| k * 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L353-L356 + + +### iter_values_mut + +```rust title="iter_values_mut" showLineNumbers +pub fn iter_values_mut(&mut self, f: fn(V) -> V) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L233-L235 + + +Iterates through the HashMap, applying the given function to each value and mutating the +value to equal the result. This function is more efficient than `iter_mut` and `iter_keys_mut` +because the keys are untouched and the underlying hashmap thus does not need to be reordered. + +Example: + +```rust title="iter_values_mut_example" showLineNumbers +// Halve each value + map.iter_values_mut(|v| v / 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L358-L361 + + +### retain + +```rust title="retain" showLineNumbers +pub fn retain(&mut self, f: fn(K, V) -> bool) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L247-L249 + + +Retains only the key-value pairs for which the given function returns true. +Any key-value pairs for which the function returns false will be removed from the map. + +Example: + +```rust title="retain_example" showLineNumbers +map.retain(|k, v| (k != 0) & (v != 0)); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L281-L283 + + +## Trait Implementations + +### default + +```rust title="default" showLineNumbers +impl Default for HashMap +where + B: BuildHasher + Default, + H: Hasher + Default +{ + fn default() -> Self { +``` +> Source code: noir_stdlib/src/collections/map.nr#L462-L469 + + +Constructs an empty HashMap. + +Example: + +```rust title="default_example" showLineNumbers +let hashmap: HashMap> = HashMap::default(); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L202-L205 + + +### eq + +```rust title="eq" showLineNumbers +impl Eq for HashMap +where + K: Eq + Hash, + V: Eq, + B: BuildHasher, + H: Hasher +{ + fn eq(self, other: HashMap) -> bool { +``` +> Source code: noir_stdlib/src/collections/map.nr#L426-L435 + + +Checks if two HashMaps are equal. + +Example: + +```rust title="eq_example" showLineNumbers +let mut map1: HashMap> = HashMap::default(); + let mut map2: HashMap> = HashMap::default(); + + map1.insert(1, 2); + map1.insert(3, 4); + + map2.insert(3, 4); + map2.insert(1, 2); + + assert(map1 == map2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L285-L296 + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/index.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/index.md new file mode 100644 index 00000000000..ea84c6d5c21 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/index.md @@ -0,0 +1,5 @@ +--- +title: Containers +description: Container types provided by Noir's standard library for storing and retrieving data +keywords: [containers, data types, vec, hashmap] +--- diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/vec.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/vec.mdx new file mode 100644 index 00000000000..fcfd7e07aa0 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/containers/vec.mdx @@ -0,0 +1,151 @@ +--- +title: Vectors +description: Delve into the Vec data type in Noir. 
Learn about its methods, practical examples, and best practices for using Vectors in your Noir code. +keywords: [noir, vector type, methods, examples, dynamic arrays] +sidebar_position: 6 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A vector is a collection type similar to Rust's `Vec` type. In Noir, it is a convenient way to use slices as mutable arrays. + +Example: + +```rust +let mut vector: Vec = Vec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +``` + +## Methods + +### new + +Creates a new, empty vector. + +```rust +pub fn new() -> Self +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### from_slice + +Creates a vector containing each element from a given slice. Mutations to the resulting vector will not affect the original slice. + +```rust +pub fn from_slice(slice: [T]) -> Self +``` + +Example: + +```rust +let slice: [Field] = &[1, 2, 3]; +let vector_from_slice = Vec::from_slice(slice); +assert(vector_from_slice.len() == 3); +``` + +### len + +Returns the number of elements in the vector. + +```rust +pub fn len(self) -> Field +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### get + +Retrieves an element from the vector at a given index. Panics if the index points beyond the vector's end. + +```rust +pub fn get(self, index: Field) -> T +``` + +Example: + +```rust +let vector: Vec = Vec::from_slice(&[10, 20, 30]); +assert(vector.get(1) == 20); +``` + +### push + +Adds a new element to the vector's end, returning a new vector with a length one greater than the original unmodified vector. + +```rust +pub fn push(&mut self, elem: T) +``` + +Example: + +```rust +let mut vector: Vec = Vec::new(); +vector.push(10); +assert(vector.len() == 1); +``` + +### pop + +Removes an element from the vector's end, returning a new vector with a length one less than the original vector, along with the removed element. Panics if the vector's length is zero. + +```rust +pub fn pop(&mut self) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 20]); +let popped_elem = vector.pop(); +assert(popped_elem == 20); +assert(vector.len() == 1); +``` + +### insert + +Inserts an element at a specified index, shifting subsequent elements to the right. + +```rust +pub fn insert(&mut self, index: Field, elem: T) +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 30]); +vector.insert(1, 20); +assert(vector.get(1) == 20); +``` + +### remove + +Removes an element at a specified index, shifting subsequent elements to the left, and returns the removed element. 
+ +```rust +pub fn remove(&mut self, index: Field) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 20, 30]); +let removed_elem = vector.remove(1); +assert(removed_elem == 20); +assert(vector.len() == 2); +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/_category_.json b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ec_primitives.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ec_primitives.md new file mode 100644 index 00000000000..d2b42d67b7c --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ec_primitives.md @@ -0,0 +1,102 @@ +--- +title: Elliptic Curve Primitives +keywords: [cryptographic primitives, Noir project] +sidebar_position: 4 +--- + +Data structures and methods on them that allow you to carry out computations involving elliptic +curves over the (mathematical) field corresponding to `Field`. For the field currently at our +disposal, applications would involve a curve embedded in BN254, e.g. the +[Baby Jubjub curve](https://eips.ethereum.org/EIPS/eip-2494). + +## Data structures + +### Elliptic curve configurations + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Curve`), i.e. the specific elliptic +curve you want to use, which would be specified using any one of the methods +`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::new` which take the coefficients in the +defining equation together with a generator point as parameters. You can find more detail in the +comments in +[`noir_stdlib/src/ec.nr`](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr), but +the gist of it is that the elliptic curves of interest are usually expressed in one of the standard +forms implemented here (Twisted Edwards, Montgomery and Short Weierstraß), and in addition to that, +you could choose to use `affine` coordinates (Cartesian coordinates - the usual (x,y) - possibly +together with a point at infinity) or `curvegroup` coordinates (some form of projective coordinates +requiring more coordinates but allowing for more efficient implementations of elliptic curve +operations). Conversions between all of these forms are provided, and under the hood these +conversions are done whenever an operation is more efficient in a different representation (or a +mixed coordinate representation is employed). + +### Points + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Point`), i.e. points lying on the +elliptic curve. For a curve configuration `c` and a point `p`, it may be checked that `p` +does indeed lie on `c` by calling `c.contains(p1)`. + +## Methods + +(given a choice of curve representation, e.g. use `std::ec::tecurve::affine::Curve` and use +`std::ec::tecurve::affine::Point`) + +- The **zero element** is given by `Point::zero()`, and we can verify whether a point `p: Point` is + zero by calling `p.is_zero()`. +- **Equality**: Points `p1: Point` and `p2: Point` may be checked for equality by calling + `p1.eq(p2)`. 
+- **Addition**: For `c: Curve` and points `p1: Point` and `p2: Point` on the curve, adding these two + points is accomplished by calling `c.add(p1,p2)`. +- **Negation**: For a point `p: Point`, `p.negate()` is its negation. +- **Subtraction**: For `c` and `p1`, `p2` as above, subtracting `p2` from `p1` is accomplished by + calling `c.subtract(p1,p2)`. +- **Scalar multiplication**: For `c` as above, `p: Point` a point on the curve and `n: Field`, + scalar multiplication is given by `c.mul(n,p)`. If instead `n :: [u1; N]`, i.e. `n` is a bit + array, the `bit_mul` method may be used instead: `c.bit_mul(n,p)` +- **Multi-scalar multiplication**: For `c` as above and arrays `n: [Field; N]` and `p: [Point; N]`, + multi-scalar multiplication is given by `c.msm(n,p)`. +- **Coordinate representation conversions**: The `into_group` method converts a point or curve + configuration in the affine representation to one in the CurveGroup representation, and + `into_affine` goes in the other direction. +- **Curve representation conversions**: `tecurve` and `montcurve` curves and points are equivalent + and may be converted between one another by calling `into_montcurve` or `into_tecurve` on their + configurations or points. `swcurve` is more general and a curve c of one of the other two types + may be converted to this representation by calling `c.into_swcurve()`, whereas a point `p` lying + on the curve given by `c` may be mapped to its corresponding `swcurve` point by calling + `c.map_into_swcurve(p)`. +- **Map-to-curve methods**: The Elligator 2 method of mapping a field element `n: Field` into a + `tecurve` or `montcurve` with configuration `c` may be called as `c.elligator2_map(n)`. For all of + the curve configurations, the SWU map-to-curve method may be called as `c.swu_map(z,n)`, where + `z: Field` depends on `Field` and `c` and must be chosen by the user (the conditions it needs to + satisfy are specified in the comments + [here](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr)). + +## Examples + +The +[ec_baby_jubjub test](https://github.com/noir-lang/noir/blob/master/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr) +illustrates all of the above primitives on various forms of the Baby Jubjub curve. A couple of more +interesting examples in Noir would be: + +Public-key cryptography: Given an elliptic curve and a 'base point' on it, determine the public key +from the private key. This is a matter of using scalar multiplication. In the case of Baby Jubjub, +for example, this code would do: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; + +fn bjj_pub_key(priv_key: Field) -> Point +{ + + let bjj = Curve::new(168700, 168696, G::new(995203441582195749578291179787384436505546430278305826713579947235728471134,5472060717959818805561601436314318772137091100104008585924551046643952123905)); + + let base_pt = Point::new(5299619240641551281634865583518297030282874472190772894086521144482721001553, 16950150798460657717958625567821834550301663161624707787222815936182638968203); + + bjj.mul(priv_key,base_pt) +} +``` + +This would come in handy in a Merkle proof. + +- EdDSA signature verification: This is a matter of combining these primitives with a suitable hash + function. See + [feat(stdlib): EdDSA sig verification noir#1136](https://github.com/noir-lang/noir/pull/1136) for + the case of Baby Jubjub and the Poseidon hash function. 
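+
+Returning to the `bjj_pub_key` example above, here is a minimal sketch of how the derived key might be used inside a circuit (the function and parameter names are illustrative, and the same `Curve`/`Point` imports as above are assumed):
+
+```rust
+// Prove knowledge of the private key behind a claimed public key.
+// `claimed_pub_key` would typically be a public input of the circuit.
+fn check_pub_key(priv_key: Field, claimed_pub_key: Point) {
+    let derived = bjj_pub_key(priv_key);
+    // Point equality as described in the Methods section above.
+    assert(derived.eq(claimed_pub_key));
+}
+```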
diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx new file mode 100644 index 00000000000..4394b48f907 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx @@ -0,0 +1,98 @@ +--- +title: ECDSA Signature Verification +description: Learn about the cryptographic primitives regarding ECDSA over the secp256k1 and secp256r1 curves +keywords: [cryptographic primitives, Noir project, ecdsa, secp256k1, secp256r1, signatures] +sidebar_position: 3 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +Noir supports ECDSA signatures verification over the secp256k1 and secp256r1 curves. + +## ecdsa_secp256k1::verify_signature + +Verifier for ECDSA Secp256k1 signatures. +See ecdsa_secp256k1::verify_signature_slice for a version that accepts slices directly. + +```rust title="ecdsa_secp256k1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + + +## ecdsa_secp256k1::verify_signature_slice + +Verifier for ECDSA Secp256k1 signatures where the message is a slice. + +```rust title="ecdsa_secp256k1_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L13-L20 + + + + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures. +See ecdsa_secp256r1::verify_signature_slice for a version that accepts slices directly. + +```rust title="ecdsa_secp256r1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures where the message is a slice. 
+ +```rust title="ecdsa_secp256r1_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L13-L20 + + + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/eddsa.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/eddsa.mdx new file mode 100644 index 00000000000..c2c0624dfad --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/eddsa.mdx @@ -0,0 +1,37 @@ +--- +title: EdDSA Verification +description: Learn about the cryptographic primitives regarding EdDSA +keywords: [cryptographic primitives, Noir project, eddsa, signatures] +sidebar_position: 5 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## eddsa::eddsa_poseidon_verify + +Verifier for EdDSA signatures + +```rust +fn eddsa_poseidon_verify(public_key_x : Field, public_key_y : Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, message: Field) -> bool +``` + +It is also possible to specify the hash algorithm used for the signature by using the `eddsa_verify_with_hasher` function with a parameter implementing the Hasher trait. For instance, if you want to use Poseidon2 instead, you can do the following: +```rust +use dep::std::hash::poseidon2::Poseidon2Hasher; + +let mut hasher = Poseidon2Hasher::default(); +eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher); +``` + + + +## eddsa::eddsa_to_pub + +Private to public key conversion. + +Returns `(pub_key_x, pub_key_y)` + +```rust +fn eddsa_to_pub(secret : Field) -> (Field, Field) +``` + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/hashes.mdx new file mode 100644 index 00000000000..695c7d9406f --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -0,0 +1,331 @@ +--- +title: Hash methods +description: + Learn about the cryptographic primitives ready to use for any Noir project, including sha256, + blake2s, pedersen, mimc_bn254 and mimc +keywords: + [cryptographic primitives, Noir project, sha256, blake2s, pedersen, mimc_bn254, mimc, hash] +sidebar_position: 0 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## sha256 + +Given an array of bytes, returns the resulting sha256 hash. +See sha256_slice for a version that works directly on slices. + +```rust title="sha256" showLineNumbers +pub fn sha256(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L10-L12 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::sha256(x); +} +``` + + + +## sha256_slice + +A version of sha256 specialized to slices: + +```rust title="sha256_slice" showLineNumbers +pub fn sha256_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L16-L18 + + + + +## blake2s + +Given an array of bytes, returns an array with the Blake2 hash +See blake2s_slice for a version that works directly on slices. 
+ +```rust title="blake2s" showLineNumbers +pub fn blake2s(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L22-L24 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake2s(x); +} +``` + + + +## blake2s_slice + +A version of blake2s specialized to slices: + +```rust title="blake2s_slice" showLineNumbers +pub fn blake2s_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L28-L30 + + + + +## blake3 + +Given an array of bytes, returns an array with the Blake3 hash +See blake3_slice for a version that works directly on slices. + +```rust title="blake3" showLineNumbers +pub fn blake3(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L34-L36 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake3(x); +} +``` + + + +## blake3_slice + +A version of blake3 specialized to slices: + +```rust title="blake3_slice" showLineNumbers +pub fn blake3_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L40-L42 + + + + +## pedersen_hash + +Given an array of Fields, returns the Pedersen hash. +See pedersen_hash_slice for a version that works directly on slices. + +```rust title="pedersen_hash" showLineNumbers +pub fn pedersen_hash(input: [Field; N]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L78-L80 + + +example: + +```rust title="pedersen-hash" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_hash: Field) { + let hash = std::hash::pedersen_hash([x, y]); + assert_eq(hash, expected_hash); +} +``` +> Source code: test_programs/execution_success/pedersen_hash/src/main.nr#L1-L8 + + + + +## pedersen_hash_slice + +Given a slice of Fields, returns the Pedersen hash. + +```rust title="pedersen_hash_slice" showLineNumbers +pub fn pedersen_hash_slice(input: [Field]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L85-L87 + + + + +## pedersen_commitment + +Given an array of Fields, returns the Pedersen commitment. +See pedersen_commitment_slice for a version that works directly on slices. + +```rust title="pedersen_commitment" showLineNumbers +struct PedersenPoint { + x : Field, + y : Field, +} + +pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint { +``` +> Source code: noir_stdlib/src/hash.nr#L45-L52 + + +example: + +```rust title="pedersen-commitment" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_commitment: std::hash::PedersenPoint) { + let commitment = std::hash::pedersen_commitment([x, y]); + assert_eq(commitment.x, expected_commitment.x); + assert_eq(commitment.y, expected_commitment.y); +} +``` +> Source code: test_programs/execution_success/pedersen_commitment/src/main.nr#L1-L9 + + + + +## pedersen_commitment_slice + +Given a slice of Fields, returns the Pedersen commitment. + +```rust title="pedersen_commitment_slice" showLineNumbers +pub fn pedersen_commitment_slice(input: [Field]) -> PedersenPoint { + pedersen_commitment_with_separator_slice(input, 0) +} +``` +> Source code: noir_stdlib/src/hash.nr#L56-L60 + + + + +## keccak256 + +Given an array of bytes (`u8`), returns the resulting keccak hash as an array of +32 bytes (`[u8; 32]`). Specify a message_size to hash only the first +`message_size` bytes of the input. See keccak256_slice for a version that works +directly on slices. 
+ +```rust title="keccak256" showLineNumbers +pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L113-L115 + + +example: + +```rust title="keccak256" showLineNumbers +use dep::std; + +fn main(x: Field, result: [u8; 32]) { + // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field + // The padding is taken care of by the program + let digest = std::hash::keccak256([x as u8], 1); + assert(digest == result); + + //#1399: variable message size + let message_size = 4; + let hash_a = std::hash::keccak256([1, 2, 3, 4], message_size); + let hash_b = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); + + assert(hash_a == hash_b); + + let message_size_big = 8; + let hash_c = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); + + assert(hash_a != hash_c); +} +``` +> Source code: test_programs/execution_success/keccak256/src/main.nr#L1-L22 + + + + +## keccak256_slice + +Given a slice of bytes (`u8`), returns the resulting keccak hash as an array of +32 bytes (`[u8; 32]`). + +```rust title="keccak256_slice" showLineNumbers +pub fn keccak256_slice(input: [u8], message_size: u32) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L119-L121 + + + + +## poseidon + +Given an array of Fields, returns a new Field with the Poseidon Hash. Mind that you need to specify +how many inputs are there to your Poseidon function. + +```rust +// example for hash_1, hash_2 accepts an array of length 2, etc +fn hash_1(input: [Field; 1]) -> Field +``` + +example: + +```rust title="poseidon" showLineNumbers +use dep::std::hash::poseidon; +use dep::std::hash::poseidon2; + +fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field, x3: [Field; 4], y3: Field) { + let hash1 = poseidon::bn254::hash_2(x1); + assert(hash1 == y1); + + let hash2 = poseidon::bn254::hash_4(x2); + assert(hash2 == y2); + + let hash3 = poseidon2::Poseidon2::hash(x3, x3.len()); + assert(hash3 == y3); +} +``` +> Source code: test_programs/execution_success/poseidon_bn254_hash/src/main.nr#L1-L15 + + +## poseidon 2 + +Given an array of Fields, returns a new Field with the Poseidon2 Hash. Contrary to the Poseidon +function, there is only one hash and you can specify a message_size to hash only the first +`message_size` bytes of the input, + +```rust +// example for hashing the first three elements of the input +Poseidon2::hash(input, 3); +``` + +The above example for Poseidon also includes Poseidon2. + +## mimc_bn254 and mimc + +`mimc_bn254` is `mimc`, but with hardcoded parameters for the BN254 curve. You can use it by +providing an array of Fields, and it returns a Field with the hash. You can use the `mimc` method if +you're willing to input your own constants: + +```rust +fn mimc(x: Field, k: Field, constants: [Field; N], exp : Field) -> Field +``` + +otherwise, use the `mimc_bn254` method: + +```rust +fn mimc_bn254(array: [Field; N]) -> Field +``` + +example: + +```rust + +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::mimc::mimc_bn254(x); +} +``` + +## hash_to_field + +```rust +fn hash_to_field(_input : [Field]) -> Field {} +``` + +Calculates the `blake2s` hash of the inputs and returns the hash modulo the field modulus to return +a value which can be represented as a `Field`. 
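+
+example (a minimal sketch, assuming the function is exposed under `std::hash` like the other functions on this page):
+
+```rust
+fn main(x: Field, y: Field, expected: pub Field) {
+    // Compress two field elements into a single field element.
+    let h = std::hash::hash_to_field(&[x, y]);
+    assert(h == expected);
+}
+```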
+ diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/index.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/index.md new file mode 100644 index 00000000000..650f30165d5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/index.md @@ -0,0 +1,14 @@ +--- +title: Cryptographic Primitives +description: + Learn about the cryptographic primitives ready to use for any Noir project +keywords: + [ + cryptographic primitives, + Noir project, + ] +--- + +The Noir team is progressively adding new cryptographic primitives to the standard library. Reach out for news or if you would be interested in adding more of these calculations in Noir. + +Some methods are available thanks to the Aztec backend, not being performed using Noir. When using other backends, these methods may or may not be supplied. diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/scalar.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/scalar.mdx new file mode 100644 index 00000000000..df411ca5443 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/scalar.mdx @@ -0,0 +1,33 @@ +--- +title: Scalar multiplication +description: See how you can perform scalar multiplications over a fixed base in Noir +keywords: [cryptographic primitives, Noir project, scalar multiplication] +sidebar_position: 1 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## scalar_mul::fixed_base_embedded_curve + +Performs scalar multiplication over the embedded curve whose coordinates are defined by the +configured noir field. For the BN254 scalar field, this is BabyJubJub or Grumpkin. + +```rust title="fixed_base_embedded_curve" showLineNumbers +pub fn fixed_base_embedded_curve( + low: Field, + high: Field +) -> [Field; 2] +``` +> Source code: noir_stdlib/src/scalar_mul.nr#L27-L32 + + +example + +```rust +fn main(x : Field) { + let scal = std::scalar_mul::fixed_base_embedded_curve(x); + println(scal); +} +``` + + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/schnorr.mdx b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/schnorr.mdx new file mode 100644 index 00000000000..b59e69c8f07 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/cryptographic_primitives/schnorr.mdx @@ -0,0 +1,64 @@ +--- +title: Schnorr Signatures +description: Learn how you can verify Schnorr signatures using Noir +keywords: [cryptographic primitives, Noir project, schnorr, signatures] +sidebar_position: 2 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## schnorr::verify_signature + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin). +See schnorr::verify_signature_slice for a version that works directly on slices. 
+ +```rust title="schnorr_verify" showLineNumbers +pub fn verify_signature( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/schnorr.nr#L2-L9 + + +where `_signature` can be generated like so using the npm package +[@noir-lang/barretenberg](https://www.npmjs.com/package/@noir-lang/barretenberg) + +```js +const { BarretenbergWasm } = require('@noir-lang/barretenberg/dest/wasm'); +const { Schnorr } = require('@noir-lang/barretenberg/dest/crypto/schnorr'); + +... + +const barretenberg = await BarretenbergWasm.new(); +const schnorr = new Schnorr(barretenberg); +const pubKey = schnorr.computePublicKey(privateKey); +const message = ... +const signature = Array.from( + schnorr.constructSignature(hash, privateKey).toBuffer() +); + +... +``` + + + +## schnorr::verify_signature_slice + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin) +where the message is a slice. + +```rust title="schnorr_verify_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/schnorr.nr#L13-L20 + + + diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/logging.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/logging.md new file mode 100644 index 00000000000..db75ef9f86f --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/logging.md @@ -0,0 +1,78 @@ +--- +title: Logging +description: + Learn how to use the println statement for debugging in Noir with this tutorial. Understand the + basics of logging in Noir and how to implement it in your code. +keywords: + [ + noir logging, + println statement, + print statement, + debugging in noir, + noir std library, + logging tutorial, + basic logging in noir, + noir logging implementation, + noir debugging techniques, + rust, + ] +--- + +The standard library provides two familiar statements you can use: `println` and `print`. Despite being a limited implementation of rust's `println!` and `print!` macros, these constructs can be useful for debugging. + +You can print the output of both statements in your Noir code by using the `nargo execute` command or the `--show-output` flag when using `nargo test` (provided there are print statements in your tests). + +It is recommended to use `nargo execute` if you want to debug failing constraints with `println` or `print` statements. This is due to every input in a test being a constant rather than a witness, so we issue an error during compilation while we only print during execution (which comes after compilation). Neither `println`, nor `print` are callable for failed constraints caught at compile time. + +Both `print` and `println` are generic functions which can work on integers, fields, strings, and even structs or expressions. Note however, that slices are currently unsupported. For example: + +```rust +struct Person { + age: Field, + height: Field, +} + +fn main(age: Field, height: Field) { + let person = Person { + age: age, + height: height, + }; + println(person); + println(age + height); + println("Hello world!"); +} +``` + +You can print different types in the same statement (including strings) with a type called `fmtstr`. 
It can be specified in the same way as a normal string, just prepended with an "f" character: + +```rust + let fmt_str = f"i: {i}, j: {j}"; + println(fmt_str); + + let s = myStruct { y: x, x: y }; + println(s); + + println(f"i: {i}, s: {s}"); + + println(x); + println([x, y]); + + let foo = fooStruct { my_struct: s, foo: 15 }; + println(f"s: {s}, foo: {foo}"); + + println(15); // prints 0x0f, implicit Field + println(-1 as u8); // prints 255 + println(-1 as i8); // prints -1 +``` + +Examples shown above are interchangeable between the two `print` statements: + +```rust +let person = Person { age : age, height : height }; + +println(person); +print(person); + +println("Hello world!"); // Prints with a newline at the end of the input +print("Hello world!"); // Prints the input and keeps cursor on the same line +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/merkle_trees.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/merkle_trees.md new file mode 100644 index 00000000000..6a9ebf72ada --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/merkle_trees.md @@ -0,0 +1,58 @@ +--- +title: Merkle Trees +description: Learn about Merkle Trees in Noir with this tutorial. Explore the basics of computing a merkle root using a proof, with examples. +keywords: + [ + Merkle trees in Noir, + Noir programming language, + check membership, + computing root from leaf, + Noir Merkle tree implementation, + Merkle tree tutorial, + Merkle tree code examples, + Noir libraries, + pedersen hash., + ] +--- + +## compute_merkle_root + +Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](./cryptographic_primitives/hashes.mdx#pedersen_hash). + +```rust +fn compute_merkle_root(leaf : Field, index : Field, hash_path: [Field]) -> Field +``` + +example: + +```rust +/** + // these values are for this example only + index = "0" + priv_key = "0x000000000000000000000000000000000000000000000000000000616c696365" + secret = "0x1929ea3ab8d9106a899386883d9428f8256cfedb3c4f6b66bf4aa4d28a79988f" + note_hash_path = [ + "0x1e61bdae0f027b1b2159e1f9d3f8d00fa668a952dddd822fda80dc745d6f65cc", + "0x0e4223f3925f98934393c74975142bd73079ab0621f4ee133cee050a3c194f1a", + "0x2fd7bb412155bf8693a3bd2a3e7581a679c95c68a052f835dddca85fa1569a40" + ] + */ +fn main(index: Field, priv_key: Field, secret: Field, note_hash_path: [Field; 3]) { + + let pubkey = std::scalar_mul::fixed_base_embedded_curve(priv_key); + let pubkey_x = pubkey[0]; + let pubkey_y = pubkey[1]; + let note_commitment = std::hash::pedersen(&[pubkey_x, pubkey_y, secret]); + + let root = std::merkle::compute_merkle_root(note_commitment[0], index, note_hash_path.as_slice()); + println(root); +} +``` + +To check merkle tree membership: + +1. Include a merkle root as a program input. +2. Compute the merkle root of a given leaf, index and hash path. +3. Assert the merkle roots are equal. + +For more info about merkle trees, see the Wikipedia [page](https://en.wikipedia.org/wiki/Merkle_tree). diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/options.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/options.md new file mode 100644 index 00000000000..a1bd4e1de5f --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/options.md @@ -0,0 +1,101 @@ +--- +title: Option Type +--- + +The `Option` type is a way to express that a value might be present (`Some(T))` or absent (`None`). 
It's a safer way to handle potential absence of values, compared to using nulls in many other languages. + +```rust +struct Option { + None, + Some(T), +} +``` + +The `Option` type, already imported into your Noir program, can be used directly: + +```rust +fn main() { + let none = Option::none(); + let some = Option::some(3); +} +``` + +See [this test](https://github.com/noir-lang/noir/blob/5cbfb9c4a06c8865c98ff2b594464b037d821a5c/crates/nargo_cli/tests/test_data/option/src/main.nr) for a more comprehensive set of examples of each of the methods described below. + +## Methods + +### none + +Constructs a none value. + +### some + +Constructs a some wrapper around a given value. + +### is_none + +Returns true if the Option is None. + +### is_some + +Returns true of the Option is Some. + +### unwrap + +Asserts `self.is_some()` and returns the wrapped value. + +### unwrap_unchecked + +Returns the inner value without asserting `self.is_some()`. This method can be useful within an if condition when we already know that `option.is_some()`. If the option is None, there is no guarantee what value will be returned, only that it will be of type T for an `Option`. + +### unwrap_or + +Returns the wrapped value if `self.is_some()`. Otherwise, returns the given default value. + +### unwrap_or_else + +Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return a default value. + +### expect + +Asserts `self.is_some()` with a provided custom message and returns the contained `Some` value. The custom message is expected to be a format string. + +### map + +If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`. + +### map_or + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns the given default value. + +### map_or_else + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns `default()`. + +### and + +Returns None if self is None. Otherwise, this returns `other`. + +### and_then + +If self is None, this returns None. Otherwise, this calls the given function with the Some value contained within self, and returns the result of that call. In some languages this function is called `flat_map` or `bind`. + +### or + +If self is Some, return self. Otherwise, return `other`. + +### or_else + +If self is Some, return self. Otherwise, return `default()`. + +### xor + +If only one of the two Options is Some, return that option. Otherwise, if both options are Some or both are None, None is returned. + +### filter + +Returns `Some(x)` if self is `Some(x)` and `predicate(x)` is true. Otherwise, this returns `None`. + +### flatten + +Flattens an `Option>` into a `Option`. This returns `None` if the outer Option is None. Otherwise, this returns the inner Option. diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/recursion.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/recursion.md new file mode 100644 index 00000000000..a93894043dc --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/recursion.md @@ -0,0 +1,88 @@ +--- +title: Recursive Proofs +description: Learn about how to write recursive proofs in Noir. +keywords: [recursion, recursive proofs, verification_key, verify_proof] +--- + +Noir supports recursively verifying proofs, meaning you verify the proof of a Noir program in another Noir program. This enables creating proofs of arbitrary size by doing step-wise verification of smaller components of a large proof. 
+ +Read [the explainer on recursion](../../explainers/explainer-recursion.md) to know more about this function and the [guide on how to use it.](../../how_to/how-to-recursion.md) + +## The `#[recursive]` Attribute + +In Noir, the `#[recursive]` attribute is used to indicate that a circuit is designed for recursive proof generation. When applied, it informs the compiler and the tooling that the circuit should be compiled in a way that makes its proofs suitable for recursive verification. This attribute eliminates the need for manual flagging of recursion at the tooling level, streamlining the proof generation process for recursive circuits. + +### Example usage with `#[recursive]` + +```rust +#[recursive] +fn main(x: Field, y: pub Field) { + assert(x == y, "x and y are not equal"); +} + +// This marks the circuit as recursion-friendly and indicates that proofs generated from this circuit +// are intended for recursive verification. +``` + +By incorporating this attribute directly in the circuit's definition, tooling like Nargo and NoirJS can automatically execute recursive-specific duties for Noir programs (e.g. recursive-friendly proof artifact generation) without additional flags or configurations. + +## Verifying Recursive Proofs + +```rust +#[foreign(recursive_aggregation)] +pub fn verify_proof(verification_key: [Field], proof: [Field], public_inputs: [Field], key_hash: Field) {} +``` + +:::info + +This is a black box function. Read [this section](./black_box_fns) to learn more about black box functions in Noir. + +::: + +## Example usage + +```rust +use dep::std; + +fn main( + verification_key : [Field; 114], + proof : [Field; 93], + public_inputs : [Field; 1], + key_hash : Field, + proof_b : [Field; 93], +) { + std::verify_proof( + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash + ); + + std::verify_proof( + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), + key_hash + ); +} +``` + +You can see a full example of recursive proofs in [this example recursion demo repo](https://github.com/noir-lang/noir-examples/tree/master/recursion). + +## Parameters + +### `verification_key` + +The verification key for the zk program that is being verified. + +### `proof` + +The proof for the zk program that is being verified. + +### `public_inputs` + +These represent the public inputs of the proof we are verifying. + +### `key_hash` + +A key hash is used to check the validity of the verification key. The circuit implementing this opcode can use this hash to ensure that the key provided to the circuit matches the key produced by the circuit creator. diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/traits.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/traits.md new file mode 100644 index 00000000000..68a9dc3d54b --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/traits.md @@ -0,0 +1,408 @@ +--- +title: Traits +description: Noir's stdlib provides a few commonly used traits. +keywords: [traits, trait, interface, protocol, default, add, eq] +--- + +## `std::default` + +### `std::default::Default` + +```rust title="default-trait" showLineNumbers +trait Default { + fn default() -> Self; +} +``` +> Source code: noir_stdlib/src/default.nr#L1-L5 + + +Constructs a default value of a type. + +Implementations: +```rust +impl Default for Field { .. } + +impl Default for i8 { .. } +impl Default for i16 { .. } +impl Default for i32 { .. } +impl Default for i64 { .. 
} + +impl Default for u8 { .. } +impl Default for u16 { .. } +impl Default for u32 { .. } +impl Default for u64 { .. } + +impl Default for () { .. } +impl Default for bool { .. } + +impl Default for [T; N] + where T: Default { .. } + +impl Default for [T] { .. } + +impl Default for (A, B) + where A: Default, B: Default { .. } + +impl Default for (A, B, C) + where A: Default, B: Default, C: Default { .. } + +impl Default for (A, B, C, D) + where A: Default, B: Default, C: Default, D: Default { .. } + +impl Default for (A, B, C, D, E) + where A: Default, B: Default, C: Default, D: Default, E: Default { .. } +``` + +For primitive integer types, the return value of `default` is `0`. Container +types such as arrays are filled with default values of their element type, +except slices whose length is unknown and thus defaulted to zero. + + +## `std::convert` + +### `std::convert::From` + +```rust title="from-trait" showLineNumbers +trait From { + fn from(input: T) -> Self; +} +``` +> Source code: noir_stdlib/src/convert.nr#L1-L5 + + +The `From` trait defines how to convert from a given type `T` to the type on which the trait is implemented. + +The Noir standard library provides a number of implementations of `From` between primitive types. +```rust title="from-impls" showLineNumbers +// Unsigned integers + +impl From for u32 { fn from(value: u8) -> u32 { value as u32 } } + +impl From for u64 { fn from(value: u8) -> u64 { value as u64 } } +impl From for u64 { fn from(value: u32) -> u64 { value as u64 } } + +impl From for Field { fn from(value: u8) -> Field { value as Field } } +impl From for Field { fn from(value: u32) -> Field { value as Field } } +impl From for Field { fn from(value: u64) -> Field { value as Field } } + +// Signed integers + +impl From for i32 { fn from(value: i8) -> i32 { value as i32 } } + +impl From for i64 { fn from(value: i8) -> i64 { value as i64 } } +impl From for i64 { fn from(value: i32) -> i64 { value as i64 } } + +// Booleans +impl From for u8 { fn from(value: bool) -> u8 { value as u8 } } +impl From for u32 { fn from(value: bool) -> u32 { value as u32 } } +impl From for u64 { fn from(value: bool) -> u64 { value as u64 } } +impl From for i8 { fn from(value: bool) -> i8 { value as i8 } } +impl From for i32 { fn from(value: bool) -> i32 { value as i32 } } +impl From for i64 { fn from(value: bool) -> i64 { value as i64 } } +impl From for Field { fn from(value: bool) -> Field { value as Field } } +``` +> Source code: noir_stdlib/src/convert.nr#L25-L52 + + +#### When to implement `From` + +As a general rule of thumb, `From` may be implemented in the [situations where it would be suitable in Rust](https://doc.rust-lang.org/std/convert/trait.From.html#when-to-implement-from): + +- The conversion is *infallible*: Noir does not provide an equivalent to Rust's `TryFrom`, if the conversion can fail then provide a named method instead. +- The conversion is *lossless*: semantically, it should not lose or discard information. For example, `u32: From` can losslessly convert any `u16` into a valid `u32` such that the original `u16` can be recovered. On the other hand, `u16: From` should not be implemented as `2**16` is a `u32` which cannot be losslessly converted into a `u16`. +- The conversion is *value-preserving*: the conceptual kind and meaning of the resulting value is the same, even though the Noir type and technical representation might be different. 
While it's possible to infallibly and losslessly convert a `u8` into a `str<2>` hex representation, `4u8` and `"04"` are too different for `str<2>: From` to be implemented. +- The conversion is *obvious*: it's the only reasonable conversion between the two types. If there's ambiguity on how to convert between them such that the same input could potentially map to two different values then a named method should be used. For instance rather than implementing `U128: From<[u8; 16]>`, the methods `U128::from_le_bytes` and `U128::from_be_bytes` are used as otherwise the endianness of the array would be ambiguous, resulting in two potential values of `U128` from the same byte array. + +One additional recommendation specific to Noir is: +- The conversion is *efficient*: it's relatively cheap to convert between the two types. Due to being a ZK DSL, it's more important to avoid unnecessary computation compared to Rust. If the implementation of `From` would encourage users to perform unnecessary conversion, resulting in additional proving time, then it may be preferable to expose functionality such that this conversion may be avoided. + +### `std::convert::Into` + +The `Into` trait is defined as the reciprocal of `From`. It should be easy to convince yourself that if we can convert to type `A` from type `B`, then it's possible to convert type `B` into type `A`. + +For this reason, implementing `From` on a type will automatically generate a matching `Into` implementation. One should always prefer implementing `From` over `Into` as implementing `Into` will not generate a matching `From` implementation. + +```rust title="into-trait" showLineNumbers +trait Into { + fn into(input: Self) -> T; +} + +impl Into for U where T: From { + fn into(input: U) -> T { + T::from(input) + } +} +``` +> Source code: noir_stdlib/src/convert.nr#L13-L23 + + +`Into` is most useful when passing function arguments where the types don't quite match up with what the function expects. In this case, the compiler has enough type information to perform the necessary conversion by just appending `.into()` onto the arguments in question. + + +## `std::cmp` + +### `std::cmp::Eq` + +```rust title="eq-trait" showLineNumbers +trait Eq { + fn eq(self, other: Self) -> bool; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L1-L5 + + +Returns `true` if `self` is equal to `other`. Implementing this trait on a type +allows the type to be used with `==` and `!=`. + +Implementations: +```rust +impl Eq for Field { .. } + +impl Eq for i8 { .. } +impl Eq for i16 { .. } +impl Eq for i32 { .. } +impl Eq for i64 { .. } + +impl Eq for u8 { .. } +impl Eq for u16 { .. } +impl Eq for u32 { .. } +impl Eq for u64 { .. } + +impl Eq for () { .. } +impl Eq for bool { .. } + +impl Eq for [T; N] + where T: Eq { .. } + +impl Eq for [T] + where T: Eq { .. } + +impl Eq for (A, B) + where A: Eq, B: Eq { .. } + +impl Eq for (A, B, C) + where A: Eq, B: Eq, C: Eq { .. } + +impl Eq for (A, B, C, D) + where A: Eq, B: Eq, C: Eq, D: Eq { .. } + +impl Eq for (A, B, C, D, E) + where A: Eq, B: Eq, C: Eq, D: Eq, E: Eq { .. } +``` + +### `std::cmp::Ord` + +```rust title="ord-trait" showLineNumbers +trait Ord { + fn cmp(self, other: Self) -> Ordering; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L102-L106 + + +`a.cmp(b)` compares two values returning `Ordering::less()` if `a < b`, +`Ordering::equal()` if `a == b`, or `Ordering::greater()` if `a > b`. +Implementing this trait on a type allows `<`, `<=`, `>`, and `>=` to be +used on values of the type. 
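+
+For instance, a user-defined type can be made orderable by implementing `cmp` itself. The `Card` type below is purely hypothetical and not part of the standard library:
+
+```rust
+use dep::std::cmp::{Ord, Ordering};
+
+struct Card {
+    rank: u8,
+    suit: u8,
+}
+
+impl Ord for Card {
+    // Order by rank first, breaking ties by suit.
+    fn cmp(self, other: Self) -> Ordering {
+        if self.rank < other.rank {
+            Ordering::less()
+        } else if self.rank > other.rank {
+            Ordering::greater()
+        } else {
+            self.suit.cmp(other.suit)
+        }
+    }
+}
+```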
+ +Implementations: + +```rust +impl Ord for u8 { .. } +impl Ord for u16 { .. } +impl Ord for u32 { .. } +impl Ord for u64 { .. } + +impl Ord for i8 { .. } +impl Ord for i16 { .. } +impl Ord for i32 { .. } + +impl Ord for i64 { .. } + +impl Ord for () { .. } +impl Ord for bool { .. } + +impl Ord for [T; N] + where T: Ord { .. } + +impl Ord for [T] + where T: Ord { .. } + +impl Ord for (A, B) + where A: Ord, B: Ord { .. } + +impl Ord for (A, B, C) + where A: Ord, B: Ord, C: Ord { .. } + +impl Ord for (A, B, C, D) + where A: Ord, B: Ord, C: Ord, D: Ord { .. } + +impl Ord for (A, B, C, D, E) + where A: Ord, B: Ord, C: Ord, D: Ord, E: Ord { .. } +``` + +## `std::ops` + +### `std::ops::Add`, `std::ops::Sub`, `std::ops::Mul`, and `std::ops::Div` + +These traits abstract over addition, subtraction, multiplication, and division respectively. +Implementing these traits for a given type will also allow that type to be used with the corresponding operator +for that trait (`+` for Add, etc) in addition to the normal method names. + +```rust title="add-trait" showLineNumbers +trait Add { + fn add(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L1-L5 + +```rust title="sub-trait" showLineNumbers +trait Sub { + fn sub(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L17-L21 + +```rust title="mul-trait" showLineNumbers +trait Mul { + fn mul(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L33-L37 + +```rust title="div-trait" showLineNumbers +trait Div { + fn div(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L49-L53 + + +The implementations block below is given for the `Add` trait, but the same types that implement +`Add` also implement `Sub`, `Mul`, and `Div`. + +Implementations: +```rust +impl Add for Field { .. } + +impl Add for i8 { .. } +impl Add for i16 { .. } +impl Add for i32 { .. } +impl Add for i64 { .. } + +impl Add for u8 { .. } +impl Add for u16 { .. } +impl Add for u32 { .. } +impl Add for u64 { .. } +``` + +### `std::ops::Rem` + +```rust title="rem-trait" showLineNumbers +trait Rem{ + fn rem(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L65-L69 + + +`Rem::rem(a, b)` is the remainder function returning the result of what is +left after dividing `a` and `b`. Implementing `Rem` allows the `%` operator +to be used with the implementation type. + +Unlike other numeric traits, `Rem` is not implemented for `Field`. 
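+
+As a quick sketch of where this matters, `%` works on the integer types but the same expression would be rejected on `Field` values:
+
+```rust
+fn main(total_seconds: u64) -> pub u64 {
+    let minutes = total_seconds / 60;
+    // `%` desugars to `Rem::rem`, which u64 implements and Field does not.
+    let seconds = total_seconds % 60;
+    assert(minutes * 60 + seconds == total_seconds);
+    seconds
+}
+```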
+ +Implementations: +```rust +impl Rem for u8 { fn rem(self, other: u8) -> u8 { self % other } } +impl Rem for u16 { fn rem(self, other: u16) -> u16 { self % other } } +impl Rem for u32 { fn rem(self, other: u32) -> u32 { self % other } } +impl Rem for u64 { fn rem(self, other: u64) -> u64 { self % other } } + +impl Rem for i8 { fn rem(self, other: i8) -> i8 { self % other } } +impl Rem for i16 { fn rem(self, other: i16) -> i16 { self % other } } +impl Rem for i32 { fn rem(self, other: i32) -> i32 { self % other } } +impl Rem for i64 { fn rem(self, other: i64) -> i64 { self % other } } +``` + +### `std::ops::{ BitOr, BitAnd, BitXor }` + +```rust title="bitor-trait" showLineNumbers +trait BitOr { + fn bitor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L79-L83 + +```rust title="bitand-trait" showLineNumbers +trait BitAnd { + fn bitand(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L95-L99 + +```rust title="bitxor-trait" showLineNumbers +trait BitXor { + fn bitxor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L111-L115 + + +Traits for the bitwise operations `|`, `&`, and `^`. + +Implementing `BitOr`, `BitAnd` or `BitXor` for a type allows the `|`, `&`, or `^` operator respectively +to be used with the type. + +The implementations block below is given for the `BitOr` trait, but the same types that implement +`BitOr` also implement `BitAnd` and `BitXor`. + +Implementations: +```rust +impl BitOr for bool { fn bitor(self, other: bool) -> bool { self | other } } + +impl BitOr for u8 { fn bitor(self, other: u8) -> u8 { self | other } } +impl BitOr for u16 { fn bitor(self, other: u16) -> u16 { self | other } } +impl BitOr for u32 { fn bitor(self, other: u32) -> u32 { self | other } } +impl BitOr for u64 { fn bitor(self, other: u64) -> u64 { self | other } } + +impl BitOr for i8 { fn bitor(self, other: i8) -> i8 { self | other } } +impl BitOr for i16 { fn bitor(self, other: i16) -> i16 { self | other } } +impl BitOr for i32 { fn bitor(self, other: i32) -> i32 { self | other } } +impl BitOr for i64 { fn bitor(self, other: i64) -> i64 { self | other } } +``` + +### `std::ops::{ Shl, Shr }` + +```rust title="shl-trait" showLineNumbers +trait Shl { + fn shl(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L127-L131 + +```rust title="shr-trait" showLineNumbers +trait Shr { + fn shr(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L142-L146 + + +Traits for a bit shift left and bit shift right. + +Implementing `Shl` for a type allows the left shift operator (`<<`) to be used with the implementation type. +Similarly, implementing `Shr` allows the right shift operator (`>>`) to be used with the type. + +Note that bit shifting is not currently implemented for signed types. + +The implementations block below is given for the `Shl` trait, but the same types that implement +`Shl` also implement `Shr`. 
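+
+A short sketch of both operators on an unsigned value:
+
+```rust
+fn main(x: u32) {
+    // Keep only the low 24 bits so that shifting left by 8 cannot overflow a u32.
+    let small = x & 0xffffff;
+    let shifted = small << 8;
+    // Shifting back down by the same amount recovers the masked value.
+    assert((shifted >> 8) == small);
+}
+```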
+ +Implementations: +```rust +impl Shl for u8 { fn shl(self, other: u8) -> u8 { self << other } } +impl Shl for u16 { fn shl(self, other: u16) -> u16 { self << other } } +impl Shl for u32 { fn shl(self, other: u32) -> u32 { self << other } } +impl Shl for u64 { fn shl(self, other: u64) -> u64 { self << other } } +``` diff --git a/docs/versioned_docs/version-v0.26.0/noir/standard_library/zeroed.md b/docs/versioned_docs/version-v0.26.0/noir/standard_library/zeroed.md new file mode 100644 index 00000000000..f450fecdd36 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/noir/standard_library/zeroed.md @@ -0,0 +1,26 @@ +--- +title: Zeroed Function +description: + The zeroed function returns a zeroed value of any type. +keywords: + [ + zeroed + ] +--- + +Implements `fn zeroed() -> T` to return a zeroed value of any type. This function is generally unsafe to use as the zeroed bit pattern is not guaranteed to be valid for all types. It can however, be useful in cases when the value is guaranteed not to be used such as in a BoundedVec library implementing a growable vector, up to a certain length, backed by an array. The array can be initialized with zeroed values which are guaranteed to be inaccessible until the vector is pushed to. Similarly, enumerations in noir can be implemented using this method by providing zeroed values for the unused variants. + +You can access the function at `std::unsafe::zeroed`. + +This function currently supports the following types: + +- Field +- Bool +- Uint +- Array +- Slice +- String +- Tuple +- Function + +Using it on other types could result in unexpected behavior. diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/.nojekyll b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. 
\ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md new file mode 100644 index 00000000000..b18c1926b93 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md @@ -0,0 +1,119 @@ +# BarretenbergBackend + +## Implements + +- [`Backend`](../index.md#backend) + +## Constructors + +### new BarretenbergBackend(acirCircuit, options) + +```ts +new BarretenbergBackend(acirCircuit, options): BarretenbergBackend +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `acirCircuit` | `CompiledCircuit` | +| `options` | [`BackendOptions`](../type-aliases/BackendOptions.md) | + +#### Returns + +[`BarretenbergBackend`](BarretenbergBackend.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +*** + +### generateProof() + +```ts +generateProof(compressedWitness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `compressedWitness` | `Uint8Array` | + +#### Returns + +`Promise`\<`ProofData`\> + +#### Description + +Generates a proof + +*** + +### generateRecursiveProofArtifacts() + +```ts +generateRecursiveProofArtifacts(proofData, numOfPublicInputs): Promise +``` + +Generates artifacts that will be passed to a circuit that will verify this proof. + +Instead of passing the proof and verification key as a byte array, we pass them +as fields which makes it cheaper to verify in a circuit. + +The proof that is passed here will have been created using a circuit +that has the #[recursive] attribute on its `main` method. + +The number of public inputs denotes how many public inputs are in the inner proof. 
+ +#### Parameters + +| Parameter | Type | Default value | +| :------ | :------ | :------ | +| `proofData` | `ProofData` | `undefined` | +| `numOfPublicInputs` | `number` | `0` | + +#### Returns + +`Promise`\<`object`\> + +#### Example + +```typescript +const artifacts = await backend.generateRecursiveProofArtifacts(proof, numOfPublicInputs); +``` + +*** + +### verifyProof() + +```ts +verifyProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | `ProofData` | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Verifies a proof + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/index.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/index.md new file mode 100644 index 00000000000..c146316a915 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/index.md @@ -0,0 +1,58 @@ +# backend_barretenberg + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [BarretenbergBackend](classes/BarretenbergBackend.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [BackendOptions](type-aliases/BackendOptions.md) | - | + +## References + +### CompiledCircuit + +Renames and re-exports [Backend](index.md#backend) + +*** + +### ProofData + +Renames and re-exports [Backend](index.md#backend) + +## Variables + +### Backend + +```ts +Backend: any; +``` + +## Functions + +### publicInputsToWitnessMap() + +```ts +publicInputsToWitnessMap(publicInputs, abi): Backend +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `publicInputs` | `string`[] | +| `abi` | `Abi` | + +#### Returns + +[`Backend`](index.md#backend) + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md new file mode 100644 index 00000000000..b49a479f4f4 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md @@ -0,0 +1,21 @@ +# BackendOptions + +```ts +type BackendOptions: object; +``` + +## Description + +An options object, currently only used to specify the number of threads to use. + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `memory` | `object` | - | +| `memory.maximum` | `number` | - | +| `threads` | `number` | **Description**

Number of threads | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs new file mode 100644 index 00000000000..339353b9862 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend","label":"BarretenbergBackend"}]},{"type":"category","label":"Type Aliases","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions","label":"BackendOptions"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/.nojekyll b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/classes/Noir.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/classes/Noir.md new file mode 100644 index 00000000000..45dd62ee57e --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/classes/Noir.md @@ -0,0 +1,132 @@ +# Noir + +## Constructors + +### new Noir(circuit, backend) + +```ts +new Noir(circuit, backend?): Noir +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `circuit` | `CompiledCircuit` | +| `backend`? | `any` | + +#### Returns + +[`Noir`](Noir.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +#### Description + +Destroys the underlying backend instance. + +#### Example + +```typescript +await noir.destroy(); +``` + +*** + +### execute() + +```ts +execute(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | `InputMap` | +| `foreignCallHandler`? | [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<`object`\> + +#### Description + +Allows to execute a circuit to get its witness and return value. + +#### Example + +```typescript +async execute(inputs) +``` + +*** + +### generateProof() + +```ts +generateProof(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | `InputMap` | +| `foreignCallHandler`? | [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<`ProofData`\> + +#### Description + +Generates a witness and a proof given an object as input. 
+ +#### Example + +```typescript +async generateProof(input) +``` + +*** + +### verifyProof() + +```ts +verifyProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | `ProofData` | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Instantiates the verification key and verifies a proof. + +#### Example + +```typescript +async verifyProof(proof) +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/and.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/and.md new file mode 100644 index 00000000000..c783283e396 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/and.md @@ -0,0 +1,22 @@ +# and() + +```ts +and(lhs, rhs): string +``` + +Performs a bitwise AND operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/blake2s256.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/blake2s256.md new file mode 100644 index 00000000000..7882d0da8d5 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/blake2s256.md @@ -0,0 +1,21 @@ +# blake2s256() + +```ts +blake2s256(inputs): Uint8Array +``` + +Calculates the Blake2s256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md new file mode 100644 index 00000000000..5e3cd53e9d3 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md @@ -0,0 +1,28 @@ +# ecdsa\_secp256k1\_verify() + +```ts +ecdsa_secp256k1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Verifies a ECDSA signature over the secp256k1 curve. 
+ +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md new file mode 100644 index 00000000000..0b20ff68957 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md @@ -0,0 +1,28 @@ +# ecdsa\_secp256r1\_verify() + +```ts +ecdsa_secp256r1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Verifies a ECDSA signature over the secp256r1 curve. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/keccak256.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/keccak256.md new file mode 100644 index 00000000000..d10f155ce86 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/keccak256.md @@ -0,0 +1,21 @@ +# keccak256() + +```ts +keccak256(inputs): Uint8Array +``` + +Calculates the Keccak256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/sha256.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/sha256.md new file mode 100644 index 00000000000..6ba4ecac022 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/sha256.md @@ -0,0 +1,21 @@ +# sha256() + +```ts +sha256(inputs): Uint8Array +``` + +Calculates the SHA256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/xor.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/xor.md new file mode 100644 index 00000000000..8d762b895d3 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/functions/xor.md @@ -0,0 +1,22 @@ +# xor() + +```ts +xor(lhs, rhs): string +``` + +Performs a bitwise XOR operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated 
using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/index.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/index.md new file mode 100644 index 00000000000..cca6b3ace41 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/index.md @@ -0,0 +1,54 @@ +# noir_js + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [Noir](classes/Noir.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [ForeignCallHandler](type-aliases/ForeignCallHandler.md) | A callback which performs a foreign call and returns the response. | +| [ForeignCallInput](type-aliases/ForeignCallInput.md) | - | +| [ForeignCallOutput](type-aliases/ForeignCallOutput.md) | - | +| [WitnessMap](type-aliases/WitnessMap.md) | - | + +### Functions + +| Function | Description | +| :------ | :------ | +| [and](functions/and.md) | Performs a bitwise AND operation between `lhs` and `rhs` | +| [blake2s256](functions/blake2s256.md) | Calculates the Blake2s256 hash of the input bytes | +| [ecdsa\_secp256k1\_verify](functions/ecdsa_secp256k1_verify.md) | Verifies an ECDSA signature over the secp256k1 curve. | +| [ecdsa\_secp256r1\_verify](functions/ecdsa_secp256r1_verify.md) | Verifies an ECDSA signature over the secp256r1 curve. | +| [keccak256](functions/keccak256.md) | Calculates the Keccak256 hash of the input bytes | +| [sha256](functions/sha256.md) | Calculates the SHA256 hash of the input bytes | +| [xor](functions/xor.md) | Performs a bitwise XOR operation between `lhs` and `rhs` | + +## References + +### CompiledCircuit + +Renames and re-exports [InputMap](index.md#inputmap) + +*** + +### ProofData + +Renames and re-exports [InputMap](index.md#inputmap) + +## Variables + +### InputMap + +```ts +InputMap: any; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md new file mode 100644 index 00000000000..812b8b16481 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md @@ -0,0 +1,24 @@ +# ForeignCallHandler + +```ts +type ForeignCallHandler: (name, inputs) => Promise<ForeignCallOutput[]>; +``` + +A callback which performs a foreign call and returns the response. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `name` | `string` | The identifier for the type of foreign call being performed. | +| `inputs` | [`ForeignCallInput`](ForeignCallInput.md)[] | An array of hex encoded inputs to the foreign call. | + +## Returns + +`Promise`\<[`ForeignCallOutput`](ForeignCallOutput.md)[]\> + +outputs - An array of hex encoded outputs containing the results of the foreign call. 
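+## Example
+
+A minimal usage sketch (added for illustration; not part of the generated reference). The oracle name `getSqrt`, the way the inputs are decoded, and the way the handler is eventually passed to `Noir` when executing a circuit are assumptions to adapt to your own program:
+
+```js
+// Sketch only: `getSqrt` is a hypothetical oracle defined by the circuit.
+// `inputs` is an array of arrays of hex-encoded field elements, and the
+// returned array must match the output shape the oracle declaration expects.
+const handler = async (name, inputs) => {
+  if (name === 'getSqrt') {
+    const value = BigInt(inputs[0][0]);
+    const root = BigInt(Math.floor(Math.sqrt(Number(value))));
+    return ['0x' + root.toString(16)];
+  }
+  throw new Error(`Unexpected foreign call: ${name}`);
+};
+```
+
+Anything with this shape can be supplied wherever a `ForeignCallHandler` is accepted.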
+ +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md new file mode 100644 index 00000000000..dd95809186a --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md @@ -0,0 +1,9 @@ +# ForeignCallInput + +```ts +type ForeignCallInput: string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md new file mode 100644 index 00000000000..b71fb78a946 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md @@ -0,0 +1,9 @@ +# ForeignCallOutput + +```ts +type ForeignCallOutput: string | string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md new file mode 100644 index 00000000000..258c46f9d0c --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md @@ -0,0 +1,9 @@ +# WitnessMap + +```ts +type WitnessMap: Map; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs new file mode 100644 index 00000000000..c6d8125eaad --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/noir_js/classes/Noir","label":"Noir"}]},{"type":"category","label":"Type 
Aliases","items":[{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallHandler","label":"ForeignCallHandler"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallInput","label":"ForeignCallInput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallOutput","label":"ForeignCallOutput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/WitnessMap","label":"WitnessMap"}]},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_js/functions/and","label":"and"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/blake2s256","label":"blake2s256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify","label":"ecdsa_secp256k1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify","label":"ecdsa_secp256r1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/keccak256","label":"keccak256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/sha256","label":"sha256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/xor","label":"xor"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/.nojekyll b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile.md new file mode 100644 index 00000000000..6faf763b37f --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile.md @@ -0,0 +1,51 @@ +# compile() + +```ts +compile( + fileManager, + projectPath?, + logFn?, +debugLogFn?): Promise +``` + +Compiles a Noir project + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `fileManager` | `FileManager` | The file manager to use | +| `projectPath`? | `string` | The path to the project inside the file manager. Defaults to the root of the file manager | +| `logFn`? | `LogFn` | A logging function. If not provided, console.log will be used | +| `debugLogFn`? | `LogFn` | A debug logging function. 
If not provided, logFn will be used | + +## Returns + +`Promise`\<[`ProgramCompilationArtifacts`](../index.md#programcompilationartifacts)\> + +## Example + +```typescript +// Node.js + +import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile_program(fm); +``` + +```typescript +// Browser + +import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile_program(fm); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile_contract.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile_contract.md new file mode 100644 index 00000000000..7d0b39a43ef --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/compile_contract.md @@ -0,0 +1,51 @@ +# compile\_contract() + +```ts +compile_contract( + fileManager, + projectPath?, + logFn?, +debugLogFn?): Promise +``` + +Compiles a Noir project + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `fileManager` | `FileManager` | The file manager to use | +| `projectPath`? | `string` | The path to the project inside the file manager. Defaults to the root of the file manager | +| `logFn`? | `LogFn` | A logging function. If not provided, console.log will be used | +| `debugLogFn`? | `LogFn` | A debug logging function. If not provided, logFn will be used | + +## Returns + +`Promise`\<[`ContractCompilationArtifacts`](../index.md#contractcompilationartifacts)\> + +## Example + +```typescript +// Node.js + +import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile_contract(fm); +``` + +```typescript +// Browser + +import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile_contract(fm); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/createFileManager.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/createFileManager.md new file mode 100644 index 00000000000..7e65c1d69c7 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/createFileManager.md @@ -0,0 +1,21 @@ +# createFileManager() + +```ts +createFileManager(dataDir): FileManager +``` + +Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `dataDir` | `string` | root of the file system | + +## Returns + +`FileManager` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md 
b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md new file mode 100644 index 00000000000..fcea9275341 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md @@ -0,0 +1,21 @@ +# inflateDebugSymbols() + +```ts +inflateDebugSymbols(debugSymbols): any +``` + +Decompresses and decodes the debug symbols + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `debugSymbols` | `string` | The base64 encoded debug symbols | + +## Returns + +`any` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/index.md b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/index.md new file mode 100644 index 00000000000..b6e0f9d1bc0 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/index.md @@ -0,0 +1,49 @@ +# noir_wasm + +## Exports + +### Functions + +| Function | Description | +| :------ | :------ | +| [compile](functions/compile.md) | Compiles a Noir project | +| [compile\_contract](functions/compile_contract.md) | Compiles a Noir project | +| [createFileManager](functions/createFileManager.md) | Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) | +| [inflateDebugSymbols](functions/inflateDebugSymbols.md) | Decompresses and decodes the debug symbols | + +## References + +### compile\_program + +Renames and re-exports [compile](functions/compile.md) + +## Interfaces + +### ContractCompilationArtifacts + +The compilation artifacts of a given contract. + +#### Properties + +| Property | Type | Description | +| :------ | :------ | :------ | +| `contract` | `ContractArtifact` | The compiled contract. | +| `warnings` | `unknown`[] | Compilation warnings. | + +*** + +### ProgramCompilationArtifacts + +The compilation artifacts of a given program. + +#### Properties + +| Property | Type | Description | +| :------ | :------ | :------ | +| `name` | `string` | not part of the compilation output, injected later | +| `program` | `ProgramArtifact` | The compiled contract. | +| `warnings` | `unknown`[] | Compilation warnings. 
| + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs new file mode 100644 index 00000000000..e0870710349 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"doc","id":"reference/NoirJS/noir_wasm/index","label":"API"},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/compile","label":"compile"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/compile_contract","label":"compile_contract"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/createFileManager","label":"createFileManager"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/inflateDebugSymbols","label":"inflateDebugSymbols"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.26.0/reference/_category_.json b/docs/versioned_docs/version-v0.26.0/reference/_category_.json new file mode 100644 index 00000000000..5b6a20a609a --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.26.0/reference/nargo_commands.md b/docs/versioned_docs/version-v0.26.0/reference/nargo_commands.md new file mode 100644 index 00000000000..218fcfb0c8c --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/reference/nargo_commands.md @@ -0,0 +1,381 @@ +--- +title: Nargo +description: + Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, + generate Solidity verifier smart contract and compile into JSON file containing ACIR + representation and ABI of circuit. +keywords: + [ + Nargo, + Noir CLI, + Noir Prover, + Noir Verifier, + generate Solidity verifier, + compile JSON file, + ACIR representation, + ABI of circuit, + TypeScript, + ] +sidebar_position: 0 +--- + +# Command-Line Help for `nargo` + +This document contains the help content for the `nargo` command-line program. 
+ +**Command Overview:** + +* [`nargo`↴](#nargo) +* [`nargo backend`↴](#nargo-backend) +* [`nargo backend current`↴](#nargo-backend-current) +* [`nargo backend ls`↴](#nargo-backend-ls) +* [`nargo backend use`↴](#nargo-backend-use) +* [`nargo backend install`↴](#nargo-backend-install) +* [`nargo backend uninstall`↴](#nargo-backend-uninstall) +* [`nargo check`↴](#nargo-check) +* [`nargo fmt`↴](#nargo-fmt) +* [`nargo codegen-verifier`↴](#nargo-codegen-verifier) +* [`nargo compile`↴](#nargo-compile) +* [`nargo new`↴](#nargo-new) +* [`nargo init`↴](#nargo-init) +* [`nargo execute`↴](#nargo-execute) +* [`nargo prove`↴](#nargo-prove) +* [`nargo verify`↴](#nargo-verify) +* [`nargo test`↴](#nargo-test) +* [`nargo info`↴](#nargo-info) +* [`nargo lsp`↴](#nargo-lsp) + +## `nargo` + +Noir's package manager + +**Usage:** `nargo ` + +###### **Subcommands:** + +* `backend` — Install and select custom backends used to generate and verify proofs +* `check` — Checks the constraint system for errors +* `fmt` — Format the Noir files in a workspace +* `codegen-verifier` — Generates a Solidity verifier smart contract for the program +* `compile` — Compile the program and its secret execution trace into ACIR format +* `new` — Create a Noir project in a new directory +* `init` — Create a Noir project in the current directory +* `execute` — Executes a circuit to calculate its return value +* `prove` — Create proof for this program. The proof is returned as a hex encoded string +* `verify` — Given a proof and a program, verify whether the proof is valid +* `test` — Run the tests for this program +* `info` — Provides detailed information on each of a program's function (represented by a single circuit) +* `lsp` — Starts the Noir LSP server + +###### **Options:** + + + + +## `nargo backend` + +Install and select custom backends used to generate and verify proofs + +**Usage:** `nargo backend ` + +###### **Subcommands:** + +* `current` — Prints the name of the currently active backend +* `ls` — Prints the list of currently installed backends +* `use` — Select the backend to use +* `install` — Install a new backend from a URL +* `uninstall` — Uninstalls a backend + + + +## `nargo backend current` + +Prints the name of the currently active backend + +**Usage:** `nargo backend current` + + + +## `nargo backend ls` + +Prints the list of currently installed backends + +**Usage:** `nargo backend ls` + + + +## `nargo backend use` + +Select the backend to use + +**Usage:** `nargo backend use ` + +###### **Arguments:** + +* `` + + + +## `nargo backend install` + +Install a new backend from a URL + +**Usage:** `nargo backend install ` + +###### **Arguments:** + +* `` — The name of the backend to install +* `` — The URL from which to download the backend + + + +## `nargo backend uninstall` + +Uninstalls a backend + +**Usage:** `nargo backend uninstall ` + +###### **Arguments:** + +* `` — The name of the backend to uninstall + + + +## `nargo check` + +Checks the constraint system for errors + +**Usage:** `nargo check [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to check +* `--workspace` — Check all packages in the workspace +* `--overwrite` — Force overwrite of existing files +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo fmt` + +Format the Noir files in a 
workspace + +**Usage:** `nargo fmt [OPTIONS]` + +###### **Options:** + +* `--check` — Run noirfmt in check mode + + + +## `nargo codegen-verifier` + +Generates a Solidity verifier smart contract for the program + +**Usage:** `nargo codegen-verifier [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to codegen +* `--workspace` — Codegen all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo compile` + +Compile the program and its secret execution trace into ACIR format + +**Usage:** `nargo compile [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to compile +* `--workspace` — Compile all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo new` + +Create a Noir project in a new directory + +**Usage:** `nargo new [OPTIONS] ` + +###### **Arguments:** + +* `` — The path to save the new project + +###### **Options:** + +* `--name ` — Name of the package [default: package directory name] +* `--lib` — Use a library template +* `--bin` — Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo init` + +Create a Noir project in the current directory + +**Usage:** `nargo init [OPTIONS]` + +###### **Options:** + +* `--name ` — Name of the package [default: current directory name] +* `--lib` — Use a library template +* `--bin` — Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo execute` + +Executes a circuit to calculate its return value + +**Usage:** `nargo execute [OPTIONS] [WITNESS_NAME]` + +###### **Arguments:** + +* `` — Write the execution witness to named file + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `--package ` — The name of the package to execute +* `--workspace` — Execute all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo prove` + +Create proof for this program. 
The proof is returned as a hex encoded string + +**Usage:** `nargo prove [OPTIONS]` + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--verify` — Verify proof after proving +* `--package ` — The name of the package to prove +* `--workspace` — Prove all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo verify` + +Given a proof and a program, verify whether the proof is valid + +**Usage:** `nargo verify [OPTIONS]` + +###### **Options:** + +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--package ` — The name of the package verify +* `--workspace` — Verify all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo test` + +Run the tests for this program + +**Usage:** `nargo test [OPTIONS] [TEST_NAME]` + +###### **Arguments:** + +* `` — If given, only tests with names containing this string will be run + +###### **Options:** + +* `--show-output` — Display output of `println` statements +* `--exact` — Only run tests that match exactly +* `--package ` — The name of the package to test +* `--workspace` — Test all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo info` + +Provides detailed information on each of a program's function (represented by a single circuit) + +Current information provided per circuit: 1. The number of ACIR opcodes 2. Counts the final number gates in the circuit used by a backend + +**Usage:** `nargo info [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to detail +* `--workspace` — Detail all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo lsp` + +Starts the Noir LSP server + +Starts an LSP server which allows IDEs such as VS Code to display diagnostics in Noir source. + +VS Code Noir Language Support: https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir + +**Usage:** `nargo lsp` + + + +
+ + + This document was generated automatically by clap-markdown. + + diff --git a/docs/versioned_docs/version-v0.26.0/tutorials/noirjs_app.md b/docs/versioned_docs/version-v0.26.0/tutorials/noirjs_app.md new file mode 100644 index 00000000000..12beb476994 --- /dev/null +++ b/docs/versioned_docs/version-v0.26.0/tutorials/noirjs_app.md @@ -0,0 +1,279 @@ +--- +title: Building a web app with NoirJS +description: Learn how to set up a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a typescript or javascript environment. +keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs, app] +sidebar_position: 0 +pagination_next: noir/concepts/data_types/index +--- +NoirJS is a set of packages meant to work both in a browser and a server environment. In this tutorial, we will build a simple web app using them. From here, you should get an idea of how to proceed with your own Noir projects! + +You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). + +## Setup + +:::note + +Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.19.x matches `noir_js@0.19.x`, etc. + +In this guide, we will be pinned to 0.19.4. + +::: + +Before we start, we want to make sure we have Node and Nargo installed. + +We start by opening a terminal and executing `node --version`. If we don't get an output like `v20.10.0`, that means node is not installed. Let's do that by following the handy [nvm guide](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script). + +As for `Nargo`, we can follow the [Nargo guide](../getting_started/installation/index.md) to install it. If you're lazy, just paste this on a terminal and run `noirup`: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Easy enough. Onwards! + +## Our project + +ZK is a powerful technology. An app that doesn't reveal one of the inputs to *anyone* is almost unbelievable, yet Noir makes it as easy as a single line of code. + +In fact, it's so simple that it comes nicely packaged in `nargo`. Let's do that! + +### Nargo + +Run: + +```nargo new circuit``` + +And... That's about it. Your program is ready to be compiled and run. + +To compile, let's `cd` into the `circuit` folder to enter our project, and call: + +```nargo compile``` + +This compiles our circuit into `json` format and adds it to a new `target` folder. + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit <---- our working directory + ├── Nargo.toml + ├── src + │ └── main.nr + └── target + └── circuit.json +``` + +::: + +### Node and Vite + +If you want to explore Nargo, feel free to go on a side-quest now and follow the steps in the +[getting started](../getting_started/hello_noir/index.md) guide. However, we want our app to run in the browser, so we need Vite. + +Vite is a powerful tool to generate static websites. While it provides all kinds of features, let's just go barebones with some good old vanilla JS. + +To do this, go back to the previous folder (`cd ..`) and create a new vite project by running `npm create vite` and choosing "Vanilla" and "Javascript". + +You should see `vite-project` appear in your root folder. 
This seems like a good time to `cd` into it and install our NoirJS packages: + +```bash +npm i @noir-lang/backend_barretenberg@0.19.4 @noir-lang/noir_js@0.19.4 +``` + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...etc... +└── vite-project <---- our working directory + └── ...etc... +``` + +::: + +#### Some cleanup + +`npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. + +![my heart is ready for you, noir.js](@site/static/img/memes/titanic.jpeg) + +## HTML + +Our app won't run like this, of course. We need some working HTML, at least. Let's open our broken-hearted `index.html` and replace everything with this code snippet: + +```html + + + + + + +

+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <title>Noir app</title>
+  </head>
+  <body>
+    <script type="module" src="/main.js"></script>
+    <h1>Noir app</h1>
+    <input id="guessInput" type="number" placeholder="Enter your guess" />
+    <button id="submitGuess">Submit Guess</button>
+    <div id="logs"><h2>Logs</h2></div>
+    <div id="results"><h2>Proof</h2></div>
+  </body>
+</html>
+ + +``` + +It *could* be a beautiful UI... Depending on which universe you live in. + +## Some good old vanilla Javascript + +Our love for Noir needs undivided attention, so let's just open `main.js` and delete everything (this is where the romantic scenery becomes a bit creepy). + +Start by pasting in this boilerplate code: + +```js +const setup = async () => { + await Promise.all([ + import("@noir-lang/noirc_abi").then(module => + module.default(new URL("@noir-lang/noirc_abi/web/noirc_abi_wasm_bg.wasm", import.meta.url).toString()) + ), + import("@noir-lang/acvm_js").then(module => + module.default(new URL("@noir-lang/acvm_js/web/acvm_js_bg.wasm", import.meta.url).toString()) + ) + ]); +} + +function display(container, msg) { + const c = document.getElementById(container); + const p = document.createElement('p'); + p.textContent = msg; + c.appendChild(p); +} + +document.getElementById('submitGuess').addEventListener('click', async () => { + try { + // here's where love happens + } catch(err) { + display("logs", "Oh 💔 Wrong guess") + } +}); + +``` + +The display function doesn't do much. We're simply manipulating our website to see stuff happening. For example, if the proof fails, it will simply log a broken heart 😢 + +As for the `setup` function, it's just a sad reminder that dealing with `wasm` on the browser is not as easy as it should. Just copy, paste, and forget. + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...same as above +└── vite-project + ├── main.js + ├── package.json + └── index.html +``` + +You'll see other files and folders showing up (like `package-lock.json`, `node_modules`) but you shouldn't have to care about those. + +::: + +## Some NoirJS + +We're starting with the good stuff now. If you've compiled the circuit as described above, you should have a `json` file we want to import at the very top of our `main.js` file: + +```ts +import circuit from '../circuit/target/circuit.json'; +``` + +[Noir is backend-agnostic](../index.mdx#whats-new-about-noir). We write Noir, but we also need a proving backend. That's why we need to import and instantiate the two dependencies we installed above: `BarretenbergBackend` and `Noir`. Let's import them right below: + +```js +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; +import { Noir } from '@noir-lang/noir_js'; +``` + +And instantiate them inside our try-catch block: + +```ts +// try { +const backend = new BarretenbergBackend(circuit); +const noir = new Noir(circuit, backend); +// } +``` + +:::note + +For the remainder of the tutorial, everything will be happening inside the `try` block + +::: + +## Our app + +Now for the app itself. We're capturing whatever is in the input when people press the submit button. Just add this: + +```js +const x = parseInt(document.getElementById('guessInput').value); +const input = { x, y: 2 }; +``` + +Now we're ready to prove stuff! Let's feed some inputs to our circuit and calculate the proof: + +```js +await setup(); // let's squeeze our wasm inits here + +display('logs', 'Generating proof... ⌛'); +const proof = await noir.generateProof(input); +display('logs', 'Generating proof... ✅'); +display('results', proof.proof); +``` + +You're probably eager to see stuff happening, so go and run your app now! + +From your terminal, run `npm run dev`. If it doesn't open a browser for you, just visit `localhost:5173`. You should now see the worst UI ever, with an ugly input. 
+ +![Getting Started 0](@site/static/img/noir_getting_started_1.png) + +Now, our circuit says `fn main(x: Field, y: pub Field)`. This means only the `y` value is public, and it's hardcoded above: `input = { x, y: 2 }`. In other words, you won't need to send your secret`x` to the verifier! + +By inputting any number other than 2 in the input box and clicking "submit", you should get a valid proof. Otherwise the proof won't even generate correctly. By the way, if you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human ❤️. + +## Verifying + +Time to celebrate, yes! But we shouldn't trust machines so blindly. Let's add these lines to see our proof being verified: + +```js +display('logs', 'Verifying proof... ⌛'); +const verification = await noir.verifyProof(proof); +if (verification) display('logs', 'Verifying proof... ✅'); +``` + +You have successfully generated a client-side Noir web app! + +![coded app without math knowledge](@site/static/img/memes/flextape.jpeg) + +## Further Reading + +You can see how noirjs is used in a full stack Next.js hardhat application in the [noir-starter repo here](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat). The example shows how to calculate a proof in the browser and verify it with a deployed Solidity verifier contract from noirjs. + +You should also check out the more advanced examples in the [noir-examples repo](https://github.com/noir-lang/noir-examples), where you'll find reference usage for some cool apps. diff --git a/docs/versioned_docs/version-v0.27.0/explainers/explainer-oracle.md b/docs/versioned_docs/version-v0.27.0/explainers/explainer-oracle.md new file mode 100644 index 00000000000..b84ca5dd986 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/explainers/explainer-oracle.md @@ -0,0 +1,57 @@ +--- +title: Oracles +description: This guide provides an in-depth understanding of how Oracles work in Noir programming. Learn how to use outside calculations in your programs, constrain oracles, and understand their uses and limitations. +keywords: + - Noir Programming + - Oracles + - JSON-RPC + - Foreign Call Handlers + - Constrained Functions + - Blockchain Programming +sidebar_position: 1 +--- + +If you've seen "The Matrix" you may recall "The Oracle" as Gloria Foster smoking cigarettes and baking cookies. While she appears to "know things", she is actually providing a calculation of a pre-determined future. Noir Oracles are similar, in a way. They don't calculate the future (yet), but they allow you to use outside calculations in your programs. + +![matrix oracle prediction](@site/static/img/memes/matrix_oracle.jpeg) + +A Noir program is usually self-contained. You can pass certain inputs to it, and it will generate a deterministic output for those inputs. But what if you wanted to defer some calculation to an outside process or source? + +Oracles are functions that provide this feature. + +## Use cases + +An example usage for Oracles is proving something on-chain. For example, proving that the ETH-USDC quote was below a certain target at a certain block time. Or even making more complex proofs like proving the ownership of an NFT as an anonymous login method. + +Another interesting use case is to defer expensive calculations to be made outside of the Noir program, and then constraining the result; similar to the use of [unconstrained functions](../noir/concepts//unconstrained.md). 
+ +In short, anything that can be constrained in a Noir program but needs to be fetched from an external source is a great candidate to be used in oracles. + +## Constraining oracles + +Just like in The Matrix, Oracles are powerful. But with great power comes great responsibility. Just because you're using them in a Noir program doesn't mean they're true. Noir has no superpowers. If you want to prove that Portugal won the Euro Cup 2016, you're still relying on potentially untrusted information. + +To give a concrete example, Alice wants to log in to the [NounsDAO](https://nouns.wtf/) forum with her username "noir_nouner" by proving she owns a noun without revealing her ethereum address. Her Noir program could have an oracle call like this: + +```rust +#[oracle(getNoun)] +unconstrained fn get_noun(address: Field) -> Field +``` + +This oracle could naively resolve with the number of Nouns she possesses. However, it is useless as a trusted source, as the oracle could resolve to anything Alice wants. In order to make this oracle call actually useful, Alice would need to constrain the response from the oracle, by proving that her address and the noun count belong to the state tree of the contract. + +In short, **Oracles don't prove anything. Your Noir program does.** + +:::danger + +If you don't constrain the return of your oracle, you are clearly opening an attack vector on your Noir program. Make double-triple sure that the return of an oracle call is constrained! + +::: + +## How to use Oracles + +On the CLI, Nargo resolves oracles by making JSON RPC calls, which means it would require an RPC node to be running. + +In JavaScript, NoirJS accepts and resolves arbitrary call handlers (that is, not limited to JSON) as long as they match the expected types the developer defines. Refer to [Foreign Call Handler](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) to learn more about NoirJS's call handling. + +If you want to build using oracles, follow through to the [oracle guide](../how_to/how-to-oracles.md) for a simple example on how to do that. diff --git a/docs/versioned_docs/version-v0.27.0/explainers/explainer-recursion.md b/docs/versioned_docs/version-v0.27.0/explainers/explainer-recursion.md new file mode 100644 index 00000000000..18846176ca7 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/explainers/explainer-recursion.md @@ -0,0 +1,176 @@ +--- +title: Recursive proofs +description: Explore the concept of recursive proofs in Zero-Knowledge programming. Understand how recursion works in Noir, a language for writing smart contracts on the EVM blockchain. Learn through practical examples like Alice and Bob's guessing game, Charlie's recursive merkle tree, and Daniel's reusable components. Discover how to use recursive proofs to optimize computational resources and improve efficiency. + +keywords: + [ + "Recursive Proofs", + "Zero-Knowledge Programming", + "Noir", + "EVM Blockchain", + "Smart Contracts", + "Recursion in Noir", + "Alice and Bob Guessing Game", + "Recursive Merkle Tree", + "Reusable Components", + "Optimizing Computational Resources", + "Improving Efficiency", + "Verification Key", + "Aggregation", + "Recursive zkSNARK schemes", + "PLONK", + "Proving and Verification Keys" + ] +sidebar_position: 1 +pagination_next: how_to/how-to-recursion +--- + +In programming, we tend to think of recursion as something calling itself. 
A classic example would be the calculation of the factorial of a number: + +```js +function factorial(n) { + if (n === 0 || n === 1) { + return 1; + } else { + return n * factorial(n - 1); + } +} +``` + +In this case, while `n` is not `1`, this function will keep calling itself until it hits the base case, bubbling up the result on the call stack: + +```md + Is `n` 1? <--------- + /\ / + / \ n = n -1 + / \ / + Yes No -------- +``` + +In Zero-Knowledge, recursion has some similarities. + +It is not a Noir function calling itself, but a proof being used as an input to another circuit. In short, you verify one proof *inside* another proof, returning the proof that both proofs are valid. + +This means that, given enough computational resources, you can prove the correctness of any arbitrary number of proofs in a single proof. This could be useful to design state channels (for which a common example would be [Bitcoin's Lightning Network](https://en.wikipedia.org/wiki/Lightning_Network)), to save on gas costs by settling one proof on-chain, or simply to make business logic less dependent on a consensus mechanism. + +## Examples + +Let us look at some of these examples + +### Alice and Bob - Guessing game + +Alice and Bob are friends, and they like guessing games. They want to play a guessing game online, but for that, they need a trusted third-party that knows both of their secrets and finishes the game once someone wins. + +So, they use zero-knowledge proofs. Alice tries to guess Bob's number, and Bob will generate a ZK proof stating whether she succeeded or failed. + +This ZK proof can go on a smart contract, revealing the winner and even giving prizes. However, this means every turn needs to be verified on-chain. This incurs some cost and waiting time that may simply make the game too expensive or time-consuming to be worth it. + +As a solution, Alice proposes the following: "what if Bob generates his proof, and instead of sending it on-chain, I verify it *within* my own proof before playing my own turn?". + +She can then generate a proof that she verified his proof, and so on. + +```md + Did you fail? <-------------------------- + / \ / + / \ n = n -1 + / \ / + Yes No / + | | / + | | / + | You win / + | / + | / +Generate proof of that / + + / + my own guess ---------------- +``` + +### Charlie - Recursive merkle tree + +Charlie is a concerned citizen, and wants to be sure his vote in an election is accounted for. He votes with a ZK proof, but he has no way of knowing that his ZK proof was included in the total vote count! + +If the vote collector puts all of the votes into a [Merkle tree](https://en.wikipedia.org/wiki/Merkle_tree), everyone can prove the verification of two proofs within one proof, as such: + +```md + abcd + __________|______________ + | | + ab cd + _____|_____ ______|______ + | | | | + alice bob charlie daniel +``` + +Doing this recursively allows us to arrive on a final proof `abcd` which if true, verifies the correctness of all the votes. + +### Daniel - Reusable components + +Daniel has a big circuit and a big headache. A part of his circuit is a setup phase that finishes with some assertions that need to be made. But that section alone takes most of the proving time, and is largely independent of the rest of the circuit. + +He might find it more efficient to generate a proof for that setup phase separately, and verify that proof recursively in the actual business logic section of his circuit. 
This will allow for parallelization of both proofs, which results in a considerable speedup. + +## What params do I need + +As you can see in the [recursion reference](noir/standard_library/recursion.md), a simple recursive proof requires: + +- The proof to verify +- The Verification Key of the circuit that generated the proof +- A hash of this verification key, as it's needed for some backends +- The public inputs for the proof + +:::info + +Recursive zkSNARK schemes do not necessarily "verify a proof" in the sense that you expect a true or false to be spit out by the verifier. Rather, an aggregation object is built over the public inputs. + +So, taking the example of Alice and Bob and their guessing game: + +- Alice makes her guess. Her proof is *not* recursive: it doesn't verify any proof within it! It's just a standard `assert(x != y)` circuit +- Bob verifies Alice's proof and makes his own guess. In this circuit, he doesn't exactly *prove* the verification of Alice's proof. Instead, he *aggregates* his proof to Alice's proof. The actual verification is done when the full proof is verified, for example when using `nargo verify` or through the verifier smart contract. + +We can imagine recursive proofs as a [relay race](https://en.wikipedia.org/wiki/Relay_race). The first runner doesn't have to receive the baton from anyone else, as he/she already starts with it. But when his/her turn is over, the next runner needs to receive it, run a bit more, and pass it along. Even though every runner could theoretically verify the baton mid-run (why not? 🏃🔍), only at the end of the race does the referee verify that the whole race is valid. + +::: + +## Some architecture + +As with everything in computer science, there's no one-size-fits-all. But there are some patterns that can help with understanding and implementing them. To give three examples: + +### Adding some logic to a proof verification + +This would be an approach for something like our guessing game, where proofs are sent back and forth and are verified by each opponent. This circuit would be divided into two sections: + +- A `recursive verification` section, which would be just the call to `std::verify_proof`, and that would be skipped on the first move (since there's no proof to verify) +- A `guessing` section, which is basically the logic part where the actual guessing happens + +In such a situation, and assuming Alice is first, she would skip the first part and try to guess Bob's number. Bob would then verify her proof on the first section of his run, and try to guess Alice's number on the second part, and so on. + +### Aggregating proofs + +In some one-way interaction situations, recursion would allow for aggregation of simple proofs that don't need to be immediately verified on-chain or elsewhere. + +To give a practical example, a barman wouldn't need to verify a "proof-of-age" on-chain every time he serves alcohol to a customer. Instead, the architecture would comprise two circuits: + +- A `main`, non-recursive circuit with some logic +- A `recursive` circuit meant to verify two proofs in one proof + +The customer's proofs would be intermediate, and made on their phones, and the barman could just verify them locally. He would then aggregate them into a final proof sent on-chain (or elsewhere) at the end of the day. 
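+To make the "end of the day" flow concrete, here is a conceptual JavaScript sketch. `verifyLocally` and `aggregatePair` are hypothetical callbacks standing in for whatever your proving backend exposes (they are not noir_js functions); the only point is the shape of the flow: check each customer proof off-chain as it arrives, then fold the valid ones pairwise into a single proof to settle later:
+
+```js
+// Conceptual sketch: both callbacks are hypothetical stand-ins for backend calls.
+async function settleEndOfDay(customerProofs, verifyLocally, aggregatePair) {
+  // The barman checks each "proof-of-age" locally as customers arrive.
+  const valid = [];
+  for (const proof of customerProofs) {
+    if (await verifyLocally(proof)) valid.push(proof);
+  }
+
+  // Fold the surviving proofs pairwise (merkle-style) into one aggregate proof.
+  let layer = valid;
+  while (layer.length > 1) {
+    const next = [];
+    for (let i = 0; i < layer.length; i += 2) {
+      next.push(i + 1 < layer.length ? await aggregatePair(layer[i], layer[i + 1]) : layer[i]);
+    }
+    layer = next;
+  }
+
+  return layer[0]; // the single proof that gets settled on-chain (or elsewhere)
+}
+```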
+ +### Recursively verifying different circuits + +Nothing prevents you from verifying different circuits in a recursive proof, for example: + +- A `circuit1` circuit +- A `circuit2` circuit +- A `recursive` circuit + +In this example, a regulator could verify that taxes were paid for a specific purchase by aggregating both a `payer` circuit (proving that a purchase was made and taxes were paid), and a `receipt` circuit (proving that the payment was received) + +## How fast is it + +At the time of writing, verifying recursive proofs is surprisingly fast. This is because most of the time is spent on generating the verification key that will be used to generate the next proof. So you are able to cache the verification key and reuse it later. + +Currently, Noir JS packages don't expose the functionality of loading proving and verification keys, but that feature exists in the underlying `bb.js` package. + +## How can I try it + +Learn more about using recursion in Nargo and NoirJS in the [how-to guide](../how_to/how-to-recursion.md) and see a full example in [noir-examples](https://github.com/noir-lang/noir-examples). diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/_category_.json b/docs/versioned_docs/version-v0.27.0/getting_started/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/_category_.json b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/index.md b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/index.md new file mode 100644 index 00000000000..743c4d8d634 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/index.md @@ -0,0 +1,142 @@ +--- +title: Creating a Project +description: + Learn how to create and verify your first Noir program using Nargo, a programming language for + zero-knowledge proofs. +keywords: + [ + Nargo, + Noir, + zero-knowledge proofs, + programming language, + create Noir program, + verify Noir program, + step-by-step guide, + ] +sidebar_position: 1 + +--- + +Now that we have installed Nargo, it is time to make our first hello world program! + +## Create a Project Directory + +Noir code can live anywhere on your computer. Let us create a _projects_ folder in the home +directory to house our Noir programs. + +For Linux, macOS, and Windows PowerShell, create the directory and change directory into it by +running: + +```sh +mkdir ~/projects +cd ~/projects +``` + +## Create Our First Nargo Project + +Now that we are in the projects directory, create a new Nargo project by running: + +```sh +nargo new hello_world +``` + +> **Note:** `hello_world` can be any arbitrary project name, we are simply using `hello_world` for +> demonstration. +> +> In production, the common practice is to name the project folder as `circuits` for better +> identifiability when sitting alongside other folders in the codebase (e.g. `contracts`, `scripts`, +> `test`). + +A `hello_world` folder would be created. 
Similar to Rust, the folder houses _src/main.nr_ and +_Nargo.toml_ which contain the source code and environmental options of your Noir program +respectively. + +### Intro to Noir Syntax + +Let us take a closer look at _main.nr_. The default _main.nr_ generated should look like this: + +```rust +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` + +The first line of the program specifies the program's inputs: + +```rust +x : Field, y : pub Field +``` + +Program inputs in Noir are private by default (e.g. `x`), but can be labeled public using the +keyword `pub` (e.g. `y`). To learn more about private and public values, check the +[Data Types](../../noir/concepts/data_types/index.md) section. + +The next line of the program specifies its body: + +```rust +assert(x != y); +``` + +The Noir syntax `assert` can be interpreted as something similar to constraints in other zk-contract languages. + +For more Noir syntax, check the [Language Concepts](../../noir/concepts/comments.md) chapter. + +## Build In/Output Files + +Change directory into _hello_world_ and build in/output files for your Noir program by running: + +```sh +cd hello_world +nargo check +``` + +Two additional files would be generated in your project directory: + +_Prover.toml_ houses input values, and _Verifier.toml_ houses public values. + +## Prove Our Noir Program + +Now that the project is set up, we can create a proof of correct execution of our Noir program. + +Fill in input values for execution in the _Prover.toml_ file. For example: + +```toml +x = "1" +y = "2" +``` + +Prove the valid execution of your Noir program: + +```sh +nargo prove +``` + +A new folder _proofs_ would then be generated in your project directory, containing the proof file +`.proof`, where the project name is defined in Nargo.toml. + +The _Verifier.toml_ file would also be updated with the public values computed from program +execution (in this case the value of `y`): + +```toml +y = "0x0000000000000000000000000000000000000000000000000000000000000002" +``` + +> **Note:** Values in _Verifier.toml_ are computed as 32-byte hex values. + +## Verify Our Noir Program + +Once a proof is generated, we can verify correct execution of our Noir program by verifying the +proof file. + +Verify your proof by running: + +```sh +nargo verify +``` + +The verification will complete in silence if it is successful. If it fails, it will log the +corresponding error instead. + +Congratulations, you have now created and verified a proof for your very first Noir program! + +In the [next section](./project_breakdown.md), we will go into more detail on each step performed. diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/project_breakdown.md b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/project_breakdown.md new file mode 100644 index 00000000000..6160a102c6c --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/hello_noir/project_breakdown.md @@ -0,0 +1,199 @@ +--- +title: Project Breakdown +description: + Learn about the anatomy of a Nargo project, including the purpose of the Prover and Verifier TOML + files, and how to prove and verify your program. +keywords: + [Nargo, Nargo project, Prover.toml, Verifier.toml, proof verification, private asset transfer] +sidebar_position: 2 +--- + +This section breaks down our hello world program from the previous section. We elaborate on the project +structure and what the `prove` and `verify` commands did. 
+ +## Anatomy of a Nargo Project + +Upon creating a new project with `nargo new` and building the in/output files with `nargo check` +commands, you would get a minimal Nargo project of the following structure: + + - src + - Prover.toml + - Verifier.toml + - Nargo.toml + +The source directory _src_ holds the source code for your Noir program. By default only a _main.nr_ +file will be generated within it. + +### Prover.toml + +_Prover.toml_ is used for specifying the input values for executing and proving the program. You can specify `toml` files with different names by using the `--prover-name` or `-p` flags, see the [Prover](#provertoml) section below. Optionally you may specify expected output values for prove-time checking as well. + +### Verifier.toml + +_Verifier.toml_ contains public in/output values computed when executing the Noir program. + +### Nargo.toml + +_Nargo.toml_ contains the environmental options of your project. It contains a "package" section and a "dependencies" section. + +Example Nargo.toml: + +```toml +[package] +name = "noir_starter" +type = "bin" +authors = ["Alice"] +compiler_version = "0.9.0" +description = "Getting started with Noir" +entry = "circuit/main.nr" +license = "MIT" + +[dependencies] +ecrecover = {tag = "v0.9.0", git = "https://github.com/colinnielsen/ecrecover-noir.git"} +``` + +Nargo.toml for a [workspace](../../noir/modules_packages_crates/workspaces.md) will look a bit different. For example: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +#### Package section + +The package section defines a number of fields including: + +- `name` (**required**) - the name of the package +- `type` (**required**) - can be "bin", "lib", or "contract" to specify whether its a binary, library or Aztec contract +- `authors` (optional) - authors of the project +- `compiler_version` - specifies the version of the compiler to use. This is enforced by the compiler and follow's [Rust's versioning](https://doc.rust-lang.org/cargo/reference/manifest.html#the-version-field), so a `compiler_version = 0.18.0` will enforce Nargo version 0.18.0, `compiler_version = ^0.18.0` will enforce anything above 0.18.0 but below 0.19.0, etc. For more information, see how [Rust handles these operators](https://docs.rs/semver/latest/semver/enum.Op.html) +- `description` (optional) +- `entry` (optional) - a relative filepath to use as the entry point into your package (overrides the default of `src/lib.nr` or `src/main.nr`) +- `backend` (optional) +- `license` (optional) + +#### Dependencies section + +This is where you will specify any dependencies for your project. See the [Dependencies page](../../noir/modules_packages_crates/dependencies.md) for more info. + +`./proofs/` and `./contract/` directories will not be immediately visible until you create a proof or +verifier contract respectively. + +### main.nr + +The _main.nr_ file contains a `main` method, this method is the entry point into your Noir program. + +In our sample program, _main.nr_ looks like this: + +```rust +fn main(x : Field, y : Field) { + assert(x != y); +} +``` + +The parameters `x` and `y` can be seen as the API for the program and must be supplied by the +prover. Since neither `x` nor `y` is marked as public, the verifier does not supply any inputs, when +verifying the proof. + +The prover supplies the values for `x` and `y` in the _Prover.toml_ file. + +As for the program body, `assert` ensures that the condition to be satisfied (e.g. 
`x != y`) is +constrained by the proof of the execution of said program (i.e. if the condition was not met, the +verifier would reject the proof as an invalid proof). + +### Prover.toml + +The _Prover.toml_ file is a file which the prover uses to supply his witness values(both private and +public). + +In our hello world program the _Prover.toml_ file looks like this: + +```toml +x = "1" +y = "2" +``` + +When the command `nargo prove` is executed, two processes happen: + +1. Noir creates a proof that `x`, which holds the value of `1`, and `y`, which holds the value of `2`, + is not equal. This inequality constraint is due to the line `assert(x != y)`. + +2. Noir creates and stores the proof of this statement in the _proofs_ directory in a file called your-project.proof. So if your project is named "private_voting" (defined in the project Nargo.toml), the proof will be saved at `./proofs/private_voting.proof`. Opening this file will display the proof in hex format. + +#### Arrays of Structs + +The following code shows how to pass an array of structs to a Noir program to generate a proof. + +```rust +// main.nr +struct Foo { + bar: Field, + baz: Field, +} + +fn main(foos: [Foo; 3]) -> pub Field { + foos[2].bar + foos[2].baz +} +``` + +Prover.toml: + +```toml +[[foos]] # foos[0] +bar = 0 +baz = 0 + +[[foos]] # foos[1] +bar = 0 +baz = 0 + +[[foos]] # foos[2] +bar = 1 +baz = 2 +``` + +#### Custom toml files + +You can specify a `toml` file with a different name to use for proving by using the `--prover-name` or `-p` flags. + +This command looks for proof inputs in the default **Prover.toml** and generates the proof and saves it at `./proofs/.proof`: + +```bash +nargo prove +``` + +This command looks for proof inputs in the custom **OtherProver.toml** and generates proof and saves it at `./proofs/.proof`: + +```bash +nargo prove -p OtherProver +``` + +## Verifying a Proof + +When the command `nargo verify` is executed, two processes happen: + +1. Noir checks in the _proofs_ directory for a proof file with the project name (eg. test_project.proof) + +2. If that file is found, the proof's validity is checked + +> **Note:** The validity of the proof is linked to the current Noir program; if the program is +> changed and the verifier verifies the proof, it will fail because the proof is not valid for the +> _modified_ Noir program. + +In production, the prover and the verifier are usually two separate entities. A prover would +retrieve the necessary inputs, execute the Noir program, generate a proof and pass it to the +verifier. The verifier would then retrieve the public inputs, usually from external sources, and +verify the validity of the proof against it. + +Take a private asset transfer as an example: + +A person using a browser as the prover would retrieve private inputs locally (e.g. the user's private key) and +public inputs (e.g. the user's encrypted balance on-chain), compute the transfer, generate a proof +and submit it to the verifier smart contract. + +The verifier contract would then draw the user's encrypted balance directly from the blockchain and +verify the proof submitted against it. If the verification passes, additional functions in the +verifier contract could trigger (e.g. approve the asset transfer). + +Now that you understand the concepts, you'll probably want some editor feedback while you are writing more complex code. 
diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/installation/_category_.json b/docs/versioned_docs/version-v0.27.0/getting_started/installation/_category_.json new file mode 100644 index 00000000000..0c02fb5d4d7 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/installation/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 0, + "label": "Install Nargo", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/installation/index.md b/docs/versioned_docs/version-v0.27.0/getting_started/installation/index.md new file mode 100644 index 00000000000..4ef86aa5914 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/installation/index.md @@ -0,0 +1,48 @@ +--- +title: Nargo Installation +description: + nargo is a command line tool for interacting with Noir programs. This page is a quick guide on how to install Nargo through the most common and easy method, noirup +keywords: [ + Nargo + Noir + Rust + Cargo + Noirup + Installation + Terminal Commands + Version Check + Nightlies + Specific Versions + Branches + Noirup Repository +] +pagination_next: getting_started/hello_noir/index +--- + +`nargo` is the one-stop-shop for almost everything related with Noir. The name comes from our love for Rust and its package manager `cargo`. + +With `nargo`, you can start new projects, compile, execute, prove, verify, test, generate solidity contracts, and do pretty much all that is available in Noir. + +Similarly to `rustup`, we also maintain an easy installation method that covers most machines: `noirup`. + +## Installing Noirup + +Open a terminal on your machine, and write: + +```bash +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Close the terminal, open another one, and run + +```bash +noirup +``` + +Done. That's it. You should have the latest version working. You can check with `nargo --version`. + +You can also install nightlies, specific versions +or branches. Check out the [noirup repository](https://github.com/noir-lang/noirup) for more +information. + +Now we're ready to start working on [our first Noir program!](../hello_noir/index.md) diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/installation/other_install_methods.md b/docs/versioned_docs/version-v0.27.0/getting_started/installation/other_install_methods.md new file mode 100644 index 00000000000..3634723562b --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/installation/other_install_methods.md @@ -0,0 +1,102 @@ +--- +title: Alternative Installations +description: There are different ways to install Nargo, the one-stop shop and command-line tool for developing Noir programs. This guide explains how to specify which version to install when using noirup, and using WSL for windows. +keywords: [ + Installation + Nargo + Noirup + Binaries + Compiling from Source + WSL for Windows + macOS + Linux + Nix + Direnv + Uninstalling Nargo + ] +sidebar_position: 1 +--- + +## Encouraged Installation Method: Noirup + +Noirup is the endorsed method for installing Nargo, streamlining the process of fetching binaries or compiling from source. It supports a range of options to cater to your specific needs, from nightly builds and specific versions to compiling from various sources. 
+ +### Installing Noirup + +First, ensure you have `noirup` installed: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +### Fetching Binaries + +With `noirup`, you can easily switch between different Nargo versions, including nightly builds: + +- **Nightly Version**: Install the latest nightly build. + + ```sh + noirup --version nightly + ``` + +- **Specific Version**: Install a specific version of Nargo. + ```sh + noirup --version + ``` + +### Compiling from Source + +`noirup` also enables compiling Nargo from various sources: + +- **From a Specific Branch**: Install from the latest commit on a branch. + + ```sh + noirup --branch + ``` + +- **From a Fork**: Install from the main branch of a fork. + + ```sh + noirup --repo + ``` + +- **From a Specific Branch in a Fork**: Install from a specific branch in a fork. + + ```sh + noirup --repo --branch + ``` + +- **From a Specific Pull Request**: Install from a specific PR. + + ```sh + noirup --pr + ``` + +- **From a Specific Commit**: Install from a specific commit. + + ```sh + noirup -C + ``` + +- **From Local Source**: Compile and install from a local directory. + ```sh + noirup --path ./path/to/local/source + ``` + +## Installation on Windows + +The default backend for Noir (Barretenberg) doesn't provide Windows binaries at this time. For that reason, Noir cannot be installed natively. However, it is available by using Windows Subsystem for Linux (WSL). + +Step 1: Follow the instructions [here](https://learn.microsoft.com/en-us/windows/wsl/install) to install and run WSL. + +step 2: Follow the [Noirup instructions](#encouraged-installation-method-noirup). + +## Uninstalling Nargo + +If you installed Nargo with `noirup`, you can uninstall Nargo by removing the files in `~/.nargo`, `~/nargo`, and `~/noir_cache`. This ensures that all installed binaries, configurations, and cache related to Nargo are fully removed from your system. + +```bash +rm -r ~/.nargo +rm -r ~/nargo +rm -r ~/noir_cache +``` diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/tooling/_category_.json b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/_category_.json new file mode 100644 index 00000000000..55804c03a71 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "label": "Tooling", + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/tooling/index.mdx b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/index.mdx new file mode 100644 index 00000000000..ac480f3c9f5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/index.mdx @@ -0,0 +1,38 @@ +--- +title: Tooling +Description: This section provides information about the various tools and utilities available for Noir development. It covers the Noir playground, IDE tools, Codespaces, and community projects. +Keywords: [Noir, Development, Playground, IDE Tools, Language Service Provider, VS Code Extension, Codespaces, noir-starter, Community Projects, Awesome Noir Repository, Developer Tooling] +--- + +Noir is meant to be easy to develop with. For that reason, a number of utilities have been put together to ease the development process as much as feasible in the zero-knowledge world. + +## Playground + +The Noir playground is an easy way to test small ideas, share snippets, and integrate in other websites. You can access it at [play.noir-lang.org](https://play.noir-lang.org). 
+ +## IDE tools + +When you install Nargo, you're also installing a Language Service Provider (LSP), which can be used by IDEs to provide syntax highlighting, codelens, warnings, and more. + +The easiest way to use these tools is by installing the [Noir VS Code extension](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). + +## Codespaces + +Some Noir repos have leveraged Codespaces in order to ease the development process. You can visit the [noir-starter](https://github.com/noir-lang/noir-starter) for an example. + + + +## GitHub Actions + +You can use `noirup` with GitHub Actions for CI/CD and automated testing. It is as simple as +installing `noirup` and running tests in your GitHub Action `yml` file. + +See the +[config file in the Noir repo](https://github.com/TomAFrench/noir-hashes/blob/master/.github/workflows/noir.yml) for an example usage. + +## Community projects + +As an open-source project, Noir has received many contributions over time. Some of them are related with developer tooling, and you can see some of them in [Awesome Noir repository](https://github.com/noir-lang/awesome-noir#dev-tools) diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/tooling/language_server.md b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/language_server.md new file mode 100644 index 00000000000..81e0356ef8a --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/language_server.md @@ -0,0 +1,43 @@ +--- +title: Language Server +description: Learn about the Noir Language Server, how to install the components, and configuration that may be required. +keywords: [Nargo, Language Server, LSP, VSCode, Visual Studio Code] +sidebar_position: 0 +--- + +This section helps you install and configure the Noir Language Server. + +The Language Server Protocol (LSP) has two components, the [Server](#language-server) and the [Client](#language-client). Below we describe each in the context of Noir. + +## Language Server + +The Server component is provided by the Nargo command line tool that you installed at the beginning of this guide. +As long as Nargo is installed and you've used it to run other commands in this guide, it should be good to go! + +If you'd like to verify that the `nargo lsp` command is available, you can run `nargo --help` and look for `lsp` in the list of commands. If you see it, you're using a version of Noir with LSP support. + +## Language Client + +The Client component is usually an editor plugin that launches the Server. It communicates LSP messages between the editor and the Server. For example, when you save a file, the Client will alert the Server, so it can try to compile the project and report any errors. + +Currently, Noir provides a Language Client for Visual Studio Code via the [vscode-noir](https://github.com/noir-lang/vscode-noir) extension. You can install it via the [Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir). + +> **Note:** Noir's Language Server Protocol support currently assumes users' VSCode workspace root to be the same as users' Noir project root (i.e. where Nargo.toml lies). +> +> If LSP features seem to be missing / malfunctioning, make sure you are opening your Noir project directly (instead of as a sub-folder) in your VSCode instance. 
+ +When your language server is running correctly and the VSCode plugin is installed, you should see handy codelens buttons for compilation, measuring circuit size, execution, and tests: + +![Compile and Execute](@site/static/img/codelens_compile_execute.png) +![Run test](@site/static/img/codelens_run_test.png) + +You should also see your tests in the `testing` panel: + +![Testing panel](@site/static/img/codelens_testing_panel.png) + +### Configuration + +- **Noir: Enable LSP** - If checked, the extension will launch the Language Server via `nargo lsp` and communicate with it. +- **Noir: Nargo Flags** - Additional flags may be specified if you require them to be added when the extension calls `nargo lsp`. +- **Noir: Nargo Path** - An absolute path to a Nargo binary with the `lsp` command. This may be useful if Nargo is not within the `PATH` of your editor. +- **Noir > Trace: Server** - Setting this to `"messages"` or `"verbose"` will log LSP messages between the Client and Server. Useful for debugging. diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/tooling/noir_codegen.md b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/noir_codegen.md new file mode 100644 index 00000000000..d65151da0ab --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/noir_codegen.md @@ -0,0 +1,113 @@ +--- +title: Noir Codegen for TypeScript +description: Learn how to use Noir codegen to generate TypeScript bindings +keywords: [Nargo, Noir, compile, TypeScript] +sidebar_position: 2 +--- + +When using TypeScript, it is extra work to interpret Noir program outputs in a type-safe way. Third party libraries may exist for popular Noir programs, but they are either hard to find or unmaintained. + +Now you can generate TypeScript bindings for your Noir programs in two steps: +1. Exporting Noir functions using `nargo export` +2. Using the TypeScript module `noir_codegen` to generate TypeScript binding + +**Note:** you can only export functions from a Noir *library* (not binary or contract program types). + +## Installation + +### Your TypeScript project + +If you don't already have a TypeScript project you can add the module with `yarn` (or `npm`), then initialize it: + +```bash +yarn add typescript -D +npx tsc --init +``` + +### Add TypeScript module - `noir_codegen` + +The following command will add the module to your project's devDependencies: + +```bash +yarn add @noir-lang/noir_codegen -D +``` + +### Nargo library +Make sure you have Nargo, v0.25.0 or greater, installed. If you don't, follow the [installation guide](../installation/index.md). + +If you're in a new project, make a `circuits` folder and create a new Noir library: + +```bash +mkdir circuits && cd circuits +nargo new --lib myNoirLib +``` + +## Usage + +### Export ABI of specified functions + +First go to the `.nr` files in your Noir library, and add the `#[export]` macro to each function that you want to use in TypeScript. + +```rust +#[export] +fn your_function(... +``` + +From your Noir library (where `Nargo.toml` is), run the following command: + +```bash +nargo export +``` + +You will now have an `export` directory with a .json file per exported function. 
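For instance, a complete exported function might look like the sketch below (the `add` function and the `export/add.json` file name are illustrative assumptions, not something generated for you):

```rust
// A hypothetical library function marked for TypeScript codegen.
// After `nargo export`, this would likely show up as export/add.json.
#[export]
fn add(x: Field, y: Field) -> Field {
    x + y
}
```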
+ +You can also specify the directory of Noir programs using `--program-dir`, for example: + +```bash +nargo export --program-dir=./circuits/myNoirLib +``` + +### Generate TypeScript bindings from exported functions + +To use the `noir-codegen` package we added to the TypeScript project: + +```bash +yarn noir-codegen ./export/your_function.json +``` + +This creates an `exports` directory with an `index.ts` file containing all exported functions. + +**Note:** adding `--out-dir` allows you to specify an output dir for your TypeScript bindings to go. Eg: + +```bash +yarn noir-codegen ./export/*.json --out-dir ./path/to/output/dir +``` + +## Example .nr function to .ts output + +Consider a Noir library with this function: + +```rust +#[export] +fn not_equal(x: Field, y: Field) -> bool { + x != y +} +``` + +After the export and codegen steps, you should have an `index.ts` like: + +```typescript +export type Field = string; + + +export const is_equal_circuit: CompiledCircuit = {"abi":{"parameters":[{"name":"x","type":{"kind":"field"},"visibility":"private"},{"name":"y","type":{"kind":"field"},"visibility":"private"}],"param_witnesses":{"x":[{"start":0,"end":1}],"y":[{"start":1,"end":2}]},"return_type":{"abi_type":{"kind":"boolean"},"visibility":"private"},"return_witnesses":[4]},"bytecode":"H4sIAAAAAAAA/7WUMQ7DIAxFQ0Krrr2JjSGYLVcpKrn/CaqqDQN12WK+hPBgmWd/wEyHbF1SS923uhOs3pfoChI+wKXMAXzIKyNj4PB0TFTYc0w5RUjoqeAeEu1wqK0F54RGkWvW44LPzExnlkbMEs4JNZmN8PxS42uHv82T8a3Jeyn2Ks+VLPcO558HmyLMCDOXAXXtpPt4R/Rt9T36ss6dS9HGPx/eG17nGegKBQAA"}; + +export async function is_equal(x: Field, y: Field, foreignCallHandler?: ForeignCallHandler): Promise { + const program = new Noir(is_equal_circuit); + const args: InputMap = { x, y }; + const { returnValue } = await program.execute(args, foreignCallHandler); + return returnValue as boolean; +} +``` + +Now the `is_equal()` function and relevant types are readily available for use in TypeScript. diff --git a/docs/versioned_docs/version-v0.27.0/getting_started/tooling/testing.md b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/testing.md new file mode 100644 index 00000000000..d3e0c522473 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/getting_started/tooling/testing.md @@ -0,0 +1,62 @@ +--- +title: Testing in Noir +description: Learn how to use Nargo to test your Noir program in a quick and easy way +keywords: [Nargo, testing, Noir, compile, test] +sidebar_position: 1 +--- + +You can test your Noir programs using Noir circuits. + +Nargo will automatically compile and run any functions which have the decorator `#[test]` on them if +you run `nargo test`. + +For example if you have a program like: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test] +fn test_add() { + assert(add(2,2) == 4); + assert(add(0,1) == 1); + assert(add(1,0) == 1); +} +``` + +Running `nargo test` will test that the `test_add` function can be executed while satisfying all +the constraints which allows you to test that add returns the expected values. Test functions can't +have any arguments currently. + +### Test fail + +You can write tests that are expected to fail by using the decorator `#[test(should_fail)]`. 
For example: + +```rust +fn add(x: u64, y: u64) -> u64 { + x + y +} +#[test(should_fail)] +fn test_add() { + assert(add(2,2) == 5); +} +``` + +You can be more specific and make it fail with a specific reason by using `should_fail_with = "`: + +```rust +fn main(african_swallow_avg_speed : Field) { + assert(african_swallow_avg_speed == 65, "What is the airspeed velocity of an unladen swallow"); +} + +#[test] +fn test_king_arthur() { + main(65); +} + +#[test(should_fail_with = "What is the airspeed velocity of an unladen swallow")] +fn test_bridgekeeper() { + main(32); +} + +``` diff --git a/docs/versioned_docs/version-v0.27.0/how_to/_category_.json b/docs/versioned_docs/version-v0.27.0/how_to/_category_.json new file mode 100644 index 00000000000..23b560f610b --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/how_to/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/how_to/how-to-oracles.md b/docs/versioned_docs/version-v0.27.0/how_to/how-to-oracles.md new file mode 100644 index 00000000000..8cf8035a5c4 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/how_to/how-to-oracles.md @@ -0,0 +1,276 @@ +--- +title: How to use Oracles +description: Learn how to use oracles in your Noir program with examples in both Nargo and NoirJS. This guide also covers writing a JSON RPC server and providing custom foreign call handlers for NoirJS. +keywords: + - Noir Programming + - Oracles + - Nargo + - NoirJS + - JSON RPC Server + - Foreign Call Handlers +sidebar_position: 1 +--- + +This guide shows you how to use oracles in your Noir program. For the sake of clarity, it assumes that: + +- You have read the [explainer on Oracles](../explainers/explainer-oracle.md) and are comfortable with the concept. +- You have a Noir program to add oracles to. You can create one using the [vite-hardhat starter](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) as a boilerplate. +- You understand the concept of a JSON-RPC server. Visit the [JSON-RPC website](https://www.jsonrpc.org/) if you need a refresher. +- You are comfortable with server-side JavaScript (e.g. Node.js, managing packages, etc.). + +For reference, you can find the snippets used in this tutorial on the [Aztec DevRel Repository](https://github.com/AztecProtocol/dev-rel/tree/main/code-snippets/how-to-oracles). + +## Rundown + +This guide has 3 major steps: + +1. How to modify our Noir program to make use of oracle calls as unconstrained functions +2. How to write a JSON RPC Server to resolve these oracle calls with Nargo +3. How to use them in Nargo and how to provide a custom resolver in NoirJS + +## Step 1 - Modify your Noir program + +An oracle is defined in a Noir program by defining two methods: + +- An unconstrained method - This tells the compiler that it is executing an [unconstrained functions](../noir/concepts//unconstrained.md). +- A decorated oracle method - This tells the compiler that this method is an RPC call. + +An example of an oracle that returns a `Field` would be: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(number: Field) -> Field { } + +unconstrained fn get_sqrt(number: Field) -> Field { + sqrt(number) +} +``` + +In this example, we're wrapping our oracle function in a unconstrained method, and decorating it with `oracle(getSqrt)`. 
We can then call the unconstrained function as we would call any other function: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); +} +``` + +In the next section, we will make this `getSqrt` (defined on the `sqrt` decorator) be a method of the RPC server Noir will use. + +:::danger + +As explained in the [Oracle Explainer](../explainers/explainer-oracle.md), this `main` function is unsafe unless you constrain its return value. For example: + +```rust +fn main(input: Field) { + let sqrt = get_sqrt(input); + assert(sqrt.pow_32(2) as u64 == input as u64); // <---- constrain the return of an oracle! +} +``` + +::: + +:::info + +Currently, oracles only work with single params or array params. For example: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt([Field; 2]) -> [Field; 2] { } +``` + +::: + +## Step 2 - Write an RPC server + +Brillig will call *one* RPC server. Most likely you will have to write your own, and you can do it in whatever language you prefer. In this guide, we will do it in Javascript. + +Let's use the above example of an oracle that consumes an array with two `Field` and returns their square roots: + +```rust +#[oracle(getSqrt)] +unconstrained fn sqrt(input: [Field; 2]) -> [Field; 2] { } + +unconstrained fn get_sqrt(input: [Field; 2]) -> [Field; 2] { + sqrt(input) +} + +fn main(input: [Field; 2]) { + let sqrt = get_sqrt(input); + assert(sqrt[0].pow_32(2) as u64 == input[0] as u64); + assert(sqrt[1].pow_32(2) as u64 == input[1] as u64); +} +``` + +:::info + +Why square root? + +In general, computing square roots is computationally more expensive than multiplications, which takes a toll when speaking about ZK applications. In this case, instead of calculating the square root in Noir, we are using our oracle to offload that computation to be made in plain. In our circuit we can simply multiply the two values. + +::: + +Now, we should write the correspondent RPC server, starting with the [default JSON-RPC 2.0 boilerplate](https://www.npmjs.com/package/json-rpc-2.0#example): + +```js +import { JSONRPCServer } from "json-rpc-2.0"; +import express from "express"; +import bodyParser from "body-parser"; + +const app = express(); +app.use(bodyParser.json()); + +const server = new JSONRPCServer(); +app.post("/", (req, res) => { + const jsonRPCRequest = req.body; + server.receive(jsonRPCRequest).then((jsonRPCResponse) => { + if (jsonRPCResponse) { + res.json(jsonRPCResponse); + } else { + res.sendStatus(204); + } + }); +}); + +app.listen(5555); +``` + +Now, we will add our `getSqrt` method, as expected by the `#[oracle(getSqrt)]` decorator in our Noir code. It maps through the params array and returns their square roots: + +```js +server.addMethod("getSqrt", async (params) => { + const values = params[0].Array.map((field) => { + return `${Math.sqrt(parseInt(field, 16))}`; + }); + return { values: [{ Array: values }] }; +}); +``` + +:::tip + +Brillig expects an object with an array of values. Each value is an object declaring to be `Single` or `Array` and returning a field element *as a string*. 
For example: + +```json +{ "values": [{ "Array": ["1", "2"] }]} +{ "values": [{ "Single": "1" }]} +{ "values": [{ "Single": "1" }, { "Array": ["1", "2"] }]} +``` + +If you're using Typescript, the following types may be helpful in understanding the expected return value and making sure they're easy to follow: + +```js +interface SingleForeignCallParam { + Single: string, +} + +interface ArrayForeignCallParam { + Array: string[], +} + +type ForeignCallParam = SingleForeignCallParam | ArrayForeignCallParam; + +interface ForeignCallResult { + values: ForeignCallParam[], +} +``` + +::: + +## Step 3 - Usage with Nargo + +Using the [`nargo` CLI tool](../getting_started/installation/index.md), you can use oracles in the `nargo test`, `nargo execute` and `nargo prove` commands by passing a value to `--oracle-resolver`. For example: + +```bash +nargo test --oracle-resolver http://localhost:5555 +``` + +This tells `nargo` to use your RPC Server URL whenever it finds an oracle decorator. + +## Step 4 - Usage with NoirJS + +In a JS environment, an RPC server is not strictly necessary, as you may want to resolve your oracles without needing any JSON call at all. NoirJS simply expects that you pass a callback function when you generate proofs, and that callback function can be anything. + +For example, if your Noir program expects the host machine to provide CPU pseudo-randomness, you could simply pass it as the `foreignCallHandler`. You don't strictly need to create an RPC server to serve pseudo-randomness, as you may as well get it directly in your app: + +```js +const foreignCallHandler = (name, inputs) => crypto.randomBytes(16) // etc + +await noir.generateProof(inputs, foreignCallHandler) +``` + +As one can see, in NoirJS, the [`foreignCallHandler`](../reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md) function simply means "a callback function that returns a value of type [`ForeignCallOutput`](../reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md). It doesn't have to be an RPC call like in the case for Nargo. + +:::tip + +Does this mean you don't have to write an RPC server like in [Step #2](#step-2---write-an-rpc-server)? + +You don't technically have to, but then how would you run `nargo test` or `nargo prove`? To use both `Nargo` and `NoirJS` in your development flow, you will have to write a JSON RPC server. + +::: + +In this case, let's make `foreignCallHandler` call the JSON RPC Server we created in [Step #2](#step-2---write-an-rpc-server), by making it a JSON RPC Client. 
+ +For example, using the same `getSqrt` program in [Step #1](#step-1---modify-your-noir-program) (comments in the code): + +```js +import { JSONRPCClient } from "json-rpc-2.0"; + +// declaring the JSONRPCClient +const client = new JSONRPCClient((jsonRPCRequest) => { +// hitting the same JSON RPC Server we coded above + return fetch("http://localhost:5555", { + method: "POST", + headers: { + "content-type": "application/json", + }, + body: JSON.stringify(jsonRPCRequest), + }).then((response) => { + if (response.status === 200) { + return response + .json() + .then((jsonRPCResponse) => client.receive(jsonRPCResponse)); + } else if (jsonRPCRequest.id !== undefined) { + return Promise.reject(new Error(response.statusText)); + } + }); +}); + +// declaring a function that takes the name of the foreign call (getSqrt) and the inputs +const foreignCallHandler = async (name, input) => { + // notice that the "inputs" parameter contains *all* the inputs + // in this case we to make the RPC request with the first parameter "numbers", which would be input[0] + const oracleReturn = await client.request(name, [ + { Array: input[0].map((i) => i.toString("hex")) }, + ]); + return [oracleReturn.values[0].Array]; +}; + +// the rest of your NoirJS code +const input = { input: [4, 16] }; +const { witness } = await noir.execute(numbers, foreignCallHandler); +``` + +:::tip + +If you're in a NoirJS environment running your RPC server together with a frontend app, you'll probably hit a familiar problem in full-stack development: requests being blocked by [CORS](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) policy. For development only, you can simply install and use the [`cors` npm package](https://www.npmjs.com/package/cors) to get around the problem: + +```bash +yarn add cors +``` + +and use it as a middleware: + +```js +import cors from "cors"; + +const app = express(); +app.use(cors()) +``` + +::: + +## Conclusion + +Hopefully by the end of this guide, you should be able to: + +- Write your own logic around Oracles and how to write a JSON RPC server to make them work with your Nargo commands. +- Provide custom foreign call handlers for NoirJS. diff --git a/docs/versioned_docs/version-v0.27.0/how_to/how-to-recursion.md b/docs/versioned_docs/version-v0.27.0/how_to/how-to-recursion.md new file mode 100644 index 00000000000..4c45bb87ae2 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/how_to/how-to-recursion.md @@ -0,0 +1,179 @@ +--- +title: How to use recursion on NoirJS +description: Learn how to implement recursion with NoirJS, a powerful tool for creating smart contracts on the EVM blockchain. This guide assumes familiarity with NoirJS, solidity verifiers, and the Barretenberg proving backend. Discover how to generate both final and intermediate proofs using `noir_js` and `backend_barretenberg`. +keywords: + [ + "NoirJS", + "EVM blockchain", + "smart contracts", + "recursion", + "solidity verifiers", + "Barretenberg backend", + "noir_js", + "backend_barretenberg", + "intermediate proofs", + "final proofs", + "nargo compile", + "json import", + "recursive circuit", + "recursive app" + ] +sidebar_position: 1 +--- + +This guide shows you how to use recursive proofs in your NoirJS app. For the sake of clarity, it is assumed that: + +- You already have a NoirJS app. If you don't, please visit the [NoirJS tutorial](../tutorials/noirjs_app.md) and the [reference](../reference/NoirJS/noir_js/index.md). 
+- You are familiar with what are recursive proofs and you have read the [recursion explainer](../explainers/explainer-recursion.md) +- You already built a recursive circuit following [the reference](../noir/standard_library/recursion.md), and understand how it works. + +It is also assumed that you're not using `noir_wasm` for compilation, and instead you've used [`nargo compile`](../reference/nargo_commands.md) to generate the `json` you're now importing into your project. However, the guide should work just the same if you're using `noir_wasm`. + +:::info + +As you've read in the [explainer](../explainers/explainer-recursion.md), a recursive proof is an intermediate proof. This means that it doesn't necessarily generate the final step that makes it verifiable in a smart contract. However, it is easy to verify within another circuit. + +While "standard" usage of NoirJS packages abstracts final proofs, it currently lacks the necessary interface to abstract away intermediate proofs. This means that these proofs need to be created by using the backend directly. + +In short: + +- `noir_js` generates *only* final proofs +- `backend_barretenberg` generates both types of proofs + +::: + +In a standard recursive app, you're also dealing with at least two circuits. For the purpose of this guide, we will assume the following: + +- `main`: a circuit of type `assert(x != y)`, where `main` is marked with a `#[recursive]` attribute. This attribute states that the backend should generate proofs that are friendly for verification within another circuit. +- `recursive`: a circuit that verifies `main` + +For a full example on how recursive proofs work, please refer to the [noir-examples](https://github.com/noir-lang/noir-examples) repository. We will *not* be using it as a reference for this guide. + +## Step 1: Setup + +In a common NoirJS app, you need to instantiate a backend with something like `const backend = new Backend(circuit)`. Then you feed it to the `noir_js` interface. + +For recursion, this doesn't happen, and the only need for `noir_js` is only to `execute` a circuit and get its witness and return value. Everything else is not interfaced, so it needs to happen on the `backend` object. + +It is also recommended that you instantiate the backend with as many threads as possible, to allow for maximum concurrency: + +```js +const backend = new Backend(circuit, { threads: 8 }) +``` + +:::tip +You can use the [`os.cpus()`](https://nodejs.org/api/os.html#oscpus) object in `nodejs` or [`navigator.hardwareConcurrency`](https://developer.mozilla.org/en-US/docs/Web/API/Navigator/hardwareConcurrency) on the browser to make the most out of those glorious cpu cores +::: + +## Step 2: Generating the witness and the proof for `main` + +After instantiating the backend, you should also instantiate `noir_js`. We will use it to execute the circuit and get the witness. + +```js +const noir = new Noir(circuit, backend) +const { witness } = noir.execute(input) +``` + +With this witness, you are now able to generate the intermediate proof for the main circuit: + +```js +const { proof, publicInputs } = await backend.generateProof(witness) +``` + +:::warning + +Always keep in mind what is actually happening on your development process, otherwise you'll quickly become confused about what circuit we are actually running and why! + +In this case, you can imagine that Alice (running the `main` circuit) is proving something to Bob (running the `recursive` circuit), and Bob is verifying her proof within his proof. 
+ +With this in mind, it becomes clear that our intermediate proof is the one *meant to be verified within another circuit*, so it must be Alice's. Actually, the only final proof in this theoretical scenario would be the last one, sent on-chain. + +::: + +## Step 3 - Verification and proof artifacts + +Optionally, you are able to verify the intermediate proof: + +```js +const verified = await backend.verifyProof({ proof, publicInputs }) +``` + +This can be useful to make sure our intermediate proof was correctly generated. But the real goal is to do it within another circuit. For that, we need to generate recursive proof artifacts that will be passed to the circuit that is verifying the proof we just generated. Instead of passing the proof and verification key as a byte array, we pass them as fields which makes it cheaper to verify in a circuit: + +```js +const { proofAsFields, vkAsFields, vkHash } = await backend.generateRecursiveProofArtifacts( { publicInputs, proof }, publicInputsCount) +``` + +This call takes the public inputs and the proof, but also the public inputs count. While this is easily retrievable by simply counting the `publicInputs` length, the backend interface doesn't currently abstract it away. + +:::info + +The `proofAsFields` has a constant size `[Field; 93]` and verification keys in Barretenberg are always `[Field; 114]`. + +::: + +:::warning + +One common mistake is to forget *who* makes this call. + +In a situation where Alice is generating the `main` proof, if she generates the proof artifacts and sends them to Bob, which gladly takes them as true, this would mean Alice could prove anything! + +Instead, Bob needs to make sure *he* extracts the proof artifacts, using his own instance of the `main` circuit backend. This way, Alice has to provide a valid proof for the correct `main` circuit. + +::: + +## Step 4 - Recursive proof generation + +With the artifacts, generating a recursive proof is no different from a normal proof. You simply use the `backend` (with the recursive circuit) to generate it: + +```js +const recursiveInputs = { + verification_key: vkAsFields, // array of length 114 + proof: proofAsFields, // array of length 93 + size of public inputs + publicInputs: [mainInput.y], // using the example above, where `y` is the only public input + key_hash: vkHash, +} + +const { witness, returnValue } = noir.execute(recursiveInputs) // we're executing the recursive circuit now! +const { proof, publicInputs } = backend.generateProof(witness) +const verified = backend.verifyProof({ proof, publicInputs }) +``` + +You can obviously chain this proof into another proof. In fact, if you're using recursive proofs, you're probably interested of using them this way! + +:::tip + +Managing circuits and "who does what" can be confusing. To make sure your naming is consistent, you can keep them in an object. 
For example: + +```js +const circuits = { + main: mainJSON, + recursive: recursiveJSON +} +const backends = { + main: new BarretenbergBackend(circuits.main), + recursive: new BarretenbergBackend(circuits.recursive) +} +const noir_programs = { + main: new Noir(circuits.main, backends.main), + recursive: new Noir(circuits.recursive, backends.recursive) +} +``` + +This allows you to neatly call exactly the method you want without conflicting names: + +```js +// Alice runs this 👇 +const { witness: mainWitness } = await noir_programs.main.execute(input) +const proof = await backends.main.generateProof(mainWitness) + +// Bob runs this 👇 +const verified = await backends.main.verifyProof(proof) +const { proofAsFields, vkAsFields, vkHash } = await backends.main.generateRecursiveProofArtifacts( + proof, + numPublicInputs, +); +const recursiveProof = await noir_programs.recursive.generateProof(recursiveInputs) +``` + +::: diff --git a/docs/versioned_docs/version-v0.27.0/how_to/how-to-solidity-verifier.md b/docs/versioned_docs/version-v0.27.0/how_to/how-to-solidity-verifier.md new file mode 100644 index 00000000000..e3c7c1065da --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/how_to/how-to-solidity-verifier.md @@ -0,0 +1,231 @@ +--- +title: Generate a Solidity Verifier +description: + Learn how to run the verifier as a smart contract on the blockchain. Compile a Solidity verifier + contract for your Noir program and deploy it on any EVM blockchain acting as a verifier smart + contract. Read more to find out +keywords: + [ + solidity verifier, + smart contract, + blockchain, + compiler, + plonk_vk.sol, + EVM blockchain, + verifying Noir programs, + proving backend, + Barretenberg, + ] +sidebar_position: 0 +pagination_next: tutorials/noirjs_app +--- + +Noir has the ability to generate a verifier contract in Solidity, which can be deployed in many EVM-compatible blockchains such as Ethereum. + +This allows for a powerful feature set, as one can make use of the conciseness and the privacy provided by Noir in an immutable ledger. Applications can range from simple P2P guessing games, to complex private DeFi interactions. + +This guide shows you how to generate a Solidity Verifier and deploy it on the [Remix IDE](https://remix.ethereum.org/). It is assumed that: + +- You are comfortable with the Solidity programming language and understand how contracts are deployed on the Ethereum network +- You have Noir installed and you have a Noir program. If you don't, [get started](../getting_started/installation/index.md) with Nargo and the example Hello Noir circuit +- You are comfortable navigating RemixIDE. If you aren't or you need a refresher, you can find some video tutorials [here](https://www.youtube.com/channel/UCjTUPyFEr2xDGN6Cg8nKDaA) that could help you. + +## Rundown + +Generating a Solidity Verifier contract is actually a one-command process. However, compiling it and deploying it can have some caveats. Here's the rundown of this guide: + +1. How to generate a solidity smart contract +2. How to compile the smart contract in the RemixIDE +3. How to deploy it to a testnet + +## Step 1 - Generate a contract + +This is by far the most straight-forward step. Just run: + +```sh +nargo codegen-verifier +``` + +A new `contract` folder would then be generated in your project directory, containing the Solidity +file `plonk_vk.sol`. It can be deployed to any EVM blockchain acting as a verifier smart contract. 
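As a rough sketch of how the generated verifier is usually consumed on-chain (the wrapper contract below is illustrative and not produced by Nargo; it only relies on the `UltraVerifier` contract and the `verify` interface described in Step 4):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Illustrative wrapper around the verifier generated in plonk_vk.sol.
import {UltraVerifier} from "./plonk_vk.sol";

contract ProofGate {
    UltraVerifier public immutable verifier;

    constructor(UltraVerifier _verifier) {
        verifier = _verifier;
    }

    // Reverts unless the proof checks out against the given public inputs.
    function requireValidProof(bytes calldata proof, bytes32[] calldata publicInputs) external view {
        require(verifier.verify(proof, publicInputs), "Invalid proof");
    }
}
```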
+ +:::info + +It is possible to generate verifier contracts of Noir programs for other smart contract platforms as long as the proving backend supplies an implementation. + +Barretenberg, the default proving backend for Nargo, supports generation of verifier contracts, for the time being these are only in Solidity. +::: + +## Step 2 - Compiling + +We will mostly skip the details of RemixIDE, as the UI can change from version to version. For now, we can just open +Remix and create a blank workspace. + +![Create Workspace](@site/static/img/how-tos/solidity_verifier_1.png) + +We will create a new file to contain the contract Nargo generated, and copy-paste its content. + +:::warning + +You'll likely see a warning advising you to not trust pasted code. While it is an important warning, it is irrelevant in the context of this guide and can be ignored. We will not be deploying anywhere near a mainnet. + +::: + +To compile our the verifier, we can navigate to the compilation tab: + +![Compilation Tab](@site/static/img/how-tos/solidity_verifier_2.png) + +Remix should automatically match a suitable compiler version. However, hitting the "Compile" button will most likely generate a "Stack too deep" error: + +![Stack too deep](@site/static/img/how-tos/solidity_verifier_3.png) + +This is due to the verify function needing to put many variables on the stack, but enabling the optimizer resolves the issue. To do this, let's open the "Advanced Configurations" tab and enable optimization. The default 200 runs will suffice. + +:::info + +This time we will see a warning about an unused function parameter. This is expected, as the `verify` function doesn't use the `_proof` parameter inside a solidity block, it is loaded from calldata and used in assembly. + +::: + +![Compilation success](@site/static/img/how-tos/solidity_verifier_4.png) + +## Step 3 - Deploying + +At this point we should have a compiled contract read to deploy. If we navigate to the deploy section in Remix, we will see many different environments we can deploy to. The steps to deploy on each environment would be out-of-scope for this guide, so we will just use the default Remix VM. + +Looking closely, we will notice that our "Solidity Verifier" is actually three contracts working together: + +- An `UltraVerificationKey` library which simply stores the verification key for our circuit. +- An abstract contract `BaseUltraVerifier` containing most of the verifying logic. +- A main `UltraVerifier` contract that inherits from the Base and uses the Key contract. + +Remix will take care of the dependencies for us so we can simply deploy the UltraVerifier contract by selecting it and hitting "deploy": + +![Deploying UltraVerifier](@site/static/img/how-tos/solidity_verifier_5.png) + +A contract will show up in the "Deployed Contracts" section, where we can retrieve the Verification Key Hash. This is particularly useful for double-checking the deployer contract is the correct one. + +:::note + +Why "UltraVerifier"? + +To be precise, the Noir compiler (`nargo`) doesn't generate the verifier contract directly. It compiles the Noir code into an intermediate language (ACIR), which is then executed by the backend. So it is the backend that returns the verifier smart contract, not Noir. + +In this case, the Barretenberg Backend uses the UltraPlonk proving system, hence the "UltraVerifier" name. 
+ +::: + +## Step 4 - Verifying + +To verify a proof using the Solidity verifier contract, we call the `verify` function in this extended contract: + +```solidity +function verify(bytes calldata _proof, bytes32[] calldata _publicInputs) external view returns (bool) +``` + +When using the default example in the [Hello Noir](../getting_started/hello_noir/index.md) guide, the easiest way to confirm that the verifier contract is doing its job is by calling the `verify` function via remix with the required parameters. For `_proof`, run `nargo prove` and use the string in `proof/.proof` (adding the hex `0x` prefix). We can also copy the public input from `Verifier.toml`, as it will be properly formatted as 32-byte strings: + +``` +0x...... , [0x0000.....02] +``` + +A programmatic example of how the `verify` function is called can be seen in the example zk voting application [here](https://github.com/noir-lang/noir-examples/blob/33e598c257e2402ea3a6b68dd4c5ad492bce1b0a/foundry-voting/src/zkVote.sol#L35): + +```solidity +function castVote(bytes calldata proof, uint proposalId, uint vote, bytes32 nullifierHash) public returns (bool) { + // ... + bytes32[] memory publicInputs = new bytes32[](4); + publicInputs[0] = merkleRoot; + publicInputs[1] = bytes32(proposalId); + publicInputs[2] = bytes32(vote); + publicInputs[3] = nullifierHash; + require(verifier.verify(proof, publicInputs), "Invalid proof"); +``` + +:::info[Return Values] + +A circuit doesn't have the concept of a return value. Return values are just syntactic sugar in +Noir. + +Under the hood, the return value is passed as an input to the circuit and is checked at the end of +the circuit program. + +For example, if you have Noir program like this: + +```rust +fn main( + // Public inputs + pubkey_x: pub Field, + pubkey_y: pub Field, + // Private inputs + priv_key: Field, +) -> pub Field +``` + +the `verify` function will expect the public inputs array (second function parameter) to be of length 3, the two inputs and the return value. Like before, these values are populated in Verifier.toml after running `nargo prove`. + +Passing only two inputs will result in an error such as `PUBLIC_INPUT_COUNT_INVALID(3, 2)`. + +In this case, the inputs parameter to `verify` would be an array ordered as `[pubkey_x, pubkey_y, return]`. + +::: + +:::tip[Structs] + +You can pass structs to the verifier contract. They will be flattened so that the array of inputs is 1-dimensional array. + +For example, consider the following program: + +```rust +struct Type1 { + val1: Field, + val2: Field, +} + +struct Nested { + t1: Type1, + is_true: bool, +} + +fn main(x: pub Field, nested: pub Nested, y: pub Field) { + //... +} +``` + +The order of these inputs would be flattened to: `[x, nested.t1.val1, nested.t1.val2, nested.is_true, y]` + +::: + +The other function you can call is our entrypoint `verify` function, as defined above. + +:::tip + +It's worth noticing that the `verify` function is actually a `view` function. A `view` function does not alter the blockchain state, so it doesn't need to be distributed (i.e. it will run only on the executing node), and therefore doesn't cost any gas. + +This can be particularly useful in some situations. If Alice generated a proof and wants Bob to verify its correctness, Bob doesn't need to run Nargo, NoirJS, or any Noir specific infrastructure. He can simply make a call to the blockchain with the proof and verify it is correct without paying any gas. 
+ +It would be incorrect to say that a Noir proof verification costs any gas at all. However, most of the time the result of `verify` is used to modify state (for example, to update a balance, a game state, etc). In that case the whole network needs to execute it, which does incur gas costs (calldata and execution, but not storage). + +::: + +## A Note on EVM chains + +ZK-SNARK verification depends on some precompiled cryptographic primitives such as Elliptic Curve Pairings (if you like complex math, you can read about EC Pairings [here](https://medium.com/@VitalikButerin/exploring-elliptic-curve-pairings-c73c1864e627)). Not all EVM chains support EC Pairings, notably some of the ZK-EVMs. This means that you won't be able to use the verifier contract in all of them. + +For example, chains like `zkSync ERA` and `Polygon zkEVM` do not currently support these precompiles, so proof verification via Solidity verifier contracts won't work. Here's a quick list of EVM chains that have been tested and are known to work: + +- Optimism +- Arbitrum +- Polygon PoS +- Scroll +- Celo + +If you test any other chains, please open a PR on this page to update the list. See [this doc](https://github.com/noir-lang/noir-starter/tree/main/with-foundry#testing-on-chain) for more info about testing verifier contracts on different EVM chains. + +## What's next + +Now that you know how to call a Noir Solidity Verifier on a smart contract using Remix, you should be comfortable with using it with some programmatic frameworks, such as [hardhat](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat) and [foundry](https://github.com/noir-lang/noir-starter/tree/main/with-foundry). + +You can find other tools, examples, boilerplates and libraries in the [awesome-noir](https://github.com/noir-lang/awesome-noir) repository. + +You should also be ready to write and deploy your first NoirJS app and start generating proofs on websites, phones, and NodeJS environments! Head on to the [NoirJS tutorial](../tutorials/noirjs_app.md) to learn how to do that. diff --git a/docs/versioned_docs/version-v0.27.0/how_to/merkle-proof.mdx b/docs/versioned_docs/version-v0.27.0/how_to/merkle-proof.mdx new file mode 100644 index 00000000000..003c7019a93 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/how_to/merkle-proof.mdx @@ -0,0 +1,48 @@ +--- +title: Prove Merkle Tree Membership +description: + Learn how to use merkle membership proof in Noir to prove that a given leaf is a member of a + merkle tree with a specified root, at a given index. +keywords: + [merkle proof, merkle membership proof, Noir, rust, hash function, Pedersen, sha256, merkle tree] +--- + +Let's walk through an example of a merkle membership proof in Noir that proves that a given leaf is +in a merkle tree. + +```rust +use dep::std; + +fn main(message : [Field; 62], index : Field, hashpath : [Field; 40], root : Field) { + let leaf = std::hash::hash_to_field(message.as_slice()); + let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath); + assert(merkle_root == root); +} + +``` + +The message is hashed using `hash_to_field`. The specific hash function that is being used is chosen +by the backend. The only requirement is that this hash function can heuristically be used as a +random oracle. If only collision resistance is needed, then one can call `std::hash::pedersen_hash` +instead. 
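For instance, a collision-resistance-only variant could look like the sketch below (assuming `pedersen_hash` accepts the message array directly); the walkthrough continues with `hash_to_field`:

```rust
// Sketch: when collision resistance is enough, Pedersen can replace hash_to_field.
let leaf = std::hash::pedersen_hash(message);
```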
```rust
let leaf = std::hash::hash_to_field(message.as_slice());
```

The leaf is then passed to a `compute_merkle_root` function along with the index and hashpath. The returned root can then be asserted to be the same as the provided root.

```rust
let merkle_root = std::merkle::compute_merkle_root(leaf, index, hashpath);
assert(merkle_root == root);
```

> **Note:** It is possible to re-implement the merkle tree without the standard library. However,
> for most use cases the standard library implementation is enough. In general, the standard
> library will always opt to be as conservative as possible, while striking a balance with
> efficiency.

For example, the merkle membership proof only requires a hash function with collision resistance,
so a hash function like Pedersen is allowed, which in most cases is more efficient than the even
more conservative sha256.

[View an example on the starter repo](https://github.com/noir-lang/noir-examples/blob/3ea09545cabfa464124ec2f3ea8e60c608abe6df/stealthdrop/circuits/src/main.nr#L20)
diff --git a/docs/versioned_docs/version-v0.27.0/how_to/using-devcontainers.mdx b/docs/versioned_docs/version-v0.27.0/how_to/using-devcontainers.mdx
new file mode 100644
index 00000000000..727ec6ca667
--- /dev/null
+++ b/docs/versioned_docs/version-v0.27.0/how_to/using-devcontainers.mdx
@@ -0,0 +1,110 @@
---
title: Developer Containers and Codespaces
description: "Learn how to set up a devcontainer in your GitHub repository for a seamless coding experience with Codespaces. Follow our easy 8-step guide to create your own Noir environment without installing Nargo locally."
keywords: ["Devcontainer", "Codespaces", "GitHub", "Noir Environment", "Docker Image", "Development Environment", "Remote Coding", "GitHub Codespaces", "Noir Programming", "Nargo", "VSCode Extensions", "Noirup"]
sidebar_position: 1
---

Adding a developer container configuration file to your Noir project is one of the easiest ways to unlock coding in the browser.

## What's a devcontainer after all?

A [Developer Container](https://containers.dev/) (devcontainer for short) is a Docker image that comes preloaded with the tools, extensions, and other utilities you need to quickly get started or continue a project, without having to install Nargo locally. Think of it as a development environment in a box.

There are many advantages to this:

- It's platform and architecture agnostic
- You don't need to have an IDE installed, or Nargo, or use a terminal at all
- It's safer for use on a public machine or public network

One of the best ways of using devcontainers is... not using your machine at all, for maximum control, performance, and ease of use.
Enter Codespaces.

## Codespaces

If a devcontainer is just a Docker image, then what stops you from provisioning a `p3dn.24xlarge` AWS EC2 instance with 92 vCPUs and 768 GiB RAM and using it to prove your 10-gate SNARK proof?

Nothing! Except perhaps the $30-40 per hour it will cost you.

The problem is that provisioning takes time, and I bet you don't want to see the AWS console every time you want to code something real quick.

Fortunately, there's an easy and free way to get a decent remote machine ready and loaded in less than 2 minutes: Codespaces.
[Codespaces is a Github feature](https://github.com/features/codespaces) that allows you to code in a remote machine by using devcontainers, and it's pretty cool: + +- You can start coding Noir in less than a minute +- It uses the resources of a remote machine, so you can code on your grandma's phone if needed be +- It makes it easy to share work with your frens +- It's fully reusable, you can stop and restart whenever you need to + +:::info + +Don't take out your wallet just yet. Free GitHub accounts get about [15-60 hours of coding](https://github.com/features/codespaces) for free per month, depending on the size of your provisioned machine. + +::: + +## Tell me it's _actually_ easy + +It is! + +Github comes with a default codespace and you can use it to code your own devcontainer. That's exactly what we will be doing in this guide. + + + +8 simple steps: + +#### 1. Create a new repository on GitHub. + +#### 2. Click "Start coding with Codespaces". This will use the default image. + +#### 3. Create a folder called `.devcontainer` in the root of your repository. + +#### 4. Create a Dockerfile in that folder, and paste the following code: + +```docker +FROM --platform=linux/amd64 node:lts-bookworm-slim +SHELL ["/bin/bash", "-c"] +RUN apt update && apt install -y curl bash git tar gzip libc++-dev +RUN curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +ENV PATH="/root/.nargo/bin:$PATH" +RUN noirup +ENTRYPOINT ["nargo"] +``` +#### 5. Create a file called `devcontainer.json` in the same folder, and paste the following code: + +```json +{ + "name": "Noir on Codespaces", + "build": { + "context": ".", + "dockerfile": "Dockerfile" + }, + "customizations": { + "vscode": { + "extensions": ["noir-lang.vscode-noir"] + } + } +} +``` +#### 6. Commit and push your changes + +This will pull the new image and build it, so it could take a minute or so + +#### 8. Done! +Just wait for the build to finish, and there's your easy Noir environment. + + +Refer to [noir-starter](https://github.com/noir-lang/noir-starter/) as an example of how devcontainers can be used together with codespaces. + + + +## How do I use it? + +Using the codespace is obviously much easier than setting it up. +Just navigate to your repository and click "Code" -> "Open with Codespaces". It should take a few seconds to load, and you're ready to go. + +:::info + +If you really like the experience, you can add a badge to your readme, links to existing codespaces, and more. +Check out the [official docs](https://docs.github.com/en/codespaces/setting-up-your-project-for-codespaces/setting-up-your-repository/facilitating-quick-creation-and-resumption-of-codespaces) for more info. diff --git a/docs/versioned_docs/version-v0.27.0/index.mdx b/docs/versioned_docs/version-v0.27.0/index.mdx new file mode 100644 index 00000000000..75086ddcdde --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/index.mdx @@ -0,0 +1,67 @@ +--- +title: Noir Lang +hide_title: true +description: + Learn about the public alpha release of Noir, a domain specific language heavily influenced by Rust that compiles to + an intermediate language which can be compiled to an arithmetic circuit or a rank-1 constraint system. 
+keywords: + [Noir, + Domain Specific Language, + Rust, + Intermediate Language, + Arithmetic Circuit, + Rank-1 Constraint System, + Ethereum Developers, + Protocol Developers, + Blockchain Developers, + Proving System, + Smart Contract Language] +sidebar_position: 0 +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +Noir Logo + +Noir is a Domain-Specific Language for SNARK proving systems developed by [Aztec Labs](https://aztec.network/). It allows you to generate complex Zero-Knowledge Programs (ZKP) by using simple and flexible syntax, requiring no previous knowledge on the underlying mathematics or cryptography. + +ZK programs are programs that can generate short proofs of a certain statement without revealing some details about it. You can read more about ZKPs [here](https://dev.to/spalladino/a-beginners-intro-to-coding-zero-knowledge-proofs-c56). + +## What's new about Noir? + +Noir works differently from most ZK languages by taking a two-pronged path. First, it compiles the program to an adaptable intermediate language known as ACIR. From there, depending on a given project's needs, ACIR can be further compiled into an arithmetic circuit for integration with the proving backend. + +:::info + +Noir is backend agnostic, which means it makes no assumptions on which proving backend powers the ZK proof. Being the language that powers [Aztec Contracts](https://docs.aztec.network/developers/contracts/main), it defaults to Aztec's Barretenberg proving backend. + +However, the ACIR output can be transformed to be compatible with other PLONK-based backends, or into a [rank-1 constraint system](https://www.rareskills.io/post/rank-1-constraint-system) suitable for backends such as Arkwork's Marlin. + +::: + +## Who is Noir for? + +Noir can be used both in complex cloud-based backends and in user's smartphones, requiring no knowledge on the underlying math or cryptography. From authorization systems that keep a password in the user's device, to complex on-chain verification of recursive proofs, Noir is designed to abstract away complexity without any significant overhead. Here are some examples of situations where Noir can be used: + + + + Noir Logo + + Aztec Contracts leverage Noir to allow for the storage and execution of private information. Writing an Aztec Contract is as easy as writing Noir, and Aztec developers can easily interact with the network storage and execution through the [Aztec.nr](https://docs.aztec.network/developers/contracts/main) library. + + + Soliditry Verifier Example + Noir can auto-generate Solidity verifier contracts that verify Noir proofs. This allows for non-interactive verification of proofs containing private information in an immutable system. This feature powers a multitude of use-case scenarios, from P2P chess tournaments, to [Aztec Layer-2 Blockchain](https://docs.aztec.network/) + + + Aztec Labs developed NoirJS, an easy interface to generate and verify Noir proofs in a Javascript environment. This allows for Noir to be used in webpages, mobile apps, games, and any other environment supporting JS execution in a standalone manner. + + + + +## Libraries + +Noir is meant to be easy to extend by simply importing Noir libraries just like in Rust. +The [awesome-noir repo](https://github.com/noir-lang/awesome-noir#libraries) is a collection of libraries developed by the Noir community. +Writing a new library is easy and makes code be composable and easy to reuse. 
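As a rough sketch of what consuming a library looks like (the crate name `my_lib` and its `double` function are placeholders, not a real package), once a dependency is declared in `Nargo.toml` you import it with `use` and call it like local code:

```rust
// Hypothetical library `my_lib`, assumed to be declared as a dependency in Nargo.toml;
// the crate and function names are illustrative only.
use dep::my_lib;

fn main(x: Field) {
    // Call a function exposed by the imported library.
    let doubled = my_lib::double(x);
    assert(doubled == x + x);
}
```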
See the section on [dependencies](noir/modules_packages_crates/dependencies.md) for more information. diff --git a/docs/versioned_docs/version-v0.27.0/migration_notes.md b/docs/versioned_docs/version-v0.27.0/migration_notes.md new file mode 100644 index 00000000000..6bd740024e5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/migration_notes.md @@ -0,0 +1,105 @@ +--- +title: Migration notes +description: Read about migration notes from previous versions, which could solve problems while updating +keywords: [Noir, notes, migration, updating, upgrading] +--- + +Noir is in full-speed development. Things break fast, wild, and often. This page attempts to leave some notes on errors you might encounter when upgrading and how to resolve them until proper patches are built. + +### `backend encountered an error: libc++.so.1` + +Depending on your OS, you may encounter the following error when running `nargo prove` for the first time: + +```text +The backend encountered an error: "/home/codespace/.nargo/backends/acvm-backend-barretenberg/backend_binary: error while loading shared libraries: libc++.so.1: cannot open shared object file: No such file or directory\n" +``` + +Install the `libc++-dev` library with: + +```bash +sudo apt install libc++-dev +``` + +## ≥0.19 + +### Enforcing `compiler_version` + +From this version on, the compiler will check for the `compiler_version` field in `Nargo.toml`, and will error if it doesn't match the current Nargo version in use. + +To update, please make sure this field in `Nargo.toml` matches the output of `nargo --version`. + +## ≥0.14 + +The index of the [for loops](noir/concepts/control_flow.md#loops) is now of type `u64` instead of `Field`. An example refactor would be: + +```rust +for i in 0..10 { + let i = i as Field; +} +``` + +## ≥v0.11.0 and Nargo backend + +From this version onwards, Nargo starts managing backends through the `nargo backend` command. Upgrading to the versions per usual steps might lead to: + +### `backend encountered an error` + +This is likely due to the existing locally installed version of proving backend (e.g. barretenberg) is incompatible with the version of Nargo in use. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. + +If you are using the default barretenberg backend, simply run: + +``` +nargo prove +``` + +with your Noir program. + +This will trigger the download and installation of the latest version of barretenberg compatible with your Nargo in use. + +### `backend encountered an error: illegal instruction` + +On certain Intel-based systems, an `illegal instruction` error may arise due to incompatibility of barretenberg with certain CPU instructions. + +To fix the issue: + +1. Uninstall the existing backend + +```bash +nargo backend uninstall acvm-backend-barretenberg +``` + +You may replace _acvm-backend-barretenberg_ with the name of your backend listed in `nargo backend ls` or in ~/.nargo/backends. + +2. Reinstall a compatible version of the proving backend. 
+ +If you are using the default barretenberg backend, simply run: + +``` +nargo backend install acvm-backend-barretenberg https://github.com/noir-lang/barretenberg-js-binary/raw/master/run-bb.tar.gz +``` + +This downloads and installs a specific bb.js based version of barretenberg binary from GitHub. + +The gzipped file is running [this bash script](https://github.com/noir-lang/barretenberg-js-binary/blob/master/run-bb-js.sh), where we need to gzip it as the Nargo currently expect the backend to be zipped up. + +Then run: + +``` +DESIRED_BINARY_VERSION=0.8.1 nargo info +``` + +This overrides the bb native binary with a bb.js node application instead, which should be compatible with most if not all hardware. This does come with the drawback of being generally slower than native binary. + +0.8.1 indicates bb.js version 0.8.1, so if you change that it will update to a different version or the default version in the script if none was supplied. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/_category_.json b/docs/versioned_docs/version-v0.27.0/noir/concepts/_category_.json new file mode 100644 index 00000000000..7da08f8a8c5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Concepts", + "position": 0, + "collapsible": true, + "collapsed": true +} \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/assert.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/assert.md new file mode 100644 index 00000000000..bcff613a695 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/assert.md @@ -0,0 +1,45 @@ +--- +title: Assert Function +description: + Learn about the assert function in Noir, which can be used to explicitly constrain the predicate or + comparison expression that follows to be true, and what happens if the expression is false at + runtime. +keywords: [Noir programming language, assert statement, predicate expression, comparison expression] +sidebar_position: 4 +--- + +Noir includes a special `assert` function which will explicitly constrain the predicate/comparison +expression that follows to be true. If this expression is false at runtime, the program will fail to +be proven. Example: + +```rust +fn main(x : Field, y : Field) { + assert(x == y); +} +``` + +> Assertions only work for predicate operations, such as `==`. If there's any ambiguity on the operation, the program will fail to compile. For example, it is unclear if `assert(x + y)` would check for `x + y == 0` or simply would return `true`. + +You can optionally provide a message to be logged when the assertion fails: + +```rust +assert(x == y, "x and y are not equal"); +``` + +Aside string literals, the optional message can be a format string or any other type supported as input for Noir's [print](../standard_library/logging.md) functions. 
This feature lets you incorporate runtime variables into your failed assertion logs: + +```rust +assert(x == y, f"Expected x == y, but got {x} == {y}"); +``` + +Using a variable as an assertion message directly: + +```rust +struct myStruct { + myField: Field +} + +let s = myStruct { myField: y }; +assert(s.myField == x, s); +``` + diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/comments.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/comments.md new file mode 100644 index 00000000000..b51a85f5c94 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/comments.md @@ -0,0 +1,33 @@ +--- +title: Comments +description: + Learn how to write comments in Noir programming language. A comment is a line of code that is + ignored by the compiler, but it can be read by programmers. Single-line and multi-line comments + are supported in Noir. +keywords: [Noir programming language, comments, single-line comments, multi-line comments] +sidebar_position: 10 +--- + +A comment is a line in your codebase which the compiler ignores, however it can be read by +programmers. + +Here is a single line comment: + +```rust +// This is a comment and is ignored +``` + +`//` is used to tell the compiler to ignore the rest of the line. + +Noir also supports multi-line block comments. Start a block comment with `/*` and end the block with `*/`. + +Noir does not natively support doc comments. You may be able to use [Rust doc comments](https://doc.rust-lang.org/reference/comments.html) in your code to leverage some Rust documentation build tools with Noir code. + +```rust +/* + This is a block comment describing a complex function. +*/ +fn main(x : Field, y : pub Field) { + assert(x != y); +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/control_flow.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/control_flow.md new file mode 100644 index 00000000000..045d3c3a5f5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/control_flow.md @@ -0,0 +1,77 @@ +--- +title: Control Flow +description: + Learn how to use loops and if expressions in the Noir programming language. Discover the syntax + and examples for for loops and if-else statements. +keywords: [Noir programming language, loops, for loop, if-else statements, Rust syntax] +sidebar_position: 2 +--- + +## If Expressions + +Noir supports `if-else` statements. The syntax is most similar to Rust's where it is not required +for the statement's conditional to be surrounded by parentheses. + +```rust +let a = 0; +let mut x: u32 = 0; + +if a == 0 { + if a != 0 { + x = 6; + } else { + x = 2; + } +} else { + x = 5; + assert(x == 5); +} +assert(x == 2); +``` + +## Loops + +Noir has one kind of loop: the `for` loop. `for` loops allow you to repeat a block of code multiple +times. + +The following block of code between the braces is run 10 times. + +```rust +for i in 0..10 { + // do something +} +``` + +The index for loops is of type `u64`. + +### Break and Continue + +In unconstrained code, `break` and `continue` are also allowed in `for` loops. These are only allowed +in unconstrained code since normal constrained code requires that Noir knows exactly how many iterations +a loop may have. `break` and `continue` can be used like so: + +```rust +for i in 0 .. 10 { + println("Iteration start") + + if i == 2 { + continue; + } + + if i == 5 { + break; + } + + println(i); +} +println("Loop end") +``` + +When used, `break` will end the current loop early and jump to the statement after the for loop. 
In the example +above, the `break` will stop the loop and jump to the `println("Loop end")`. + +`continue` will stop the current iteration of the loop, and jump to the start of the next iteration. In the example +above, `continue` will jump to `println("Iteration start")` when used. Note that the loop continues as normal after this. +The iteration variable `i` is still increased by one as normal when `continue` is used. + +`break` and `continue` cannot currently be used to jump out of more than a single loop at a time. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_bus.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_bus.md new file mode 100644 index 00000000000..e54fc861257 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_bus.md @@ -0,0 +1,21 @@ +--- +title: Data Bus +sidebar_position: 13 +--- +**Disclaimer** this feature is experimental, do not use it! + +The data bus is an optimization that the backend can use to make recursion more efficient. +In order to use it, you must define some inputs of the program entry points (usually the `main()` +function) with the `call_data` modifier, and the return values with the `return_data` modifier. +These modifiers are incompatible with `pub` and `mut` modifiers. + +## Example + +```rust +fn main(mut x: u32, y: call_data u32, z: call_data [u32;4] ) -> return_data u32 { + let a = z[x]; + a+y +} +``` + +As a result, both call_data and return_data will be treated as private inputs and encapsulated into a read-only array each, for the backend to process. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/_category_.json b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/arrays.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/arrays.md new file mode 100644 index 00000000000..efce3e95d32 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/arrays.md @@ -0,0 +1,251 @@ +--- +title: Arrays +description: + Dive into the Array data type in Noir. Grasp its methods, practical examples, and best practices for efficiently using Arrays in your Noir code. +keywords: + [ + noir, + array type, + methods, + examples, + indexing, + ] +sidebar_position: 4 +--- + +An array is one way of grouping together values into one compound type. Array types can be inferred +or explicitly specified via the syntax `[; ]`: + +```rust +fn main(x : Field, y : Field) { + let my_arr = [x, y]; + let your_arr: [Field; 2] = [x, y]; +} +``` + +Here, both `my_arr` and `your_arr` are instantiated as an array containing two `Field` elements. + +Array elements can be accessed using indexing: + +```rust +fn main() { + let a = [1, 2, 3, 4, 5]; + + let first = a[0]; + let second = a[1]; +} +``` + +All elements in an array must be of the same type (i.e. homogeneous). That is, an array cannot group +a `Field` value and a `u8` value together for example. + +You can write mutable arrays, like: + +```rust +fn main() { + let mut arr = [1, 2, 3, 4, 5]; + assert(arr[0] == 1); + + arr[0] = 42; + assert(arr[0] == 42); +} +``` + +You can instantiate a new array of a fixed size with the same value repeated for each element. 
The following example instantiates an array of length 32 where each element is of type Field and has the value 0. + +```rust +let array: [Field; 32] = [0; 32]; +``` + +Like in Rust, arrays in Noir are a fixed size. However, if you wish to convert an array to a [slice](./slices), you can just call `as_slice` on your array: + +```rust +let array: [Field; 32] = [0; 32]; +let sl = array.as_slice() +``` + +You can define multidimensional arrays: + +```rust +let array : [[Field; 2]; 2]; +let element = array[0][0]; +``` +However, multidimensional slices are not supported. For example, the following code will error at compile time: +```rust +let slice : [[Field]] = &[]; +``` + +## Types + +You can create arrays of primitive types or structs. There is not yet support for nested arrays +(arrays of arrays) or arrays of structs that contain arrays. + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for arrays. +Each of these functions are located within the generic impl `impl [T; N] {`. +So anywhere `self` appears, it refers to the variable `self: [T; N]`. + +### len + +Returns the length of an array + +```rust +fn len(self) -> Field +``` + +example + +```rust +fn main() { + let array = [42, 42]; + assert(array.len() == 2); +} +``` + +### sort + +Returns a new sorted array. The original array remains untouched. Notice that this function will +only work for arrays of fields or integers, not for any arbitrary type. This is because the sorting +logic it uses internally is optimized specifically for these values. If you need a sort function to +sort any type, you should use the function `sort_via` described below. + +```rust +fn sort(self) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32]; + let sorted = arr.sort(); + assert(sorted == [32, 42]); +} +``` + +### sort_via + +Sorts the array with a custom comparison function + +```rust +fn sort_via(self, ordering: fn(T, T) -> bool) -> [T; N] +``` + +example + +```rust +fn main() { + let arr = [42, 32] + let sorted_ascending = arr.sort_via(|a, b| a < b); + assert(sorted_ascending == [32, 42]); // verifies + + let sorted_descending = arr.sort_via(|a, b| a > b); + assert(sorted_descending == [32, 42]); // does not verify +} +``` + +### map + +Applies a function to each element of the array, returning a new array containing the mapped elements. + +```rust +fn map(self, f: fn(T) -> U) -> [U; N] +``` + +example + +```rust +let a = [1, 2, 3]; +let b = a.map(|a| a * 2); // b is now [2, 4, 6] +``` + +### fold + +Applies a function to each element of the array, returning the final accumulated value. The first +parameter is the initial value. + +```rust +fn fold(self, mut accumulator: U, f: fn(U, T) -> U) -> U +``` + +This is a left fold, so the given function will be applied to the accumulator and first element of +the array, then the second, and so on. For a given call the expected result would be equivalent to: + +```rust +let a1 = [1]; +let a2 = [1, 2]; +let a3 = [1, 2, 3]; + +let f = |a, b| a - b; +a1.fold(10, f) //=> f(10, 1) +a2.fold(10, f) //=> f(f(10, 1), 2) +a3.fold(10, f) //=> f(f(f(10, 1), 2), 3) +``` + +example: + +```rust + +fn main() { + let arr = [2, 2, 2, 2, 2]; + let folded = arr.fold(0, |a, b| a + b); + assert(folded == 10); +} + +``` + +### reduce + +Same as fold, but uses the first element as starting element. 
+ +```rust +fn reduce(self, f: fn(T, T) -> T) -> T +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let reduced = arr.reduce(|a, b| a + b); + assert(reduced == 10); +} +``` + +### all + +Returns true if all the elements satisfy the given predicate + +```rust +fn all(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 2]; + let all = arr.all(|a| a == 2); + assert(all); +} +``` + +### any + +Returns true if any of the elements satisfy the given predicate + +```rust +fn any(self, predicate: fn(T) -> bool) -> bool +``` + +example: + +```rust +fn main() { + let arr = [2, 2, 2, 2, 5]; + let any = arr.any(|a| a == 5); + assert(any); +} + +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/booleans.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/booleans.md new file mode 100644 index 00000000000..69826fcd724 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/booleans.md @@ -0,0 +1,31 @@ +--- +title: Booleans +description: + Delve into the Boolean data type in Noir. Understand its methods, practical examples, and best practices for using Booleans in your Noir programs. +keywords: + [ + noir, + boolean type, + methods, + examples, + logical operations, + ] +sidebar_position: 2 +--- + + +The `bool` type in Noir has two possible values: `true` and `false`: + +```rust +fn main() { + let t = true; + let f: bool = false; +} +``` + +> **Note:** When returning a boolean value, it will show up as a value of 1 for `true` and 0 for +> `false` in _Verifier.toml_. + +The boolean type is most commonly used in conditionals like `if` expressions and `assert` +statements. More about conditionals is covered in the [Control Flow](../control_flow) and +[Assert Function](../assert) sections. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/fields.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/fields.md new file mode 100644 index 00000000000..a10a4810788 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/fields.md @@ -0,0 +1,192 @@ +--- +title: Fields +description: + Dive deep into the Field data type in Noir. Understand its methods, practical examples, and best practices to effectively use Fields in your Noir programs. +keywords: + [ + noir, + field type, + methods, + examples, + best practices, + ] +sidebar_position: 0 +--- + +The field type corresponds to the native field type of the proving backend. + +The size of a Noir field depends on the elliptic curve's finite field for the proving backend +adopted. For example, a field would be a 254-bit integer when paired with the default backend that +spans the Grumpkin curve. + +Fields support integer arithmetic and are often used as the default numeric type in Noir: + +```rust +fn main(x : Field, y : Field) { + let z = x + y; +} +``` + +`x`, `y` and `z` are all private fields in this example. Using the `let` keyword we defined a new +private value `z` constrained to be equal to `x + y`. + +If proving efficiency is of priority, fields should be used as a default for solving problems. +Smaller integer types (e.g. `u64`) incur extra range constraints. + +## Methods + +After declaring a Field, you can use these common methods on it: + +### to_le_bits + +Transforms the field into an array of bits, Little Endian. 
+ +```rust +fn to_le_bits(_x : Field, _bit_size: u32) -> [u1] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_le_bits(32); +} +``` + +### to_be_bits + +Transforms the field into an array of bits, Big Endian. + +```rust +fn to_be_bits(_x : Field, _bit_size: u32) -> [u1] +``` + +example: + +```rust +fn main() { + let field = 2; + let bits = field.to_be_bits(32); +} +``` + +### to_le_bytes + +Transforms into an array of bytes, Little Endian + +```rust +fn to_le_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_le_bytes(4); +} +``` + +### to_be_bytes + +Transforms into an array of bytes, Big Endian + +```rust +fn to_be_bytes(_x : Field, byte_size: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let bytes = field.to_be_bytes(4); +} +``` + +### to_le_radix + +Decomposes into a vector over the specified base, Little Endian + +```rust +fn to_le_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_le_radix(256, 4); +} +``` + +### to_be_radix + +Decomposes into a vector over the specified base, Big Endian + +```rust +fn to_be_radix(_x : Field, _radix: u32, _result_len: u32) -> [u8] +``` + +example: + +```rust +fn main() { + let field = 2; + let radix = field.to_be_radix(256, 4); +} +``` + +### pow_32 + +Returns the value to the power of the specified exponent + +```rust +fn pow_32(self, exponent: Field) -> Field +``` + +example: + +```rust +fn main() { + let field = 2 + let pow = field.pow_32(4); + assert(pow == 16); +} +``` + +### assert_max_bit_size + +Adds a constraint to specify that the field can be represented with `bit_size` number of bits + +```rust +fn assert_max_bit_size(self, bit_size: u32) +``` + +example: + +```rust +fn main() { + let field = 2 + field.assert_max_bit_size(32); +} +``` + +### sgn0 + +Parity of (prime) Field element, i.e. sgn0(x mod p) = 0 if x ∈ \{0, ..., p-1\} is even, otherwise sgn0(x mod p) = 1. + +```rust +fn sgn0(self) -> u1 +``` + + +### lt + +Returns true if the field is less than the other field + +```rust +pub fn lt(self, another: Field) -> bool +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/function_types.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/function_types.md new file mode 100644 index 00000000000..f6121af17e2 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/function_types.md @@ -0,0 +1,26 @@ +--- +title: Function types +sidebar_position: 10 +--- + +Noir supports higher-order functions. The syntax for a function type is as follows: + +```rust +fn(arg1_type, arg2_type, ...) -> return_type +``` + +Example: + +```rust +fn assert_returns_100(f: fn() -> Field) { // f takes no args and returns a Field + assert(f() == 100); +} + +fn main() { + assert_returns_100(|| 100); // ok + assert_returns_100(|| 150); // fails +} +``` + +A function type also has an optional capture environment - this is necessary to support closures. +See [Lambdas](../lambdas.md) for more details. 
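As a minimal sketch (assuming the capture environment can be spelled out concretely inside the square brackets, as described on the Lambdas page), a higher-order function that accepts a closure capturing two `Field`s could annotate the environment in its parameter type:

```rust
// The parameter type lists the capture environment - here a (Field, Field)
// tuple - so `apply` accepts closures that capture two Fields from the caller.
fn apply(f: fn[(Field, Field)]() -> Field) -> Field {
    f()
}

fn main() {
    let (x, y) = (40, 2);
    let sum = || x + y; // captures x and y
    assert(apply(sum) == 42);
}
```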
diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/index.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/index.md new file mode 100644 index 00000000000..357813c147a --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/index.md @@ -0,0 +1,110 @@ +--- +title: Data Types +description: + Get a clear understanding of the two categories of Noir data types - primitive types and compound + types. Learn about their characteristics, differences, and how to use them in your Noir + programming. +keywords: + [ + noir, + data types, + primitive types, + compound types, + private types, + public types, + ] +--- + +Every value in Noir has a type, which determines which operations are valid for it. + +All values in Noir are fundamentally composed of `Field` elements. For a more approachable +developing experience, abstractions are added on top to introduce different data types in Noir. + +Noir has two category of data types: primitive types (e.g. `Field`, integers, `bool`) and compound +types that group primitive types (e.g. arrays, tuples, structs). Each value can either be private or +public. + +## Private & Public Types + +A **private value** is known only to the Prover, while a **public value** is known by both the +Prover and Verifier. Mark values as `private` when the value should only be known to the prover. All +primitive types (including individual fields of compound types) in Noir are private by default, and +can be marked public when certain values are intended to be revealed to the Verifier. + +> **Note:** For public values defined in Noir programs paired with smart contract verifiers, once +> the proofs are verified on-chain the values can be considered known to everyone that has access to +> that blockchain. + +Public data types are treated no differently to private types apart from the fact that their values +will be revealed in proofs generated. Simply changing the value of a public type will not change the +circuit (where the same goes for changing values of private types as well). + +_Private values_ are also referred to as _witnesses_ sometimes. + +> **Note:** The terms private and public when applied to a type (e.g. `pub Field`) have a different +> meaning than when applied to a function (e.g. `pub fn foo() {}`). +> +> The former is a visibility modifier for the Prover to interpret if a value should be made known to +> the Verifier, while the latter is a visibility modifier for the compiler to interpret if a +> function should be made accessible to external Noir programs like in other languages. + +### pub Modifier + +All data types in Noir are private by default. Types are explicitly declared as public using the +`pub` modifier: + +```rust +fn main(x : Field, y : pub Field) -> pub Field { + x + y +} +``` + +In this example, `x` is **private** while `y` and `x + y` (the return value) are **public**. Note +that visibility is handled **per variable**, so it is perfectly valid to have one input that is +private and another that is public. + +> **Note:** Public types can only be declared through parameters on `main`. + +## Type Aliases + +A type alias is a new name for an existing type. 
Type aliases are declared with the keyword `type`: + +```rust +type Id = u8; + +fn main() { + let id: Id = 1; + let zero: u8 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can also be used with [generics](../generics.md): + +```rust +type Id = Size; + +fn main() { + let id: Id = 1; + let zero: u32 = 0; + assert(zero + 1 == id); +} +``` + +Type aliases can even refer to other aliases. An error will be issued if they form a cycle: + +```rust +// Ok! +type A = B; +type B = Field; + +type Bad1 = Bad2; + +// error: Dependency cycle found +type Bad2 = Bad1; +// ^^^^^^^^^^^ 'Bad2' recursively depends on itself: Bad2 -> Bad1 -> Bad2 +``` + +### BigInt + +You can achieve BigInt functionality using the [Noir BigInt](https://github.com/shuklaayush/noir-bigint) library. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/integers.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/integers.md new file mode 100644 index 00000000000..1c6b375db49 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/integers.md @@ -0,0 +1,155 @@ +--- +title: Integers +description: Explore the Integer data type in Noir. Learn about its methods, see real-world examples, and grasp how to efficiently use Integers in your Noir code. +keywords: [noir, integer types, methods, examples, arithmetic] +sidebar_position: 1 +--- + +An integer type is a range constrained field type. The Noir frontend supports both unsigned and signed integer types. The allowed sizes are 1, 8, 32 and 64 bits. + +:::info + +When an integer is defined in Noir without a specific type, it will default to `Field`. + +The one exception is for loop indices which default to `u64` since comparisons on `Field`s are not possible. + +::: + +## Unsigned Integers + +An unsigned integer type is specified first with the letter `u` (indicating its unsigned nature) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: u8 = 1; + let y: u8 = 1; + let z = x + y; + assert (z == 2); +} +``` + +The bit size determines the maximum value the integer type can store. For example, a `u8` variable can store a value in the range of 0 to 255 (i.e. $\\2^{8}-1\\$). + +## Signed Integers + +A signed integer type is specified first with the letter `i` (which stands for integer) followed by its bit size (e.g. `8`): + +```rust +fn main() { + let x: i8 = -1; + let y: i8 = -1; + let z = x + y; + assert (z == -2); +} +``` + +The bit size determines the maximum and minimum range of value the integer type can store. For example, an `i8` variable can store a value in the range of -128 to 127 (i.e. $\\-2^{7}\\$ to $\\2^{7}-1\\$). + +## 128 bits Unsigned Integers + +The built-in structure `U128` allows you to use 128-bit unsigned integers almost like a native integer type. However, there are some differences to keep in mind: +- You cannot cast between a native integer and `U128` +- There is a higher performance cost when using `U128`, compared to a native type. + +Conversion between unsigned integer types and U128 are done through the use of `from_integer` and `to_integer` functions. `from_integer` also accepts the `Field` type as input. + +```rust +fn main() { + let x = U128::from_integer(23); + let y = U128::from_hex("0x7"); + let z = x + y; + assert(z.to_integer() == 30); +} +``` + +`U128` is implemented with two 64 bits limbs, representing the low and high bits, which explains the performance cost. You should expect `U128` to be twice more costly for addition and four times more costly for multiplication. 
+You can construct a U128 from its limbs: +```rust +fn main(x: u64, y: u64) { + let x = U128::from_u64s_be(x,y); + assert(z.hi == x as Field); + assert(z.lo == y as Field); +} +``` + +Note that the limbs are stored as Field elements in order to avoid unnecessary conversions. +Apart from this, most operations will work as usual: + +```rust +fn main(x: U128, y: U128) { + // multiplication + let c = x * y; + // addition and subtraction + let c = c - x + y; + // division + let c = x / y; + // bit operation; + let c = x & y | y; + // bit shift + let c = x << y; + // comparisons; + let c = x < y; + let c = x == y; +} +``` + +## Overflows + +Computations that exceed the type boundaries will result in overflow errors. This happens with both signed and unsigned integers. For example, attempting to prove: + +```rust +fn main(x: u8, y: u8) { + let z = x + y; +} +``` + +With: + +```toml +x = "255" +y = "1" +``` + +Would result in: + +``` +$ nargo prove +error: Assertion failed: 'attempt to add with overflow' +┌─ ~/src/main.nr:9:13 +│ +│ let z = x + y; +│ ----- +│ += Call stack: + ... +``` + +A similar error would happen with signed integers: + +```rust +fn main() { + let x: i8 = -118; + let y: i8 = -11; + let z = x + y; +} +``` + +### Wrapping methods + +Although integer overflow is expected to error, some use-cases rely on wrapping. For these use-cases, the standard library provides `wrapping` variants of certain common operations: + +```rust +fn wrapping_add(x: T, y: T) -> T; +fn wrapping_sub(x: T, y: T) -> T; +fn wrapping_mul(x: T, y: T) -> T; +``` + +Example of how it is used: + +```rust +use dep::std; + +fn main(x: u8, y: u8) -> pub u8 { + std::wrapping_add(x, y) +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/references.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/references.md new file mode 100644 index 00000000000..a5293d11cfb --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/references.md @@ -0,0 +1,23 @@ +--- +title: References +sidebar_position: 9 +--- + +Noir supports first-class references. References are a bit like pointers: they point to a specific address that can be followed to access the data stored at that address. You can use Rust-like syntax to use pointers in Noir: the `&` operator references the variable, the `*` operator dereferences it. + +Example: + +```rust +fn main() { + let mut x = 2; + + // you can reference x as &mut and pass it to multiplyBy2 + multiplyBy2(&mut x); +} + +// you can access &mut here +fn multiplyBy2(x: &mut Field) { + // and dereference it with * + *x = *x * 2; +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/slices.mdx b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/slices.mdx new file mode 100644 index 00000000000..828faf4a8f8 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/slices.mdx @@ -0,0 +1,170 @@ +--- +title: Slices +description: Explore the Slice data type in Noir. Understand its methods, see real-world examples, and learn how to effectively use Slices in your Noir programs. +keywords: [noir, slice type, methods, examples, subarrays] +sidebar_position: 5 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A slice is a dynamically-sized view into a sequence of elements. They can be resized at runtime, but because they don't own the data, they cannot be returned from a circuit. You can treat slices as arrays without a constrained size. 
+ +```rust +use dep::std::slice; + +fn main() -> pub Field { + let mut slice: [Field] = &[0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +To write a slice literal, use a preceeding ampersand as in: `&[0; 2]` or +`&[1, 2, 3]`. + +It is important to note that slices are not references to arrays. In Noir, +`&[..]` is more similar to an immutable, growable vector. + +View the corresponding test file [here][test-file]. + +[test-file]: https://github.com/noir-lang/noir/blob/f387ec1475129732f72ba294877efdf6857135ac/crates/nargo_cli/tests/test_data_ssa_refactor/slices/src/main.nr + +## Methods + +For convenience, the STD provides some ready-to-use, common methods for slices: + +### push_back + +Pushes a new element to the end of the slice, returning a new slice with a length one greater than the original unmodified slice. + +```rust +fn push_back(_self: [T], _elem: T) -> [T] +``` + +example: + +```rust +fn main() -> pub Field { + let mut slice: [Field] = &[0; 2]; + + let mut new_slice = slice.push_back(6); + new_slice.len() +} +``` + +View the corresponding test file [here][test-file]. + +### push_front + +Returns a new array with the specified element inserted at index 0. The existing elements indexes are incremented by 1. + +```rust +fn push_front(_self: Self, _elem: T) -> Self +``` + +Example: + +```rust +let mut new_slice: [Field] = &[]; +new_slice = new_slice.push_front(20); +assert(new_slice[0] == 20); // returns true +``` + +View the corresponding test file [here][test-file]. + +### pop_front + +Returns a tuple of two items, the first element of the array and the rest of the array. + +```rust +fn pop_front(_self: Self) -> (T, Self) +``` + +Example: + +```rust +let (first_elem, rest_of_slice) = slice.pop_front(); +``` + +View the corresponding test file [here][test-file]. + +### pop_back + +Returns a tuple of two items, the beginning of the array with the last element omitted and the last element. + +```rust +fn pop_back(_self: Self) -> (Self, T) +``` + +Example: + +```rust +let (popped_slice, last_elem) = slice.pop_back(); +``` + +View the corresponding test file [here][test-file]. + +### append + +Loops over a slice and adds it to the end of another. + +```rust +fn append(mut self, other: Self) -> Self +``` + +Example: + +```rust +let append = &[1, 2].append(&[3, 4, 5]); +``` + +### insert + +Inserts an element at a specified index and shifts all following elements by 1. + +```rust +fn insert(_self: Self, _index: Field, _elem: T) -> Self +``` + +Example: + +```rust +new_slice = rest_of_slice.insert(2, 100); +assert(new_slice[2] == 100); +``` + +View the corresponding test file [here][test-file]. + +### remove + +Remove an element at a specified index, shifting all elements after it to the left, returning the altered slice and the removed element. + +```rust +fn remove(_self: Self, _index: Field) -> (Self, T) +``` + +Example: + +```rust +let (remove_slice, removed_elem) = slice.remove(3); +``` + +### len + +Returns the length of a slice + +```rust +fn len(self) -> Field +``` + +Example: + +```rust +fn main() { + let slice = &[42, 42]; + assert(slice.len() == 2); +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/strings.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/strings.md new file mode 100644 index 00000000000..311dfd64416 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/strings.md @@ -0,0 +1,80 @@ +--- +title: Strings +description: + Discover the String data type in Noir. 
Learn about its methods, see real-world examples, and understand how to effectively manipulate and use Strings in Noir. +keywords: + [ + noir, + string type, + methods, + examples, + concatenation, + ] +sidebar_position: 3 +--- + + +The string type is a fixed length value defined with `str`. + +You can use strings in `assert()` functions or print them with +`println()`. See more about [Logging](../../standard_library/logging). + +```rust +use dep::std; + +fn main(message : pub str<11>, hex_as_string : str<4>) { + println(message); + assert(message == "hello world"); + assert(hex_as_string == "0x41"); +} +``` + +You can convert a `str` to a byte array by calling `as_bytes()` +or a vector by calling `as_bytes_vec()`. + +```rust +fn main() { + let message = "hello world"; + let message_bytes = message.as_bytes(); + let mut message_vec = message.as_bytes_vec(); + assert(message_bytes.len() == 11); + assert(message_bytes[0] == 104); + assert(message_bytes[0] == message_vec.get(0)); +} +``` + +## Escape characters + +You can use escape characters for your strings: + +| Escape Sequence | Description | +|-----------------|-----------------| +| `\r` | Carriage Return | +| `\n` | Newline | +| `\t` | Tab | +| `\0` | Null Character | +| `\"` | Double Quote | +| `\\` | Backslash | + +Example: + +```rust +let s = "Hello \"world" // prints "Hello "world" +let s = "hey \tyou"; // prints "hey you" +``` + +## Raw strings + +A raw string begins with the letter `r` and is optionally delimited by a number of hashes `#`. + +Escape characters are *not* processed within raw strings. All contents are interpreted literally. + +Example: + +```rust +let s = r"Hello world"; +let s = r#"Simon says "hello world""#; + +// Any number of hashes may be used (>= 1) as long as the string also terminates with the same number of hashes +let s = r#####"One "#, Two "##, Three "###, Four "####, Five will end the string."#####; +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/structs.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/structs.md new file mode 100644 index 00000000000..dbf68c99813 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/structs.md @@ -0,0 +1,70 @@ +--- +title: Structs +description: + Explore the Struct data type in Noir. Learn about its methods, see real-world examples, and grasp how to effectively define and use Structs in your Noir programs. +keywords: + [ + noir, + struct type, + methods, + examples, + data structures, + ] +sidebar_position: 8 +--- + +A struct also allows for grouping multiple values of different types. Unlike tuples, we can also +name each field. + +> **Note:** The usage of _field_ here refers to each element of the struct and is unrelated to the +> field type of Noir. + +Defining a struct requires giving it a name and listing each field within as `: ` pairs: + +```rust +struct Animal { + hands: Field, + legs: Field, + eyes: u8, +} +``` + +An instance of a struct can then be created with actual values in `: ` pairs in any +order. 
Struct fields are accessible using their given names: + +```rust +fn main() { + let legs = 4; + + let dog = Animal { + eyes: 2, + hands: 0, + legs, + }; + + let zero = dog.hands; +} +``` + +Structs can also be destructured in a pattern, binding each field to a new variable: + +```rust +fn main() { + let Animal { hands, legs: feet, eyes } = get_octopus(); + + let ten = hands + feet + eyes as u8; +} + +fn get_octopus() -> Animal { + let octopus = Animal { + hands: 0, + legs: 8, + eyes: 2, + }; + + octopus +} +``` + +The new variables can be bound with names different from the original struct field names, as +showcased in the `legs --> feet` binding in the example above. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/tuples.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/tuples.md new file mode 100644 index 00000000000..2ec5c9c4113 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/data_types/tuples.md @@ -0,0 +1,48 @@ +--- +title: Tuples +description: + Dive into the Tuple data type in Noir. Understand its methods, practical examples, and best practices for efficiently using Tuples in your Noir code. +keywords: + [ + noir, + tuple type, + methods, + examples, + multi-value containers, + ] +sidebar_position: 7 +--- + +A tuple collects multiple values like an array, but with the added ability to collect values of +different types: + +```rust +fn main() { + let tup: (u8, u64, Field) = (255, 500, 1000); +} +``` + +One way to access tuple elements is via destructuring using pattern matching: + +```rust +fn main() { + let tup = (1, 2); + + let (one, two) = tup; + + let three = one + two; +} +``` + +Another way to access tuple elements is via direct member access, using a period (`.`) followed by +the index of the element we want to access. Index `0` corresponds to the first tuple element, `1` to +the second and so on: + +```rust +fn main() { + let tup = (5, 6, 7, 8); + + let five = tup.0; + let eight = tup.3; +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/distinct.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/distinct.md new file mode 100644 index 00000000000..6c993b8b5e0 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/distinct.md @@ -0,0 +1,64 @@ +--- +title: Distinct Witnesses +sidebar_position: 11 +--- + +The `distinct` keyword prevents repetitions of witness indices in the program's ABI. This ensures +that the witnesses being returned as public inputs are all unique. + +The `distinct` keyword is only used for return values on program entry points (usually the `main()` +function). + +When using `distinct` and `pub` simultaneously, `distinct` comes first. See the example below. + +You can read more about the problem this solves +[here](https://github.com/noir-lang/noir/issues/1183). + +## Example + +Without the `distinct` keyword, the following program + +```rust +fn main(x : pub Field, y : pub Field) -> pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + "return_witnesses": [3, 2, 4, 4] + } +} +``` + +Whereas (with the `distinct` keyword) + +```rust +fn main(x : pub Field, y : pub Field) -> distinct pub [Field; 4] { + let a = 1; + let b = 1; + [x + 1, y, a, b] +} +``` + +compiles to + +```json +{ + //... + "abi": { + //... + "param_witnesses": { "x": [1], "y": [2] }, + //... 
+ "return_witnesses": [3, 4, 5, 6] + } +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/functions.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/functions.md new file mode 100644 index 00000000000..2c9bc33fdfc --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/functions.md @@ -0,0 +1,226 @@ +--- +title: Functions +description: + Learn how to declare functions and methods in Noir, a programming language with Rust semantics. + This guide covers parameter declaration, return types, call expressions, and more. +keywords: [Noir, Rust, functions, methods, parameter declaration, return types, call expressions] +sidebar_position: 1 +--- + +Functions in Noir follow the same semantics of Rust, though Noir does not support early returns. + +To declare a function the `fn` keyword is used. + +```rust +fn foo() {} +``` + +By default, functions are visible only within the package they are defined. To make them visible outside of that package (for example, as part of a [library](../modules_packages_crates/crates_and_packages.md#libraries)), you should mark them as `pub`: + +```rust +pub fn foo() {} +``` + +You can also restrict the visibility of the function to only the crate it was defined in, by specifying `pub(crate)`: + +```rust +pub(crate) fn foo() {} //foo can only be called within its crate +``` + +All parameters in a function must have a type and all types are known at compile time. The parameter +is pre-pended with a colon and the parameter type. Multiple parameters are separated using a comma. + +```rust +fn foo(x : Field, y : Field){} +``` + +The return type of a function can be stated by using the `->` arrow notation. The function below +states that the foo function must return a `Field`. If the function returns no value, then the arrow +is omitted. + +```rust +fn foo(x : Field, y : Field) -> Field { + x + y +} +``` + +Note that a `return` keyword is unneeded in this case - the last expression in a function's body is +returned. + +## Main function + +If you're writing a binary, the `main` function is the starting point of your program. You can pass all types of expressions to it, as long as they have a fixed size at compile time: + +```rust +fn main(x : Field) // this is fine: passing a Field +fn main(x : [Field; 2]) // this is also fine: passing a Field with known size at compile-time +fn main(x : (Field, bool)) // 👌: passing a (Field, bool) tuple means size 2 +fn main(x : str<5>) // this is fine, as long as you pass a string of size 5 + +fn main(x : Vec) // can't compile, has variable size +fn main(x : [Field]) // can't compile, has variable size +fn main(....// i think you got it by now +``` + +Keep in mind [tests](../../getting_started/tooling/testing.md) don't differentiate between `main` and any other function. The following snippet passes tests, but won't compile or prove: + +```rust +fn main(x : [Field]) { + assert(x[0] == 1); +} + +#[test] +fn test_one() { + main(&[1, 2]); +} +``` + +```bash +$ nargo test +[testing] Running 1 test functions +[testing] Testing test_one... ok +[testing] All tests passed + +$ nargo check +The application panicked (crashed). +Message: Cannot have variable sized arrays as a parameter to main +``` + +## Call Expressions + +Calling a function in Noir is executed by using the function name and passing in the necessary +arguments. 
+ +Below we show how to call the `foo` function from the `main` function using a call expression: + +```rust +fn main(x : Field, y : Field) { + let z = foo(x); +} + +fn foo(x : Field) -> Field { + x + x +} +``` + +## Methods + +You can define methods in Noir on any struct type in scope. + +```rust +struct MyStruct { + foo: Field, + bar: Field, +} + +impl MyStruct { + fn new(foo: Field) -> MyStruct { + MyStruct { + foo, + bar: 2, + } + } + + fn sum(self) -> Field { + self.foo + self.bar + } +} + +fn main() { + let s = MyStruct::new(40); + assert(s.sum() == 42); +} +``` + +Methods are just syntactic sugar for functions, so if we wanted to we could also call `sum` as +follows: + +```rust +assert(MyStruct::sum(s) == 42); +``` + +It is also possible to specialize which method is chosen depending on the [generic](./generics.md) type that is used. In this example, the `foo` function returns different values depending on its type: + +```rust +struct Foo {} + +impl Foo { + fn foo(self) -> Field { 1 } +} + +impl Foo { + fn foo(self) -> Field { 2 } +} + +fn main() { + let f1: Foo = Foo{}; + let f2: Foo = Foo{}; + assert(f1.foo() + f2.foo() == 3); +} +``` + +Also note that impls with the same method name defined in them cannot overlap. For example, if we already have `foo` defined for `Foo` and `Foo` like we do above, we cannot also define `foo` in an `impl Foo` since it would be ambiguous which version of `foo` to choose. + +```rust +// Including this impl in the same project as the above snippet would +// cause an overlapping impls error +impl Foo { + fn foo(self) -> Field { 3 } +} +``` + +## Lambdas + +Lambdas are anonymous functions. They follow the syntax of Rust - `|arg1, arg2, ..., argN| return_expression`. + +```rust +let add_50 = |val| val + 50; +assert(add_50(100) == 150); +``` + +See [Lambdas](./lambdas.md) for more details. + +## Attributes + +Attributes are metadata that can be applied to a function, using the following syntax: `#[attribute(value)]`. + +Supported attributes include: + +- **builtin**: the function is implemented by the compiler, for efficiency purposes. +- **deprecated**: mark the function as _deprecated_. Calling the function will generate a warning: `warning: use of deprecated function` +- **field**: Used to enable conditional compilation of code depending on the field size. See below for more details +- **oracle**: mark the function as _oracle_; meaning it is an external unconstrained function, implemented in noir_js. See [Unconstrained](./unconstrained.md) and [NoirJS](../../reference/NoirJS/noir_js/index.md) for more details. +- **test**: mark the function as unit tests. See [Tests](../../getting_started/tooling/testing.md) for more details + +### Field Attribute + +The field attribute defines which field the function is compatible for. The function is conditionally compiled, under the condition that the field attribute matches the Noir native field. +The field can be defined implicitly, by using the name of the elliptic curve usually associated to it - for instance bn254, bls12_381 - or explicitly by using the field (prime) order, in decimal or hexadecimal form. +As a result, it is possible to define multiple versions of a function with each version specialized for a different field attribute. This can be useful when a function requires different parameters depending on the underlying elliptic curve. + +Example: we define the function `foo()` three times below. 
Once for the default Noir bn254 curve, once for the field $\mathbb F_{23}$, which will normally never be used by Noir, and once again for the bls12_381 curve. + +```rust +#[field(bn254)] +fn foo() -> u32 { + 1 +} + +#[field(23)] +fn foo() -> u32 { + 2 +} + +// This commented code would not compile as foo would be defined twice because it is the same field as bn254 +// #[field(21888242871839275222246405745257275088548364400416034343698204186575808495617)] +// fn foo() -> u32 { +// 2 +// } + +#[field(bls12_381)] +fn foo() -> u32 { + 3 +} +``` + +If the field name is not known to Noir, it will discard the function. Field names are case insensitive. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/generics.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/generics.md new file mode 100644 index 00000000000..ddd42bf1f9b --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/generics.md @@ -0,0 +1,106 @@ +--- +title: Generics +description: Learn how to use Generics in Noir +keywords: [Noir, Rust, generics, functions, structs] +sidebar_position: 7 +--- + +Generics allow you to use the same functions with multiple different concrete data types. You can +read more about the concept of generics in the Rust documentation +[here](https://doc.rust-lang.org/book/ch10-01-syntax.html). + +Here is a trivial example showing the identity function that supports any type. In Rust, it is +common to refer to the most general type as `T`. We follow the same convention in Noir. + +```rust +fn id(x: T) -> T { + x +} +``` + +## In Structs + +Generics are useful for specifying types in structs. For example, we can specify that a field in a +struct will be of a certain generic type. In this case `value` is of type `T`. + +```rust +struct RepeatedValue { + value: T, + count: Field, +} + +impl RepeatedValue { + fn print(self) { + for _i in 0 .. self.count { + println(self.value); + } + } +} + +fn main() { + let repeated = RepeatedValue { value: "Hello!", count: 2 }; + repeated.print(); +} +``` + +The `print` function will print `Hello!` an arbitrary number of times, twice in this case. + +If we want to be generic over array lengths (which are type-level integers), we can use numeric +generics. Using these looks just like using regular generics, but these generics can resolve to +integers at compile-time, rather than resolving to types. Here's an example of a struct that is +generic over the size of the array it contains internally: + +```rust +struct BigInt { + limbs: [u32; N], +} + +impl BigInt { + // `N` is in scope of all methods in the impl + fn first(first: BigInt, second: BigInt) -> Self { + assert(first.limbs != second.limbs); + first + + fn second(first: BigInt, second: Self) -> Self { + assert(first.limbs != second.limbs); + second + } +} +``` + +## Calling functions on generic parameters + +Since a generic type `T` can represent any type, how can we call functions on the underlying type? +In other words, how can we go from "any type `T`" to "any type `T` that has certain methods available?" + +This is what [traits](../concepts/traits) are for in Noir. 
Here's an example of a function generic over +any type `T` that implements the `Eq` trait for equality: + +```rust +fn first_element_is_equal(array1: [T; N], array2: [T; N]) -> bool + where T: Eq +{ + if (array1.len() == 0) | (array2.len() == 0) { + true + } else { + array1[0] == array2[0] + } +} + +fn main() { + assert(first_element_is_equal([1, 2, 3], [1, 5, 6])); + + // We can use first_element_is_equal for arrays of any type + // as long as we have an Eq impl for the types we pass in + let array = [MyStruct::new(), MyStruct::new()]; + assert(array_eq(array, array, MyStruct::eq)); +} + +impl Eq for MyStruct { + fn eq(self, other: MyStruct) -> bool { + self.foo == other.foo + } +} +``` + +You can find more details on traits and trait implementations on the [traits page](../concepts/traits). diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/globals.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/globals.md new file mode 100644 index 00000000000..063a3d89248 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/globals.md @@ -0,0 +1,72 @@ +--- +title: Global Variables +description: + Learn about global variables in Noir. Discover how + to declare, modify, and use them in your programs. +keywords: [noir programming language, globals, global variables, constants] +sidebar_position: 8 +--- + +## Globals + + +Noir supports global variables. The global's type can be inferred by the compiler entirely: + +```rust +global N = 5; // Same as `global N: Field = 5` + +global TUPLE = (3, 2); + +fn main() { + assert(N == 5); + assert(N == TUPLE.0 + TUPLE.1); +} +``` + +:::info + +Globals can be defined as any expression, so long as they don't depend on themselves - otherwise there would be a dependency cycle! For example: + +```rust +global T = foo(T); // dependency error +``` + +::: + + +If they are initialized to a literal integer, globals can be used to specify an array's length: + +```rust +global N: Field = 2; + +fn main(y : [Field; N]) { + assert(y[0] == y[1]) +} +``` + +A global from another module can be imported or referenced externally like any other name: + +```rust +global N = 20; + +fn main() { + assert(my_submodule::N != N); +} + +mod my_submodule { + global N: Field = 10; +} +``` + +When a global is used, Noir replaces the name with its definition on each occurrence. +This means globals defined using function calls will repeat the call each time they're used: + +```rust +global RESULT = foo(); + +fn foo() -> [Field; 100] { ... } +``` + +This is usually fine since Noir will generally optimize any function call that does not +refer to a program input into a constant. It should be kept in mind however, if the called +function performs side-effects like `println`, as these will still occur on each use. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/lambdas.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/lambdas.md new file mode 100644 index 00000000000..be3c7e0b5ca --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/lambdas.md @@ -0,0 +1,81 @@ +--- +title: Lambdas +description: Learn how to use anonymous functions in Noir programming language. +keywords: [Noir programming language, lambda, closure, function, anonymous function] +sidebar_position: 9 +--- + +## Introduction + +Lambdas are anonymous functions. The syntax is `|arg1, arg2, ..., argN| return_expression`. 
+

```rust
let add_50 = |val| val + 50;
assert(add_50(100) == 150);
```

A block can be used as the body of a lambda, allowing you to declare local variables inside it:

```rust
let cool = || {
    let x = 100;
    let y = 100;
    x + y
};

assert(cool() == 200);
```

## Closures

Inside the body of a lambda, you can use variables defined in the enclosing function. Such lambdas are called **closures**. In this example `x` is defined inside `main` and is accessed from within the lambda:

```rust
fn main() {
    let x = 100;
    let closure = || x + 150;
    assert(closure() == 250);
}
```

## Passing closures to higher-order functions

It may catch you by surprise that the following code fails to compile:

```rust
fn foo(f: fn () -> Field) -> Field {
    f()
}

fn main() {
    let (x, y) = (50, 50);
    assert(foo(|| x + y) == 100); // error :(
}
```

The reason is that the closure's capture environment affects its type - we have a closure that captures two Fields and `foo`
expects a regular function as an argument - those are incompatible.

:::note

Variables contained within the `||` are the closure's parameters, and the expression that follows it is the closure's body. The capture environment is comprised of any variables used in the closure's body that are not parameters.

E.g. in `|x| x + y`, `y` would be a captured variable, but `x` would not be, since it is a parameter of the closure.

:::

The syntax for the type of a closure is `fn[env](args) -> ret_type`, where `env` is the capture environment of the closure -
in this example that's `(Field, Field)`.

The best solution in our case is to make `foo` generic over the environment type of its parameter, so that it can be called
with closures with any environment, as well as with regular functions:

```rust
fn foo<Env>(f: fn[Env]() -> Field) -> Field {
    f()
}

fn main() {
    let (x, y) = (50, 50);
    assert(foo(|| x + y) == 100); // compiles fine
    assert(foo(|| 60) == 60);     // compiles fine
}
```
diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/mutability.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/mutability.md
new file mode 100644
index 00000000000..fdeef6a87c5
--- /dev/null
+++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/mutability.md
@@ -0,0 +1,121 @@
---
title: Mutability
description:
  Learn about mutable variables in Noir. Discover how
  to declare, modify, and use them in your programs.
keywords: [noir programming language, mutability in noir, mutable variables]
sidebar_position: 8
---

Variables in Noir can be declared mutable via the `mut` keyword. Mutable variables can be reassigned
to via an assignment expression.

```rust
let x = 2;
x = 3; // error: x must be mutable to be assigned to

let mut y = 3;
let y = 4; // OK
```

The `mut` modifier can also apply to patterns:

```rust
let (a, mut b) = (1, 2);
a = 11; // error: a must be mutable to be assigned to
b = 12; // OK

let mut (c, d) = (3, 4);
c = 13; // OK
d = 14; // OK

// etc.
let MyStruct { x: mut y } = MyStruct { x: a };
// y is now in scope
```

Note that mutability in Noir is local and everything is passed by value, so if a called function
mutates its parameters then the parent function will keep the old value of the parameters.
+ +```rust +fn main() -> pub Field { + let x = 3; + helper(x); + x // x is still 3 +} + +fn helper(mut x: i32) { + x = 4; +} +``` + +## Non-local mutability + +Non-local mutability can be achieved through the mutable reference type `&mut T`: + +```rust +fn set_to_zero(x: &mut Field) { + *x = 0; +} + +fn main() { + let mut y = 42; + set_to_zero(&mut y); + assert(*y == 0); +} +``` + +When creating a mutable reference, the original variable being referred to (`y` in this +example) must also be mutable. Since mutable references are a reference type, they must +be explicitly dereferenced via `*` to retrieve the underlying value. Note that this yields +a copy of the value, so mutating this copy will not change the original value behind the +reference: + +```rust +fn main() { + let mut x = 1; + let x_ref = &mut x; + + let mut y = *x_ref; + let y_ref = &mut y; + + x = 2; + *x_ref = 3; + + y = 4; + *y_ref = 5; + + assert(x == 3); + assert(*x_ref == 3); + assert(y == 5); + assert(*y_ref == 5); +} +``` + +Note that types in Noir are actually deeply immutable so the copy that occurs when +dereferencing is only a conceptual copy - no additional constraints will occur. + +Mutable references can also be stored within structs. Note that there is also +no lifetime parameter on these unlike rust. This is because the allocated memory +always lasts the entire program - as if it were an array of one element. + +```rust +struct Foo { + x: &mut Field +} + +impl Foo { + fn incr(mut self) { + *self.x += 1; + } +} + +fn main() { + let foo = Foo { x: &mut 0 }; + foo.incr(); + assert(*foo.x == 1); +} +``` + +In general, you should avoid non-local & shared mutability unless it is needed. Sticking +to only local mutability will improve readability and potentially improve compiler optimizations as well. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/ops.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/ops.md new file mode 100644 index 00000000000..60425cb8994 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/ops.md @@ -0,0 +1,98 @@ +--- +title: Logical Operations +description: + Learn about the supported arithmetic and logical operations in the Noir programming language. + Discover how to perform operations on private input types, integers, and booleans. +keywords: + [ + Noir programming language, + supported operations, + arithmetic operations, + logical operations, + predicate operators, + bitwise operations, + short-circuiting, + backend, + ] +sidebar_position: 3 +--- + +# Operations + +## Table of Supported Operations + +| Operation | Description | Requirements | +| :-------- | :------------------------------------------------------------: | -------------------------------------: | +| + | Adds two private input types together | Types must be private input | +| - | Subtracts two private input types together | Types must be private input | +| \* | Multiplies two private input types together | Types must be private input | +| / | Divides two private input types together | Types must be private input | +| ^ | XOR two private input types together | Types must be integer | +| & | AND two private input types together | Types must be integer | +| \| | OR two private input types together | Types must be integer | +| \<\< | Left shift an integer by another integer amount | Types must be integer | +| >> | Right shift an integer by another integer amount | Types must be integer | +| ! 
| Bitwise not of a value | Type must be integer or boolean | +| \< | returns a bool if one value is less than the other | Upper bound must have a known bit size | +| \<= | returns a bool if one value is less than or equal to the other | Upper bound must have a known bit size | +| > | returns a bool if one value is more than the other | Upper bound must have a known bit size | +| >= | returns a bool if one value is more than or equal to the other | Upper bound must have a known bit size | +| == | returns a bool if one value is equal to the other | Both types must not be constants | +| != | returns a bool if one value is not equal to the other | Both types must not be constants | + +### Predicate Operators + +`<,<=, !=, == , >, >=` are known as predicate/comparison operations because they compare two values. +This differs from the operations such as `+` where the operands are used in _computation_. + +### Bitwise Operations Example + +```rust +fn main(x : Field) { + let y = x as u32; + let z = y & y; +} +``` + +`z` is implicitly constrained to be the result of `y & y`. The `&` operand is used to denote bitwise +`&`. + +> `x & x` would not compile as `x` is a `Field` and not an integer type. + +### Logical Operators + +Noir has no support for the logical operators `||` and `&&`. This is because encoding the +short-circuiting that these operators require can be inefficient for Noir's backend. Instead you can +use the bitwise operators `|` and `&` which operate identically for booleans, just without the +short-circuiting. + +```rust +let my_val = 5; + +let mut flag = 1; +if (my_val > 6) | (my_val == 0) { + flag = 0; +} +assert(flag == 1); + +if (my_val != 10) & (my_val < 50) { + flag = 0; +} +assert(flag == 0); +``` + +### Shorthand operators + +Noir shorthand operators for most of the above operators, namely `+=, -=, *=, /=, %=, &=, |=, ^=, <<=`, and `>>=`. These allow for more concise syntax. For example: + +```rust +let mut i = 0; +i = i + 1; +``` + +could be written as: + +```rust +let mut i = 0; +i += 1; +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/oracles.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/oracles.md new file mode 100644 index 00000000000..2e6a6818d48 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/oracles.md @@ -0,0 +1,23 @@ +--- +title: Oracles +description: Dive into how Noir supports Oracles via RPC calls, and learn how to declare an Oracle in Noir with our comprehensive guide. +keywords: + - Noir + - Oracles + - RPC Calls + - Unconstrained Functions + - Programming + - Blockchain +sidebar_position: 6 +--- + +Noir has support for Oracles via RPC calls. This means Noir will make an RPC call and use the return value for proof generation. + +Since Oracles are not resolved by Noir, they are [`unconstrained` functions](./unconstrained.md) + +You can declare an Oracle through the `#[oracle()]` flag. Example: + +```rust +#[oracle(get_number_sequence)] +unconstrained fn get_number_sequence(_size: Field) -> [Field] {} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/shadowing.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/shadowing.md new file mode 100644 index 00000000000..5ce6130d201 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/shadowing.md @@ -0,0 +1,44 @@ +--- +title: Shadowing +sidebar_position: 12 +--- + +Noir allows for inheriting variables' values and re-declaring them with the same name similar to Rust, known as shadowing. 
+ +For example, the following function is valid in Noir: + +```rust +fn main() { + let x = 5; + + { + let x = x * 2; + assert (x == 10); + } + + assert (x == 5); +} +``` + +In this example, a variable x is first defined with the value 5. + +The local scope that follows shadows the original x, i.e. creates a local mutable x based on the value of the original x. It is given a value of 2 times the original x. + +When we return to the main scope, x once again refers to just the original x, which stays at the value of 5. + +## Temporal mutability + +One way that shadowing is useful, in addition to ergonomics across scopes, is for temporarily mutating variables. + +```rust +fn main() { + let age = 30; + // age = age + 5; // Would error as `age` is immutable by default. + + let mut age = age + 5; // Temporarily mutates `age` with a new value. + + let age = age; // Locks `age`'s mutability again. + + assert (age == 35); +} +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/traits.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/traits.md new file mode 100644 index 00000000000..ef1445a5907 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/traits.md @@ -0,0 +1,389 @@ +--- +title: Traits +description: + Traits in Noir can be used to abstract out a common interface for functions across + several data types. +keywords: [noir programming language, traits, interfaces, generic, protocol] +sidebar_position: 14 +--- + +## Overview + +Traits in Noir are a useful abstraction similar to interfaces or protocols in other languages. Each trait defines +the interface of several methods contained within the trait. Types can then implement this trait by providing +implementations for these methods. For example in the program: + +```rust +struct Rectangle { + width: Field, + height: Field, +} + +impl Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +fn log_area(r: Rectangle) { + println(r.area()); +} +``` + +We have a function `log_area` to log the area of a `Rectangle`. Now how should we change the program if we want this +function to work on `Triangle`s as well?: + +```rust +struct Triangle { + width: Field, + height: Field, +} + +impl Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Making `log_area` generic over all types `T` would be invalid since not all types have an `area` method. Instead, we can +introduce a new `Area` trait and make `log_area` generic over all types `T` that implement `Area`: + +```rust +trait Area { + fn area(self) -> Field; +} + +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +We also need to explicitly implement `Area` for `Rectangle` and `Triangle`. We can do that by changing their existing +impls slightly. Note that the parameter types and return type of each of our `area` methods must match those defined +by the `Area` trait. + +```rust +impl Area for Rectangle { + fn area(self) -> Field { + self.width * self.height + } +} + +impl Area for Triangle { + fn area(self) -> Field { + self.width * self.height / 2 + } +} +``` + +Now we have a working program that is generic over any type of Shape that is used! Others can even use this program +as a library with their own types - such as `Circle` - as long as they also implement `Area` for these types. + +## Where Clauses + +As seen in `log_area` above, when we want to create a function or method that is generic over any type that implements +a trait, we can add a where clause to the generic function. 
+ +```rust +fn log_area(shape: T) where T: Area { + println(shape.area()); +} +``` + +It is also possible to apply multiple trait constraints on the same variable at once by combining traits with the `+` +operator. Similarly, we can have multiple trait constraints by separating each with a comma: + +```rust +fn foo(elements: [T], thing: U) where + T: Default + Add + Eq, + U: Bar, +{ + let mut sum = T::default(); + + for element in elements { + sum += element; + } + + if sum == T::default() { + thing.bar(); + } +} +``` + +## Generic Implementations + +You can add generics to a trait implementation by adding the generic list after the `impl` keyword: + +```rust +trait Second { + fn second(self) -> Field; +} + +impl Second for (T, Field) { + fn second(self) -> Field { + self.1 + } +} +``` + +You can also implement a trait for every type this way: + +```rust +trait Debug { + fn debug(self); +} + +impl Debug for T { + fn debug(self) { + println(self); + } +} + +fn main() { + 1.debug(); +} +``` + +### Generic Trait Implementations With Where Clauses + +Where clauses can also be placed on trait implementations themselves to restrict generics in a similar way. +For example, while `impl Foo for T` implements the trait `Foo` for every type, `impl Foo for T where T: Bar` +will implement `Foo` only for types that also implement `Bar`. This is often used for implementing generic types. +For example, here is the implementation for array equality: + +```rust +impl Eq for [T; N] where T: Eq { + // Test if two arrays have the same elements. + // Because both arrays must have length N, we know their lengths already match. + fn eq(self, other: Self) -> bool { + let mut result = true; + + for i in 0 .. self.len() { + // The T: Eq constraint is needed to call == on the array elements here + result &= self[i] == other[i]; + } + + result + } +} +``` + +## Generic Traits + +Traits themselves can also be generic by placing the generic arguments after the trait name. These generics are in +scope of every item within the trait. + +```rust +trait Into { + // Convert `self` to type `T` + fn into(self) -> T; +} +``` + +When implementing generic traits the generic arguments of the trait must be specified. This is also true anytime +when referencing a generic trait (e.g. in a `where` clause). + +```rust +struct MyStruct { + array: [Field; 2], +} + +impl Into<[Field; 2]> for MyStruct { + fn into(self) -> [Field; 2] { + self.array + } +} + +fn as_array(x: T) -> [Field; 2] + where T: Into<[Field; 2]> +{ + x.into() +} + +fn main() { + let array = [1, 2]; + let my_struct = MyStruct { array }; + + assert_eq(as_array(my_struct), array); +} +``` + +## Trait Methods With No `self` + +A trait can contain any number of methods, each of which have access to the `Self` type which represents each type +that eventually implements the trait. Similarly, the `self` variable is available as well but is not required to be used. +For example, we can define a trait to create a default value for a type. This trait will need to return the `Self` type +but doesn't need to take any parameters: + +```rust +trait Default { + fn default() -> Self; +} +``` + +Implementing this trait can be done similarly to any other trait: + +```rust +impl Default for Field { + fn default() -> Field { + 0 + } +} + +struct MyType {} + +impl Default for MyType { + fn default() -> Field { + MyType {} + } +} +``` + +However, since there is no `self` parameter, we cannot call it via the method call syntax `object.method()`. 
+Instead, we'll need to refer to the function directly. This can be done either by referring to the +specific impl `MyType::default()` or referring to the trait itself `Default::default()`. In the later +case, type inference determines the impl that is selected. + +```rust +let my_struct = MyStruct::default(); + +let x: Field = Default::default(); +let result = x + Default::default(); +``` + +:::warning + +```rust +let _ = Default::default(); +``` + +If type inference cannot select which impl to use because of an ambiguous `Self` type, an impl will be +arbitrarily selected. This occurs most often when the result of a trait function call with no parameters +is unused. To avoid this, when calling a trait function with no `self` or `Self` parameters or return type, +always refer to it via the implementation type's namespace - e.g. `MyType::default()`. +This is set to change to an error in future Noir versions. + +::: + +## Default Method Implementations + +A trait can also have default implementations of its methods by giving a body to the desired functions. +Note that this body must be valid for all types that may implement the trait. As a result, the only +valid operations on `self` will be operations valid for any type or other operations on the trait itself. + +```rust +trait Numeric { + fn add(self, other: Self) -> Self; + + // Default implementation of double is (self + self) + fn double(self) -> Self { + self.add(self) + } +} +``` + +When implementing a trait with default functions, a type may choose to implement only the required functions: + +```rust +impl Numeric for Field { + fn add(self, other: Field) -> Field { + self + other + } +} +``` + +Or it may implement the optional methods as well: + +```rust +impl Numeric for u32 { + fn add(self, other: u32) -> u32 { + self + other + } + + fn double(self) -> u32 { + self * 2 + } +} +``` + +## Impl Specialization + +When implementing traits for a generic type it is possible to implement the trait for only a certain combination +of generics. This can be either as an optimization or because those specific generics are required to implement the trait. + +```rust +trait Sub { + fn sub(self, other: Self) -> Self; +} + +struct NonZero { + value: T, +} + +impl Sub for NonZero { + fn sub(self, other: Self) -> Self { + let value = self.value - other.value; + assert(value != 0); + NonZero { value } + } +} +``` + +## Overlapping Implementations + +Overlapping implementations are disallowed by Noir to ensure Noir's decision on which impl to select is never ambiguous. +This means if a trait `Foo` is already implemented +by a type `Bar` for all `T`, then we cannot also have a separate impl for `Bar` (or any other +type argument). Similarly, if there is an impl for all `T` such as `impl Debug for T`, we cannot create +any more impls to `Debug` for other types since it would be ambiguous which impl to choose for any given +method call. + +```rust +trait Trait {} + +// Previous impl defined here +impl Trait for (A, B) {} + +// error: Impl for type `(Field, Field)` overlaps with existing impl +impl Trait for (Field, Field) {} +``` + +## Trait Coherence + +Another restriction on trait implementations is coherence. This restriction ensures other crates cannot create +impls that may overlap with other impls, even if several unrelated crates are used as dependencies in the same +program. + +The coherence restriction is: to implement a trait, either the trait itself or the object type must be declared +in the crate the impl is in. 
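To make the rule concrete, here is a minimal sketch. It assumes the `Area` trait defined earlier on this page and a hypothetical type `Foo` exposed by an external `some_library` dependency (the same names used in the next section):

```rust
// Allowed: the trait `Area` is declared in the current crate,
// even though the type `Foo` comes from a dependency.
impl Area for dep::some_library::Foo {
    fn area(self) -> Field {
        1
    }
}

// Rejected by coherence: neither `Default` (from the standard library)
// nor `Foo` is declared in the current crate.
// impl Default for dep::some_library::Foo { ... }
```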
+ +In practice this often comes up when using types provided by libraries. If a library provides a type `Foo` that does +not implement a trait in the standard library such as `Default`, you may not `impl Default for Foo` in your own crate. +While restrictive, this prevents later issues or silent changes in the program if the `Foo` library later added its +own impl for `Default`. If you are a user of the `Foo` library in this scenario and need a trait not implemented by the +library your choices are to either submit a patch to the library or use the newtype pattern. + +### The Newtype Pattern + +The newtype pattern gets around the coherence restriction by creating a new wrapper type around the library type +that we cannot create `impl`s for. Since the new wrapper type is defined in our current crate, we can create +impls for any trait we need on it. + +```rust +struct Wrapper { + foo: dep::some_library::Foo, +} + +impl Default for Wrapper { + fn default() -> Wrapper { + Wrapper { + foo: dep::some_library::Foo::new(), + } + } +} +``` + +Since we have an impl for our own type, the behavior of this code will not change even if `some_library` is updated +to provide its own `impl Default for Foo`. The downside of this pattern is that it requires extra wrapping and +unwrapping of values when converting to and from the `Wrapper` and `Foo` types. diff --git a/docs/versioned_docs/version-v0.27.0/noir/concepts/unconstrained.md b/docs/versioned_docs/version-v0.27.0/noir/concepts/unconstrained.md new file mode 100644 index 00000000000..b8e71fe65f0 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/concepts/unconstrained.md @@ -0,0 +1,99 @@ +--- +title: Unconstrained Functions +description: "Learn about what unconstrained functions in Noir are, how to use them and when you'd want to." + +keywords: [Noir programming language, unconstrained, open] +sidebar_position: 5 +--- + +Unconstrained functions are functions which do not constrain any of the included computation and allow for non-deterministic computation. + +## Why? + +Zero-knowledge (ZK) domain-specific languages (DSL) enable developers to generate ZK proofs from their programs by compiling code down to the constraints of an NP complete language (such as R1CS or PLONKish languages). However, the hard bounds of a constraint system can be very limiting to the functionality of a ZK DSL. + +Enabling a circuit language to perform unconstrained execution is a powerful tool. Said another way, unconstrained execution lets developers generate witnesses from code that does not generate any constraints. Being able to execute logic outside of a circuit is critical for both circuit performance and constructing proofs on information that is external to a circuit. + +Fetching information from somewhere external to a circuit can also be used to enable developers to improve circuit efficiency. + +A ZK DSL does not just prove computation, but proves that some computation was handled correctly. Thus, it is necessary that when we switch from performing some operation directly inside of a circuit to inside of an unconstrained environment that the appropriate constraints are still laid down elsewhere in the circuit. + +## Example + +An in depth example might help drive the point home. This example comes from the excellent [post](https://discord.com/channels/1113924620781883405/1124022445054111926/1128747641853972590) by Tom in the Noir Discord. + +Let's look at how we can optimize a function to turn a `u72` into an array of `u8`s. 
+ +```rust +fn main(num: u72) -> pub [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8)) as u72 & 0xff) as u8; + } + + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 91 +Backend circuit size: 3619 +``` + +A lot of the operations in this function are optimized away by the compiler (all the bit-shifts turn into divisions by constants). However we can save a bunch of gates by casting to u8 a bit earlier. This automatically truncates the bit-shifted value to fit in a u8 which allows us to remove the AND against 0xff. This saves us ~480 gates in total. + +```rust +fn main(num: u72) -> pub [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8)) as u8; + } + + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 75 +Backend circuit size: 3143 +``` + +Those are some nice savings already but we can do better. This code is all constrained so we're proving every step of calculating out using num, but we don't actually care about how we calculate this, just that it's correct. This is where brillig comes in. + +It turns out that truncating a u72 into a u8 is hard to do inside a snark, each time we do as u8 we lay down 4 ACIR opcodes which get converted into multiple gates. It's actually much easier to calculate num from out than the other way around. All we need to do is multiply each element of out by a constant and add them all together, both relatively easy operations inside a snark. + +We can then run u72_to_u8 as unconstrained brillig code in order to calculate out, then use that result in our constrained function and assert that if we were to do the reverse calculation we'd get back num. This looks a little like the below: + +```rust +fn main(num: u72) -> pub [u8; 8] { + let out = u72_to_u8(num); + + let mut reconstructed_num: u72 = 0; + for i in 0..8 { + reconstructed_num += (out[i] as u72 << (56 - (8 * i))); + } + assert(num == reconstructed_num); + out +} + +unconstrained fn u72_to_u8(num: u72) -> [u8; 8] { + let mut out: [u8; 8] = [0; 8]; + for i in 0..8 { + out[i] = (num >> (56 - (i * 8))) as u8; + } + out +} +``` + +``` +Total ACIR opcodes generated for language PLONKCSat { width: 3 }: 78 +Backend circuit size: 2902 +``` + +This ends up taking off another ~250 gates from our circuit! We've ended up with more ACIR opcodes than before but they're easier for the backend to prove (resulting in fewer gates). + +Generally we want to use brillig whenever there's something that's easy to verify but hard to compute within the circuit. For example, if you wanted to calculate a square root of a number it'll be a much better idea to calculate this in brillig and then assert that if you square the result you get back your number. + +## Break and Continue + +In addition to loops over runtime bounds, `break` and `continue` are also available in unconstrained code. 
See [break and continue](../concepts/control_flow/#break-and-continue) diff --git a/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/_category_.json b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/_category_.json new file mode 100644 index 00000000000..1debcfe7675 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Modules, Packages and Crates", + "position": 2, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/crates_and_packages.md b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/crates_and_packages.md new file mode 100644 index 00000000000..95ee9f52ab2 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/crates_and_packages.md @@ -0,0 +1,43 @@ +--- +title: Crates and Packages +description: Learn how to use Crates and Packages in your Noir project +keywords: [Nargo, dependencies, package management, crates, package] +sidebar_position: 0 +--- + +## Crates + +A crate is the smallest amount of code that the Noir compiler considers at a time. +Crates can contain modules, and the modules may be defined in other files that get compiled with the crate, as we’ll see in the coming sections. + +### Crate Types + +A Noir crate can come in several forms: binaries, libraries or contracts. + +#### Binaries + +_Binary crates_ are programs which you can compile to an ACIR circuit which you can then create proofs against. Each must have a function called `main` that defines the ACIR circuit which is to be proved. + +#### Libraries + +_Library crates_ don't have a `main` function and they don't compile down to ACIR. Instead they define functionality intended to be shared with multiple projects, and eventually included in a binary crate. + +#### Contracts + +Contract crates are similar to binary crates in that they compile to ACIR which you can create proofs against. They are different in that they do not have a single `main` function, but are a collection of functions to be deployed to the [Aztec network](https://aztec.network). You can learn more about the technical details of Aztec in the [monorepo](https://github.com/AztecProtocol/aztec-packages) or contract [examples](https://github.com/AztecProtocol/aztec-packages/tree/master/noir-projects/noir-contracts/contracts). + +### Crate Root + +Every crate has a root, which is the source file that the compiler starts, this is also known as the root module. The Noir compiler does not enforce any conditions on the name of the file which is the crate root, however if you are compiling via Nargo the crate root must be called `lib.nr` or `main.nr` for library or binary crates respectively. + +## Packages + +A Nargo _package_ is a collection of one of more crates that provides a set of functionality. A package must include a Nargo.toml file. + +A package _must_ contain either a library or a binary crate, but not both. + +### Differences from Cargo Packages + +One notable difference between Rust's Cargo and Noir's Nargo is that while Cargo allows a package to contain an unlimited number of binary crates and a single library crate, Nargo currently only allows a package to contain a single crate. + +In future this restriction may be lifted to allow a Nargo package to contain both a binary and library crate or multiple binary crates. 
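To make the package layout concrete, a minimal `Nargo.toml` for a package containing a single binary crate might look like the following sketch (the field values are illustrative, not prescriptive):

```toml
[package]
name = "my_program"
type = "bin"
authors = [""]
compiler_version = ">=0.27.0"

[dependencies]
```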
diff --git a/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/dependencies.md b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/dependencies.md new file mode 100644 index 00000000000..04c1703d929 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/dependencies.md @@ -0,0 +1,124 @@ +--- +title: Dependencies +description: + Learn how to specify and manage dependencies in Nargo, allowing you to upload packages to GitHub + and use them easily in your project. +keywords: [Nargo, dependencies, GitHub, package management, versioning] +sidebar_position: 1 +--- + +Nargo allows you to upload packages to GitHub and use them as dependencies. + +## Specifying a dependency + +Specifying a dependency requires a tag to a specific commit and the git url to the url containing +the package. + +Currently, there are no requirements on the tag contents. If requirements are added, it would follow +semver 2.0 guidelines. + +> Note: Without a `tag` , there would be no versioning and dependencies would change each time you +> compile your project. + +For example, to add the [ecrecover-noir library](https://github.com/colinnielsen/ecrecover-noir) to your project, add it to `Nargo.toml`: + +```toml +# Nargo.toml + +[dependencies] +ecrecover = {tag = "v0.8.0", git = "https://github.com/colinnielsen/ecrecover-noir"} +``` + +If the module is in a subdirectory, you can define a subdirectory in your git repository, for example: + +```toml +# Nargo.toml + +[dependencies] +easy_private_token_contract = {tag ="v0.1.0-alpha62", git = "https://github.com/AztecProtocol/aztec-packages", directory = "noir-contracts/contracts/easy_private_token_contract"} +``` + +## Specifying a local dependency + +You can also specify dependencies that are local to your machine. + +For example, this file structure has a library and binary crate + +```tree +├── binary_crate +│   ├── Nargo.toml +│   └── src +│   └── main.nr +└── lib_a + ├── Nargo.toml + └── src + └── lib.nr +``` + +Inside of the binary crate, you can specify: + +```toml +# Nargo.toml + +[dependencies] +lib_a = { path = "../lib_a" } +``` + +## Importing dependencies + +You can import a dependency to a Noir file using the following syntax. For example, to import the +ecrecover-noir library and local lib_a referenced above: + +```rust +use dep::ecrecover; +use dep::lib_a; +``` + +You can also import only the specific parts of dependency that you want to use, like so: + +```rust +use dep::std::hash::sha256; +use dep::std::scalar_mul::fixed_base_embedded_curve; +``` + +Lastly, as demonstrated in the +[elliptic curve example](../standard_library/cryptographic_primitives/ec_primitives#examples), you +can import multiple items in the same line by enclosing them in curly braces: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; +``` + +We don't have a way to consume libraries from inside a [workspace](./workspaces) as external dependencies right now. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +## Dependencies of Dependencies + +Note that when you import a dependency, you also get access to all of the dependencies of that package. + +For example, the [phy_vector](https://github.com/resurgencelabs/phy_vector) library imports an [fraction](https://github.com/resurgencelabs/fraction) library. 
If you're importing the phy_vector library, then you can access the functions in fractions library like so: + +```rust +use dep::phy_vector; + +fn main(x : Field, y : pub Field) { + //... + let f = phy_vector::fraction::toFraction(true, 2, 1); + //... +} +``` + +## Available Libraries + +Noir does not currently have an official package manager. You can find a list of available Noir libraries in the [awesome-noir repo here](https://github.com/noir-lang/awesome-noir#libraries). + +Some libraries that are available today include: + +- [Standard Library](https://github.com/noir-lang/noir/tree/master/noir_stdlib) - the Noir Standard Library +- [Ethereum Storage Proof Verification](https://github.com/aragonzkresearch/noir-trie-proofs) - a library that contains the primitives necessary for RLP decoding (in the form of look-up table construction) and Ethereum state and storage proof verification (or verification of any trie proof involving 32-byte long keys) +- [BigInt](https://github.com/shuklaayush/noir-bigint) - a library that provides a custom BigUint56 data type, allowing for computations on large unsigned integers +- [ECrecover](https://github.com/colinnielsen/ecrecover-noir/tree/main) - a library to verify an ECDSA signature and return the source Ethereum address +- [Sparse Merkle Tree Verifier](https://github.com/vocdoni/smtverifier-noir/tree/main) - a library for verification of sparse Merkle trees +- [Signed Int](https://github.com/resurgencelabs/signed_int) - a library for accessing a custom Signed Integer data type, allowing access to negative numbers on Noir +- [Fraction](https://github.com/resurgencelabs/fraction) - a library for accessing fractional number data type in Noir, allowing results that aren't whole numbers diff --git a/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/modules.md b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/modules.md new file mode 100644 index 00000000000..ae822a1cff4 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/modules.md @@ -0,0 +1,105 @@ +--- +title: Modules +description: + Learn how to organize your files using modules in Noir, following the same convention as Rust's + module system. Examples included. +keywords: [Noir, Rust, modules, organizing files, sub-modules] +sidebar_position: 2 +--- + +Noir's module system follows the same convention as the _newer_ version of Rust's module system. + +## Purpose of Modules + +Modules are used to organize files. Without modules all of your code would need to live in a single +file. In Noir, the compiler does not automatically scan all of your files to detect modules. This +must be done explicitly by the developer. + +## Examples + +### Importing a module in the crate root + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::hello_world(); +} +``` + +Filename : `src/foo.nr` + +```rust +fn from_foo() {} +``` + +In the above snippet, the crate root is the `src/main.nr` file. The compiler sees the module +declaration `mod foo` which prompts it to look for a foo.nr file. + +Visually this module hierarchy looks like the following : + +``` +crate + ├── main + │ + └── foo + └── from_foo + +``` + +### Importing a module throughout the tree + +All modules are accessible from the `crate::` namespace. + +``` +crate + ├── bar + ├── foo + └── main + +``` + +In the above snippet, if `bar` would like to use functions in `foo`, it can do so by `use crate::foo::function_name`. 
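As a sketch, assuming `foo` defines a `from_foo` function as in the earlier example, `bar` could bring that function into scope like so:

```rust
// src/bar.nr (sketch)
use crate::foo::from_foo;

fn from_bar() {
    // `from_foo` refers to the function declared in the `foo` module
    from_foo();
}
```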
+ +### Sub-modules + +Filename : `src/main.nr` + +```rust +mod foo; + +fn main() { + foo::from_foo(); +} +``` + +Filename : `src/foo.nr` + +```rust +mod bar; +fn from_foo() {} +``` + +Filename : `src/foo/bar.nr` + +```rust +fn from_bar() {} +``` + +In the above snippet, we have added an extra module to the module tree; `bar`. `bar` is a submodule +of `foo` hence we declare bar in `foo.nr` with `mod bar`. Since `foo` is not the crate root, the +compiler looks for the file associated with the `bar` module in `src/foo/bar.nr` + +Visually the module hierarchy looks as follows: + +``` +crate + ├── main + │ + └── foo + ├── from_foo + └── bar + └── from_bar +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/workspaces.md b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/workspaces.md new file mode 100644 index 00000000000..513497f12bf --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/modules_packages_crates/workspaces.md @@ -0,0 +1,42 @@ +--- +title: Workspaces +sidebar_position: 3 +--- + +Workspaces are a feature of nargo that allow you to manage multiple related Noir packages in a single repository. A workspace is essentially a group of related projects that share common build output directories and configurations. + +Each Noir project (with it's own Nargo.toml file) can be thought of as a package. Each package is expected to contain exactly one "named circuit", being the "name" defined in Nargo.toml with the program logic defined in `./src/main.nr`. + +For a project with the following structure: + +```tree +├── crates +│ ├── a +│ │ ├── Nargo.toml +│ │ └── Prover.toml +│ │ └── src +│ │ └── main.nr +│ └── b +│ ├── Nargo.toml +│ └── Prover.toml +│ └── src +│ └── main.nr +│ +└── Nargo.toml +``` + +You can define a workspace in Nargo.toml like so: + +```toml +[workspace] +members = ["crates/a", "crates/b"] +default-member = "crates/a" +``` + +`members` indicates which packages are included in the workspace. As such, all member packages of a workspace will be processed when the `--workspace` flag is used with various commands or if a `default-member` is not specified. + +`default-member` indicates which package various commands process by default. + +Libraries can be defined in a workspace. Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. + +Inside a workspace, these are consumed as `{ path = "../to_lib" }` dependencies in Nargo.toml. diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/_category_.json b/docs/versioned_docs/version-v0.27.0/noir/standard_library/_category_.json new file mode 100644 index 00000000000..af04c0933fd --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "Standard Library", + "position": 1, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/bigint.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/bigint.md new file mode 100644 index 00000000000..da6a7cdfd81 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/bigint.md @@ -0,0 +1,119 @@ +--- +title: Big Integers +description: How to use big integers from Noir standard library +keywords: + [ + Big Integer, + Noir programming language, + Noir libraries, + ] +--- + +The BigInt module in the standard library exposes some class of integers which do not fit (well) into a Noir native field. 
It implements modulo arithmetic, modulo a 'big' prime number. + +:::note + +The module can currently be considered as `Field`s with fixed modulo sizes used by a set of elliptic curves, in addition to just the native curve. [More work](https://github.com/noir-lang/noir/issues/510) is needed to achieve arbitrarily sized big integers. + +::: + +Currently 6 classes of integers (i.e 'big' prime numbers) are available in the module, namely: + +- BN254 Fq: Bn254Fq +- BN254 Fr: Bn254Fr +- Secp256k1 Fq: Secpk1Fq +- Secp256k1 Fr: Secpk1Fr +- Secp256r1 Fr: Secpr1Fr +- Secp256r1 Fq: Secpr1Fq + +Where XXX Fq and XXX Fr denote respectively the order of the base and scalar field of the (usual) elliptic curve XXX. +For instance the big integer 'Secpk1Fq' in the standard library refers to integers modulo $2^{256}-2^{32}-977$. + +Feel free to explore the source code for the other primes: + +```rust title="big_int_definition" showLineNumbers +struct BigInt { + pointer: u32, + modulus: u32, +} +``` +> Source code: noir_stdlib/src/bigint.nr#L16-L21 + + +## Example usage + +A common use-case is when constructing a big integer from its bytes representation, and performing arithmetic operations on it: + +```rust title="big_int_example" showLineNumbers +fn big_int_example(x: u8, y: u8) { + let a = Secpk1Fq::from_le_bytes(&[x, y, 0, 45, 2]); + let b = Secpk1Fq::from_le_bytes(&[y, x, 9]); + let c = (a + b) * b / a; + let d = c.to_le_bytes(); + println(d[0]); +} +``` +> Source code: test_programs/execution_success/bigint/src/main.nr#L20-L28 + + +## Methods + +The available operations for each big integer are: + +### from_le_bytes + +Construct a big integer from its little-endian bytes representation. Example: + +```rust + let a = Secpk1Fq::from_le_bytes(&[x, y, 0, 45, 2]); + ``` + +Sure, here's the formatted version of the remaining methods: + +### to_le_bytes + +Return the little-endian bytes representation of a big integer. Example: + +```rust +let bytes = a.to_le_bytes(); +``` + +### add + +Add two big integers. Example: + +```rust +let sum = a + b; +``` + +### sub + +Subtract two big integers. Example: + +```rust +let difference = a - b; +``` + +### mul + +Multiply two big integers. Example: + +```rust +let product = a * b; +``` + +### div + +Divide two big integers. Note that division is field division and not euclidean division. Example: + +```rust +let quotient = a / b; +``` + +### eq + +Compare two big integers. Example: + +```rust +let are_equal = a == b; +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/black_box_fns.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/black_box_fns.md new file mode 100644 index 00000000000..be8c65679c3 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/black_box_fns.md @@ -0,0 +1,31 @@ +--- +title: Black Box Functions +description: Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. +keywords: [noir, black box functions] +--- + +Black box functions are functions in Noir that rely on backends implementing support for specialized constraints. This makes certain zk-snark unfriendly computations cheaper than if they were implemented in Noir. + +The ACVM spec defines a set of blackbox functions which backends will be expected to implement. This allows backends to use optimized implementations of these constraints if they have them, however they may also fallback to less efficient naive implementations if not. 
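For example, the sketch below (which assumes the standard library's `std::hash::sha256` function) shows that, on the Noir side, a black box function is called like any other function; it is the backend that decides how the underlying SHA256 constraint is actually implemented:

```rust
fn main(input: [u8; 32]) -> pub [u8; 32] {
    // This call is lowered to the SHA256 black box function in ACIR.
    std::hash::sha256(input)
}
```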
+ +## Function list + +Here is a list of the current black box functions: + +- [SHA256](./cryptographic_primitives/hashes.mdx#sha256) +- [Schnorr signature verification](./cryptographic_primitives/schnorr.mdx) +- [Blake2s](./cryptographic_primitives/hashes.mdx#blake2s) +- [Blake3](./cryptographic_primitives/hashes.mdx#blake3) +- [Pedersen Hash](./cryptographic_primitives/hashes.mdx#pedersen_hash) +- [Pedersen Commitment](./cryptographic_primitives/hashes.mdx#pedersen_commitment) +- [ECDSA signature verification](./cryptographic_primitives/ecdsa_sig_verification.mdx) +- [Fixed base scalar multiplication](./cryptographic_primitives/scalar.mdx) +- AND +- XOR +- RANGE +- [Keccak256](./cryptographic_primitives/hashes.mdx#keccak256) +- [Recursive proof verification](./recursion) + +Most black box functions are included as part of the Noir standard library, however `AND`, `XOR` and `RANGE` are used as part of the Noir language syntax. For instance, using the bitwise operator `&` will invoke the `AND` black box function. + +You can view the black box functions defined in the ACVM code [here](https://github.com/noir-lang/noir/blob/master/acvm-repo/acir/src/circuit/black_box_functions.rs). diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/bn254.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/bn254.md new file mode 100644 index 00000000000..3294f005dbb --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/bn254.md @@ -0,0 +1,46 @@ +--- +title: Bn254 Field Library +--- + +Noir provides a module in standard library with some optimized functions for bn254 Fr in `std::field::bn254`. + +## decompose + +```rust +fn decompose(x: Field) -> (Field, Field) {} +``` + +Decomposes a single field into two fields, low and high. The low field contains the lower 16 bytes of the input field and the high field contains the upper 16 bytes of the input field. Both field results are range checked to 128 bits. + + +## assert_gt + +```rust +fn assert_gt(a: Field, b: Field) {} +``` + +Asserts that a > b. This will generate less constraints than using `assert(gt(a, b))`. + +## assert_lt + +```rust +fn assert_lt(a: Field, b: Field) {} +``` + +Asserts that a < b. This will generate less constraints than using `assert(lt(a, b))`. + +## gt + +```rust +fn gt(a: Field, b: Field) -> bool {} +``` + +Returns true if a > b. + +## lt + +```rust +fn lt(a: Field, b: Field) -> bool {} +``` + +Returns true if a < b. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/boundedvec.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/boundedvec.md new file mode 100644 index 00000000000..ce4529f6e57 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/boundedvec.md @@ -0,0 +1,326 @@ +--- +title: Bounded Vectors +keywords: [noir, vector, bounded vector, slice] +sidebar_position: 1 +--- + +A `BoundedVec` is a growable storage similar to a `Vec` except that it +is bounded with a maximum possible length. Unlike `Vec`, `BoundedVec` is not implemented +via slices and thus is not subject to the same restrictions slices are (notably, nested +slices - and thus nested vectors as well - are disallowed). + +Since a BoundedVec is backed by a normal array under the hood, growing the BoundedVec by +pushing an additional element is also more efficient - the length only needs to be increased +by one. 
+ +For these reasons `BoundedVec` should generally be preferred over `Vec` when there +is a reasonable maximum bound that can be placed on the vector. + +Example: + +```rust +let mut vector: BoundedVec = BoundedVec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +assert(vector.max_len() == 10); +``` + +## Methods + +### new + +```rust +pub fn new() -> Self +``` + +Creates a new, empty vector of length zero. + +Since this container is backed by an array internally, it still needs an initial value +to give each element. To resolve this, each element is zeroed internally. This value +is guaranteed to be inaccessible unless `get_unchecked` is used. + +Example: + +```rust +let empty_vector: BoundedVec = BoundedVec::new(); +assert(empty_vector.len() == 0); +``` + +Note that whenever calling `new` the maximum length of the vector should always be specified +via a type signature: + +```rust title="new_example" showLineNumbers +fn foo() -> BoundedVec { + // Ok! MaxLen is specified with a type annotation + let v1: BoundedVec = BoundedVec::new(); + let v2 = BoundedVec::new(); + + // Ok! MaxLen is known from the type of foo's return value + v2 +} + +fn bad() { + let mut v3 = BoundedVec::new(); + + // Not Ok! We don't know if v3's MaxLen is at least 1, and the compiler often infers 0 by default. + v3.push(5); +} +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L11-L27 + + +This defaulting of `MaxLen` (and numeric generics in general) to zero may change in future noir versions +but for now make sure to use type annotations when using bounded vectors. Otherwise, you will receive a constraint failure at runtime when the vec is pushed to. + +### get + +```rust +pub fn get(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero. + +If the given index is equal to or greater than the length of the vector, this +will issue a constraint failure. + +Example: + +```rust +fn foo(v: BoundedVec) { + let first = v.get(0); + let last = v.get(v.len() - 1); + assert(first != last); +} +``` + +### get_unchecked + +```rust +pub fn get_unchecked(mut self: Self, index: u64) -> T { +``` + +Retrieves an element from the vector at the given index, starting from zero, without +performing a bounds check. + +Since this function does not perform a bounds check on length before accessing the element, +it is unsafe! Use at your own risk! + +Example: + +```rust title="get_unchecked_example" showLineNumbers +fn sum_of_first_three(v: BoundedVec) -> u32 { + // Always ensure the length is larger than the largest + // index passed to get_unchecked + assert(v.len() > 2); + let first = v.get_unchecked(0); + let second = v.get_unchecked(1); + let third = v.get_unchecked(2); + first + second + third +} +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L54-L64 + + + +### push + +```rust +pub fn push(&mut self, elem: T) { +``` + +Pushes an element to the end of the vector. This increases the length +of the vector by one. + +Panics if the new length of the vector will be greater than the max length. + +Example: + +```rust title="bounded-vec-push-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + v.push(1); + v.push(2); + + // Panics with failed assertion "push out of bounds" + v.push(3); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L68-L76 + + +### pop + +```rust +pub fn pop(&mut self) -> T +``` + +Pops the element at the end of the vector. 
This will decrease the length +of the vector by one. + +Panics if the vector is empty. + +Example: + +```rust title="bounded-vec-pop-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + v.push(1); + v.push(2); + + let two = v.pop(); + let one = v.pop(); + + assert(two == 2); + assert(one == 1); + // error: cannot pop from an empty vector + // let _ = v.pop(); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L81-L93 + + +### len + +```rust +pub fn len(self) -> u64 { +``` + +Returns the current length of this vector + +Example: + +```rust title="bounded-vec-len-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + assert(v.len() == 0); + + v.push(100); + assert(v.len() == 1); + + v.push(200); + v.push(300); + v.push(400); + assert(v.len() == 4); + + let _ = v.pop(); + let _ = v.pop(); + assert(v.len() == 2); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L98-L113 + + +### max_len + +```rust +pub fn max_len(_self: BoundedVec) -> u64 { +``` + +Returns the maximum length of this vector. This is always +equal to the `MaxLen` parameter this vector was initialized with. + +Example: + +```rust title="bounded-vec-max-len-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + assert(v.max_len() == 5); + v.push(10); + assert(v.max_len() == 5); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L118-L124 + + +### storage + +```rust +pub fn storage(self) -> [T; MaxLen] { +``` + +Returns the internal array within this vector. +Since arrays in Noir are immutable, mutating the returned storage array will not mutate +the storage held internally by this vector. + +Note that uninitialized elements may be zeroed out! + +Example: + +```rust title="bounded-vec-storage-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + + assert(v.storage() == [0, 0, 0, 0, 0]); + + v.push(57); + assert(v.storage() == [57, 0, 0, 0, 0]); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L129-L136 + + +### extend_from_array + +```rust +pub fn extend_from_array(&mut self, array: [T; Len]) +``` + +Pushes each element from the given array to this vector. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. + +Example: + +```rust title="bounded-vec-extend-from-array-example" showLineNumbers +let mut vec: BoundedVec = BoundedVec::new(); + vec.extend_from_array([2, 4]); + + assert(vec.len == 2); + assert(vec.get(0) == 2); + assert(vec.get(1) == 4); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L141-L148 + + +### extend_from_bounded_vec + +```rust +pub fn extend_from_bounded_vec(&mut self, vec: BoundedVec) +``` + +Pushes each element from the other vector to this vector. The length of +the other vector is left unchanged. + +Panics if pushing each element would cause the length of this vector +to exceed the maximum length. 
+ +Example: + +```rust title="bounded-vec-extend-from-bounded-vec-example" showLineNumbers +let mut v1: BoundedVec = BoundedVec::new(); + let mut v2: BoundedVec = BoundedVec::new(); + + v2.extend_from_array([1, 2, 3]); + v1.extend_from_bounded_vec(v2); + + assert(v1.storage() == [1, 2, 3, 0, 0]); + assert(v2.storage() == [1, 2, 3, 0, 0, 0, 0]); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L153-L162 + + +### any + +```rust +pub fn any(self, predicate: fn[Env](T) -> bool) -> bool +``` + +Returns true if the given predicate returns true for any element +in this vector. + +Example: + +```rust title="bounded-vec-any-example" showLineNumbers +let mut v: BoundedVec = BoundedVec::new(); + v.extend_from_array([2, 4, 6]); + + let all_even = !v.any(|elem: u32| elem % 2 != 0); + assert(all_even); +``` +> Source code: test_programs/noir_test_success/bounded_vec/src/main.nr#L229-L235 + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/hashmap.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/hashmap.md new file mode 100644 index 00000000000..91604af765d --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/hashmap.md @@ -0,0 +1,569 @@ +--- +title: HashMap +keywords: [noir, map, hash, hashmap] +sidebar_position: 1 +--- + +`HashMap` is used to efficiently store and look up key-value pairs. + +`HashMap` is a bounded type which can store anywhere from zero to `MaxLen` total elements. +Note that due to hash collisions, the actual maximum number of elements stored by any particular +hashmap is likely lower than `MaxLen`. This is true even with cryptographic hash functions since +every hash value will be performed modulo `MaxLen`. + +When creating `HashMap`s, the `MaxLen` generic should always be specified if it is not already +known. Otherwise, the compiler may infer a different value for `MaxLen` (such as zero), which +will likely change the result of the program. This behavior is set to become an error in future +versions instead. + +Example: + +```rust +// Create a mapping from Fields to u32s with a maximum length of 12 +// using a pedersen hash +let mut map: HashMap> = HashMap::default(); + +map.insert(1, 2); +map.insert(3, 4); + +let two = map.get(1).unwrap(); +``` + +## Methods + +### default + +```rust title="default" showLineNumbers +impl Default for HashMap +where + B: BuildHasher + Default, + H: Hasher + Default +{ + fn default() -> Self { +``` +> Source code: noir_stdlib/src/collections/map.nr#L462-L469 + + +Creates a fresh, empty HashMap. + +When using this function, always make sure to specify the maximum size of the hash map. + +This is the same `default` from the `Default` implementation given further below. It is +repeated here for convenience since it is the recommended way to create a hashmap. 
+ +Example: + +```rust title="default_example" showLineNumbers +let hashmap: HashMap> = HashMap::default(); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L202-L205 + + +Because `HashMap` has so many generic arguments that are likely to be the same throughout +your program, it may be helpful to create a type alias: + +```rust title="type_alias" showLineNumbers +type MyMap = HashMap>; +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L196-L198 + + +### with_hasher + +```rust title="with_hasher" showLineNumbers +pub fn with_hasher(_build_hasher: B) -> Self + where + B: BuildHasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L82-L86 + + +Creates a hashmap with an existing `BuildHasher`. This can be used to ensure multiple +hashmaps are created with the same hasher instance. + +Example: + +```rust title="with_hasher_example" showLineNumbers +let my_hasher: BuildHasherDefault = Default::default(); + let hashmap: HashMap> = HashMap::with_hasher(my_hasher); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L207-L211 + + +### get + +```rust title="get" showLineNumbers +pub fn get( + self, + key: K + ) -> Option + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L278-L287 + + +Retrieves a value from the hashmap, returning `Option::none()` if it was not found. + +Example: + +```rust title="get_example" showLineNumbers +fn get_example(map: HashMap>) { + let x = map.get(12); + + if x.is_some() { + assert(x.unwrap() == 42); + } +} +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L299-L307 + + +### insert + +```rust title="insert" showLineNumbers +pub fn insert( + &mut self, + key: K, + value: V + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L313-L323 + + +Inserts a new key-value pair into the map. If the key was already in the map, its +previous value will be overridden with the newly provided one. + +Example: + +```rust title="insert_example" showLineNumbers +let mut map: HashMap> = HashMap::default(); + map.insert(12, 42); + assert(map.len() == 1); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L213-L217 + + +### remove + +```rust title="remove" showLineNumbers +pub fn remove( + &mut self, + key: K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L356-L365 + + +Removes the given key-value pair from the map. If the key was not already present +in the map, this does nothing. + +Example: + +```rust title="remove_example" showLineNumbers +map.remove(12); + assert(map.is_empty()); + + // If a key was not present in the map, remove does nothing + map.remove(12); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L221-L228 + + +### is_empty + +```rust title="is_empty" showLineNumbers +pub fn is_empty(self) -> bool { +``` +> Source code: noir_stdlib/src/collections/map.nr#L115-L117 + + +True if the length of the hash map is empty. 
+ +Example: + +```rust title="is_empty_example" showLineNumbers +assert(map.is_empty()); + + map.insert(1, 2); + assert(!map.is_empty()); + + map.remove(1); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L230-L238 + + +### len + +```rust title="len" showLineNumbers +pub fn len(self) -> u64 { +``` +> Source code: noir_stdlib/src/collections/map.nr#L264-L266 + + +Returns the current length of this hash map. + +Example: + +```rust title="len_example" showLineNumbers +// This is equivalent to checking map.is_empty() + assert(map.len() == 0); + + map.insert(1, 2); + map.insert(3, 4); + map.insert(5, 6); + assert(map.len() == 3); + + // 3 was already present as a key in the hash map, so the length is unchanged + map.insert(3, 7); + assert(map.len() == 3); + + map.remove(1); + assert(map.len() == 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L240-L255 + + +### capacity + +```rust title="capacity" showLineNumbers +pub fn capacity(_self: Self) -> u64 { +``` +> Source code: noir_stdlib/src/collections/map.nr#L271-L273 + + +Returns the maximum capacity of this hashmap. This is always equal to the capacity +specified in the hashmap's type. + +Unlike hashmaps in general purpose programming languages, hashmaps in Noir have a +static capacity that does not increase as the map grows larger. Thus, this capacity +is also the maximum possible element count that can be inserted into the hashmap. +Due to hash collisions (modulo the hashmap length), it is likely the actual maximum +element count will be lower than the full capacity. + +Example: + +```rust title="capacity_example" showLineNumbers +let empty_map: HashMap> = HashMap::default(); + assert(empty_map.len() == 0); + assert(empty_map.capacity() == 42); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L257-L261 + + +### clear + +```rust title="clear" showLineNumbers +pub fn clear(&mut self) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L93-L95 + + +Clears the hashmap, removing all key-value pairs from it. + +Example: + +```rust title="clear_example" showLineNumbers +assert(!map.is_empty()); + map.clear(); + assert(map.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L263-L267 + + +### contains_key + +```rust title="contains_key" showLineNumbers +pub fn contains_key( + self, + key: K + ) -> bool + where + K: Hash + Eq, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L101-L110 + + +True if the hashmap contains the given key. Unlike `get`, this will not also return +the value associated with the key. + +Example: + +```rust title="contains_key_example" showLineNumbers +if map.contains_key(7) { + let value = map.get(7); + assert(value.is_some()); + } else { + println("No value for key 7!"); + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L269-L276 + + +### entries + +```rust title="entries" showLineNumbers +pub fn entries(self) -> BoundedVec<(K, V), N> { +``` +> Source code: noir_stdlib/src/collections/map.nr#L123-L125 + + +Returns a vector of each key-value pair present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. 
+ +Example: + +```rust title="entries_example" showLineNumbers +let entries = map.entries(); + + // The length of a hashmap may not be compile-time known, so we + // need to loop over its capacity instead + for i in 0..map.capacity() { + if i < entries.len() { + let (key, value) = entries.get(i); + println(f"{key} -> {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L310-L321 + + +### keys + +```rust title="keys" showLineNumbers +pub fn keys(self) -> BoundedVec { +``` +> Source code: noir_stdlib/src/collections/map.nr#L144-L146 + + +Returns a vector of each key present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. + +Example: + +```rust title="keys_example" showLineNumbers +let keys = map.keys(); + + for i in 0..keys.max_len() { + if i < keys.len() { + let key = keys.get_unchecked(i); + let value = map.get(key).unwrap_unchecked(); + println(f"{key} -> {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L323-L333 + + +### values + +```rust title="values" showLineNumbers +pub fn values(self) -> BoundedVec { +``` +> Source code: noir_stdlib/src/collections/map.nr#L164-L166 + + +Returns a vector of each value present in the hashmap. + +The length of the returned vector is always equal to the length of the hashmap. + +Example: + +```rust title="values_example" showLineNumbers +let values = map.values(); + + for i in 0..values.max_len() { + if i < values.len() { + let value = values.get_unchecked(i); + println(f"Found value {value}"); + } + } +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L335-L344 + + +### iter_mut + +```rust title="iter_mut" showLineNumbers +pub fn iter_mut( + &mut self, + f: fn(K, V) -> (K, V) + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L183-L192 + + +Iterates through each key-value pair of the HashMap, setting each key-value pair to the +result returned from the given function. + +Note that since keys can be mutated, the HashMap needs to be rebuilt as it is iterated +through. If this is not desired, use `iter_values_mut` if only values need to be mutated, +or `entries` if neither keys nor values need to be mutated. + +The iteration order is left unspecified. As a result, if two keys are mutated to become +equal, which of the two values that will be present for the key in the resulting map is also unspecified. + +Example: + +```rust title="iter_mut_example" showLineNumbers +// Add 1 to each key in the map, and double the value associated with that key. + map.iter_mut(|k, v| (k + 1, v * 2)); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L348-L351 + + +### iter_keys_mut + +```rust title="iter_keys_mut" showLineNumbers +pub fn iter_keys_mut( + &mut self, + f: fn(K) -> K + ) + where + K: Eq + Hash, + B: BuildHasher, + H: Hasher { +``` +> Source code: noir_stdlib/src/collections/map.nr#L208-L217 + + +Iterates through the HashMap, mutating each key to the result returned from +the given function. + +Note that since keys can be mutated, the HashMap needs to be rebuilt as it is iterated +through. If only iteration is desired and the keys are not intended to be mutated, +prefer using `entries` instead. + +The iteration order is left unspecified. As a result, if two keys are mutated to become +equal, which of the two values that will be present for the key in the resulting map is also unspecified. 
+ +Example: + +```rust title="iter_keys_mut_example" showLineNumbers +// Double each key, leaving the value associated with that key untouched + map.iter_keys_mut(|k| k * 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L353-L356 + + +### iter_values_mut + +```rust title="iter_values_mut" showLineNumbers +pub fn iter_values_mut(&mut self, f: fn(V) -> V) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L233-L235 + + +Iterates through the HashMap, applying the given function to each value and mutating the +value to equal the result. This function is more efficient than `iter_mut` and `iter_keys_mut` +because the keys are untouched and the underlying hashmap thus does not need to be reordered. + +Example: + +```rust title="iter_values_mut_example" showLineNumbers +// Halve each value + map.iter_values_mut(|v| v / 2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L358-L361 + + +### retain + +```rust title="retain" showLineNumbers +pub fn retain(&mut self, f: fn(K, V) -> bool) { +``` +> Source code: noir_stdlib/src/collections/map.nr#L247-L249 + + +Retains only the key-value pairs for which the given function returns true. +Any key-value pairs for which the function returns false will be removed from the map. + +Example: + +```rust title="retain_example" showLineNumbers +map.retain(|k, v| (k != 0) & (v != 0)); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L281-L283 + + +## Trait Implementations + +### default + +```rust title="default" showLineNumbers +impl Default for HashMap +where + B: BuildHasher + Default, + H: Hasher + Default +{ + fn default() -> Self { +``` +> Source code: noir_stdlib/src/collections/map.nr#L462-L469 + + +Constructs an empty HashMap. + +Example: + +```rust title="default_example" showLineNumbers +let hashmap: HashMap> = HashMap::default(); + assert(hashmap.is_empty()); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L202-L205 + + +### eq + +```rust title="eq" showLineNumbers +impl Eq for HashMap +where + K: Eq + Hash, + V: Eq, + B: BuildHasher, + H: Hasher +{ + fn eq(self, other: HashMap) -> bool { +``` +> Source code: noir_stdlib/src/collections/map.nr#L426-L435 + + +Checks if two HashMaps are equal. + +Example: + +```rust title="eq_example" showLineNumbers +let mut map1: HashMap> = HashMap::default(); + let mut map2: HashMap> = HashMap::default(); + + map1.insert(1, 2); + map1.insert(3, 4); + + map2.insert(3, 4); + map2.insert(1, 2); + + assert(map1 == map2); +``` +> Source code: test_programs/execution_success/hashmap/src/main.nr#L285-L296 + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/index.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/index.md new file mode 100644 index 00000000000..ea84c6d5c21 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/index.md @@ -0,0 +1,5 @@ +--- +title: Containers +description: Container types provided by Noir's standard library for storing and retrieving data +keywords: [containers, data types, vec, hashmap] +--- diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/vec.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/vec.mdx new file mode 100644 index 00000000000..fcfd7e07aa0 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/containers/vec.mdx @@ -0,0 +1,151 @@ +--- +title: Vectors +description: Delve into the Vec data type in Noir. 
Learn about its methods, practical examples, and best practices for using Vectors in your Noir code. +keywords: [noir, vector type, methods, examples, dynamic arrays] +sidebar_position: 6 +--- + +import Experimental from '@site/src/components/Notes/_experimental.mdx'; + + + +A vector is a collection type similar to Rust's `Vec` type. In Noir, it is a convenient way to use slices as mutable arrays. + +Example: + +```rust +let mut vector: Vec = Vec::new(); +for i in 0..5 { + vector.push(i); +} +assert(vector.len() == 5); +``` + +## Methods + +### new + +Creates a new, empty vector. + +```rust +pub fn new() -> Self +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### from_slice + +Creates a vector containing each element from a given slice. Mutations to the resulting vector will not affect the original slice. + +```rust +pub fn from_slice(slice: [T]) -> Self +``` + +Example: + +```rust +let slice: [Field] = &[1, 2, 3]; +let vector_from_slice = Vec::from_slice(slice); +assert(vector_from_slice.len() == 3); +``` + +### len + +Returns the number of elements in the vector. + +```rust +pub fn len(self) -> Field +``` + +Example: + +```rust +let empty_vector: Vec = Vec::new(); +assert(empty_vector.len() == 0); +``` + +### get + +Retrieves an element from the vector at a given index. Panics if the index points beyond the vector's end. + +```rust +pub fn get(self, index: Field) -> T +``` + +Example: + +```rust +let vector: Vec = Vec::from_slice(&[10, 20, 30]); +assert(vector.get(1) == 20); +``` + +### push + +Adds a new element to the vector's end, returning a new vector with a length one greater than the original unmodified vector. + +```rust +pub fn push(&mut self, elem: T) +``` + +Example: + +```rust +let mut vector: Vec = Vec::new(); +vector.push(10); +assert(vector.len() == 1); +``` + +### pop + +Removes an element from the vector's end, returning a new vector with a length one less than the original vector, along with the removed element. Panics if the vector's length is zero. + +```rust +pub fn pop(&mut self) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 20]); +let popped_elem = vector.pop(); +assert(popped_elem == 20); +assert(vector.len() == 1); +``` + +### insert + +Inserts an element at a specified index, shifting subsequent elements to the right. + +```rust +pub fn insert(&mut self, index: Field, elem: T) +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 30]); +vector.insert(1, 20); +assert(vector.get(1) == 20); +``` + +### remove + +Removes an element at a specified index, shifting subsequent elements to the left, and returns the removed element. 
+ +```rust +pub fn remove(&mut self, index: Field) -> T +``` + +Example: + +```rust +let mut vector = Vec::from_slice(&[10, 20, 30]); +let removed_elem = vector.remove(1); +assert(removed_elem == 20); +assert(vector.len() == 2); +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/_category_.json b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/_category_.json new file mode 100644 index 00000000000..5d694210bbf --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 0, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ec_primitives.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ec_primitives.md new file mode 100644 index 00000000000..d2b42d67b7c --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ec_primitives.md @@ -0,0 +1,102 @@ +--- +title: Elliptic Curve Primitives +keywords: [cryptographic primitives, Noir project] +sidebar_position: 4 +--- + +Data structures and methods on them that allow you to carry out computations involving elliptic +curves over the (mathematical) field corresponding to `Field`. For the field currently at our +disposal, applications would involve a curve embedded in BN254, e.g. the +[Baby Jubjub curve](https://eips.ethereum.org/EIPS/eip-2494). + +## Data structures + +### Elliptic curve configurations + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Curve`), i.e. the specific elliptic +curve you want to use, which would be specified using any one of the methods +`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::new` which take the coefficients in the +defining equation together with a generator point as parameters. You can find more detail in the +comments in +[`noir_stdlib/src/ec.nr`](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr), but +the gist of it is that the elliptic curves of interest are usually expressed in one of the standard +forms implemented here (Twisted Edwards, Montgomery and Short Weierstraß), and in addition to that, +you could choose to use `affine` coordinates (Cartesian coordinates - the usual (x,y) - possibly +together with a point at infinity) or `curvegroup` coordinates (some form of projective coordinates +requiring more coordinates but allowing for more efficient implementations of elliptic curve +operations). Conversions between all of these forms are provided, and under the hood these +conversions are done whenever an operation is more efficient in a different representation (or a +mixed coordinate representation is employed). + +### Points + +(`std::ec::{tecurve,montcurve,swcurve}::{affine,curvegroup}::Point`), i.e. points lying on the +elliptic curve. For a curve configuration `c` and a point `p`, it may be checked that `p` +does indeed lie on `c` by calling `c.contains(p1)`. + +## Methods + +(given a choice of curve representation, e.g. use `std::ec::tecurve::affine::Curve` and use +`std::ec::tecurve::affine::Point`) + +- The **zero element** is given by `Point::zero()`, and we can verify whether a point `p: Point` is + zero by calling `p.is_zero()`. +- **Equality**: Points `p1: Point` and `p2: Point` may be checked for equality by calling + `p1.eq(p2)`. 
+- **Addition**: For `c: Curve` and points `p1: Point` and `p2: Point` on the curve, adding these two + points is accomplished by calling `c.add(p1,p2)`. +- **Negation**: For a point `p: Point`, `p.negate()` is its negation. +- **Subtraction**: For `c` and `p1`, `p2` as above, subtracting `p2` from `p1` is accomplished by + calling `c.subtract(p1,p2)`. +- **Scalar multiplication**: For `c` as above, `p: Point` a point on the curve and `n: Field`, + scalar multiplication is given by `c.mul(n,p)`. If instead `n :: [u1; N]`, i.e. `n` is a bit + array, the `bit_mul` method may be used instead: `c.bit_mul(n,p)` +- **Multi-scalar multiplication**: For `c` as above and arrays `n: [Field; N]` and `p: [Point; N]`, + multi-scalar multiplication is given by `c.msm(n,p)`. +- **Coordinate representation conversions**: The `into_group` method converts a point or curve + configuration in the affine representation to one in the CurveGroup representation, and + `into_affine` goes in the other direction. +- **Curve representation conversions**: `tecurve` and `montcurve` curves and points are equivalent + and may be converted between one another by calling `into_montcurve` or `into_tecurve` on their + configurations or points. `swcurve` is more general and a curve c of one of the other two types + may be converted to this representation by calling `c.into_swcurve()`, whereas a point `p` lying + on the curve given by `c` may be mapped to its corresponding `swcurve` point by calling + `c.map_into_swcurve(p)`. +- **Map-to-curve methods**: The Elligator 2 method of mapping a field element `n: Field` into a + `tecurve` or `montcurve` with configuration `c` may be called as `c.elligator2_map(n)`. For all of + the curve configurations, the SWU map-to-curve method may be called as `c.swu_map(z,n)`, where + `z: Field` depends on `Field` and `c` and must be chosen by the user (the conditions it needs to + satisfy are specified in the comments + [here](https://github.com/noir-lang/noir/blob/master/noir_stdlib/src/ec.nr)). + +## Examples + +The +[ec_baby_jubjub test](https://github.com/noir-lang/noir/blob/master/test_programs/compile_success_empty/ec_baby_jubjub/src/main.nr) +illustrates all of the above primitives on various forms of the Baby Jubjub curve. A couple of more +interesting examples in Noir would be: + +Public-key cryptography: Given an elliptic curve and a 'base point' on it, determine the public key +from the private key. This is a matter of using scalar multiplication. In the case of Baby Jubjub, +for example, this code would do: + +```rust +use dep::std::ec::tecurve::affine::{Curve, Point}; + +fn bjj_pub_key(priv_key: Field) -> Point +{ + + let bjj = Curve::new(168700, 168696, G::new(995203441582195749578291179787384436505546430278305826713579947235728471134,5472060717959818805561601436314318772137091100104008585924551046643952123905)); + + let base_pt = Point::new(5299619240641551281634865583518297030282874472190772894086521144482721001553, 16950150798460657717958625567821834550301663161624707787222815936182638968203); + + bjj.mul(priv_key,base_pt) +} +``` + +This would come in handy in a Merkle proof. + +- EdDSA signature verification: This is a matter of combining these primitives with a suitable hash + function. See + [feat(stdlib): EdDSA sig verification noir#1136](https://github.com/noir-lang/noir/pull/1136) for + the case of Baby Jubjub and the Poseidon hash function. 
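As one more self-contained sketch (reusing the Baby Jubjub parameters from the public-key example above; the helper function below is purely illustrative), the methods listed earlier can be cross-checked against each other, e.g. doubling a point with `add` should agree with scalar multiplication by 2:

```rust
use dep::std::ec::tecurve::affine::{Curve, Point};

fn assert_double_matches_mul(p: Point) {
    // Baby Jubjub in Twisted Edwards affine form, as in the example above
    let bjj = Curve::new(
        168700,
        168696,
        Point::new(
            995203441582195749578291179787384436505546430278305826713579947235728471134,
            5472060717959818805561601436314318772137091100104008585924551046643952123905
        )
    );

    // Only meaningful for points that actually lie on the curve
    assert(bjj.contains(p));

    // p + p should equal 2 * p
    assert(bjj.add(p, p).eq(bjj.mul(2, p)));
}
```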
diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx new file mode 100644 index 00000000000..4394b48f907 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/ecdsa_sig_verification.mdx @@ -0,0 +1,98 @@ +--- +title: ECDSA Signature Verification +description: Learn about the cryptographic primitives regarding ECDSA over the secp256k1 and secp256r1 curves +keywords: [cryptographic primitives, Noir project, ecdsa, secp256k1, secp256r1, signatures] +sidebar_position: 3 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +Noir supports ECDSA signatures verification over the secp256k1 and secp256r1 curves. + +## ecdsa_secp256k1::verify_signature + +Verifier for ECDSA Secp256k1 signatures. +See ecdsa_secp256k1::verify_signature_slice for a version that accepts slices directly. + +```rust title="ecdsa_secp256k1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256k1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + + +## ecdsa_secp256k1::verify_signature_slice + +Verifier for ECDSA Secp256k1 signatures where the message is a slice. + +```rust title="ecdsa_secp256k1_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256k1.nr#L13-L20 + + + + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures. +See ecdsa_secp256r1::verify_signature_slice for a version that accepts slices directly. + +```rust title="ecdsa_secp256r1" showLineNumbers +pub fn verify_signature( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L2-L9 + + +example: + +```rust +fn main(hashed_message : [u8;32], pub_key_x : [u8;32], pub_key_y : [u8;32], signature : [u8;64]) { + let valid_signature = std::ecdsa_secp256r1::verify_signature(pub_key_x, pub_key_y, signature, hashed_message); + assert(valid_signature); +} +``` + + + +## ecdsa_secp256r1::verify_signature + +Verifier for ECDSA Secp256r1 signatures where the message is a slice. 
+ +```rust title="ecdsa_secp256r1_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: [u8; 32], + public_key_y: [u8; 32], + signature: [u8; 64], + message_hash: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/ecdsa_secp256r1.nr#L13-L20 + + + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/eddsa.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/eddsa.mdx new file mode 100644 index 00000000000..c2c0624dfad --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/eddsa.mdx @@ -0,0 +1,37 @@ +--- +title: EdDSA Verification +description: Learn about the cryptographic primitives regarding EdDSA +keywords: [cryptographic primitives, Noir project, eddsa, signatures] +sidebar_position: 5 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## eddsa::eddsa_poseidon_verify + +Verifier for EdDSA signatures + +```rust +fn eddsa_poseidon_verify(public_key_x : Field, public_key_y : Field, signature_s: Field, signature_r8_x: Field, signature_r8_y: Field, message: Field) -> bool +``` + +It is also possible to specify the hash algorithm used for the signature by using the `eddsa_verify_with_hasher` function with a parameter implementing the Hasher trait. For instance, if you want to use Poseidon2 instead, you can do the following: +```rust +use dep::std::hash::poseidon2::Poseidon2Hasher; + +let mut hasher = Poseidon2Hasher::default(); +eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher); +``` + + + +## eddsa::eddsa_to_pub + +Private to public key conversion. + +Returns `(pub_key_x, pub_key_y)` + +```rust +fn eddsa_to_pub(secret : Field) -> (Field, Field) +``` + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/hashes.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/hashes.mdx new file mode 100644 index 00000000000..695c7d9406f --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/hashes.mdx @@ -0,0 +1,331 @@ +--- +title: Hash methods +description: + Learn about the cryptographic primitives ready to use for any Noir project, including sha256, + blake2s, pedersen, mimc_bn254 and mimc +keywords: + [cryptographic primitives, Noir project, sha256, blake2s, pedersen, mimc_bn254, mimc, hash] +sidebar_position: 0 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## sha256 + +Given an array of bytes, returns the resulting sha256 hash. +See sha256_slice for a version that works directly on slices. + +```rust title="sha256" showLineNumbers +pub fn sha256(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L10-L12 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::sha256(x); +} +``` + + + +## sha256_slice + +A version of sha256 specialized to slices: + +```rust title="sha256_slice" showLineNumbers +pub fn sha256_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L16-L18 + + + + +## blake2s + +Given an array of bytes, returns an array with the Blake2 hash +See blake2s_slice for a version that works directly on slices. 
+ +```rust title="blake2s" showLineNumbers +pub fn blake2s(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L22-L24 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake2s(x); +} +``` + + + +## blake2s_slice + +A version of blake2s specialized to slices: + +```rust title="blake2s_slice" showLineNumbers +pub fn blake2s_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L28-L30 + + + + +## blake3 + +Given an array of bytes, returns an array with the Blake3 hash +See blake3_slice for a version that works directly on slices. + +```rust title="blake3" showLineNumbers +pub fn blake3(input: [u8; N]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L34-L36 + + +example: + +```rust +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::blake3(x); +} +``` + + + +## blake3_slice + +A version of blake3 specialized to slices: + +```rust title="blake3_slice" showLineNumbers +pub fn blake3_slice(input: [u8]) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L40-L42 + + + + +## pedersen_hash + +Given an array of Fields, returns the Pedersen hash. +See pedersen_hash_slice for a version that works directly on slices. + +```rust title="pedersen_hash" showLineNumbers +pub fn pedersen_hash(input: [Field; N]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L78-L80 + + +example: + +```rust title="pedersen-hash" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_hash: Field) { + let hash = std::hash::pedersen_hash([x, y]); + assert_eq(hash, expected_hash); +} +``` +> Source code: test_programs/execution_success/pedersen_hash/src/main.nr#L1-L8 + + + + +## pedersen_hash_slice + +Given a slice of Fields, returns the Pedersen hash. + +```rust title="pedersen_hash_slice" showLineNumbers +pub fn pedersen_hash_slice(input: [Field]) -> Field +``` +> Source code: noir_stdlib/src/hash.nr#L85-L87 + + + + +## pedersen_commitment + +Given an array of Fields, returns the Pedersen commitment. +See pedersen_commitment_slice for a version that works directly on slices. + +```rust title="pedersen_commitment" showLineNumbers +struct PedersenPoint { + x : Field, + y : Field, +} + +pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint { +``` +> Source code: noir_stdlib/src/hash.nr#L45-L52 + + +example: + +```rust title="pedersen-commitment" showLineNumbers +use dep::std; + +fn main(x: Field, y: Field, expected_commitment: std::hash::PedersenPoint) { + let commitment = std::hash::pedersen_commitment([x, y]); + assert_eq(commitment.x, expected_commitment.x); + assert_eq(commitment.y, expected_commitment.y); +} +``` +> Source code: test_programs/execution_success/pedersen_commitment/src/main.nr#L1-L9 + + + + +## pedersen_commitment_slice + +Given a slice of Fields, returns the Pedersen commitment. + +```rust title="pedersen_commitment_slice" showLineNumbers +pub fn pedersen_commitment_slice(input: [Field]) -> PedersenPoint { + pedersen_commitment_with_separator_slice(input, 0) +} +``` +> Source code: noir_stdlib/src/hash.nr#L56-L60 + + + + +## keccak256 + +Given an array of bytes (`u8`), returns the resulting keccak hash as an array of +32 bytes (`[u8; 32]`). Specify a message_size to hash only the first +`message_size` bytes of the input. See keccak256_slice for a version that works +directly on slices. 
+ +```rust title="keccak256" showLineNumbers +pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L113-L115 + + +example: + +```rust title="keccak256" showLineNumbers +use dep::std; + +fn main(x: Field, result: [u8; 32]) { + // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field + // The padding is taken care of by the program + let digest = std::hash::keccak256([x as u8], 1); + assert(digest == result); + + //#1399: variable message size + let message_size = 4; + let hash_a = std::hash::keccak256([1, 2, 3, 4], message_size); + let hash_b = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size); + + assert(hash_a == hash_b); + + let message_size_big = 8; + let hash_c = std::hash::keccak256([1, 2, 3, 4, 0, 0, 0, 0], message_size_big); + + assert(hash_a != hash_c); +} +``` +> Source code: test_programs/execution_success/keccak256/src/main.nr#L1-L22 + + + + +## keccak256_slice + +Given a slice of bytes (`u8`), returns the resulting keccak hash as an array of +32 bytes (`[u8; 32]`). + +```rust title="keccak256_slice" showLineNumbers +pub fn keccak256_slice(input: [u8], message_size: u32) -> [u8; 32] +``` +> Source code: noir_stdlib/src/hash.nr#L119-L121 + + + + +## poseidon + +Given an array of Fields, returns a new Field with the Poseidon Hash. Mind that you need to specify +how many inputs are there to your Poseidon function. + +```rust +// example for hash_1, hash_2 accepts an array of length 2, etc +fn hash_1(input: [Field; 1]) -> Field +``` + +example: + +```rust title="poseidon" showLineNumbers +use dep::std::hash::poseidon; +use dep::std::hash::poseidon2; + +fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field, x3: [Field; 4], y3: Field) { + let hash1 = poseidon::bn254::hash_2(x1); + assert(hash1 == y1); + + let hash2 = poseidon::bn254::hash_4(x2); + assert(hash2 == y2); + + let hash3 = poseidon2::Poseidon2::hash(x3, x3.len()); + assert(hash3 == y3); +} +``` +> Source code: test_programs/execution_success/poseidon_bn254_hash/src/main.nr#L1-L15 + + +## poseidon 2 + +Given an array of Fields, returns a new Field with the Poseidon2 Hash. Contrary to the Poseidon +function, there is only one hash and you can specify a message_size to hash only the first +`message_size` bytes of the input, + +```rust +// example for hashing the first three elements of the input +Poseidon2::hash(input, 3); +``` + +The above example for Poseidon also includes Poseidon2. + +## mimc_bn254 and mimc + +`mimc_bn254` is `mimc`, but with hardcoded parameters for the BN254 curve. You can use it by +providing an array of Fields, and it returns a Field with the hash. You can use the `mimc` method if +you're willing to input your own constants: + +```rust +fn mimc(x: Field, k: Field, constants: [Field; N], exp : Field) -> Field +``` + +otherwise, use the `mimc_bn254` method: + +```rust +fn mimc_bn254(array: [Field; N]) -> Field +``` + +example: + +```rust + +fn main() { + let x = [163, 117, 178, 149]; // some random bytes + let hash = std::hash::mimc::mimc_bn254(x); +} +``` + +## hash_to_field + +```rust +fn hash_to_field(_input : [Field]) -> Field {} +``` + +Calculates the `blake2s` hash of the inputs and returns the hash modulo the field modulus to return +a value which can be represented as a `Field`. 
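A short usage sketch (assuming this is exposed as `std::hash::hash_to_field`, matching the signature above):

```rust
use dep::std;

fn main(x: Field, y: Field) -> pub Field {
    // Hash the two inputs down to a single Field element
    std::hash::hash_to_field(&[x, y])
}
```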
+ diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/index.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/index.md new file mode 100644 index 00000000000..650f30165d5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/index.md @@ -0,0 +1,14 @@ +--- +title: Cryptographic Primitives +description: + Learn about the cryptographic primitives ready to use for any Noir project +keywords: + [ + cryptographic primitives, + Noir project, + ] +--- + +The Noir team is progressively adding new cryptographic primitives to the standard library. Reach out for news or if you would be interested in adding more of these calculations in Noir. + +Some methods are available thanks to the Aztec backend, not being performed using Noir. When using other backends, these methods may or may not be supplied. diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/scalar.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/scalar.mdx new file mode 100644 index 00000000000..df411ca5443 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/scalar.mdx @@ -0,0 +1,33 @@ +--- +title: Scalar multiplication +description: See how you can perform scalar multiplications over a fixed base in Noir +keywords: [cryptographic primitives, Noir project, scalar multiplication] +sidebar_position: 1 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## scalar_mul::fixed_base_embedded_curve + +Performs scalar multiplication over the embedded curve whose coordinates are defined by the +configured noir field. For the BN254 scalar field, this is BabyJubJub or Grumpkin. + +```rust title="fixed_base_embedded_curve" showLineNumbers +pub fn fixed_base_embedded_curve( + low: Field, + high: Field +) -> [Field; 2] +``` +> Source code: noir_stdlib/src/scalar_mul.nr#L27-L32 + + +example + +```rust +fn main(x : Field) { + let scal = std::scalar_mul::fixed_base_embedded_curve(x); + println(scal); +} +``` + + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/schnorr.mdx b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/schnorr.mdx new file mode 100644 index 00000000000..b59e69c8f07 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/cryptographic_primitives/schnorr.mdx @@ -0,0 +1,64 @@ +--- +title: Schnorr Signatures +description: Learn how you can verify Schnorr signatures using Noir +keywords: [cryptographic primitives, Noir project, schnorr, signatures] +sidebar_position: 2 +--- + +import BlackBoxInfo from '@site/src/components/Notes/_blackbox.mdx'; + +## schnorr::verify_signature + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin). +See schnorr::verify_signature_slice for a version that works directly on slices. 
+ +```rust title="schnorr_verify" showLineNumbers +pub fn verify_signature( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8; N] +) -> bool +``` +> Source code: noir_stdlib/src/schnorr.nr#L2-L9 + + +where `_signature` can be generated like so using the npm package +[@noir-lang/barretenberg](https://www.npmjs.com/package/@noir-lang/barretenberg) + +```js +const { BarretenbergWasm } = require('@noir-lang/barretenberg/dest/wasm'); +const { Schnorr } = require('@noir-lang/barretenberg/dest/crypto/schnorr'); + +... + +const barretenberg = await BarretenbergWasm.new(); +const schnorr = new Schnorr(barretenberg); +const pubKey = schnorr.computePublicKey(privateKey); +const message = ... +const signature = Array.from( + schnorr.constructSignature(hash, privateKey).toBuffer() +); + +... +``` + + + +## schnorr::verify_signature_slice + +Verifier for Schnorr signatures over the embedded curve (for BN254 it is Grumpkin) +where the message is a slice. + +```rust title="schnorr_verify_slice" showLineNumbers +pub fn verify_signature_slice( + public_key_x: Field, + public_key_y: Field, + signature: [u8; 64], + message: [u8] +) -> bool +``` +> Source code: noir_stdlib/src/schnorr.nr#L13-L20 + + + diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/logging.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/logging.md new file mode 100644 index 00000000000..db75ef9f86f --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/logging.md @@ -0,0 +1,78 @@ +--- +title: Logging +description: + Learn how to use the println statement for debugging in Noir with this tutorial. Understand the + basics of logging in Noir and how to implement it in your code. +keywords: + [ + noir logging, + println statement, + print statement, + debugging in noir, + noir std library, + logging tutorial, + basic logging in noir, + noir logging implementation, + noir debugging techniques, + rust, + ] +--- + +The standard library provides two familiar statements you can use: `println` and `print`. Despite being a limited implementation of rust's `println!` and `print!` macros, these constructs can be useful for debugging. + +You can print the output of both statements in your Noir code by using the `nargo execute` command or the `--show-output` flag when using `nargo test` (provided there are print statements in your tests). + +It is recommended to use `nargo execute` if you want to debug failing constraints with `println` or `print` statements. This is due to every input in a test being a constant rather than a witness, so we issue an error during compilation while we only print during execution (which comes after compilation). Neither `println`, nor `print` are callable for failed constraints caught at compile time. + +Both `print` and `println` are generic functions which can work on integers, fields, strings, and even structs or expressions. Note however, that slices are currently unsupported. For example: + +```rust +struct Person { + age: Field, + height: Field, +} + +fn main(age: Field, height: Field) { + let person = Person { + age: age, + height: height, + }; + println(person); + println(age + height); + println("Hello world!"); +} +``` + +You can print different types in the same statement (including strings) with a type called `fmtstr`. 
It can be specified in the same way as a normal string, just prepended with an "f" character: + +```rust + let fmt_str = f"i: {i}, j: {j}"; + println(fmt_str); + + let s = myStruct { y: x, x: y }; + println(s); + + println(f"i: {i}, s: {s}"); + + println(x); + println([x, y]); + + let foo = fooStruct { my_struct: s, foo: 15 }; + println(f"s: {s}, foo: {foo}"); + + println(15); // prints 0x0f, implicit Field + println(-1 as u8); // prints 255 + println(-1 as i8); // prints -1 +``` + +Examples shown above are interchangeable between the two `print` statements: + +```rust +let person = Person { age : age, height : height }; + +println(person); +print(person); + +println("Hello world!"); // Prints with a newline at the end of the input +print("Hello world!"); // Prints the input and keeps cursor on the same line +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/merkle_trees.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/merkle_trees.md new file mode 100644 index 00000000000..6a9ebf72ada --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/merkle_trees.md @@ -0,0 +1,58 @@ +--- +title: Merkle Trees +description: Learn about Merkle Trees in Noir with this tutorial. Explore the basics of computing a merkle root using a proof, with examples. +keywords: + [ + Merkle trees in Noir, + Noir programming language, + check membership, + computing root from leaf, + Noir Merkle tree implementation, + Merkle tree tutorial, + Merkle tree code examples, + Noir libraries, + pedersen hash., + ] +--- + +## compute_merkle_root + +Returns the root of the tree from the provided leaf and its hash path, using a [Pedersen hash](./cryptographic_primitives/hashes.mdx#pedersen_hash). + +```rust +fn compute_merkle_root(leaf : Field, index : Field, hash_path: [Field]) -> Field +``` + +example: + +```rust +/** + // these values are for this example only + index = "0" + priv_key = "0x000000000000000000000000000000000000000000000000000000616c696365" + secret = "0x1929ea3ab8d9106a899386883d9428f8256cfedb3c4f6b66bf4aa4d28a79988f" + note_hash_path = [ + "0x1e61bdae0f027b1b2159e1f9d3f8d00fa668a952dddd822fda80dc745d6f65cc", + "0x0e4223f3925f98934393c74975142bd73079ab0621f4ee133cee050a3c194f1a", + "0x2fd7bb412155bf8693a3bd2a3e7581a679c95c68a052f835dddca85fa1569a40" + ] + */ +fn main(index: Field, priv_key: Field, secret: Field, note_hash_path: [Field; 3]) { + + let pubkey = std::scalar_mul::fixed_base_embedded_curve(priv_key); + let pubkey_x = pubkey[0]; + let pubkey_y = pubkey[1]; + let note_commitment = std::hash::pedersen(&[pubkey_x, pubkey_y, secret]); + + let root = std::merkle::compute_merkle_root(note_commitment[0], index, note_hash_path.as_slice()); + println(root); +} +``` + +To check merkle tree membership: + +1. Include a merkle root as a program input. +2. Compute the merkle root of a given leaf, index and hash path. +3. Assert the merkle roots are equal. + +For more info about merkle trees, see the Wikipedia [page](https://en.wikipedia.org/wiki/Merkle_tree). diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/options.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/options.md new file mode 100644 index 00000000000..a1bd4e1de5f --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/options.md @@ -0,0 +1,101 @@ +--- +title: Option Type +--- + +The `Option` type is a way to express that a value might be present (`Some(T))` or absent (`None`). 
It's a safer way to handle potential absence of values, compared to using nulls in many other languages. + +```rust +struct Option { + None, + Some(T), +} +``` + +The `Option` type, already imported into your Noir program, can be used directly: + +```rust +fn main() { + let none = Option::none(); + let some = Option::some(3); +} +``` + +See [this test](https://github.com/noir-lang/noir/blob/5cbfb9c4a06c8865c98ff2b594464b037d821a5c/crates/nargo_cli/tests/test_data/option/src/main.nr) for a more comprehensive set of examples of each of the methods described below. + +## Methods + +### none + +Constructs a none value. + +### some + +Constructs a some wrapper around a given value. + +### is_none + +Returns true if the Option is None. + +### is_some + +Returns true of the Option is Some. + +### unwrap + +Asserts `self.is_some()` and returns the wrapped value. + +### unwrap_unchecked + +Returns the inner value without asserting `self.is_some()`. This method can be useful within an if condition when we already know that `option.is_some()`. If the option is None, there is no guarantee what value will be returned, only that it will be of type T for an `Option`. + +### unwrap_or + +Returns the wrapped value if `self.is_some()`. Otherwise, returns the given default value. + +### unwrap_or_else + +Returns the wrapped value if `self.is_some()`. Otherwise, calls the given function to return a default value. + +### expect + +Asserts `self.is_some()` with a provided custom message and returns the contained `Some` value. The custom message is expected to be a format string. + +### map + +If self is `Some(x)`, this returns `Some(f(x))`. Otherwise, this returns `None`. + +### map_or + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns the given default value. + +### map_or_else + +If self is `Some(x)`, this returns `f(x)`. Otherwise, this returns `default()`. + +### and + +Returns None if self is None. Otherwise, this returns `other`. + +### and_then + +If self is None, this returns None. Otherwise, this calls the given function with the Some value contained within self, and returns the result of that call. In some languages this function is called `flat_map` or `bind`. + +### or + +If self is Some, return self. Otherwise, return `other`. + +### or_else + +If self is Some, return self. Otherwise, return `default()`. + +### xor + +If only one of the two Options is Some, return that option. Otherwise, if both options are Some or both are None, None is returned. + +### filter + +Returns `Some(x)` if self is `Some(x)` and `predicate(x)` is true. Otherwise, this returns `None`. + +### flatten + +Flattens an `Option>` into a `Option`. This returns `None` if the outer Option is None. Otherwise, this returns the inner Option. diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/recursion.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/recursion.md new file mode 100644 index 00000000000..a93894043dc --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/recursion.md @@ -0,0 +1,88 @@ +--- +title: Recursive Proofs +description: Learn about how to write recursive proofs in Noir. +keywords: [recursion, recursive proofs, verification_key, verify_proof] +--- + +Noir supports recursively verifying proofs, meaning you verify the proof of a Noir program in another Noir program. This enables creating proofs of arbitrary size by doing step-wise verification of smaller components of a large proof. 
+ +Read [the explainer on recursion](../../explainers/explainer-recursion.md) to know more about this function and the [guide on how to use it.](../../how_to/how-to-recursion.md) + +## The `#[recursive]` Attribute + +In Noir, the `#[recursive]` attribute is used to indicate that a circuit is designed for recursive proof generation. When applied, it informs the compiler and the tooling that the circuit should be compiled in a way that makes its proofs suitable for recursive verification. This attribute eliminates the need for manual flagging of recursion at the tooling level, streamlining the proof generation process for recursive circuits. + +### Example usage with `#[recursive]` + +```rust +#[recursive] +fn main(x: Field, y: pub Field) { + assert(x == y, "x and y are not equal"); +} + +// This marks the circuit as recursion-friendly and indicates that proofs generated from this circuit +// are intended for recursive verification. +``` + +By incorporating this attribute directly in the circuit's definition, tooling like Nargo and NoirJS can automatically execute recursive-specific duties for Noir programs (e.g. recursive-friendly proof artifact generation) without additional flags or configurations. + +## Verifying Recursive Proofs + +```rust +#[foreign(recursive_aggregation)] +pub fn verify_proof(verification_key: [Field], proof: [Field], public_inputs: [Field], key_hash: Field) {} +``` + +:::info + +This is a black box function. Read [this section](./black_box_fns) to learn more about black box functions in Noir. + +::: + +## Example usage + +```rust +use dep::std; + +fn main( + verification_key : [Field; 114], + proof : [Field; 93], + public_inputs : [Field; 1], + key_hash : Field, + proof_b : [Field; 93], +) { + std::verify_proof( + verification_key.as_slice(), + proof.as_slice(), + public_inputs.as_slice(), + key_hash + ); + + std::verify_proof( + verification_key.as_slice(), + proof_b.as_slice(), + public_inputs.as_slice(), + key_hash + ); +} +``` + +You can see a full example of recursive proofs in [this example recursion demo repo](https://github.com/noir-lang/noir-examples/tree/master/recursion). + +## Parameters + +### `verification_key` + +The verification key for the zk program that is being verified. + +### `proof` + +The proof for the zk program that is being verified. + +### `public_inputs` + +These represent the public inputs of the proof we are verifying. + +### `key_hash` + +A key hash is used to check the validity of the verification key. The circuit implementing this opcode can use this hash to ensure that the key provided to the circuit matches the key produced by the circuit creator. diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/traits.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/traits.md new file mode 100644 index 00000000000..68a9dc3d54b --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/traits.md @@ -0,0 +1,408 @@ +--- +title: Traits +description: Noir's stdlib provides a few commonly used traits. +keywords: [traits, trait, interface, protocol, default, add, eq] +--- + +## `std::default` + +### `std::default::Default` + +```rust title="default-trait" showLineNumbers +trait Default { + fn default() -> Self; +} +``` +> Source code: noir_stdlib/src/default.nr#L1-L5 + + +Constructs a default value of a type. + +Implementations: +```rust +impl Default for Field { .. } + +impl Default for i8 { .. } +impl Default for i16 { .. } +impl Default for i32 { .. } +impl Default for i64 { .. 
} + +impl Default for u8 { .. } +impl Default for u16 { .. } +impl Default for u32 { .. } +impl Default for u64 { .. } + +impl Default for () { .. } +impl Default for bool { .. } + +impl Default for [T; N] + where T: Default { .. } + +impl Default for [T] { .. } + +impl Default for (A, B) + where A: Default, B: Default { .. } + +impl Default for (A, B, C) + where A: Default, B: Default, C: Default { .. } + +impl Default for (A, B, C, D) + where A: Default, B: Default, C: Default, D: Default { .. } + +impl Default for (A, B, C, D, E) + where A: Default, B: Default, C: Default, D: Default, E: Default { .. } +``` + +For primitive integer types, the return value of `default` is `0`. Container +types such as arrays are filled with default values of their element type, +except slices whose length is unknown and thus defaulted to zero. + + +## `std::convert` + +### `std::convert::From` + +```rust title="from-trait" showLineNumbers +trait From { + fn from(input: T) -> Self; +} +``` +> Source code: noir_stdlib/src/convert.nr#L1-L5 + + +The `From` trait defines how to convert from a given type `T` to the type on which the trait is implemented. + +The Noir standard library provides a number of implementations of `From` between primitive types. +```rust title="from-impls" showLineNumbers +// Unsigned integers + +impl From for u32 { fn from(value: u8) -> u32 { value as u32 } } + +impl From for u64 { fn from(value: u8) -> u64 { value as u64 } } +impl From for u64 { fn from(value: u32) -> u64 { value as u64 } } + +impl From for Field { fn from(value: u8) -> Field { value as Field } } +impl From for Field { fn from(value: u32) -> Field { value as Field } } +impl From for Field { fn from(value: u64) -> Field { value as Field } } + +// Signed integers + +impl From for i32 { fn from(value: i8) -> i32 { value as i32 } } + +impl From for i64 { fn from(value: i8) -> i64 { value as i64 } } +impl From for i64 { fn from(value: i32) -> i64 { value as i64 } } + +// Booleans +impl From for u8 { fn from(value: bool) -> u8 { value as u8 } } +impl From for u32 { fn from(value: bool) -> u32 { value as u32 } } +impl From for u64 { fn from(value: bool) -> u64 { value as u64 } } +impl From for i8 { fn from(value: bool) -> i8 { value as i8 } } +impl From for i32 { fn from(value: bool) -> i32 { value as i32 } } +impl From for i64 { fn from(value: bool) -> i64 { value as i64 } } +impl From for Field { fn from(value: bool) -> Field { value as Field } } +``` +> Source code: noir_stdlib/src/convert.nr#L25-L52 + + +#### When to implement `From` + +As a general rule of thumb, `From` may be implemented in the [situations where it would be suitable in Rust](https://doc.rust-lang.org/std/convert/trait.From.html#when-to-implement-from): + +- The conversion is *infallible*: Noir does not provide an equivalent to Rust's `TryFrom`, if the conversion can fail then provide a named method instead. +- The conversion is *lossless*: semantically, it should not lose or discard information. For example, `u32: From` can losslessly convert any `u16` into a valid `u32` such that the original `u16` can be recovered. On the other hand, `u16: From` should not be implemented as `2**16` is a `u32` which cannot be losslessly converted into a `u16`. +- The conversion is *value-preserving*: the conceptual kind and meaning of the resulting value is the same, even though the Noir type and technical representation might be different. 
While it's possible to infallibly and losslessly convert a `u8` into a `str<2>` hex representation, `4u8` and `"04"` are too different for `str<2>: From` to be implemented. +- The conversion is *obvious*: it's the only reasonable conversion between the two types. If there's ambiguity on how to convert between them such that the same input could potentially map to two different values then a named method should be used. For instance rather than implementing `U128: From<[u8; 16]>`, the methods `U128::from_le_bytes` and `U128::from_be_bytes` are used as otherwise the endianness of the array would be ambiguous, resulting in two potential values of `U128` from the same byte array. + +One additional recommendation specific to Noir is: +- The conversion is *efficient*: it's relatively cheap to convert between the two types. Due to being a ZK DSL, it's more important to avoid unnecessary computation compared to Rust. If the implementation of `From` would encourage users to perform unnecessary conversion, resulting in additional proving time, then it may be preferable to expose functionality such that this conversion may be avoided. + +### `std::convert::Into` + +The `Into` trait is defined as the reciprocal of `From`. It should be easy to convince yourself that if we can convert to type `A` from type `B`, then it's possible to convert type `B` into type `A`. + +For this reason, implementing `From` on a type will automatically generate a matching `Into` implementation. One should always prefer implementing `From` over `Into` as implementing `Into` will not generate a matching `From` implementation. + +```rust title="into-trait" showLineNumbers +trait Into { + fn into(input: Self) -> T; +} + +impl Into for U where T: From { + fn into(input: U) -> T { + T::from(input) + } +} +``` +> Source code: noir_stdlib/src/convert.nr#L13-L23 + + +`Into` is most useful when passing function arguments where the types don't quite match up with what the function expects. In this case, the compiler has enough type information to perform the necessary conversion by just appending `.into()` onto the arguments in question. + + +## `std::cmp` + +### `std::cmp::Eq` + +```rust title="eq-trait" showLineNumbers +trait Eq { + fn eq(self, other: Self) -> bool; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L1-L5 + + +Returns `true` if `self` is equal to `other`. Implementing this trait on a type +allows the type to be used with `==` and `!=`. + +Implementations: +```rust +impl Eq for Field { .. } + +impl Eq for i8 { .. } +impl Eq for i16 { .. } +impl Eq for i32 { .. } +impl Eq for i64 { .. } + +impl Eq for u8 { .. } +impl Eq for u16 { .. } +impl Eq for u32 { .. } +impl Eq for u64 { .. } + +impl Eq for () { .. } +impl Eq for bool { .. } + +impl Eq for [T; N] + where T: Eq { .. } + +impl Eq for [T] + where T: Eq { .. } + +impl Eq for (A, B) + where A: Eq, B: Eq { .. } + +impl Eq for (A, B, C) + where A: Eq, B: Eq, C: Eq { .. } + +impl Eq for (A, B, C, D) + where A: Eq, B: Eq, C: Eq, D: Eq { .. } + +impl Eq for (A, B, C, D, E) + where A: Eq, B: Eq, C: Eq, D: Eq, E: Eq { .. } +``` + +### `std::cmp::Ord` + +```rust title="ord-trait" showLineNumbers +trait Ord { + fn cmp(self, other: Self) -> Ordering; +} +``` +> Source code: noir_stdlib/src/cmp.nr#L102-L106 + + +`a.cmp(b)` compares two values returning `Ordering::less()` if `a < b`, +`Ordering::equal()` if `a == b`, or `Ordering::greater()` if `a > b`. +Implementing this trait on a type allows `<`, `<=`, `>`, and `>=` to be +used on values of the type. 
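
As an illustrative sketch (the `Version` struct below is not part of the standard library, and `Ord` is assumed to be in scope via the prelude), a custom type can implement `Ord` by comparing its fields in order of significance:

```rust
use dep::std::cmp::Ordering;

struct Version {
    major: u32,
    minor: u32,
}

impl Ord for Version {
    fn cmp(self, other: Self) -> Ordering {
        // Compare the most significant field first, falling back to the next one on ties.
        if self.major != other.major {
            self.major.cmp(other.major)
        } else {
            self.minor.cmp(other.minor)
        }
    }
}
```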
+ +Implementations: + +```rust +impl Ord for u8 { .. } +impl Ord for u16 { .. } +impl Ord for u32 { .. } +impl Ord for u64 { .. } + +impl Ord for i8 { .. } +impl Ord for i16 { .. } +impl Ord for i32 { .. } + +impl Ord for i64 { .. } + +impl Ord for () { .. } +impl Ord for bool { .. } + +impl Ord for [T; N] + where T: Ord { .. } + +impl Ord for [T] + where T: Ord { .. } + +impl Ord for (A, B) + where A: Ord, B: Ord { .. } + +impl Ord for (A, B, C) + where A: Ord, B: Ord, C: Ord { .. } + +impl Ord for (A, B, C, D) + where A: Ord, B: Ord, C: Ord, D: Ord { .. } + +impl Ord for (A, B, C, D, E) + where A: Ord, B: Ord, C: Ord, D: Ord, E: Ord { .. } +``` + +## `std::ops` + +### `std::ops::Add`, `std::ops::Sub`, `std::ops::Mul`, and `std::ops::Div` + +These traits abstract over addition, subtraction, multiplication, and division respectively. +Implementing these traits for a given type will also allow that type to be used with the corresponding operator +for that trait (`+` for Add, etc) in addition to the normal method names. + +```rust title="add-trait" showLineNumbers +trait Add { + fn add(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L1-L5 + +```rust title="sub-trait" showLineNumbers +trait Sub { + fn sub(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L17-L21 + +```rust title="mul-trait" showLineNumbers +trait Mul { + fn mul(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L33-L37 + +```rust title="div-trait" showLineNumbers +trait Div { + fn div(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L49-L53 + + +The implementations block below is given for the `Add` trait, but the same types that implement +`Add` also implement `Sub`, `Mul`, and `Div`. + +Implementations: +```rust +impl Add for Field { .. } + +impl Add for i8 { .. } +impl Add for i16 { .. } +impl Add for i32 { .. } +impl Add for i64 { .. } + +impl Add for u8 { .. } +impl Add for u16 { .. } +impl Add for u32 { .. } +impl Add for u64 { .. } +``` + +### `std::ops::Rem` + +```rust title="rem-trait" showLineNumbers +trait Rem{ + fn rem(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L65-L69 + + +`Rem::rem(a, b)` is the remainder function returning the result of what is +left after dividing `a` and `b`. Implementing `Rem` allows the `%` operator +to be used with the implementation type. + +Unlike other numeric traits, `Rem` is not implemented for `Field`. 
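
A small usage sketch (the `is_even` helper is illustrative): `%` works directly on integer types, while a `Field` value must be cast to an integer type before taking a remainder:

```rust
fn is_even(x: u64) -> bool {
    // `%` is available here because u64 implements `Rem`.
    x % 2 == 0
}

fn main(y: Field) {
    assert(is_even(4));
    // `y % 2` would not compile, since `Rem` is not implemented for `Field`;
    // cast to an integer type first.
    let parity = (y as u64) % 2;
    assert((parity == 0) | (parity == 1));
}
```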
+ +Implementations: +```rust +impl Rem for u8 { fn rem(self, other: u8) -> u8 { self % other } } +impl Rem for u16 { fn rem(self, other: u16) -> u16 { self % other } } +impl Rem for u32 { fn rem(self, other: u32) -> u32 { self % other } } +impl Rem for u64 { fn rem(self, other: u64) -> u64 { self % other } } + +impl Rem for i8 { fn rem(self, other: i8) -> i8 { self % other } } +impl Rem for i16 { fn rem(self, other: i16) -> i16 { self % other } } +impl Rem for i32 { fn rem(self, other: i32) -> i32 { self % other } } +impl Rem for i64 { fn rem(self, other: i64) -> i64 { self % other } } +``` + +### `std::ops::{ BitOr, BitAnd, BitXor }` + +```rust title="bitor-trait" showLineNumbers +trait BitOr { + fn bitor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L79-L83 + +```rust title="bitand-trait" showLineNumbers +trait BitAnd { + fn bitand(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L95-L99 + +```rust title="bitxor-trait" showLineNumbers +trait BitXor { + fn bitxor(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L111-L115 + + +Traits for the bitwise operations `|`, `&`, and `^`. + +Implementing `BitOr`, `BitAnd` or `BitXor` for a type allows the `|`, `&`, or `^` operator respectively +to be used with the type. + +The implementations block below is given for the `BitOr` trait, but the same types that implement +`BitOr` also implement `BitAnd` and `BitXor`. + +Implementations: +```rust +impl BitOr for bool { fn bitor(self, other: bool) -> bool { self | other } } + +impl BitOr for u8 { fn bitor(self, other: u8) -> u8 { self | other } } +impl BitOr for u16 { fn bitor(self, other: u16) -> u16 { self | other } } +impl BitOr for u32 { fn bitor(self, other: u32) -> u32 { self | other } } +impl BitOr for u64 { fn bitor(self, other: u64) -> u64 { self | other } } + +impl BitOr for i8 { fn bitor(self, other: i8) -> i8 { self | other } } +impl BitOr for i16 { fn bitor(self, other: i16) -> i16 { self | other } } +impl BitOr for i32 { fn bitor(self, other: i32) -> i32 { self | other } } +impl BitOr for i64 { fn bitor(self, other: i64) -> i64 { self | other } } +``` + +### `std::ops::{ Shl, Shr }` + +```rust title="shl-trait" showLineNumbers +trait Shl { + fn shl(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L127-L131 + +```rust title="shr-trait" showLineNumbers +trait Shr { + fn shr(self, other: Self) -> Self; +} +``` +> Source code: noir_stdlib/src/ops.nr#L142-L146 + + +Traits for a bit shift left and bit shift right. + +Implementing `Shl` for a type allows the left shift operator (`<<`) to be used with the implementation type. +Similarly, implementing `Shr` allows the right shift operator (`>>`) to be used with the type. + +Note that bit shifting is not currently implemented for signed types. + +The implementations block below is given for the `Shl` trait, but the same types that implement +`Shl` also implement `Shr`. 
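
A brief usage sketch (the values are arbitrary); note that the shift amount has the same type as the value being shifted, since the trait methods take `other: Self`:

```rust
fn main() {
    let x: u8 = 1;
    // Shifting left by 3 multiplies by 8; shifting back recovers the original value.
    assert((x << 3) == 8);
    assert(((x << 3) >> 3) == x);
}
```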
+ +Implementations: +```rust +impl Shl for u8 { fn shl(self, other: u8) -> u8 { self << other } } +impl Shl for u16 { fn shl(self, other: u16) -> u16 { self << other } } +impl Shl for u32 { fn shl(self, other: u32) -> u32 { self << other } } +impl Shl for u64 { fn shl(self, other: u64) -> u64 { self << other } } +``` diff --git a/docs/versioned_docs/version-v0.27.0/noir/standard_library/zeroed.md b/docs/versioned_docs/version-v0.27.0/noir/standard_library/zeroed.md new file mode 100644 index 00000000000..f450fecdd36 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/noir/standard_library/zeroed.md @@ -0,0 +1,26 @@ +--- +title: Zeroed Function +description: + The zeroed function returns a zeroed value of any type. +keywords: + [ + zeroed + ] +--- + +Implements `fn zeroed() -> T` to return a zeroed value of any type. This function is generally unsafe to use as the zeroed bit pattern is not guaranteed to be valid for all types. It can however, be useful in cases when the value is guaranteed not to be used such as in a BoundedVec library implementing a growable vector, up to a certain length, backed by an array. The array can be initialized with zeroed values which are guaranteed to be inaccessible until the vector is pushed to. Similarly, enumerations in noir can be implemented using this method by providing zeroed values for the unused variants. + +You can access the function at `std::unsafe::zeroed`. + +This function currently supports the following types: + +- Field +- Bool +- Uint +- Array +- Slice +- String +- Tuple +- Function + +Using it on other types could result in unexpected behavior. diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/.nojekyll b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. 
\ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md new file mode 100644 index 00000000000..b18c1926b93 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend.md @@ -0,0 +1,119 @@ +# BarretenbergBackend + +## Implements + +- [`Backend`](../index.md#backend) + +## Constructors + +### new BarretenbergBackend(acirCircuit, options) + +```ts +new BarretenbergBackend(acirCircuit, options): BarretenbergBackend +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `acirCircuit` | `CompiledCircuit` | +| `options` | [`BackendOptions`](../type-aliases/BackendOptions.md) | + +#### Returns + +[`BarretenbergBackend`](BarretenbergBackend.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +*** + +### generateProof() + +```ts +generateProof(compressedWitness): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `compressedWitness` | `Uint8Array` | + +#### Returns + +`Promise`\<`ProofData`\> + +#### Description + +Generates a proof + +*** + +### generateRecursiveProofArtifacts() + +```ts +generateRecursiveProofArtifacts(proofData, numOfPublicInputs): Promise +``` + +Generates artifacts that will be passed to a circuit that will verify this proof. + +Instead of passing the proof and verification key as a byte array, we pass them +as fields which makes it cheaper to verify in a circuit. + +The proof that is passed here will have been created using a circuit +that has the #[recursive] attribute on its `main` method. + +The number of public inputs denotes how many public inputs are in the inner proof. 
+ +#### Parameters + +| Parameter | Type | Default value | +| :------ | :------ | :------ | +| `proofData` | `ProofData` | `undefined` | +| `numOfPublicInputs` | `number` | `0` | + +#### Returns + +`Promise`\<`object`\> + +#### Example + +```typescript +const artifacts = await backend.generateRecursiveProofArtifacts(proof, numOfPublicInputs); +``` + +*** + +### verifyProof() + +```ts +verifyProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | `ProofData` | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Verifies a proof + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/index.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/index.md new file mode 100644 index 00000000000..c146316a915 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/index.md @@ -0,0 +1,58 @@ +# backend_barretenberg + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [BarretenbergBackend](classes/BarretenbergBackend.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [BackendOptions](type-aliases/BackendOptions.md) | - | + +## References + +### CompiledCircuit + +Renames and re-exports [Backend](index.md#backend) + +*** + +### ProofData + +Renames and re-exports [Backend](index.md#backend) + +## Variables + +### Backend + +```ts +Backend: any; +``` + +## Functions + +### publicInputsToWitnessMap() + +```ts +publicInputsToWitnessMap(publicInputs, abi): Backend +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `publicInputs` | `string`[] | +| `abi` | `Abi` | + +#### Returns + +[`Backend`](index.md#backend) + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md new file mode 100644 index 00000000000..b49a479f4f4 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions.md @@ -0,0 +1,21 @@ +# BackendOptions + +```ts +type BackendOptions: object; +``` + +## Description + +An options object, currently only used to specify the number of threads to use. + +## Type declaration + +| Member | Type | Description | +| :------ | :------ | :------ | +| `memory` | `object` | - | +| `memory.maximum` | `number` | - | +| `threads` | `number` | **Description**
Number of threads | + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs new file mode 100644 index 00000000000..339353b9862 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/backend_barretenberg/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/classes/BarretenbergBackend","label":"BarretenbergBackend"}]},{"type":"category","label":"Type Aliases","items":[{"type":"doc","id":"reference/NoirJS/backend_barretenberg/type-aliases/BackendOptions","label":"BackendOptions"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/.nojekyll b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/classes/Noir.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/classes/Noir.md new file mode 100644 index 00000000000..45dd62ee57e --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/classes/Noir.md @@ -0,0 +1,132 @@ +# Noir + +## Constructors + +### new Noir(circuit, backend) + +```ts +new Noir(circuit, backend?): Noir +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `circuit` | `CompiledCircuit` | +| `backend`? | `any` | + +#### Returns + +[`Noir`](Noir.md) + +## Methods + +### destroy() + +```ts +destroy(): Promise +``` + +#### Returns + +`Promise`\<`void`\> + +#### Description + +Destroys the underlying backend instance. + +#### Example + +```typescript +await noir.destroy(); +``` + +*** + +### execute() + +```ts +execute(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | `InputMap` | +| `foreignCallHandler`? | [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<`object`\> + +#### Description + +Allows to execute a circuit to get its witness and return value. + +#### Example + +```typescript +async execute(inputs) +``` + +*** + +### generateProof() + +```ts +generateProof(inputs, foreignCallHandler?): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `inputs` | `InputMap` | +| `foreignCallHandler`? | [`ForeignCallHandler`](../type-aliases/ForeignCallHandler.md) | + +#### Returns + +`Promise`\<`ProofData`\> + +#### Description + +Generates a witness and a proof given an object as input. 
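
A fuller usage sketch (the import path, input names, and values are illustrative and depend on your circuit):

```typescript
import { Noir } from '@noir-lang/noir_js';
import { BarretenbergBackend } from '@noir-lang/backend_barretenberg';
import circuit from './target/circuit.json';

const backend = new BarretenbergBackend(circuit);
const noir = new Noir(circuit, backend);

// Resolves to ProofData: the proof bytes together with the public inputs.
const proofData = await noir.generateProof({ x: 1, y: 2 });
```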
+ +#### Example + +```typescript +async generateProof(input) +``` + +*** + +### verifyProof() + +```ts +verifyProof(proofData): Promise +``` + +#### Parameters + +| Parameter | Type | +| :------ | :------ | +| `proofData` | `ProofData` | + +#### Returns + +`Promise`\<`boolean`\> + +#### Description + +Instantiates the verification key and verifies a proof. + +#### Example + +```typescript +async verifyProof(proof) +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/and.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/and.md new file mode 100644 index 00000000000..c783283e396 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/and.md @@ -0,0 +1,22 @@ +# and() + +```ts +and(lhs, rhs): string +``` + +Performs a bitwise AND operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/blake2s256.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/blake2s256.md new file mode 100644 index 00000000000..7882d0da8d5 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/blake2s256.md @@ -0,0 +1,21 @@ +# blake2s256() + +```ts +blake2s256(inputs): Uint8Array +``` + +Calculates the Blake2s256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md new file mode 100644 index 00000000000..5e3cd53e9d3 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify.md @@ -0,0 +1,28 @@ +# ecdsa\_secp256k1\_verify() + +```ts +ecdsa_secp256k1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Verifies a ECDSA signature over the secp256k1 curve. 
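
A thin wrapper sketch showing the expected call shape (the byte lengths follow standard secp256k1 ECDSA encodings; callers must supply real values):

```typescript
import { ecdsa_secp256k1_verify } from '@noir-lang/noir_js';

// hashedMsg: 32-byte message hash; pubKeyX/pubKeyY: 32-byte coordinates; signature: 64 bytes.
function checkSignature(
  hashedMsg: Uint8Array,
  pubKeyX: Uint8Array,
  pubKeyY: Uint8Array,
  signature: Uint8Array,
): boolean {
  return ecdsa_secp256k1_verify(hashedMsg, pubKeyX, pubKeyY, signature);
}
```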
+ +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md new file mode 100644 index 00000000000..0b20ff68957 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify.md @@ -0,0 +1,28 @@ +# ecdsa\_secp256r1\_verify() + +```ts +ecdsa_secp256r1_verify( + hashed_msg, + public_key_x_bytes, + public_key_y_bytes, + signature): boolean +``` + +Verifies a ECDSA signature over the secp256r1 curve. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `hashed_msg` | `Uint8Array` | | +| `public_key_x_bytes` | `Uint8Array` | | +| `public_key_y_bytes` | `Uint8Array` | | +| `signature` | `Uint8Array` | | + +## Returns + +`boolean` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/keccak256.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/keccak256.md new file mode 100644 index 00000000000..d10f155ce86 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/keccak256.md @@ -0,0 +1,21 @@ +# keccak256() + +```ts +keccak256(inputs): Uint8Array +``` + +Calculates the Keccak256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/sha256.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/sha256.md new file mode 100644 index 00000000000..6ba4ecac022 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/sha256.md @@ -0,0 +1,21 @@ +# sha256() + +```ts +sha256(inputs): Uint8Array +``` + +Calculates the SHA256 hash of the input bytes + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `inputs` | `Uint8Array` | | + +## Returns + +`Uint8Array` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/xor.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/xor.md new file mode 100644 index 00000000000..8d762b895d3 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/functions/xor.md @@ -0,0 +1,22 @@ +# xor() + +```ts +xor(lhs, rhs): string +``` + +Performs a bitwise XOR operation between `lhs` and `rhs` + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `lhs` | `string` | | +| `rhs` | `string` | | + +## Returns + +`string` + +*** + +Generated 
using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/index.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/index.md new file mode 100644 index 00000000000..cca6b3ace41 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/index.md @@ -0,0 +1,54 @@ +# noir_js + +## Exports + +### Classes + +| Class | Description | +| :------ | :------ | +| [Noir](classes/Noir.md) | - | + +### Type Aliases + +| Type alias | Description | +| :------ | :------ | +| [ForeignCallHandler](type-aliases/ForeignCallHandler.md) | A callback which performs an foreign call and returns the response. | +| [ForeignCallInput](type-aliases/ForeignCallInput.md) | - | +| [ForeignCallOutput](type-aliases/ForeignCallOutput.md) | - | +| [WitnessMap](type-aliases/WitnessMap.md) | - | + +### Functions + +| Function | Description | +| :------ | :------ | +| [and](functions/and.md) | Performs a bitwise AND operation between `lhs` and `rhs` | +| [blake2s256](functions/blake2s256.md) | Calculates the Blake2s256 hash of the input bytes | +| [ecdsa\_secp256k1\_verify](functions/ecdsa_secp256k1_verify.md) | Verifies a ECDSA signature over the secp256k1 curve. | +| [ecdsa\_secp256r1\_verify](functions/ecdsa_secp256r1_verify.md) | Verifies a ECDSA signature over the secp256r1 curve. | +| [keccak256](functions/keccak256.md) | Calculates the Keccak256 hash of the input bytes | +| [sha256](functions/sha256.md) | Calculates the SHA256 hash of the input bytes | +| [xor](functions/xor.md) | Performs a bitwise XOR operation between `lhs` and `rhs` | + +## References + +### CompiledCircuit + +Renames and re-exports [InputMap](index.md#inputmap) + +*** + +### ProofData + +Renames and re-exports [InputMap](index.md#inputmap) + +## Variables + +### InputMap + +```ts +InputMap: any; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md new file mode 100644 index 00000000000..812b8b16481 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallHandler.md @@ -0,0 +1,24 @@ +# ForeignCallHandler + +```ts +type ForeignCallHandler: (name, inputs) => Promise; +``` + +A callback which performs an foreign call and returns the response. + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `name` | `string` | The identifier for the type of foreign call being performed. | +| `inputs` | [`ForeignCallInput`](ForeignCallInput.md)[] | An array of hex encoded inputs to the foreign call. | + +## Returns + +`Promise`\<[`ForeignCallOutput`](ForeignCallOutput.md)[]\> + +outputs - An array of hex encoded outputs containing the results of the foreign call. 
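
As a sketch, a handler for a hypothetical `get_double` oracle could look like the following (the oracle name and arithmetic are illustrative); it can then be passed as the optional `foreignCallHandler` argument to `Noir.execute` or `Noir.generateProof`:

```typescript
import { ForeignCallHandler, ForeignCallInput, ForeignCallOutput } from '@noir-lang/noir_js';

const handler: ForeignCallHandler = async (
  name: string,
  inputs: ForeignCallInput[],
): Promise<ForeignCallOutput[]> => {
  if (name === 'get_double') {
    // Each ForeignCallInput is an array of hex-encoded field elements.
    const value = BigInt(inputs[0][0]);
    return [['0x' + (value * 2n).toString(16)]];
  }
  throw new Error(`Unexpected foreign call: ${name}`);
};
```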
+ +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md new file mode 100644 index 00000000000..dd95809186a --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallInput.md @@ -0,0 +1,9 @@ +# ForeignCallInput + +```ts +type ForeignCallInput: string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md new file mode 100644 index 00000000000..b71fb78a946 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/ForeignCallOutput.md @@ -0,0 +1,9 @@ +# ForeignCallOutput + +```ts +type ForeignCallOutput: string | string[]; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md new file mode 100644 index 00000000000..258c46f9d0c --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/type-aliases/WitnessMap.md @@ -0,0 +1,9 @@ +# WitnessMap + +```ts +type WitnessMap: Map; +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs new file mode 100644 index 00000000000..c6d8125eaad --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_js/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"category","label":"Classes","items":[{"type":"doc","id":"reference/NoirJS/noir_js/classes/Noir","label":"Noir"}]},{"type":"category","label":"Type 
Aliases","items":[{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallHandler","label":"ForeignCallHandler"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallInput","label":"ForeignCallInput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/ForeignCallOutput","label":"ForeignCallOutput"},{"type":"doc","id":"reference/NoirJS/noir_js/type-aliases/WitnessMap","label":"WitnessMap"}]},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_js/functions/and","label":"and"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/blake2s256","label":"blake2s256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256k1_verify","label":"ecdsa_secp256k1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/ecdsa_secp256r1_verify","label":"ecdsa_secp256r1_verify"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/keccak256","label":"keccak256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/sha256","label":"sha256"},{"type":"doc","id":"reference/NoirJS/noir_js/functions/xor","label":"xor"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/.nojekyll b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/.nojekyll new file mode 100644 index 00000000000..e2ac6616add --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/.nojekyll @@ -0,0 +1 @@ +TypeDoc added this file to prevent GitHub Pages from using Jekyll. You can turn off this behavior by setting the `githubPages` option to false. \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile.md new file mode 100644 index 00000000000..6faf763b37f --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile.md @@ -0,0 +1,51 @@ +# compile() + +```ts +compile( + fileManager, + projectPath?, + logFn?, +debugLogFn?): Promise +``` + +Compiles a Noir project + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `fileManager` | `FileManager` | The file manager to use | +| `projectPath`? | `string` | The path to the project inside the file manager. Defaults to the root of the file manager | +| `logFn`? | `LogFn` | A logging function. If not provided, console.log will be used | +| `debugLogFn`? | `LogFn` | A debug logging function. 
If not provided, logFn will be used | + +## Returns + +`Promise`\<[`ProgramCompilationArtifacts`](../index.md#programcompilationartifacts)\> + +## Example + +```typescript +// Node.js + +import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile_program(fm); +``` + +```typescript +// Browser + +import { compile_program, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile_program(fm); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile_contract.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile_contract.md new file mode 100644 index 00000000000..7d0b39a43ef --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/compile_contract.md @@ -0,0 +1,51 @@ +# compile\_contract() + +```ts +compile_contract( + fileManager, + projectPath?, + logFn?, +debugLogFn?): Promise +``` + +Compiles a Noir project + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `fileManager` | `FileManager` | The file manager to use | +| `projectPath`? | `string` | The path to the project inside the file manager. Defaults to the root of the file manager | +| `logFn`? | `LogFn` | A logging function. If not provided, console.log will be used | +| `debugLogFn`? | `LogFn` | A debug logging function. If not provided, logFn will be used | + +## Returns + +`Promise`\<[`ContractCompilationArtifacts`](../index.md#contractcompilationartifacts)\> + +## Example + +```typescript +// Node.js + +import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager(myProjectPath); +const myCompiledCode = await compile_contract(fm); +``` + +```typescript +// Browser + +import { compile_contract, createFileManager } from '@noir-lang/noir_wasm'; + +const fm = createFileManager('/'); +for (const path of files) { + await fm.writeFile(path, await getFileAsStream(path)); +} +const myCompiledCode = await compile_contract(fm); +``` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/createFileManager.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/createFileManager.md new file mode 100644 index 00000000000..7e65c1d69c7 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/createFileManager.md @@ -0,0 +1,21 @@ +# createFileManager() + +```ts +createFileManager(dataDir): FileManager +``` + +Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `dataDir` | `string` | root of the file system | + +## Returns + +`FileManager` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md 
b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md new file mode 100644 index 00000000000..fcea9275341 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/functions/inflateDebugSymbols.md @@ -0,0 +1,21 @@ +# inflateDebugSymbols() + +```ts +inflateDebugSymbols(debugSymbols): any +``` + +Decompresses and decodes the debug symbols + +## Parameters + +| Parameter | Type | Description | +| :------ | :------ | :------ | +| `debugSymbols` | `string` | The base64 encoded debug symbols | + +## Returns + +`any` + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/index.md b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/index.md new file mode 100644 index 00000000000..b6e0f9d1bc0 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/index.md @@ -0,0 +1,49 @@ +# noir_wasm + +## Exports + +### Functions + +| Function | Description | +| :------ | :------ | +| [compile](functions/compile.md) | Compiles a Noir project | +| [compile\_contract](functions/compile_contract.md) | Compiles a Noir project | +| [createFileManager](functions/createFileManager.md) | Creates a new FileManager instance based on fs in node and memfs in the browser (via webpack alias) | +| [inflateDebugSymbols](functions/inflateDebugSymbols.md) | Decompresses and decodes the debug symbols | + +## References + +### compile\_program + +Renames and re-exports [compile](functions/compile.md) + +## Interfaces + +### ContractCompilationArtifacts + +The compilation artifacts of a given contract. + +#### Properties + +| Property | Type | Description | +| :------ | :------ | :------ | +| `contract` | `ContractArtifact` | The compiled contract. | +| `warnings` | `unknown`[] | Compilation warnings. | + +*** + +### ProgramCompilationArtifacts + +The compilation artifacts of a given program. + +#### Properties + +| Property | Type | Description | +| :------ | :------ | :------ | +| `name` | `string` | not part of the compilation output, injected later | +| `program` | `ProgramArtifact` | The compiled contract. | +| `warnings` | `unknown`[] | Compilation warnings. 
| + +*** + +Generated using [typedoc-plugin-markdown](https://www.npmjs.com/package/typedoc-plugin-markdown) and [TypeDoc](https://typedoc.org/) diff --git a/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs new file mode 100644 index 00000000000..e0870710349 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/NoirJS/noir_wasm/typedoc-sidebar.cjs @@ -0,0 +1,4 @@ +// @ts-check +/** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ +const typedocSidebar = { items: [{"type":"doc","id":"reference/NoirJS/noir_wasm/index","label":"API"},{"type":"category","label":"Functions","items":[{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/compile","label":"compile"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/compile_contract","label":"compile_contract"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/createFileManager","label":"createFileManager"},{"type":"doc","id":"reference/NoirJS/noir_wasm/functions/inflateDebugSymbols","label":"inflateDebugSymbols"}]}]}; +module.exports = typedocSidebar.items; \ No newline at end of file diff --git a/docs/versioned_docs/version-v0.27.0/reference/_category_.json b/docs/versioned_docs/version-v0.27.0/reference/_category_.json new file mode 100644 index 00000000000..5b6a20a609a --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 4, + "collapsible": true, + "collapsed": true +} diff --git a/docs/versioned_docs/version-v0.27.0/reference/nargo_commands.md b/docs/versioned_docs/version-v0.27.0/reference/nargo_commands.md new file mode 100644 index 00000000000..218fcfb0c8c --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/reference/nargo_commands.md @@ -0,0 +1,381 @@ +--- +title: Nargo +description: + Noir CLI Commands for Noir Prover and Verifier to create, execute, prove and verify programs, + generate Solidity verifier smart contract and compile into JSON file containing ACIR + representation and ABI of circuit. +keywords: + [ + Nargo, + Noir CLI, + Noir Prover, + Noir Verifier, + generate Solidity verifier, + compile JSON file, + ACIR representation, + ABI of circuit, + TypeScript, + ] +sidebar_position: 0 +--- + +# Command-Line Help for `nargo` + +This document contains the help content for the `nargo` command-line program. 
+ +**Command Overview:** + +* [`nargo`↴](#nargo) +* [`nargo backend`↴](#nargo-backend) +* [`nargo backend current`↴](#nargo-backend-current) +* [`nargo backend ls`↴](#nargo-backend-ls) +* [`nargo backend use`↴](#nargo-backend-use) +* [`nargo backend install`↴](#nargo-backend-install) +* [`nargo backend uninstall`↴](#nargo-backend-uninstall) +* [`nargo check`↴](#nargo-check) +* [`nargo fmt`↴](#nargo-fmt) +* [`nargo codegen-verifier`↴](#nargo-codegen-verifier) +* [`nargo compile`↴](#nargo-compile) +* [`nargo new`↴](#nargo-new) +* [`nargo init`↴](#nargo-init) +* [`nargo execute`↴](#nargo-execute) +* [`nargo prove`↴](#nargo-prove) +* [`nargo verify`↴](#nargo-verify) +* [`nargo test`↴](#nargo-test) +* [`nargo info`↴](#nargo-info) +* [`nargo lsp`↴](#nargo-lsp) + +## `nargo` + +Noir's package manager + +**Usage:** `nargo ` + +###### **Subcommands:** + +* `backend` — Install and select custom backends used to generate and verify proofs +* `check` — Checks the constraint system for errors +* `fmt` — Format the Noir files in a workspace +* `codegen-verifier` — Generates a Solidity verifier smart contract for the program +* `compile` — Compile the program and its secret execution trace into ACIR format +* `new` — Create a Noir project in a new directory +* `init` — Create a Noir project in the current directory +* `execute` — Executes a circuit to calculate its return value +* `prove` — Create proof for this program. The proof is returned as a hex encoded string +* `verify` — Given a proof and a program, verify whether the proof is valid +* `test` — Run the tests for this program +* `info` — Provides detailed information on each of a program's function (represented by a single circuit) +* `lsp` — Starts the Noir LSP server + +###### **Options:** + + + + +## `nargo backend` + +Install and select custom backends used to generate and verify proofs + +**Usage:** `nargo backend ` + +###### **Subcommands:** + +* `current` — Prints the name of the currently active backend +* `ls` — Prints the list of currently installed backends +* `use` — Select the backend to use +* `install` — Install a new backend from a URL +* `uninstall` — Uninstalls a backend + + + +## `nargo backend current` + +Prints the name of the currently active backend + +**Usage:** `nargo backend current` + + + +## `nargo backend ls` + +Prints the list of currently installed backends + +**Usage:** `nargo backend ls` + + + +## `nargo backend use` + +Select the backend to use + +**Usage:** `nargo backend use ` + +###### **Arguments:** + +* `` + + + +## `nargo backend install` + +Install a new backend from a URL + +**Usage:** `nargo backend install ` + +###### **Arguments:** + +* `` — The name of the backend to install +* `` — The URL from which to download the backend + + + +## `nargo backend uninstall` + +Uninstalls a backend + +**Usage:** `nargo backend uninstall ` + +###### **Arguments:** + +* `` — The name of the backend to uninstall + + + +## `nargo check` + +Checks the constraint system for errors + +**Usage:** `nargo check [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to check +* `--workspace` — Check all packages in the workspace +* `--overwrite` — Force overwrite of existing files +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo fmt` + +Format the Noir files in a 
workspace + +**Usage:** `nargo fmt [OPTIONS]` + +###### **Options:** + +* `--check` — Run noirfmt in check mode + + + +## `nargo codegen-verifier` + +Generates a Solidity verifier smart contract for the program + +**Usage:** `nargo codegen-verifier [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to codegen +* `--workspace` — Codegen all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo compile` + +Compile the program and its secret execution trace into ACIR format + +**Usage:** `nargo compile [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to compile +* `--workspace` — Compile all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo new` + +Create a Noir project in a new directory + +**Usage:** `nargo new [OPTIONS] ` + +###### **Arguments:** + +* `` — The path to save the new project + +###### **Options:** + +* `--name ` — Name of the package [default: package directory name] +* `--lib` — Use a library template +* `--bin` — Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo init` + +Create a Noir project in the current directory + +**Usage:** `nargo init [OPTIONS]` + +###### **Options:** + +* `--name ` — Name of the package [default: current directory name] +* `--lib` — Use a library template +* `--bin` — Use a binary template [default] +* `--contract` — Use a contract template + + + +## `nargo execute` + +Executes a circuit to calculate its return value + +**Usage:** `nargo execute [OPTIONS] [WITNESS_NAME]` + +###### **Arguments:** + +* `` — Write the execution witness to named file + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `--package ` — The name of the package to execute +* `--workspace` — Execute all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo prove` + +Create proof for this program. 
The proof is returned as a hex encoded string + +**Usage:** `nargo prove [OPTIONS]` + +###### **Options:** + +* `-p`, `--prover-name ` — The name of the toml file which contains the inputs for the prover + + Default value: `Prover` +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--verify` — Verify proof after proving +* `--package ` — The name of the package to prove +* `--workspace` — Prove all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo verify` + +Given a proof and a program, verify whether the proof is valid + +**Usage:** `nargo verify [OPTIONS]` + +###### **Options:** + +* `-v`, `--verifier-name ` — The name of the toml file which contains the inputs for the verifier + + Default value: `Verifier` +* `--package ` — The name of the package verify +* `--workspace` — Verify all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo test` + +Run the tests for this program + +**Usage:** `nargo test [OPTIONS] [TEST_NAME]` + +###### **Arguments:** + +* `` — If given, only tests with names containing this string will be run + +###### **Options:** + +* `--show-output` — Display output of `println` statements +* `--exact` — Only run tests that match exactly +* `--package ` — The name of the package to test +* `--workspace` — Test all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings +* `--oracle-resolver ` — JSON RPC url to solve oracle calls + + + +## `nargo info` + +Provides detailed information on each of a program's function (represented by a single circuit) + +Current information provided per circuit: 1. The number of ACIR opcodes 2. Counts the final number gates in the circuit used by a backend + +**Usage:** `nargo info [OPTIONS]` + +###### **Options:** + +* `--package ` — The name of the package to detail +* `--workspace` — Detail all packages in the workspace +* `--expression-width ` — Override the expression width requested by the backend +* `--force` — Force a full recompilation +* `--print-acir` — Display the ACIR for compiled circuit +* `--deny-warnings` — Treat all warnings as errors +* `--silence-warnings` — Suppress warnings + + + +## `nargo lsp` + +Starts the Noir LSP server + +Starts an LSP server which allows IDEs such as VS Code to display diagnostics in Noir source. + +VS Code Noir Language Support: https://marketplace.visualstudio.com/items?itemName=noir-lang.vscode-noir + +**Usage:** `nargo lsp` + + + +
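
For orientation, a typical end-to-end flow chaining the commands above might look like this (the package name is illustrative):

```sh
nargo new my_circuit   # create a new binary package
cd my_circuit
nargo check            # type-check and generate Prover.toml / Verifier.toml templates
nargo compile          # compile the program to ACIR in ./target
nargo prove            # create a proof using the inputs in Prover.toml
nargo verify           # verify the proof against the values in Verifier.toml
nargo test             # run the package's tests
```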
+ + + This document was generated automatically by + clap-markdown. + + diff --git a/docs/versioned_docs/version-v0.27.0/tutorials/noirjs_app.md b/docs/versioned_docs/version-v0.27.0/tutorials/noirjs_app.md new file mode 100644 index 00000000000..12beb476994 --- /dev/null +++ b/docs/versioned_docs/version-v0.27.0/tutorials/noirjs_app.md @@ -0,0 +1,279 @@ +--- +title: Building a web app with NoirJS +description: Learn how to setup a new app that uses Noir to generate and verify zero-knowledge SNARK proofs in a typescript or javascript environment. +keywords: [how to, guide, javascript, typescript, noir, barretenberg, zero-knowledge, proofs, app] +sidebar_position: 0 +pagination_next: noir/concepts/data_types/index +--- + +NoirJS is a set of packages meant to work both in a browser and a server environment. In this tutorial, we will build a simple web app using them. From here, you should get an idea on how to proceed with your own Noir projects! + +You can find the complete app code for this guide [here](https://github.com/noir-lang/tiny-noirjs-app). + +## Setup + +:::note + +Feel free to use whatever versions, just keep in mind that Nargo and the NoirJS packages are meant to be in sync. For example, Nargo 0.19.x matches `noir_js@0.19.x`, etc. + +In this guide, we will be pinned to 0.19.4. + +::: + +Before we start, we want to make sure we have Node and Nargo installed. + +We start by opening a terminal and executing `node --version`. If we don't get an output like `v20.10.0`, that means node is not installed. Let's do that by following the handy [nvm guide](https://github.com/nvm-sh/nvm?tab=readme-ov-file#install--update-script). + +As for `Nargo`, we can follow the the [Nargo guide](../getting_started/installation/index.md) to install it. If you're lazy, just paste this on a terminal and run `noirup`: + +```sh +curl -L https://raw.githubusercontent.com/noir-lang/noirup/main/install | bash +``` + +Easy enough. Onwards! + +## Our project + +ZK is a powerful technology. An app that doesn't reveal one of the inputs to *anyone* is almost unbelievable, yet Noir makes it as easy as a single line of code. + +In fact, it's so simple that it comes nicely packaged in `nargo`. Let's do that! + +### Nargo + +Run: + +```nargo new circuit``` + +And... That's about it. Your program is ready to be compiled and run. + +To compile, let's `cd` into the `circuit` folder to enter our project, and call: + +```nargo compile``` + +This compiles our circuit into `json` format and add it to a new `target` folder. + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit <---- our working directory + ├── Nargo.toml + ├── src + │ └── main.nr + └── target + └── circuit.json +``` + +::: + +### Node and Vite + +If you want to explore Nargo, feel free to go on a side-quest now and follow the steps in the +[getting started](../getting_started/hello_noir/index.md) guide. However, we want our app to run on the browser, so we need Vite. + +Vite is a powerful tool to generate static websites. While it provides all kinds of features, let's just go barebones with some good old vanilla JS. + +To do this this, go back to the previous folder (`cd ..`) and create a new vite project by running `npm create vite` and choosing "Vanilla" and "Javascript". + +You should see `vite-project` appear in your root folder. 
This seems like a good time to `cd` into it and install our NoirJS packages: + +```bash +npm i @noir-lang/backend_barretenberg@0.19.4 @noir-lang/noir_js@0.19.4 +``` + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...etc... +└── vite-project <---- our working directory + └── ...etc... +``` + +::: + +#### Some cleanup + +`npx create vite` is amazing but it creates a bunch of files we don't really need for our simple example. Actually, let's just delete everything except for `index.html`, `main.js` and `package.json`. I feel lighter already. + +![my heart is ready for you, noir.js](@site/static/img/memes/titanic.jpeg) + +## HTML + +Our app won't run like this, of course. We need some working HTML, at least. Let's open our broken-hearted `index.html` and replace everything with this code snippet: + +```html + + + + + + +

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>Noir app</title>
  </head>
  <body>
    <h1>Noir app</h1>
    <input id="guessInput" type="number" placeholder="Enter your guess" />
    <button id="submitGuess">Submit guess</button>
    <div id="logs">
      <h2>Logs</h2>
    </div>
    <div id="results">
      <h2>Proof</h2>
    </div>
    <script type="module" src="/main.js"></script>
  </body>
</html>
+ + +``` + +It *could* be a beautiful UI... Depending on which universe you live in. + +## Some good old vanilla Javascript + +Our love for Noir needs undivided attention, so let's just open `main.js` and delete everything (this is where the romantic scenery becomes a bit creepy). + +Start by pasting in this boilerplate code: + +```js +const setup = async () => { + await Promise.all([ + import("@noir-lang/noirc_abi").then(module => + module.default(new URL("@noir-lang/noirc_abi/web/noirc_abi_wasm_bg.wasm", import.meta.url).toString()) + ), + import("@noir-lang/acvm_js").then(module => + module.default(new URL("@noir-lang/acvm_js/web/acvm_js_bg.wasm", import.meta.url).toString()) + ) + ]); +} + +function display(container, msg) { + const c = document.getElementById(container); + const p = document.createElement('p'); + p.textContent = msg; + c.appendChild(p); +} + +document.getElementById('submitGuess').addEventListener('click', async () => { + try { + // here's where love happens + } catch(err) { + display("logs", "Oh 💔 Wrong guess") + } +}); + +``` + +The display function doesn't do much. We're simply manipulating our website to see stuff happening. For example, if the proof fails, it will simply log a broken heart 😢 + +As for the `setup` function, it's just a sad reminder that dealing with `wasm` on the browser is not as easy as it should. Just copy, paste, and forget. + +:::info + +At this point in the tutorial, your folder structure should look like this: + +```tree +. +└── circuit + └── ...same as above +└── vite-project + ├── main.js + ├── package.json + └── index.html +``` + +You'll see other files and folders showing up (like `package-lock.json`, `node_modules`) but you shouldn't have to care about those. + +::: + +## Some NoirJS + +We're starting with the good stuff now. If you've compiled the circuit as described above, you should have a `json` file we want to import at the very top of our `main.js` file: + +```ts +import circuit from '../circuit/target/circuit.json'; +``` + +[Noir is backend-agnostic](../index.mdx#whats-new-about-noir). We write Noir, but we also need a proving backend. That's why we need to import and instantiate the two dependencies we installed above: `BarretenbergBackend` and `Noir`. Let's import them right below: + +```js +import { BarretenbergBackend } from '@noir-lang/backend_barretenberg'; +import { Noir } from '@noir-lang/noir_js'; +``` + +And instantiate them inside our try-catch block: + +```ts +// try { +const backend = new BarretenbergBackend(circuit); +const noir = new Noir(circuit, backend); +// } +``` + +:::note + +For the remainder of the tutorial, everything will be happening inside the `try` block + +::: + +## Our app + +Now for the app itself. We're capturing whatever is in the input when people press the submit button. Just add this: + +```js +const x = parseInt(document.getElementById('guessInput').value); +const input = { x, y: 2 }; +``` + +Now we're ready to prove stuff! Let's feed some inputs to our circuit and calculate the proof: + +```js +await setup(); // let's squeeze our wasm inits here + +display('logs', 'Generating proof... ⌛'); +const proof = await noir.generateProof(input); +display('logs', 'Generating proof... ✅'); +display('results', proof.proof); +``` + +You're probably eager to see stuff happening, so go and run your app now! + +From your terminal, run `npm run dev`. If it doesn't open a browser for you, just visit `localhost:5173`. You should now see the worst UI ever, with an ugly input. 
+ +![Getting Started 0](@site/static/img/noir_getting_started_1.png) + +Now, our circuit says `fn main(x: Field, y: pub Field)`. This means only the `y` value is public, and it's hardcoded above: `input = { x, y: 2 }`. In other words, you won't need to send your secret`x` to the verifier! + +By inputting any number other than 2 in the input box and clicking "submit", you should get a valid proof. Otherwise the proof won't even generate correctly. By the way, if you're human, you shouldn't be able to understand anything on the "proof" box. That's OK. We like you, human ❤️. + +## Verifying + +Time to celebrate, yes! But we shouldn't trust machines so blindly. Let's add these lines to see our proof being verified: + +```js +display('logs', 'Verifying proof... ⌛'); +const verification = await noir.verifyProof(proof); +if (verification) display('logs', 'Verifying proof... ✅'); +``` + +You have successfully generated a client-side Noir web app! + +![coded app without math knowledge](@site/static/img/memes/flextape.jpeg) + +## Further Reading + +You can see how noirjs is used in a full stack Next.js hardhat application in the [noir-starter repo here](https://github.com/noir-lang/noir-starter/tree/main/vite-hardhat). The example shows how to calculate a proof in the browser and verify it with a deployed Solidity verifier contract from noirjs. + +You should also check out the more advanced examples in the [noir-examples repo](https://github.com/noir-lang/noir-examples), where you'll find reference usage for some cool apps. diff --git a/docs/versioned_sidebars/version-v0.26.0-sidebars.json b/docs/versioned_sidebars/version-v0.26.0-sidebars.json new file mode 100644 index 00000000000..b16f79cc176 --- /dev/null +++ b/docs/versioned_sidebars/version-v0.26.0-sidebars.json @@ -0,0 +1,83 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "index" + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting_started" + } + ] + }, + { + "type": "category", + "label": "The Noir Language", + "items": [ + { + "type": "autogenerated", + "dirName": "noir" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "category", + "label": "How To Guides", + "items": [ + { + "type": "autogenerated", + "dirName": "how_to" + } + ] + }, + { + "type": "category", + "label": "Explainers", + "items": [ + { + "type": "autogenerated", + "dirName": "explainers" + } + ] + }, + { + "type": "category", + "label": "Tutorials", + "items": [ + { + "type": "autogenerated", + "dirName": "tutorials" + } + ] + }, + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "reference" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "doc", + "id": "migration_notes", + "label": "Migration notes" + } + ] +} diff --git a/docs/versioned_sidebars/version-v0.27.0-sidebars.json b/docs/versioned_sidebars/version-v0.27.0-sidebars.json new file mode 100644 index 00000000000..b16f79cc176 --- /dev/null +++ b/docs/versioned_sidebars/version-v0.27.0-sidebars.json @@ -0,0 +1,83 @@ +{ + "sidebar": [ + { + "type": "doc", + "id": "index" + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + { + "type": "autogenerated", + "dirName": "getting_started" + } + ] + }, + { + "type": "category", + "label": "The Noir Language", + "items": [ + { + "type": "autogenerated", + "dirName": "noir" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "category", + "label": "How To Guides", + "items": [ + { + "type": "autogenerated", + "dirName": "how_to" + } + ] + }, + { + "type": "category", + "label": "Explainers", + "items": [ + { + "type": "autogenerated", + "dirName": "explainers" + } + ] + }, + { + "type": "category", + "label": "Tutorials", + "items": [ + { + "type": "autogenerated", + "dirName": "tutorials" + } + ] + }, + { + "type": "category", + "label": "Reference", + "items": [ + { + "type": "autogenerated", + "dirName": "reference" + } + ] + }, + { + "type": "html", + "value": "
", + "defaultStyle": true + }, + { + "type": "doc", + "id": "migration_notes", + "label": "Migration notes" + } + ] +} diff --git a/noir_stdlib/src/bigint.nr b/noir_stdlib/src/bigint.nr index 39ec40a1480..ee9f8e44625 100644 --- a/noir_stdlib/src/bigint.nr +++ b/noir_stdlib/src/bigint.nr @@ -32,7 +32,7 @@ impl BigInt { #[builtin(bigint_from_le_bytes)] fn from_le_bytes(bytes: [u8], modulus: [u8]) -> BigInt {} #[builtin(bigint_to_le_bytes)] - fn to_le_bytes(self) -> [u8] {} + fn to_le_bytes(self) -> [u8; 32] {} fn check_32_bytes(self: Self, other: BigInt) -> bool { let bytes = self.to_le_bytes(); @@ -47,305 +47,420 @@ impl BigInt { trait BigField { fn from_le_bytes(bytes: [u8]) -> Self; + fn from_le_bytes_32(bytes: [u8; 32]) -> Self; fn to_le_bytes(self) -> [u8]; } struct Secpk1Fq { - inner: BigInt, + array: [u8;32], } impl BigField for Secpk1Fq { fn from_le_bytes(bytes: [u8]) -> Secpk1Fq { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } + Secpk1Fq { + array: array, + } + } + + fn from_le_bytes_32(bytes: [u8;32]) -> Secpk1Fq { Secpk1Fq { - inner: BigInt::from_le_bytes(bytes, secpk1_fq) + array: bytes, } } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Secpk1Fq { fn add(self: Self, other: Secpk1Fq) -> Secpk1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fq { - inner: self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Secpk1Fq { fn sub(self: Self, other: Secpk1Fq) -> Secpk1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fq { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Secpk1Fq { fn mul(self: Self, other: Secpk1Fq) -> Secpk1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fq { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Secpk1Fq { fn div(self: Self, other: Secpk1Fq) -> Secpk1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fq { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Secpk1Fq { fn eq(self: Self, other: Secpk1Fq) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } struct Secpk1Fr { - inner: BigInt, + array: [u8;32], } impl BigField for Secpk1Fr { fn from_le_bytes(bytes: [u8]) -> Secpk1Fr { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } Secpk1Fr { - inner: BigInt::from_le_bytes(bytes, secpk1_fr) + array: array, } } + + fn from_le_bytes_32(bytes: [u8;32]) -> Secpk1Fr { + Secpk1Fr { + array: bytes, + } + } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Secpk1Fr { fn add(self: Self, other: Secpk1Fr) -> Secpk1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fr { - inner: self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Secpk1Fr { fn sub(self: Self, other: Secpk1Fr) -> Secpk1Fr { + let a = 
BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fr { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Secpk1Fr { fn mul(self: Self, other: Secpk1Fr) -> Secpk1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fr { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Secpk1Fr { fn div(self: Self, other: Secpk1Fr) -> Secpk1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpk1Fr { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Secpk1Fr { fn eq(self: Self, other: Secpk1Fr) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } struct Bn254Fr { - inner: BigInt, + array: [u8;32], } impl BigField for Bn254Fr { fn from_le_bytes(bytes: [u8]) -> Bn254Fr { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } + Bn254Fr { + array: array, + } + } + + fn from_le_bytes_32(bytes: [u8;32]) -> Bn254Fr { Bn254Fr { - inner: BigInt::from_le_bytes(bytes, bn254_fr) + array: bytes, } } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Bn254Fr { fn add(self: Self, other: Bn254Fr) -> Bn254Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fr { - inner: self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Bn254Fr { fn sub(self: Self, other: Bn254Fr) -> Bn254Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fr { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Bn254Fr { fn mul(self: Self, other: Bn254Fr) -> Bn254Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fr { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Bn254Fr { fn div(self: Self, other: Bn254Fr) -> Bn254Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fr { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Bn254Fr { fn eq(self: Self, other: Bn254Fr) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } struct Bn254Fq { - inner: BigInt, + array: [u8;32], } impl BigField for Bn254Fq { fn from_le_bytes(bytes: [u8]) -> Bn254Fq { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } Bn254Fq { - inner: BigInt::from_le_bytes(bytes, bn254_fq) + array: array, } } + + fn from_le_bytes_32(bytes: [u8;32]) -> Bn254Fq { + Bn254Fq { + array: bytes, + } + } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Bn254Fq { fn add(self: Self, other: Bn254Fq) -> Bn254Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fq { - inner: 
self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Bn254Fq { fn sub(self: Self, other: Bn254Fq) -> Bn254Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fq { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Bn254Fq { fn mul(self: Self, other: Bn254Fq) -> Bn254Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fq { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Bn254Fq { fn div(self: Self, other: Bn254Fq) -> Bn254Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Bn254Fq { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Bn254Fq { fn eq(self: Self, other: Bn254Fq) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } struct Secpr1Fq { - inner: BigInt, + array: [u8;32], } impl BigField for Secpr1Fq { fn from_le_bytes(bytes: [u8]) -> Secpr1Fq { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } + Secpr1Fq { + array: array, + } + } + + fn from_le_bytes_32(bytes: [u8;32]) -> Secpr1Fq { Secpr1Fq { - inner: BigInt::from_le_bytes(bytes, secpr1_fq) + array: bytes, } } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Secpr1Fq { fn add(self: Self, other: Secpr1Fq) -> Secpr1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fq { - inner: self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Secpr1Fq { fn sub(self: Self, other: Secpr1Fq) -> Secpr1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fq { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Secpr1Fq { fn mul(self: Self, other: Secpr1Fq) -> Secpr1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fq { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Secpr1Fq { fn div(self: Self, other: Secpr1Fq) -> Secpr1Fq { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fq { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Secpr1Fq { fn eq(self: Self, other: Secpr1Fq) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } struct Secpr1Fr { - inner: BigInt, + array: [u8;32], } impl BigField for Secpr1Fr { fn from_le_bytes(bytes: [u8]) -> Secpr1Fr { + assert(bytes.len() <= 32); + let mut array = [0;32]; + for i in 0..bytes.len() { + array[i] = bytes[i]; + } + Secpr1Fr { + array: array, + } + } + + fn from_le_bytes_32(bytes: [u8;32]) -> Secpr1Fr { Secpr1Fr { - inner: BigInt::from_le_bytes(bytes, secpr1_fr) + array: bytes, } } + fn to_le_bytes(self) -> [u8] { - self.inner.to_le_bytes() + self.array } } impl Add for Secpr1Fr { fn add(self: Self, other: Secpr1Fr) -> 
Secpr1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fr { - inner: self.inner.bigint_add(other.inner) + array: a.bigint_add(b).to_le_bytes() } } } impl Sub for Secpr1Fr { fn sub(self: Self, other: Secpr1Fr) -> Secpr1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fr { - inner: self.inner.bigint_sub(other.inner) + array: a.bigint_sub(b).to_le_bytes() } } } impl Mul for Secpr1Fr { fn mul(self: Self, other: Secpr1Fr) -> Secpr1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fr { - inner: self.inner.bigint_mul(other.inner) + array: a.bigint_mul(b).to_le_bytes() } - } } impl Div for Secpr1Fr { fn div(self: Self, other: Secpr1Fr) -> Secpr1Fr { + let a = BigInt::from_le_bytes(self.array.as_slice(), secpk1_fq); + let b = BigInt::from_le_bytes(other.array.as_slice(), secpk1_fq); Secpr1Fr { - inner: self.inner.bigint_div(other.inner) + array: a.bigint_div(b).to_le_bytes() } } } impl Eq for Secpr1Fr { fn eq(self: Self, other: Secpr1Fr) -> bool { - self.inner.check_32_bytes(other.inner) + self.array == other.array } } diff --git a/noir_stdlib/src/cmp.nr b/noir_stdlib/src/cmp.nr index dde29d7ee87..457b2cfa167 100644 --- a/noir_stdlib/src/cmp.nr +++ b/noir_stdlib/src/cmp.nr @@ -314,3 +314,55 @@ impl Ord for (A, B, C, D, E) where A: Ord, B: Ord, C: Ord, D: Ord result } } + +// Compares and returns the maximum of two values. +// +// Returns the second argument if the comparison determines them to be equal. +// +// # Examples +// +// ``` +// use std::cmp; +// +// assert_eq(cmp::max(1, 2), 2); +// assert_eq(cmp::max(2, 2), 2); +// ``` +pub fn max(v1: T, v2: T) -> T where T: Ord { + if v1 > v2 { v1 } else { v2 } +} + +// Compares and returns the minimum of two values. +// +// Returns the first argument if the comparison determines them to be equal. 
+// +// # Examples +// +// ``` +// use std::cmp; +// +// assert_eq(cmp::min(1, 2), 1); +// assert_eq(cmp::min(2, 2), 2); +// ``` +pub fn min(v1: T, v2: T) -> T where T: Ord { + if v1 > v2 { v2 } else { v1 } +} + +mod cmp_tests { + use crate::cmp::{min, max}; + + #[test] + fn sanity_check_min() { + assert_eq(min(0 as u64, 1 as u64), 0); + assert_eq(min(0 as u64, 0 as u64), 0); + assert_eq(min(1 as u64, 1 as u64), 1); + assert_eq(min(255 as u8, 0 as u8), 0); + } + + #[test] + fn sanity_check_max() { + assert_eq(max(0 as u64, 1 as u64), 1); + assert_eq(max(0 as u64, 0 as u64), 0); + assert_eq(max(1 as u64, 1 as u64), 1); + assert_eq(max(255 as u8, 0 as u8), 255); + } +} diff --git a/noir_stdlib/src/collections/bounded_vec.nr b/noir_stdlib/src/collections/bounded_vec.nr index 6d5fbd44247..c6a3365a979 100644 --- a/noir_stdlib/src/collections/bounded_vec.nr +++ b/noir_stdlib/src/collections/bounded_vec.nr @@ -1,3 +1,5 @@ +use crate::cmp::Eq; + struct BoundedVec { storage: [T; MaxLen], len: u64, @@ -10,7 +12,7 @@ impl BoundedVec { } pub fn get(mut self: Self, index: u64) -> T { - assert(index as u64 < self.len); + assert(index < self.len); self.storage[index] } @@ -19,7 +21,7 @@ impl BoundedVec { } pub fn push(&mut self, elem: T) { - assert(self.len < MaxLen as u64, "push out of bounds"); + assert(self.len < MaxLen, "push out of bounds"); self.storage[self.len] = elem; self.len += 1; @@ -41,7 +43,7 @@ impl BoundedVec { pub fn extend_from_array(&mut self, array: [T; Len]) { let new_len = self.len + array.len(); - assert(new_len as u64 <= MaxLen as u64, "extend_from_array out of bounds"); + assert(new_len <= MaxLen, "extend_from_array out of bounds"); for i in 0..array.len() { self.storage[self.len + i] = array[i]; } @@ -50,7 +52,7 @@ impl BoundedVec { pub fn extend_from_slice(&mut self, slice: [T]) { let new_len = self.len + slice.len(); - assert(new_len as u64 <= MaxLen as u64, "extend_from_slice out of bounds"); + assert(new_len <= MaxLen, "extend_from_slice out of bounds"); for i in 0..slice.len() { self.storage[self.len + i] = slice[i]; } @@ -60,7 +62,7 @@ impl BoundedVec { pub fn extend_from_bounded_vec(&mut self, vec: BoundedVec) { let append_len = vec.len(); let new_len = self.len + append_len; - assert(new_len as u64 <= MaxLen as u64, "extend_from_bounded_vec out of bounds"); + assert(new_len <= MaxLen, "extend_from_bounded_vec out of bounds"); let mut exceeded_len = false; for i in 0..Len { @@ -73,7 +75,7 @@ impl BoundedVec { } pub fn pop(&mut self) -> T { - assert(self.len as u64 > 0); + assert(self.len > 0); self.len -= 1; let elem = self.storage[self.len]; @@ -93,3 +95,37 @@ impl BoundedVec { ret } } + +impl Eq for BoundedVec where T: Eq { + fn eq(self, other: BoundedVec) -> bool { + // TODO: https://github.com/noir-lang/noir/issues/4837 + // + // We make the assumption that the user has used the proper interface for working with `BoundedVec`s + // rather than directly manipulating the internal fields as this can result in an inconsistent internal state. 
+ + (self.len == other.len) & (self.storage == other.storage) + } +} + +mod bounded_vec_tests { + // TODO: Allow imports from "super" + use crate::collections::bounded_vec::BoundedVec; + + #[test] + fn empty_equality() { + let mut bounded_vec1: BoundedVec = BoundedVec::new(); + let mut bounded_vec2: BoundedVec = BoundedVec::new(); + + assert_eq(bounded_vec1, bounded_vec2); + } + + #[test] + fn inequality() { + let mut bounded_vec1: BoundedVec = BoundedVec::new(); + let mut bounded_vec2: BoundedVec = BoundedVec::new(); + bounded_vec1.push(1); + bounded_vec2.push(2); + + assert(bounded_vec1 != bounded_vec2); + } +} diff --git a/noir_stdlib/src/hash.nr b/noir_stdlib/src/hash.nr index 1a61b5e084e..26a9fa6c2c0 100644 --- a/noir_stdlib/src/hash.nr +++ b/noir_stdlib/src/hash.nr @@ -1,7 +1,6 @@ mod poseidon; mod mimc; mod poseidon2; -mod pedersen; use crate::default::Default; use crate::uint128::U128; @@ -12,36 +11,18 @@ pub fn sha256(input: [u8; N]) -> [u8; 32] // docs:end:sha256 {} -#[foreign(sha256)] -// docs:start:sha256_slice -pub fn sha256_slice(input: [u8]) -> [u8; 32] -// docs:end:sha256_slice -{} - #[foreign(blake2s)] // docs:start:blake2s pub fn blake2s(input: [u8; N]) -> [u8; 32] // docs:end:blake2s {} -#[foreign(blake2s)] -// docs:start:blake2s_slice -pub fn blake2s_slice(input: [u8]) -> [u8; 32] -// docs:end:blake2s_slice -{} - #[foreign(blake3)] // docs:start:blake3 pub fn blake3(input: [u8; N]) -> [u8; 32] // docs:end:blake3 {} -#[foreign(blake3)] -// docs:start:blake3_slice -pub fn blake3_slice(input: [u8]) -> [u8; 32] -// docs:end:blake3_slice -{} - // docs:start:pedersen_commitment struct PedersenPoint { x : Field, @@ -53,28 +34,14 @@ pub fn pedersen_commitment(input: [Field; N]) -> PedersenPoint { pedersen_commitment_with_separator(input, 0) } -// docs:start:pedersen_commitment_slice -pub fn pedersen_commitment_slice(input: [Field]) -> PedersenPoint { - pedersen_commitment_with_separator_slice(input, 0) -} -// docs:end:pedersen_commitment_slice - #[foreign(pedersen_commitment)] pub fn __pedersen_commitment_with_separator(input: [Field; N], separator: u32) -> [Field; 2] {} -#[foreign(pedersen_commitment)] -pub fn __pedersen_commitment_with_separator_slice(input: [Field], separator: u32) -> [Field; 2] {} - pub fn pedersen_commitment_with_separator(input: [Field; N], separator: u32) -> PedersenPoint { let values = __pedersen_commitment_with_separator(input, separator); PedersenPoint { x: values[0], y: values[1] } } -pub fn pedersen_commitment_with_separator_slice(input: [Field], separator: u32) -> PedersenPoint { - let values = __pedersen_commitment_with_separator_slice(input, separator); - PedersenPoint { x: values[0], y: values[1] } -} - // docs:start:pedersen_hash pub fn pedersen_hash(input: [Field; N]) -> Field // docs:end:pedersen_hash @@ -82,31 +49,18 @@ pub fn pedersen_hash(input: [Field; N]) -> Field pedersen_hash_with_separator(input, 0) } -// docs:start:pedersen_hash_slice -pub fn pedersen_hash_slice(input: [Field]) -> Field -// docs:end:pedersen_hash_slice -{ - pedersen_hash_with_separator_slice(input, 0) -} - #[foreign(pedersen_hash)] pub fn pedersen_hash_with_separator(input: [Field; N], separator: u32) -> Field {} -#[foreign(pedersen_hash)] -pub fn pedersen_hash_with_separator_slice(input: [Field], separator: u32) -> Field {} - pub fn hash_to_field(inputs: [Field]) -> Field { - let mut inputs_as_bytes = &[]; + let mut sum = 0; for input in inputs { - let input_bytes = input.to_le_bytes(32); - for i in 0..32 { - inputs_as_bytes = 
inputs_as_bytes.push_back(input_bytes[i]); - } + let input_bytes: [u8; 32] = input.to_le_bytes(32).as_array(); + sum += crate::field::bytes32_to_field(blake2s(input_bytes)); } - let hashed_input = blake2s_slice(inputs_as_bytes); - crate::field::bytes32_to_field(hashed_input) + sum } #[foreign(keccak256)] @@ -115,12 +69,6 @@ pub fn keccak256(input: [u8; N], message_size: u32) -> [u8; 32] // docs:end:keccak256 {} -#[foreign(keccak256)] -// docs:start:keccak256_slice -pub fn keccak256_slice(input: [u8], message_size: u32) -> [u8; 32] -// docs:end:keccak256_slice -{} - #[foreign(poseidon2_permutation)] pub fn poseidon2_permutation(_input: [Field; N], _state_length: u32) -> [Field; N] {} @@ -140,7 +88,7 @@ trait Hash{ trait Hasher{ fn finish(self) -> Field; - fn write(&mut self, input: [Field]); + fn write(&mut self, input: Field); } // BuildHasher is a factory trait, responsible for production of specific Hasher. @@ -170,49 +118,49 @@ where impl Hash for Field { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self]); + H::write(state, self); } } impl Hash for u8 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for u32 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for u64 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for i8 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for i32 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for i64 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } impl Hash for bool { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self as Field]); + H::write(state, self as Field); } } @@ -222,7 +170,8 @@ impl Hash for () { impl Hash for U128 { fn hash(self, state: &mut H) where H: Hasher{ - H::write(state, &[self.lo as Field, self.hi as Field]); + H::write(state, self.lo as Field); + H::write(state, self.hi as Field); } } diff --git a/noir_stdlib/src/hash/mimc.nr b/noir_stdlib/src/hash/mimc.nr index 1fb53701013..6c5502c2fbf 100644 --- a/noir_stdlib/src/hash/mimc.nr +++ b/noir_stdlib/src/hash/mimc.nr @@ -126,9 +126,8 @@ pub fn mimc_bn254(array: [Field; N]) -> Field { r } -struct MimcHasher{ +struct MimcHasher { _state: [Field], - _len: u64, } impl Hasher for MimcHasher { @@ -136,24 +135,22 @@ impl Hasher for MimcHasher { fn finish(self) -> Field { let exponent = 7; let mut r = 0; - for i in 0..self._len { + for i in 0..self._state.len() { let h = mimc(self._state[i], r, MIMC_BN254_CONSTANTS, exponent); r = r + self._state[i] + h; } r } - fn write(&mut self, input: [Field]){ - self._state = self._state.append(input); - self._len += input.len(); + fn write(&mut self, input: Field){ + self._state = self._state.push_back(input); } } impl Default for MimcHasher{ fn default() -> Self{ - MimcHasher{ + MimcHasher { _state: &[], - _len: 0, } } } diff --git a/noir_stdlib/src/hash/pedersen.nr b/noir_stdlib/src/hash/pedersen.nr deleted file mode 100644 index ad21e728945..00000000000 --- a/noir_stdlib/src/hash/pedersen.nr +++ /dev/null @@ -1,24 +0,0 @@ -use crate::hash::{Hasher, pedersen_hash_slice}; -use crate::default::Default; - -struct PedersenHasher{ - 
_state: [Field] -} - -impl Hasher for PedersenHasher { - fn finish(self) -> Field { - pedersen_hash_slice(self._state) - } - - fn write(&mut self, input: [Field]){ - self._state = self._state.append(input); - } -} - -impl Default for PedersenHasher{ - fn default() -> Self{ - PedersenHasher{ - _state: &[] - } - } -} diff --git a/noir_stdlib/src/hash/poseidon.nr b/noir_stdlib/src/hash/poseidon.nr index 85a0802f630..742bfcaf804 100644 --- a/noir_stdlib/src/hash/poseidon.nr +++ b/noir_stdlib/src/hash/poseidon.nr @@ -105,66 +105,65 @@ fn apply_matrix(a: [Field; M], x: [Field; N]) -> [Field; N] { struct PoseidonHasher{ _state: [Field], - _len: u64, } impl Hasher for PoseidonHasher { #[field(bn254)] fn finish(self) -> Field { let mut result = 0; - assert(self._len < 16); - if self._len == 1 { + let len = self._state.len(); + assert(len < 16); + if len == 1 { result = bn254::hash_1([self._state[0]]); } - if self._len == 2 { + if len == 2 { result = bn254::hash_2([self._state[0],self._state[1]]); } - if self._len == 3 { + if len == 3 { result = bn254::hash_3([self._state[0],self._state[1],self._state[2]]); } - if self._len == 4 { + if len == 4 { result = bn254::hash_4([self._state[0],self._state[1],self._state[2],self._state[3]]); } - if self._len == 5 { + if len == 5 { result = bn254::hash_5([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4]]); } - if self._len == 6 { + if len == 6 { result = bn254::hash_6([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5]]); } - if self._len == 7 { + if len == 7 { result = bn254::hash_7([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6]]); } - if self._len == 8 { + if len == 8 { result = bn254::hash_8([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7]]); } - if self._len == 9 { + if len == 9 { result = bn254::hash_9([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8]]); } - if self._len == 10 { + if len == 10 { result = bn254::hash_10([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8], self._state[9]]); } - if self._len == 11 { + if len == 11 { result = bn254::hash_11([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8], self._state[9], self._state[10]]); } - if self._len == 12 { + if len == 12 { result = bn254::hash_12([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8], self._state[9], self._state[10], self._state[11]]); } - if self._len == 13 { + if len == 13 { result = bn254::hash_13([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8], self._state[9], self._state[10], self._state[11], self._state[12]]); } - if self._len == 14 { + if len == 14 { result = bn254::hash_14([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], self._state[5], self._state[6], self._state[7], self._state[8], self._state[9], self._state[10], self._state[11], self._state[12], self._state[13]]); } - if self._len == 15 { + if len == 15 { result = bn254::hash_15([self._state[0],self._state[1],self._state[2],self._state[3],self._state[4], 
self._state[5], self._state[6], self._state[7], self._state[8], self._state[9], self._state[10], self._state[11], self._state[12], self._state[13], self._state[14]]); } result } - fn write(&mut self, input: [Field]){ - self._state = self._state.append(input); - self._len += input.len(); + fn write(&mut self, input: Field){ + self._state = self._state.push_back(input); } } @@ -172,7 +171,6 @@ impl Default for PoseidonHasher{ fn default() -> Self{ PoseidonHasher{ _state: &[], - _len: 0, } } } diff --git a/noir_stdlib/src/hash/poseidon2.nr b/noir_stdlib/src/hash/poseidon2.nr index 12bf373e671..e5a82a596c6 100644 --- a/noir_stdlib/src/hash/poseidon2.nr +++ b/noir_stdlib/src/hash/poseidon2.nr @@ -117,30 +117,27 @@ impl Poseidon2 { struct Poseidon2Hasher{ _state: [Field], - _len: u64, } impl Hasher for Poseidon2Hasher { fn finish(self) -> Field { let iv : Field = (self._state.len() as Field)*18446744073709551616; // iv = (self._state.len() << 64) let mut sponge = Poseidon2::new(iv); - for i in 0..self._len { + for i in 0..self._state.len() { sponge.absorb(self._state[i]); } sponge.squeeze() } - fn write(&mut self, input: [Field]){ - self._state = self._state.append(input); - self._len += input.len(); + fn write(&mut self, input: Field){ + self._state = self._state.push_back(input); } } -impl Default for Poseidon2Hasher{ - fn default() -> Self{ - Poseidon2Hasher{ +impl Default for Poseidon2Hasher { + fn default() -> Self { + Poseidon2Hasher { _state: &[], - _len: 0, } } } diff --git a/noir_stdlib/src/slice.nr b/noir_stdlib/src/slice.nr index 164b4f96cf6..ac542a960ed 100644 --- a/noir_stdlib/src/slice.nr +++ b/noir_stdlib/src/slice.nr @@ -43,4 +43,14 @@ impl [T] { } self } + + pub fn as_array(self) -> [T; N] { + assert(self.len() == N); + + let mut array = [crate::unsafe::zeroed(); N]; + for i in 0..N { + array[i] = self[i]; + } + array + } } diff --git a/noir_stdlib/src/test.nr b/noir_stdlib/src/test.nr index e1c320215de..e6a7e03fefc 100644 --- a/noir_stdlib/src/test.nr +++ b/noir_stdlib/src/test.nr @@ -4,6 +4,9 @@ unconstrained fn create_mock_oracle(name: str) -> Field {} #[oracle(set_mock_params)] unconstrained fn set_mock_params_oracle
<P>
(id: Field, params: P) {} +#[oracle(get_mock_last_params)] +unconstrained fn get_mock_last_params_oracle
<P>
(id: Field) -> P {} + #[oracle(set_mock_returns)] unconstrained fn set_mock_returns_oracle(id: Field, returns: R) {} @@ -27,6 +30,10 @@ impl OracleMock { self } + unconstrained pub fn get_last_params
<P>
(self) -> P { + get_mock_last_params_oracle(self.id) + } + unconstrained pub fn returns(self, returns: R) -> Self { set_mock_returns_oracle(self.id, returns); self diff --git a/package.json b/package.json index 3cffdf4c802..8abaced7bdd 100644 --- a/package.json +++ b/package.json @@ -13,7 +13,7 @@ "docs" ], "scripts": { - "build": "yarn workspaces foreach --parallel --topological-dev --verbose run build", + "build": "yarn workspaces foreach -vp --topological-dev --exclude \"{docs,@noir-lang/root}\" run build", "test": "yarn workspaces foreach --parallel --verbose run test", "test:integration": "yarn workspace integration-tests test", "clean:workspaces": "yarn workspaces foreach --exclude @noir-lang/root run clean", diff --git a/scripts/benchmark_start.sh b/scripts/benchmark_start.sh new file mode 100755 index 00000000000..3e69b3d2c65 --- /dev/null +++ b/scripts/benchmark_start.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid diff --git a/scripts/benchmark_stop.sh b/scripts/benchmark_stop.sh new file mode 100755 index 00000000000..964e5291817 --- /dev/null +++ b/scripts/benchmark_stop.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +echo 4 | sudo tee /proc/sys/kernel/perf_event_paranoid diff --git a/test_programs/execution_success/regression_sha256_slice/Nargo.toml b/test_programs/execution_failure/fold_dyn_index_fail/Nargo.toml similarity index 71% rename from test_programs/execution_success/regression_sha256_slice/Nargo.toml rename to test_programs/execution_failure/fold_dyn_index_fail/Nargo.toml index 759c3b20ba8..e49a82cf0fb 100644 --- a/test_programs/execution_success/regression_sha256_slice/Nargo.toml +++ b/test_programs/execution_failure/fold_dyn_index_fail/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "regression_sha256_slice" +name = "fold_dyn_index_fail" type = "bin" authors = [""] compiler_version = ">=0.26.0" diff --git a/test_programs/execution_failure/fold_dyn_index_fail/Prover.toml b/test_programs/execution_failure/fold_dyn_index_fail/Prover.toml new file mode 100644 index 00000000000..caf3448c56f --- /dev/null +++ b/test_programs/execution_failure/fold_dyn_index_fail/Prover.toml @@ -0,0 +1,2 @@ +x = [104, 101, 108, 108, 111] +z = "4" diff --git a/test_programs/execution_failure/fold_dyn_index_fail/src/main.nr b/test_programs/execution_failure/fold_dyn_index_fail/src/main.nr new file mode 100644 index 00000000000..b12dea630b0 --- /dev/null +++ b/test_programs/execution_failure/fold_dyn_index_fail/src/main.nr @@ -0,0 +1,10 @@ +fn main(mut x: [u32; 5], z: Field) { + x[z] = 4; + dynamic_index_check(x, z + 10); +} + +#[fold] +fn dynamic_index_check(x: [u32; 5], idx: Field) { + // Dynamic index is greater than length of the array + assert(x[idx] != 0); +} diff --git a/test_programs/execution_failure/fold_nested_brillig_assert_fail/Nargo.toml b/test_programs/execution_failure/fold_nested_brillig_assert_fail/Nargo.toml new file mode 100644 index 00000000000..bb7d5e20dcc --- /dev/null +++ b/test_programs/execution_failure/fold_nested_brillig_assert_fail/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "fold_nested_brillig_assert_fail" +type = "bin" +authors = [""] +compiler_version = ">=0.26.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_failure/fold_nested_brillig_assert_fail/Prover.toml b/test_programs/execution_failure/fold_nested_brillig_assert_fail/Prover.toml new file mode 100644 index 00000000000..11497a473bc --- /dev/null +++ b/test_programs/execution_failure/fold_nested_brillig_assert_fail/Prover.toml @@ -0,0 +1 
@@ +x = "0" diff --git a/test_programs/execution_failure/fold_nested_brillig_assert_fail/src/main.nr b/test_programs/execution_failure/fold_nested_brillig_assert_fail/src/main.nr new file mode 100644 index 00000000000..0a5038c179b --- /dev/null +++ b/test_programs/execution_failure/fold_nested_brillig_assert_fail/src/main.nr @@ -0,0 +1,26 @@ +// Tests a very simple program. +// +// The features being tested is using assert on brillig that is triggered through nested ACIR calls. +// We want to make sure we get a call stack from the original call in main to the failed assert. +fn main(x: Field) { + assert(1 == fold_conditional_wrapper(x as bool)); +} + +#[fold] +fn fold_conditional_wrapper(x: bool) -> Field { + fold_conditional(x) +} + +#[fold] +fn fold_conditional(x: bool) -> Field { + conditional_wrapper(x) +} + +unconstrained fn conditional_wrapper(x: bool) -> Field { + conditional(x) +} + +unconstrained fn conditional(x: bool) -> Field { + assert(x); + 1 +} diff --git a/test_programs/execution_failure/hashmap_load_factor/src/main.nr b/test_programs/execution_failure/hashmap_load_factor/src/main.nr index ade43f898e1..907c3628142 100644 --- a/test_programs/execution_failure/hashmap_load_factor/src/main.nr +++ b/test_programs/execution_failure/hashmap_load_factor/src/main.nr @@ -1,6 +1,6 @@ use dep::std::collections::map::HashMap; use dep::std::hash::BuildHasherDefault; -use dep::std::hash::pedersen::PedersenHasher; +use dep::std::hash::poseidon2::Poseidon2Hasher; struct Entry{ key: Field, @@ -10,7 +10,7 @@ struct Entry{ global HASHMAP_CAP = 8; global HASHMAP_LEN = 6; -fn allocate_hashmap() -> HashMap> { +fn allocate_hashmap() -> HashMap> { HashMap::default() } diff --git a/test_programs/execution_success/bigint/src/main.nr b/test_programs/execution_success/bigint/src/main.nr index db269d63ac0..5645e4e9e1b 100644 --- a/test_programs/execution_success/bigint/src/main.nr +++ b/test_programs/execution_success/bigint/src/main.nr @@ -4,17 +4,51 @@ use dep::std::{bigint::Secpk1Fq, println}; fn main(mut x: [u8; 5], y: [u8; 5]) { let a = bigint::Secpk1Fq::from_le_bytes(&[x[0], x[1], x[2], x[3], x[4]]); let b = bigint::Secpk1Fq::from_le_bytes(&[y[0], y[1], y[2], y[3], y[4]]); + let mut a_be_bytes = [0; 32]; + let mut b_be_bytes = [0; 32]; + for i in 0..5 { + a_be_bytes[31-i] = x[i]; + b_be_bytes[31-i] = y[i]; + } + let a_field = dep::std::field::bytes32_to_field(a_be_bytes); + let b_field = dep::std::field::bytes32_to_field(b_be_bytes); + + // Regression for #4682 + let c = if x[0] != 0 { + test_unconstrained1(a, b) + } else { + test_unconstrained2(a, b) + }; + assert(c.array[0] == dep::std::wrapping_mul(x[0], y[0])); + let a_bytes = a.to_le_bytes(); let b_bytes = b.to_le_bytes(); for i in 0..5 { assert(a_bytes[i] == x[i]); assert(b_bytes[i] == y[i]); } + //Regression for issue #4578 + let d = a * b; + assert(d / b == a); - let d = a * b - b; - let d1 = bigint::Secpk1Fq::from_le_bytes(597243850900842442924.to_le_bytes(10)); + let d = d - b; + let mut result = [0; 32]; + let result_slice = (a_field * b_field - b_field).to_le_bytes(32); + for i in 0..32 { + result[i] = result_slice[i]; + } + let d1 = bigint::Secpk1Fq::from_le_bytes_32(result); assert(d1 == d); - // big_int_example(x[0], x[1]); + big_int_example(x[0], x[1]); +} + +fn test_unconstrained1(a: Secpk1Fq, b: Secpk1Fq) -> Secpk1Fq { + let c = a * b; + c +} +unconstrained fn test_unconstrained2(a: Secpk1Fq, b: Secpk1Fq) -> Secpk1Fq { + let c = a + b; + test_unconstrained1(a, c) } // docs:start:big_int_example diff --git 
a/test_programs/execution_success/brillig_slice_input/src/main.nr b/test_programs/execution_success/brillig_slice_input/src/main.nr index 09a9d9aef9d..8403cb7d4a0 100644 --- a/test_programs/execution_success/brillig_slice_input/src/main.nr +++ b/test_programs/execution_success/brillig_slice_input/src/main.nr @@ -25,6 +25,9 @@ fn main() { y: 8, } ]); + let brillig_sum = sum_slice(slice); + assert_eq(brillig_sum, 55); + slice = slice.push_back([ Point { x: 15, diff --git a/test_programs/execution_success/eddsa/src/main.nr b/test_programs/execution_success/eddsa/src/main.nr index fd1a95ee5fb..012c8466f2f 100644 --- a/test_programs/execution_success/eddsa/src/main.nr +++ b/test_programs/execution_success/eddsa/src/main.nr @@ -4,7 +4,6 @@ use dep::std::ec::tecurve::affine::Point as TEPoint; use dep::std::hash; use dep::std::eddsa::{eddsa_to_pub, eddsa_poseidon_verify, eddsa_verify_with_hasher}; use dep::std::hash::poseidon2::Poseidon2Hasher; -use dep::std::hash::pedersen::PedersenHasher; fn main(msg: pub Field, _priv_key_a: Field, _priv_key_b: Field) { // Skip this test for non-bn254 backends @@ -53,8 +52,5 @@ fn main(msg: pub Field, _priv_key_a: Field, _priv_key_b: Field) { // Using a different hash should fail let mut hasher = Poseidon2Hasher::default(); assert(!eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher)); - // Using a different hash should fail - let mut hasher = PedersenHasher::default(); - assert(!eddsa_verify_with_hasher(pub_key_a.x, pub_key_a.y, s_a, r8_a.x, r8_a.y, msg, &mut hasher)); } } diff --git a/test_programs/execution_success/fold_after_inlined_calls/Nargo.toml b/test_programs/execution_success/fold_after_inlined_calls/Nargo.toml new file mode 100644 index 00000000000..d23924af083 --- /dev/null +++ b/test_programs/execution_success/fold_after_inlined_calls/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "fold_after_inlined_calls" +type = "bin" +authors = [""] +compiler_version = ">=0.27.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/workspace/Prover.toml b/test_programs/execution_success/fold_after_inlined_calls/Prover.toml similarity index 50% rename from test_programs/execution_success/workspace/Prover.toml rename to test_programs/execution_success/fold_after_inlined_calls/Prover.toml index a0397e89477..4dd6b405159 100644 --- a/test_programs/execution_success/workspace/Prover.toml +++ b/test_programs/execution_success/fold_after_inlined_calls/Prover.toml @@ -1,2 +1 @@ x = "1" -y = "0" diff --git a/test_programs/execution_success/fold_after_inlined_calls/src/main.nr b/test_programs/execution_success/fold_after_inlined_calls/src/main.nr new file mode 100644 index 00000000000..84c81190b9b --- /dev/null +++ b/test_programs/execution_success/fold_after_inlined_calls/src/main.nr @@ -0,0 +1,14 @@ +fn main(x: u32) { + // We want to call a foldable function after a call to a function that is set to be inlined + assert(increment(x) == x + 1); + foo(x); +} + +#[fold] +fn foo(x: u32) { + assert(x == 1); +} + +fn increment(x: u32) -> u32 { + x + 1 +} diff --git a/test_programs/execution_success/fold_call_witness_condition/Prover.toml b/test_programs/execution_success/fold_call_witness_condition/Prover.toml index 8481ce25648..a4d6339b661 100644 --- a/test_programs/execution_success/fold_call_witness_condition/Prover.toml +++ b/test_programs/execution_success/fold_call_witness_condition/Prover.toml @@ -1,5 +1,3 @@ -# TODO(https://github.com/noir-lang/noir/issues/4707): Change these inputs to fail the 
assertion in `fn return_value` -# and change `enable` to false. For now we need the inputs to pass as we do not handle predicates with ACIR calls -x = "5" +x = "10" y = "10" -enable = true \ No newline at end of file +enable = false diff --git a/test_programs/execution_success/fold_numeric_generic_poseidon/Nargo.toml b/test_programs/execution_success/fold_numeric_generic_poseidon/Nargo.toml new file mode 100644 index 00000000000..8c2bc79ea8d --- /dev/null +++ b/test_programs/execution_success/fold_numeric_generic_poseidon/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "fold_numeric_generic_poseidon" +type = "bin" +authors = [""] +compiler_version = ">=0.27.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/fold_numeric_generic_poseidon/Prover.toml b/test_programs/execution_success/fold_numeric_generic_poseidon/Prover.toml new file mode 100644 index 00000000000..00e821cf89d --- /dev/null +++ b/test_programs/execution_success/fold_numeric_generic_poseidon/Prover.toml @@ -0,0 +1,2 @@ +enable = [true, false] +to_hash = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] diff --git a/test_programs/execution_success/fold_numeric_generic_poseidon/src/main.nr b/test_programs/execution_success/fold_numeric_generic_poseidon/src/main.nr new file mode 100644 index 00000000000..f9f3e75789b --- /dev/null +++ b/test_programs/execution_success/fold_numeric_generic_poseidon/src/main.nr @@ -0,0 +1,33 @@ +use dep::std::hash::{pedersen_hash_with_separator, poseidon2::Poseidon2}; + +global NUM_HASHES = 2; +global HASH_LENGTH = 10; + +#[fold] +pub fn poseidon_hash(inputs: [Field; N]) -> Field { + Poseidon2::hash(inputs, inputs.len()) +} + +fn main( + to_hash: [[Field; HASH_LENGTH]; NUM_HASHES], + enable: [bool; NUM_HASHES] +) -> pub [Field; NUM_HASHES + 1] { + let mut result = [0; NUM_HASHES + 1]; + for i in 0..NUM_HASHES { + let enable = enable[i]; + let to_hash = to_hash[i]; + if enable { + result[i] = poseidon_hash(to_hash); + } + } + + // We want to make sure that the foldable function with a numeric generic + // is monomorphized correctly. 
+ let mut double_preimage = [0; 20]; + for i in 0..HASH_LENGTH * 2 { + double_preimage[i] = to_hash[0][i % HASH_LENGTH]; + } + result[NUM_HASHES] = poseidon_hash(double_preimage); + + result +} diff --git a/test_programs/execution_success/hashmap/src/main.nr b/test_programs/execution_success/hashmap/src/main.nr index 4d2cbd45993..76daa594a89 100644 --- a/test_programs/execution_success/hashmap/src/main.nr +++ b/test_programs/execution_success/hashmap/src/main.nr @@ -2,7 +2,7 @@ mod utils; use dep::std::collections::map::HashMap; use dep::std::hash::BuildHasherDefault; -use dep::std::hash::pedersen::PedersenHasher; +use dep::std::hash::poseidon2::Poseidon2Hasher; use dep::std::cmp::Eq; use utils::cut; @@ -25,7 +25,7 @@ global K_CMP = FIELD_CMP; global V_CMP = FIELD_CMP; global KV_CMP = |a: (K, V), b: (K, V)| a.0.lt(b.0); -global ALLOCATE_HASHMAP = || -> HashMap> +global ALLOCATE_HASHMAP = || -> HashMap> HashMap::default(); fn main(input: [Entry; HASHMAP_LEN]) { @@ -194,24 +194,24 @@ fn test_mut_iterators() { } // docs:start:type_alias -type MyMap = HashMap>; +type MyMap = HashMap>; // docs:end:type_alias /// Tests examples from the stdlib hashmap documentation fn doc_tests() { // docs:start:default_example - let hashmap: HashMap> = HashMap::default(); + let hashmap: HashMap> = HashMap::default(); assert(hashmap.is_empty()); // docs:end:default_example // docs:start:with_hasher_example - let my_hasher: BuildHasherDefault = Default::default(); - let hashmap: HashMap> = HashMap::with_hasher(my_hasher); + let my_hasher: BuildHasherDefault = Default::default(); + let hashmap: HashMap> = HashMap::with_hasher(my_hasher); assert(hashmap.is_empty()); // docs:end:with_hasher_example // docs:start:insert_example - let mut map: HashMap> = HashMap::default(); + let mut map: HashMap> = HashMap::default(); map.insert(12, 42); assert(map.len() == 1); // docs:end:insert_example @@ -255,7 +255,7 @@ fn doc_tests() { // docs:end:len_example // docs:start:capacity_example - let empty_map: HashMap> = HashMap::default(); + let empty_map: HashMap> = HashMap::default(); assert(empty_map.len() == 0); assert(empty_map.capacity() == 42); // docs:end:capacity_example @@ -283,8 +283,8 @@ fn doc_tests() { // docs:end:retain_example // docs:start:eq_example - let mut map1: HashMap> = HashMap::default(); - let mut map2: HashMap> = HashMap::default(); + let mut map1: HashMap> = HashMap::default(); + let mut map2: HashMap> = HashMap::default(); map1.insert(1, 2); map1.insert(3, 4); @@ -297,7 +297,7 @@ fn doc_tests() { } // docs:start:get_example -fn get_example(map: HashMap>) { +fn get_example(map: HashMap>) { let x = map.get(12); if x.is_some() { @@ -306,7 +306,7 @@ fn get_example(map: HashMap> } // docs:end:get_example -fn entries_examples(map: HashMap>) { +fn entries_examples(map: HashMap>) { // docs:start:entries_example let entries = map.entries(); @@ -344,7 +344,7 @@ fn entries_examples(map: HashMap>) { +fn iter_examples(mut map: HashMap>) { // docs:start:iter_mut_example // Add 1 to each key in the map, and double the value associated with that key. 
map.iter_mut(|k, v| (k + 1, v * 2)); diff --git a/test_programs/execution_success/mock_oracle/Prover.toml b/test_programs/execution_success/mock_oracle/Prover.toml deleted file mode 100644 index 2b26a4ce471..00000000000 --- a/test_programs/execution_success/mock_oracle/Prover.toml +++ /dev/null @@ -1,2 +0,0 @@ -x = "10" - diff --git a/test_programs/execution_success/mock_oracle/src/main.nr b/test_programs/execution_success/mock_oracle/src/main.nr deleted file mode 100644 index 90fca7993cc..00000000000 --- a/test_programs/execution_success/mock_oracle/src/main.nr +++ /dev/null @@ -1,27 +0,0 @@ -use dep::std::test::OracleMock; - -struct Point { - x: Field, - y: Field, -} - -#[oracle(foo)] -unconstrained fn foo_oracle(_point: Point, _array: [Field; 4]) -> Field {} - -unconstrained fn main() { - let array = [1, 2, 3, 4]; - let another_array = [4, 3, 2, 1]; - let point = Point { x: 14, y: 27 }; - - OracleMock::mock("foo").returns(42).times(1); - let mock = OracleMock::mock("foo").returns(0); - assert_eq(42, foo_oracle(point, array)); - assert_eq(0, foo_oracle(point, array)); - mock.clear(); - - OracleMock::mock("foo").with_params((point, array)).returns(10); - OracleMock::mock("foo").with_params((point, another_array)).returns(20); - assert_eq(10, foo_oracle(point, array)); - assert_eq(20, foo_oracle(point, another_array)); -} - diff --git a/test_programs/execution_success/regression_sha256_slice/Prover.toml b/test_programs/execution_success/regression_sha256_slice/Prover.toml deleted file mode 100644 index 8a027e9eca9..00000000000 --- a/test_programs/execution_success/regression_sha256_slice/Prover.toml +++ /dev/null @@ -1 +0,0 @@ -x = ["5", "10"] diff --git a/test_programs/execution_success/regression_sha256_slice/src/main.nr b/test_programs/execution_success/regression_sha256_slice/src/main.nr deleted file mode 100644 index 60b0911cf09..00000000000 --- a/test_programs/execution_success/regression_sha256_slice/src/main.nr +++ /dev/null @@ -1,12 +0,0 @@ -use dep::std; - -fn main(x: [u8; 2]) { - let mut y = x.as_slice(); - let digest1 = std::hash::sha256_slice(y); - let mut v = y; - if x[0] != 0 { - v = y.push_back(x[0]); - } - let digest2 = std::hash::sha256_slice(v); - assert(digest1 != digest2); -} diff --git a/test_programs/execution_success/mock_oracle/Nargo.toml b/test_programs/execution_success/slice_loop/Nargo.toml similarity index 72% rename from test_programs/execution_success/mock_oracle/Nargo.toml rename to test_programs/execution_success/slice_loop/Nargo.toml index b2916487e8c..09ad90c4187 100644 --- a/test_programs/execution_success/mock_oracle/Nargo.toml +++ b/test_programs/execution_success/slice_loop/Nargo.toml @@ -1,5 +1,5 @@ [package] -name = "mock_oracle" +name = "slice_loop" type = "bin" authors = [""] diff --git a/test_programs/execution_success/slice_loop/Prover.toml b/test_programs/execution_success/slice_loop/Prover.toml new file mode 100644 index 00000000000..089a1764b54 --- /dev/null +++ b/test_programs/execution_success/slice_loop/Prover.toml @@ -0,0 +1,11 @@ +[[points]] +x = "1" +y = "2" + +[[points]] +x = "3" +y = "4" + +[[points]] +x = "5" +y = "6" diff --git a/test_programs/execution_success/slice_loop/src/main.nr b/test_programs/execution_success/slice_loop/src/main.nr new file mode 100644 index 00000000000..4ff3e865b1f --- /dev/null +++ b/test_programs/execution_success/slice_loop/src/main.nr @@ -0,0 +1,32 @@ +struct Point { + x: Field, + y: Field, +} + +impl Point { + fn serialize(self) -> [Field; 2] { + [self.x, self.y] + } +} + +fn sum(values: [Field]) -> 
Field { + let mut sum = 0; + for value in values { + sum = sum + value; + } + sum +} + +fn main(points: [Point; 3]) { + let mut serialized_points = &[]; + for point in points { + serialized_points = serialized_points.append(point.serialize().as_slice()); + } + // Do a compile-time check that needs the previous loop to be unrolled + if serialized_points.len() > 5 { + let empty_point = Point { x: 0, y: 0 }; + serialized_points = serialized_points.append(empty_point.serialize().as_slice()); + } + // Do a sum that needs both the previous loop and the previous if to have been simplified + assert_eq(sum(serialized_points), 21); +} diff --git a/test_programs/execution_success/unit_value/Nargo.toml b/test_programs/execution_success/unit_value/Nargo.toml new file mode 100644 index 00000000000..f7e3697a7c1 --- /dev/null +++ b/test_programs/execution_success/unit_value/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "short" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/execution_success/unit_value/src/main.nr b/test_programs/execution_success/unit_value/src/main.nr new file mode 100644 index 00000000000..f3844e03cf2 --- /dev/null +++ b/test_programs/execution_success/unit_value/src/main.nr @@ -0,0 +1,7 @@ +fn get_transaction() { + dep::std::unsafe::zeroed() +} + +fn main() { + get_transaction(); +} diff --git a/test_programs/noir_test_success/mock_oracle/Nargo.toml b/test_programs/noir_test_success/mock_oracle/Nargo.toml new file mode 100644 index 00000000000..428e965899c --- /dev/null +++ b/test_programs/noir_test_success/mock_oracle/Nargo.toml @@ -0,0 +1,7 @@ +[package] +name = "mock_oracle" +type = "bin" +authors = [""] +compiler_version = ">=0.23.0" + +[dependencies] \ No newline at end of file diff --git a/test_programs/noir_test_success/mock_oracle/Prover.toml b/test_programs/noir_test_success/mock_oracle/Prover.toml new file mode 100644 index 00000000000..e69de29bb2d diff --git a/test_programs/noir_test_success/mock_oracle/src/main.nr b/test_programs/noir_test_success/mock_oracle/src/main.nr new file mode 100644 index 00000000000..d840ffaef66 --- /dev/null +++ b/test_programs/noir_test_success/mock_oracle/src/main.nr @@ -0,0 +1,130 @@ +use dep::std::test::OracleMock; + +struct Point { + x: Field, + y: Field, +} + +impl Eq for Point { + fn eq(self, other: Point) -> bool { + (self.x == other.x) & (self.y == other.y) + } +} + +#[oracle(void_field)] +unconstrained fn void_field_oracle() -> Field {} + +unconstrained fn void_field() -> Field { + void_field_oracle() +} + +#[oracle(field_field)] +unconstrained fn field_field_oracle(_x: Field) -> Field {} + +unconstrained fn field_field(x: Field) -> Field { + field_field_oracle(x) +} + +#[oracle(struct_field)] +unconstrained fn struct_field_oracle(_point: Point, _array: [Field; 4]) -> Field {} + +unconstrained fn struct_field(point: Point, array: [Field; 4]) -> Field { + struct_field_oracle(point, array) +} + +#[test(should_fail)] +fn test_mock_no_returns() { + OracleMock::mock("void_field"); + void_field(); // Some return value must be set +} + +#[test] +fn test_mock() { + OracleMock::mock("void_field").returns(10); + assert_eq(void_field(), 10); +} + +#[test] +fn test_multiple_mock() { + let first_mock = OracleMock::mock("void_field").returns(10); + OracleMock::mock("void_field").returns(42); + + // The mocks are searched for in creation order, so the first one prevents the second from being called. 
+ assert_eq(void_field(), 10); + + first_mock.clear(); + assert_eq(void_field(), 42); +} + +#[test] +fn test_multiple_mock_times() { + OracleMock::mock("void_field").returns(10).times(2); + OracleMock::mock("void_field").returns(42); + + assert_eq(void_field(), 10); + assert_eq(void_field(), 10); + assert_eq(void_field(), 42); +} + +#[test] +fn test_mock_with_params() { + OracleMock::mock("field_field").with_params((5,)).returns(10); + assert_eq(field_field(5), 10); +} + +#[test] +fn test_multiple_mock_with_params() { + OracleMock::mock("field_field").with_params((5,)).returns(10); + OracleMock::mock("field_field").with_params((7,)).returns(14); + + assert_eq(field_field(5), 10); + assert_eq(field_field(7), 14); +} + +#[test] +fn test_mock_last_params() { + let mock = OracleMock::mock("field_field").returns(10); + assert_eq(field_field(5), 10); + + assert_eq(mock.get_last_params(), 5); +} + +#[test] +fn test_mock_last_params_many_calls() { + let mock = OracleMock::mock("field_field").returns(10); + assert_eq(field_field(5), 10); + assert_eq(field_field(7), 10); + + assert_eq(mock.get_last_params(), 7); +} + +#[test] +fn test_mock_struct_field() { + // Combination of simpler test cases + + let array = [1, 2, 3, 4]; + let another_array = [4, 3, 2, 1]; + let point = Point { x: 14, y: 27 }; + + OracleMock::mock("struct_field").returns(42).times(2); + let timeless_mock = OracleMock::mock("struct_field").returns(0); + + assert_eq(42, struct_field(point, array)); + assert_eq(42, struct_field(point, array)); + // The times(2) mock is now cleared + + assert_eq(0, struct_field(point, array)); + + let last_params: (Point, [Field; 4]) = timeless_mock.get_last_params(); + assert_eq(last_params.0, point); + assert_eq(last_params.1, array); + + // We clear the mock with no times() to allow other mocks to be callable + timeless_mock.clear(); + + OracleMock::mock("struct_field").with_params((point, array)).returns(10); + OracleMock::mock("struct_field").with_params((point, another_array)).returns(20); + assert_eq(10, struct_field(point, array)); + assert_eq(20, struct_field(point, another_array)); +} + diff --git a/tooling/acvm_cli/src/cli/execute_cmd.rs b/tooling/acvm_cli/src/cli/execute_cmd.rs index 86e7277451f..4e36dbd1f22 100644 --- a/tooling/acvm_cli/src/cli/execute_cmd.rs +++ b/tooling/acvm_cli/src/cli/execute_cmd.rs @@ -6,11 +6,10 @@ use bn254_blackbox_solver::Bn254BlackBoxSolver; use clap::Args; use crate::cli::fs::inputs::{read_bytecode_from_file, read_inputs_from_file}; -use crate::cli::fs::witness::save_witness_to_dir; use crate::errors::CliError; use nargo::ops::{execute_program, DefaultForeignCallExecutor}; -use super::fs::witness::create_output_witness_string; +use super::fs::witness::{create_output_witness_string, save_witness_to_dir}; /// Executes a circuit to calculate its return value #[derive(Debug, Clone, Args)] @@ -46,9 +45,9 @@ fn run_command(args: ExecuteCommand) -> Result { )?; if args.output_witness.is_some() { save_witness_to_dir( - &output_witness_string, - &args.working_directory, + output_witness, &args.output_witness.unwrap(), + &args.working_directory, )?; } Ok(output_witness_string) diff --git a/tooling/acvm_cli/src/cli/fs/witness.rs b/tooling/acvm_cli/src/cli/fs/witness.rs index 2daaa5a3a58..30ef4278f4b 100644 --- a/tooling/acvm_cli/src/cli/fs/witness.rs +++ b/tooling/acvm_cli/src/cli/fs/witness.rs @@ -5,24 +5,29 @@ use std::{ path::{Path, PathBuf}, }; -use acvm::acir::native_types::WitnessMap; +use acvm::acir::native_types::{WitnessMap, WitnessStack}; use 
crate::errors::{CliError, FilesystemError}; -/// Saves the provided output witnesses to a toml file created at the given location -pub(crate) fn save_witness_to_dir>( - output_witness: &String, - witness_dir: P, - file_name: &String, -) -> Result { - let witness_path = witness_dir.as_ref().join(file_name); +fn create_named_dir(named_dir: &Path, name: &str) -> PathBuf { + std::fs::create_dir_all(named_dir) + .unwrap_or_else(|_| panic!("could not create the `{name}` directory")); + + PathBuf::from(named_dir) +} - let mut file = File::create(&witness_path) - .map_err(|_| FilesystemError::OutputWitnessCreationFailed(file_name.clone()))?; - write!(file, "{}", output_witness) - .map_err(|_| FilesystemError::OutputWitnessWriteFailed(file_name.clone()))?; +fn write_to_file(bytes: &[u8], path: &Path) -> String { + let display = path.display(); - Ok(witness_path) + let mut file = match File::create(path) { + Err(why) => panic!("couldn't create {display}: {why}"), + Ok(file) => file, + }; + + match file.write_all(bytes) { + Err(why) => panic!("couldn't write to {display}: {why}"), + Ok(_) => display.to_string(), + } } /// Creates a toml representation of the provided witness map @@ -34,3 +39,19 @@ pub(crate) fn create_output_witness_string(witnesses: &WitnessMap) -> Result>( + witnesses: WitnessStack, + witness_name: &str, + witness_dir: P, +) -> Result { + create_named_dir(witness_dir.as_ref(), "witness"); + let witness_path = witness_dir.as_ref().join(witness_name).with_extension("gz"); + + let buf: Vec = witnesses + .try_into() + .map_err(|_op| FilesystemError::OutputWitnessCreationFailed(witness_name.to_string()))?; + write_to_file(buf.as_slice(), &witness_path); + + Ok(witness_path) +} diff --git a/tooling/acvm_cli/src/errors.rs b/tooling/acvm_cli/src/errors.rs index 923046410ea..8bc79347159 100644 --- a/tooling/acvm_cli/src/errors.rs +++ b/tooling/acvm_cli/src/errors.rs @@ -20,9 +20,6 @@ pub(crate) enum FilesystemError { #[error(" Error: failed to create output witness file {0}.")] OutputWitnessCreationFailed(String), - - #[error(" Error: failed to write output witness file {0}.")] - OutputWitnessWriteFailed(String), } #[derive(Debug, Error)] diff --git a/tooling/backend_interface/src/cli/info.rs b/tooling/backend_interface/src/cli/info.rs index 8ca3d4dd0a3..6e6603ce53e 100644 --- a/tooling/backend_interface/src/cli/info.rs +++ b/tooling/backend_interface/src/cli/info.rs @@ -56,7 +56,7 @@ fn info_command() -> Result<(), BackendError> { let expression_width = InfoCommand { crs_path }.run(backend.binary_path())?; - assert!(matches!(expression_width, ExpressionWidth::Bounded { width: 3 })); + assert!(matches!(expression_width, ExpressionWidth::Bounded { width: 4 })); Ok(()) } diff --git a/tooling/backend_interface/src/proof_system.rs b/tooling/backend_interface/src/proof_system.rs index 3b47a7ced3a..fa1f82a5722 100644 --- a/tooling/backend_interface/src/proof_system.rs +++ b/tooling/backend_interface/src/proof_system.rs @@ -39,16 +39,16 @@ impl Backend { InfoCommand { crs_path: self.crs_directory() }.run(binary_path) } - /// If we cannot get a valid backend, returns `ExpressionWidth::Bound { width: 3 }`` + /// If we cannot get a valid backend, returns `ExpressionWidth::Bound { width: 4 }`` /// The function also prints a message saying we could not find a backend pub fn get_backend_info_or_default(&self) -> ExpressionWidth { if let Ok(expression_width) = self.get_backend_info() { expression_width } else { warn!( - "No valid backend found, ExpressionWidth defaulting to Bounded with a width of 3" + 
"No valid backend found, ExpressionWidth defaulting to Bounded with a width of 4" ); - ExpressionWidth::Bounded { width: 3 } + ExpressionWidth::Bounded { width: 4 } } } diff --git a/tooling/backend_interface/src/smart_contract.rs b/tooling/backend_interface/src/smart_contract.rs index f6beeeb09d9..153ab52c83f 100644 --- a/tooling/backend_interface/src/smart_contract.rs +++ b/tooling/backend_interface/src/smart_contract.rs @@ -51,7 +51,7 @@ mod tests { let circuit = Circuit { current_witness_index: 4, - expression_width: ExpressionWidth::Bounded { width: 3 }, + expression_width: ExpressionWidth::Bounded { width: 4 }, opcodes: vec![constraint], private_parameters: BTreeSet::from([Witness(1), Witness(2)]), public_parameters: PublicInputs::default(), @@ -59,7 +59,7 @@ mod tests { assert_messages: Default::default(), recursive: false, }; - let program = Program { functions: vec![circuit] }; + let program = Program { functions: vec![circuit], unconstrained_functions: Vec::new() }; let contract = get_mock_backend()?.eth_contract(&program)?; diff --git a/tooling/backend_interface/test-binaries/mock_backend/src/info_cmd.rs b/tooling/backend_interface/test-binaries/mock_backend/src/info_cmd.rs index fd8cf602125..75a6d323e7b 100644 --- a/tooling/backend_interface/test-binaries/mock_backend/src/info_cmd.rs +++ b/tooling/backend_interface/test-binaries/mock_backend/src/info_cmd.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; const INFO_RESPONSE: &str = r#"{ "language": { "name": "PLONK-CSAT", - "width": 3 + "width": 4 }, "opcodes_supported": ["arithmetic", "directive", "brillig", "memory_init", "memory_op"], "black_box_functions_supported": [ diff --git a/tooling/bb_abstraction_leaks/build.rs b/tooling/bb_abstraction_leaks/build.rs index 52f7783851a..b3dfff9e94c 100644 --- a/tooling/bb_abstraction_leaks/build.rs +++ b/tooling/bb_abstraction_leaks/build.rs @@ -10,7 +10,7 @@ use const_format::formatcp; const USERNAME: &str = "AztecProtocol"; const REPO: &str = "aztec-packages"; -const VERSION: &str = "0.32.0"; +const VERSION: &str = "0.35.1"; const TAG: &str = formatcp!("aztec-packages-v{}", VERSION); const API_URL: &str = diff --git a/tooling/debugger/ignored-tests.txt b/tooling/debugger/ignored-tests.txt index 4507aeb8545..3b63f8d5542 100644 --- a/tooling/debugger/ignored-tests.txt +++ b/tooling/debugger/ignored-tests.txt @@ -15,3 +15,6 @@ to_bytes_integration fold_basic fold_basic_nested_call fold_call_witness_condition +fold_after_inlined_calls +fold_numeric_generic_poseidon + diff --git a/tooling/debugger/src/context.rs b/tooling/debugger/src/context.rs index 1acd581b2be..9b535075484 100644 --- a/tooling/debugger/src/context.rs +++ b/tooling/debugger/src/context.rs @@ -1,4 +1,5 @@ use crate::foreign_calls::DebugForeignCallExecutor; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::circuit::{Circuit, Opcode, OpcodeLocation}; use acvm::acir::native_types::{Witness, WitnessMap}; use acvm::brillig_vm::brillig::ForeignCallResult; @@ -42,10 +43,17 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, foreign_call_executor: Box, + unconstrained_functions: &'a [BrilligBytecode], ) -> Self { let source_to_opcodes = build_source_to_opcode_debug_mappings(debug_artifact); Self { - acvm: ACVM::new(blackbox_solver, &circuit.opcodes, initial_witness), + // TODO: need to handle brillig pointer in the debugger + acvm: ACVM::new( + blackbox_solver, + &circuit.opcodes, + initial_witness, + unconstrained_functions, + ), brillig_solver: 
None, foreign_call_executor, debug_artifact, @@ -331,7 +339,8 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { self.handle_foreign_call(foreign_call) } Err(err) => DebugCommandResult::Error(NargoError::ExecutionError( - ExecutionError::SolvingError(err), + // TODO: debugger does not not handle multiple acir calls + ExecutionError::SolvingError(err, None), )), } } @@ -374,7 +383,8 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { } } ACVMStatus::Failure(error) => DebugCommandResult::Error(NargoError::ExecutionError( - ExecutionError::SolvingError(error), + // TODO: debugger does not not handle multiple acir calls + ExecutionError::SolvingError(error, None), )), ACVMStatus::RequiresForeignCall(_) => { unreachable!("Unexpected pending foreign call resolution"); @@ -513,7 +523,11 @@ impl<'a, B: BlackBoxFunctionSolver> DebugContext<'a, B> { pub(super) fn write_brillig_memory(&mut self, ptr: usize, value: FieldElement, bit_size: u32) { if let Some(solver) = self.brillig_solver.as_mut() { - solver.write_memory_at(ptr, MemoryValue::new(value, bit_size)); + solver.write_memory_at( + ptr, + MemoryValue::new_checked(value, bit_size) + .expect("Invalid value for the given bit size"), + ); } } @@ -626,6 +640,7 @@ fn build_source_to_opcode_debug_mappings( result } +// TODO: update all debugger tests to use unconstrained brillig pointers #[cfg(test)] mod tests { use super::*; @@ -692,12 +707,14 @@ mod tests { let foreign_call_executor = Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); + let brillig_funcs = &vec![]; let mut context = DebugContext::new( &StubbedBlackBoxSolver, circuit, debug_artifact, initial_witness, foreign_call_executor, + brillig_funcs, ); assert_eq!(context.get_current_opcode_location(), Some(OpcodeLocation::Acir(0))); @@ -799,12 +816,14 @@ mod tests { let foreign_call_executor = Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); + let brillig_funcs = &vec![]; let mut context = DebugContext::new( &StubbedBlackBoxSolver, circuit, debug_artifact, initial_witness, foreign_call_executor, + brillig_funcs, ); // set breakpoint @@ -856,12 +875,14 @@ mod tests { let circuit = Circuit { opcodes, ..Circuit::default() }; let debug_artifact = DebugArtifact { debug_symbols: vec![], file_map: BTreeMap::new(), warnings: vec![] }; + let brillig_funcs = &vec![]; let context = DebugContext::new( &StubbedBlackBoxSolver, &circuit, &debug_artifact, WitnessMap::new(), Box::new(DefaultDebugForeignCallExecutor::new(true)), + brillig_funcs, ); assert_eq!(context.offset_opcode_location(&None, 0), (None, 0)); diff --git a/tooling/debugger/src/dap.rs b/tooling/debugger/src/dap.rs index ea3204ebbbc..060945132f5 100644 --- a/tooling/debugger/src/dap.rs +++ b/tooling/debugger/src/dap.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::io::{Read, Write}; use std::str::FromStr; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::circuit::{Circuit, OpcodeLocation}; use acvm::acir::native_types::WitnessMap; use acvm::BlackBoxFunctionSolver; @@ -64,6 +65,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { circuit: &'a Circuit, debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, + unconstrained_functions: &'a [BrilligBytecode], ) -> Self { let context = DebugContext::new( solver, @@ -71,6 +73,7 @@ impl<'a, R: Read, W: Write, B: BlackBoxFunctionSolver> DapSession<'a, R, W, B> { debug_artifact, initial_witness, Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, 
debug_artifact)), + unconstrained_functions, ); Self { server, @@ -603,7 +606,7 @@ pub fn run_session( initial_witness: WitnessMap, ) -> Result<(), ServerError> { let debug_artifact = DebugArtifact { - debug_symbols: vec![program.debug], + debug_symbols: program.debug, file_map: program.file_map, warnings: program.warnings, }; @@ -613,6 +616,7 @@ pub fn run_session( &program.program.functions[0], &debug_artifact, initial_witness, + &program.program.unconstrained_functions, ); session.run_loop() diff --git a/tooling/debugger/src/lib.rs b/tooling/debugger/src/lib.rs index 4a25e3417a0..a8fc61c893f 100644 --- a/tooling/debugger/src/lib.rs +++ b/tooling/debugger/src/lib.rs @@ -9,6 +9,7 @@ use std::io::{Read, Write}; use ::dap::errors::ServerError; use ::dap::server::Server; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::BlackBoxFunctionSolver; use acvm::{acir::circuit::Circuit, acir::native_types::WitnessMap}; @@ -22,8 +23,9 @@ pub fn debug_circuit( circuit: &Circuit, debug_artifact: DebugArtifact, initial_witness: WitnessMap, + unconstrained_functions: &[BrilligBytecode], ) -> Result, NargoError> { - repl::run(blackbox_solver, circuit, &debug_artifact, initial_witness) + repl::run(blackbox_solver, circuit, &debug_artifact, initial_witness, unconstrained_functions) } pub fn run_dap_loop( diff --git a/tooling/debugger/src/repl.rs b/tooling/debugger/src/repl.rs index 1c077c6ee9b..2a92698e5ce 100644 --- a/tooling/debugger/src/repl.rs +++ b/tooling/debugger/src/repl.rs @@ -1,5 +1,6 @@ use crate::context::{DebugCommandResult, DebugContext}; +use acvm::acir::circuit::brillig::BrilligBytecode; use acvm::acir::circuit::{Circuit, Opcode, OpcodeLocation}; use acvm::acir::native_types::{Witness, WitnessMap}; use acvm::{BlackBoxFunctionSolver, FieldElement}; @@ -20,6 +21,7 @@ pub struct ReplDebugger<'a, B: BlackBoxFunctionSolver> { debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, last_result: DebugCommandResult, + unconstrained_functions: &'a [BrilligBytecode], } impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { @@ -28,6 +30,7 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { circuit: &'a Circuit, debug_artifact: &'a DebugArtifact, initial_witness: WitnessMap, + unconstrained_functions: &'a [BrilligBytecode], ) -> Self { let foreign_call_executor = Box::new(DefaultDebugForeignCallExecutor::from_artifact(true, debug_artifact)); @@ -37,6 +40,7 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { debug_artifact, initial_witness.clone(), foreign_call_executor, + unconstrained_functions, ); let last_result = if context.get_current_opcode_location().is_none() { // handle circuit with no opcodes @@ -44,7 +48,15 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { } else { DebugCommandResult::Ok }; - Self { context, blackbox_solver, circuit, debug_artifact, initial_witness, last_result } + Self { + context, + blackbox_solver, + circuit, + debug_artifact, + initial_witness, + last_result, + unconstrained_functions, + } } pub fn show_current_vm_status(&self) { @@ -271,6 +283,7 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { self.debug_artifact, self.initial_witness.clone(), foreign_call_executor, + self.unconstrained_functions, ); for opcode_location in breakpoints { self.context.add_breakpoint(opcode_location); @@ -319,7 +332,7 @@ impl<'a, B: BlackBoxFunctionSolver> ReplDebugger<'a, B> { return; }; - for (index, value) in memory.iter().enumerate().filter(|(_, value)| value.bit_size > 0) { + for (index, value) in 
memory.iter().enumerate().filter(|(_, value)| value.bit_size() > 0) { println!("{index} = {}", value); } } @@ -361,9 +374,15 @@ pub fn run( circuit: &Circuit, debug_artifact: &DebugArtifact, initial_witness: WitnessMap, + unconstrained_functions: &[BrilligBytecode], ) -> Result, NargoError> { - let context = - RefCell::new(ReplDebugger::new(blackbox_solver, circuit, debug_artifact, initial_witness)); + let context = RefCell::new(ReplDebugger::new( + blackbox_solver, + circuit, + debug_artifact, + initial_witness, + unconstrained_functions, + )); let ref_context = &context; ref_context.borrow().show_current_vm_status(); diff --git a/tooling/lsp/src/requests/profile_run.rs b/tooling/lsp/src/requests/profile_run.rs index 89719947689..7d06bc87c85 100644 --- a/tooling/lsp/src/requests/profile_run.rs +++ b/tooling/lsp/src/requests/profile_run.rs @@ -84,9 +84,11 @@ fn on_profile_run_request_inner( let compiled_program = nargo::ops::transform_program(compiled_program, expression_width); - let span_opcodes = compiled_program.debug.count_span_opcodes(); - let debug_artifact: DebugArtifact = compiled_program.clone().into(); - opcodes_counts.extend(span_opcodes); + for function_debug in compiled_program.debug.iter() { + let span_opcodes = function_debug.count_span_opcodes(); + opcodes_counts.extend(span_opcodes); + } + let debug_artifact: DebugArtifact = compiled_program.into(); file_map.extend(debug_artifact.file_map); } @@ -94,14 +96,17 @@ fn on_profile_run_request_inner( let compiled_contract = nargo::ops::transform_contract(compiled_contract, expression_width); - let function_debug_info: Vec<_> = - compiled_contract.functions.iter().map(|func| &func.debug).cloned().collect(); - let debug_artifact: DebugArtifact = compiled_contract.into(); - file_map.extend(debug_artifact.file_map); + let function_debug_info = compiled_contract + .functions + .iter() + .flat_map(|func| &func.debug) + .collect::>(); for contract_function_debug in function_debug_info { let span_opcodes = contract_function_debug.count_span_opcodes(); opcodes_counts.extend(span_opcodes); } + let debug_artifact: DebugArtifact = compiled_contract.into(); + file_map.extend(debug_artifact.file_map); } let result = NargoProfileRunResult { file_map, opcodes_counts }; diff --git a/tooling/lsp/src/solver.rs b/tooling/lsp/src/solver.rs index d0acbf1aec5..0fea9b16b54 100644 --- a/tooling/lsp/src/solver.rs +++ b/tooling/lsp/src/solver.rs @@ -10,7 +10,7 @@ impl BlackBoxFunctionSolver for WrapperSolver { &self, public_key_x: &acvm::FieldElement, public_key_y: &acvm::FieldElement, - signature: &[u8], + signature: &[u8; 64], message: &[u8], ) -> Result { self.0.schnorr_verify(public_key_x, public_key_y, signature, message) diff --git a/tooling/nargo/src/artifacts/contract.rs b/tooling/nargo/src/artifacts/contract.rs index c0316a6d1a2..83bb4b94f82 100644 --- a/tooling/nargo/src/artifacts/contract.rs +++ b/tooling/nargo/src/artifacts/contract.rs @@ -1,14 +1,26 @@ use acvm::acir::circuit::Program; -use noirc_abi::{Abi, ContractEvent}; -use noirc_driver::{CompiledContract, ContractFunction}; +use noirc_abi::{Abi, AbiType, AbiValue}; +use noirc_driver::{CompiledContract, CompiledContractOutputs, ContractFunction}; use serde::{Deserialize, Serialize}; use noirc_driver::DebugFile; -use noirc_errors::debug_info::DebugInfo; -use std::collections::BTreeMap; +use noirc_errors::debug_info::ProgramDebugInfo; +use std::collections::{BTreeMap, HashMap}; use fm::FileId; +#[derive(Serialize, Deserialize)] +pub struct ContractOutputsArtifact { + pub structs: HashMap>, 
+ pub globals: HashMap>, +} + +impl From for ContractOutputsArtifact { + fn from(outputs: CompiledContractOutputs) -> Self { + ContractOutputsArtifact { structs: outputs.structs, globals: outputs.globals } + } +} + #[derive(Serialize, Deserialize)] pub struct ContractArtifact { /// Version of noir used to compile this contract @@ -17,8 +29,8 @@ pub struct ContractArtifact { pub name: String, /// Each of the contract's functions are compiled into a separate program stored in this `Vec`. pub functions: Vec, - /// All the events defined inside the contract scope. - pub events: Vec, + + pub outputs: ContractOutputsArtifact, /// Map of file Id to the source code so locations in debug info can be mapped to source code they point to. pub file_map: BTreeMap, } @@ -29,7 +41,7 @@ impl From for ContractArtifact { noir_version: contract.noir_version, name: contract.name, functions: contract.functions.into_iter().map(ContractFunctionArtifact::from).collect(), - events: contract.events, + outputs: contract.outputs.into(), file_map: contract.file_map, } } @@ -56,10 +68,10 @@ pub struct ContractFunctionArtifact { pub bytecode: Program, #[serde( - serialize_with = "DebugInfo::serialize_compressed_base64_json", - deserialize_with = "DebugInfo::deserialize_compressed_base64_json" + serialize_with = "ProgramDebugInfo::serialize_compressed_base64_json", + deserialize_with = "ProgramDebugInfo::deserialize_compressed_base64_json" )] - pub debug_symbols: DebugInfo, + pub debug_symbols: ProgramDebugInfo, } impl From for ContractFunctionArtifact { @@ -70,7 +82,7 @@ impl From for ContractFunctionArtifact { custom_attributes: func.custom_attributes, abi: func.abi, bytecode: func.bytecode, - debug_symbols: func.debug, + debug_symbols: ProgramDebugInfo { debug_infos: func.debug }, } } } diff --git a/tooling/nargo/src/artifacts/debug.rs b/tooling/nargo/src/artifacts/debug.rs index fbdf59805c9..496896468cc 100644 --- a/tooling/nargo/src/artifacts/debug.rs +++ b/tooling/nargo/src/artifacts/debug.rs @@ -121,7 +121,7 @@ impl DebugArtifact { impl From for DebugArtifact { fn from(compiled_program: CompiledProgram) -> Self { DebugArtifact { - debug_symbols: vec![compiled_program.debug], + debug_symbols: compiled_program.debug, file_map: compiled_program.file_map, warnings: compiled_program.warnings, } @@ -133,7 +133,7 @@ impl From for DebugArtifact { let all_functions_debug: Vec = compiled_artifact .functions .into_iter() - .map(|contract_function| contract_function.debug) + .flat_map(|contract_function| contract_function.debug) .collect(); DebugArtifact { diff --git a/tooling/nargo/src/artifacts/program.rs b/tooling/nargo/src/artifacts/program.rs index 9e660cbd359..67ac9f53ec8 100644 --- a/tooling/nargo/src/artifacts/program.rs +++ b/tooling/nargo/src/artifacts/program.rs @@ -5,7 +5,7 @@ use fm::FileId; use noirc_abi::Abi; use noirc_driver::CompiledProgram; use noirc_driver::DebugFile; -use noirc_errors::debug_info::DebugInfo; +use noirc_errors::debug_info::ProgramDebugInfo; use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug)] @@ -27,10 +27,10 @@ pub struct ProgramArtifact { pub bytecode: Program, #[serde( - serialize_with = "DebugInfo::serialize_compressed_base64_json", - deserialize_with = "DebugInfo::deserialize_compressed_base64_json" + serialize_with = "ProgramDebugInfo::serialize_compressed_base64_json", + deserialize_with = "ProgramDebugInfo::deserialize_compressed_base64_json" )] - pub debug_symbols: DebugInfo, + pub debug_symbols: ProgramDebugInfo, /// Map of file Id to the source code so 
locations in debug info can be mapped to source code they point to. pub file_map: BTreeMap, @@ -45,7 +45,7 @@ impl From for ProgramArtifact { abi: compiled_program.abi, noir_version: compiled_program.noir_version, bytecode: compiled_program.program, - debug_symbols: compiled_program.debug, + debug_symbols: ProgramDebugInfo { debug_infos: compiled_program.debug }, file_map: compiled_program.file_map, names: compiled_program.names, } @@ -59,7 +59,7 @@ impl From for CompiledProgram { abi: program.abi, noir_version: program.noir_version, program: program.bytecode, - debug: program.debug_symbols, + debug: program.debug_symbols.debug_infos, file_map: program.file_map, warnings: vec![], names: program.names, diff --git a/tooling/nargo/src/errors.rs b/tooling/nargo/src/errors.rs index ff238d79a46..ac03330a7c8 100644 --- a/tooling/nargo/src/errors.rs +++ b/tooling/nargo/src/errors.rs @@ -1,5 +1,5 @@ use acvm::{ - acir::circuit::OpcodeLocation, + acir::circuit::{OpcodeLocation, ResolvedOpcodeLocation}, pwg::{ErrorLocation, OpcodeResolutionError}, }; use noirc_errors::{ @@ -61,13 +61,15 @@ impl NargoError { match execution_error { ExecutionError::AssertionFailed(message, _) => Some(message), - ExecutionError::SolvingError(error) => match error { + ExecutionError::SolvingError(error, _) => match error { OpcodeResolutionError::IndexOutOfBounds { .. } | OpcodeResolutionError::OpcodeNotSolvable(_) | OpcodeResolutionError::UnsatisfiedConstrain { .. } | OpcodeResolutionError::AcirMainCallAttempted { .. } | OpcodeResolutionError::AcirCallOutputsMismatch { .. } => None, - OpcodeResolutionError::BrilligFunctionFailed { message, .. } => Some(message), + OpcodeResolutionError::BrilligFunctionFailed { message, .. } => { + message.as_ref().map(|s| s.as_str()) + } OpcodeResolutionError::BlackBoxFunctionFailed(_, reason) => Some(reason), }, } @@ -77,81 +79,105 @@ impl NargoError { #[derive(Debug, Error)] pub enum ExecutionError { #[error("Failed assertion: '{}'", .0)] - AssertionFailed(String, Vec), + AssertionFailed(String, Vec), - #[error(transparent)] - SolvingError(#[from] OpcodeResolutionError), + #[error("Failed to solve program: '{}'", .0)] + SolvingError(OpcodeResolutionError, Option>), } /// Extracts the opcode locations from a nargo error. fn extract_locations_from_error( error: &ExecutionError, - debug: &DebugInfo, + debug: &[DebugInfo], ) -> Option> { let mut opcode_locations = match error { - ExecutionError::SolvingError(OpcodeResolutionError::BrilligFunctionFailed { - call_stack, - .. - }) - | ExecutionError::AssertionFailed(_, call_stack) => Some(call_stack.clone()), - ExecutionError::SolvingError(OpcodeResolutionError::IndexOutOfBounds { - opcode_location: error_location, - .. - }) - | ExecutionError::SolvingError(OpcodeResolutionError::UnsatisfiedConstrain { - opcode_location: error_location, - }) => match error_location { + ExecutionError::SolvingError( + OpcodeResolutionError::BrilligFunctionFailed { .. }, + acir_call_stack, + ) => acir_call_stack.clone(), + ExecutionError::AssertionFailed(_, call_stack) => Some(call_stack.clone()), + ExecutionError::SolvingError( + OpcodeResolutionError::IndexOutOfBounds { opcode_location: error_location, .. 
}, + acir_call_stack, + ) + | ExecutionError::SolvingError( + OpcodeResolutionError::UnsatisfiedConstrain { opcode_location: error_location }, + acir_call_stack, + ) => match error_location { ErrorLocation::Unresolved => { unreachable!("Cannot resolve index for unsatisfied constraint") } - ErrorLocation::Resolved(opcode_location) => Some(vec![*opcode_location]), + ErrorLocation::Resolved(_) => acir_call_stack.clone(), }, _ => None, }?; - if let Some(OpcodeLocation::Brillig { acir_index, .. }) = opcode_locations.first() { - opcode_locations.insert(0, OpcodeLocation::Acir(*acir_index)); + // Insert the top-level Acir location where the Brillig function failed + for (i, resolved_location) in opcode_locations.iter().enumerate() { + if let ResolvedOpcodeLocation { + acir_function_index, + opcode_location: OpcodeLocation::Brillig { acir_index, .. }, + } = resolved_location + { + let acir_location = ResolvedOpcodeLocation { + acir_function_index: *acir_function_index, + opcode_location: OpcodeLocation::Acir(*acir_index), + }; + + opcode_locations.insert(i, acir_location); + // Go until the first brillig opcode as that means we have the start of a Brillig call stack. + // We have to loop through the opcode locations in case we had ACIR calls + // before the brillig function failure. + break; + } } Some( opcode_locations .iter() - .flat_map(|opcode_location| debug.opcode_location(opcode_location).unwrap_or_default()) + .flat_map(|resolved_location| { + debug[resolved_location.acir_function_index] + .opcode_location(&resolved_location.opcode_location) + .unwrap_or_default() + }) .collect(), ) } -/// Tries to generate a runtime diagnostic from a nargo error. It will successfully do so if it's a runtime error with a call stack. -pub fn try_to_diagnose_runtime_error( - nargo_err: &NargoError, - debug: &DebugInfo, -) -> Option { - let execution_error = match nargo_err { - NargoError::ExecutionError(execution_error) => execution_error, - _ => return None, - }; - - let source_locations = extract_locations_from_error(execution_error, debug)?; - - // The location of the error itself will be the location at the top - // of the call stack (the last item in the Vec). - let location = source_locations.last()?; - - let message = match nargo_err { +fn extract_message_from_error(nargo_err: &NargoError) -> String { + match nargo_err { NargoError::ExecutionError(ExecutionError::AssertionFailed(message, _)) => { format!("Assertion failed: '{message}'") } NargoError::ExecutionError(ExecutionError::SolvingError( OpcodeResolutionError::IndexOutOfBounds { index, array_size, .. }, + _, )) => { format!("Index out of bounds, array has size {array_size:?}, but index was {index:?}") } NargoError::ExecutionError(ExecutionError::SolvingError( OpcodeResolutionError::UnsatisfiedConstrain { .. }, + _, )) => "Failed constraint".into(), _ => nargo_err.to_string(), - }; + } +} +/// Tries to generate a runtime diagnostic from a nargo error. It will successfully do so if it's a runtime error with a call stack. +pub fn try_to_diagnose_runtime_error( + nargo_err: &NargoError, + debug: &[DebugInfo], +) -> Option { + let source_locations = match nargo_err { + NargoError::ExecutionError(execution_error) => { + extract_locations_from_error(execution_error, debug)? + } + _ => return None, + }; + // The location of the error itself will be the location at the top + // of the call stack (the last item in the Vec). 
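The per-function lookup above is the core of this change: every frame of a `ResolvedOpcodeLocation` call stack is resolved against the `DebugInfo` of the ACIR function it belongs to, rather than against a single shared `DebugInfo`. A minimal, hypothetical helper (not part of this change) that mirrors `extract_locations_from_error`:

```rust
use acvm::acir::circuit::ResolvedOpcodeLocation;
use noirc_errors::{debug_info::DebugInfo, Location};

// Resolve a cross-function call stack against per-function debug info,
// where `debug[i]` describes ACIR function `i`.
fn call_stack_to_source_locations(
    call_stack: &[ResolvedOpcodeLocation],
    debug: &[DebugInfo],
) -> Vec<Location> {
    call_stack
        .iter()
        .flat_map(|frame| {
            debug[frame.acir_function_index]
                .opcode_location(&frame.opcode_location)
                .unwrap_or_default()
        })
        .collect()
}

// For a failure at opcode 7 of function 1, reached via an `Opcode::Call` at
// opcode 3 of `main` (function 0), the stack would be:
//   [ { acir_function_index: 0, opcode_location: Acir(3) },
//     { acir_function_index: 1, opcode_location: Acir(7) } ]
// and each frame is looked up in its own function's `DebugInfo`.
```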
+ let location = source_locations.last()?; + let message = extract_message_from_error(nargo_err); Some( CustomDiagnostic::simple_error(message, String::new(), location.span) .in_file(location.file) diff --git a/tooling/nargo/src/ops/execute.rs b/tooling/nargo/src/ops/execute.rs index 6d328d65119..97584aff150 100644 --- a/tooling/nargo/src/ops/execute.rs +++ b/tooling/nargo/src/ops/execute.rs @@ -1,4 +1,5 @@ -use acvm::acir::circuit::Program; +use acvm::acir::circuit::brillig::BrilligBytecode; +use acvm::acir::circuit::{OpcodeLocation, Program, ResolvedOpcodeLocation}; use acvm::acir::native_types::WitnessStack; use acvm::brillig_vm::brillig::ForeignCallResult; use acvm::pwg::{ACVMStatus, ErrorLocation, OpcodeNotSolvable, OpcodeResolutionError, ACVM}; @@ -12,25 +13,41 @@ use super::foreign_calls::{ForeignCallExecutor, NargoForeignCallResult}; struct ProgramExecutor<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> { functions: &'a [Circuit], + + unconstrained_functions: &'a [BrilligBytecode], + // This gets built as we run through the program looking at each function call witness_stack: WitnessStack, blackbox_solver: &'a B, foreign_call_executor: &'a mut F, + + // The Noir compiler codegens per function and call stacks are not shared across ACIR function calls. + // We must rebuild a call stack when executing a program of many circuits. + call_stack: Vec, + + // Tracks the index of the current function we are executing. + // This is used to fetch the function we want to execute + // and to resolve call stack locations across many function calls. + current_function_index: usize, } impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, B, F> { fn new( functions: &'a [Circuit], + unconstrained_functions: &'a [BrilligBytecode], blackbox_solver: &'a B, foreign_call_executor: &'a mut F, ) -> Self { ProgramExecutor { functions, + unconstrained_functions, witness_stack: WitnessStack::default(), blackbox_solver, foreign_call_executor, + call_stack: Vec::default(), + current_function_index: 0, } } @@ -39,12 +56,14 @@ impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, } #[tracing::instrument(level = "trace", skip_all)] - fn execute_circuit( - &mut self, - circuit: &Circuit, - initial_witness: WitnessMap, - ) -> Result { - let mut acvm = ACVM::new(self.blackbox_solver, &circuit.opcodes, initial_witness); + fn execute_circuit(&mut self, initial_witness: WitnessMap) -> Result { + let circuit = &self.functions[self.current_function_index]; + let mut acvm = ACVM::new( + self.blackbox_solver, + &circuit.opcodes, + initial_witness, + self.unconstrained_functions, + ); // This message should be resolved by a nargo foreign call only when we have an unsatisfied assertion. let mut assert_message: Option = None; @@ -60,9 +79,26 @@ impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, let call_stack = match &error { OpcodeResolutionError::UnsatisfiedConstrain { opcode_location: ErrorLocation::Resolved(opcode_location), - } => Some(vec![*opcode_location]), + } + | OpcodeResolutionError::IndexOutOfBounds { + opcode_location: ErrorLocation::Resolved(opcode_location), + .. + } => { + let resolved_location = ResolvedOpcodeLocation { + acir_function_index: self.current_function_index, + opcode_location: *opcode_location, + }; + self.call_stack.push(resolved_location); + Some(self.call_stack.clone()) + } OpcodeResolutionError::BrilligFunctionFailed { call_stack, .. 
} => { - Some(call_stack.clone()) + let brillig_call_stack = + call_stack.iter().map(|location| ResolvedOpcodeLocation { + acir_function_index: self.current_function_index, + opcode_location: *location, + }); + self.call_stack.extend(brillig_call_stack); + Some(self.call_stack.clone()) } _ => None, }; @@ -70,26 +106,35 @@ impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, return Err(NargoError::ExecutionError(match call_stack { Some(call_stack) => { // First check whether we have a runtime assertion message that should be resolved on an ACVM failure - // If we do not have a runtime assertion message, we should check whether the circuit has any hardcoded - // messages associated with a specific `OpcodeLocation`. + // If we do not have a runtime assertion message, we check wether the error is a brillig error with a user-defined message, + // and finally we should check whether the circuit has any hardcoded messages associated with a specific `OpcodeLocation`. // Otherwise return the provided opcode resolution error. if let Some(assert_message) = assert_message { ExecutionError::AssertionFailed( assert_message.to_owned(), call_stack, ) + } else if let OpcodeResolutionError::BrilligFunctionFailed { + message: Some(message), + .. + } = &error + { + ExecutionError::AssertionFailed(message.to_owned(), call_stack) } else if let Some(assert_message) = circuit.get_assert_message( - *call_stack.last().expect("Call stacks should not be empty"), + call_stack + .last() + .expect("Call stacks should not be empty") + .opcode_location, ) { ExecutionError::AssertionFailed( assert_message.to_owned(), call_stack, ) } else { - ExecutionError::SolvingError(error) + ExecutionError::SolvingError(error, Some(call_stack)) } } - None => ExecutionError::SolvingError(error), + None => ExecutionError::SolvingError(error, None), })); } ACVMStatus::RequiresForeignCall(foreign_call) => { @@ -109,10 +154,24 @@ impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, } } ACVMStatus::RequiresAcirCall(call_info) => { + // Store the parent function index whose context we are currently executing + let acir_function_caller = self.current_function_index; + // Add call opcode to the call stack with a reference to the parent function index + self.call_stack.push(ResolvedOpcodeLocation { + acir_function_index: acir_function_caller, + opcode_location: OpcodeLocation::Acir(acvm.instruction_pointer()), + }); + + // Set current function to the circuit we are about to execute + self.current_function_index = call_info.id as usize; + // Execute the ACIR call let acir_to_call = &self.functions[call_info.id as usize]; let initial_witness = call_info.initial_witness; - let call_solved_witness = - self.execute_circuit(acir_to_call, initial_witness)?; + let call_solved_witness = self.execute_circuit(initial_witness)?; + + // Set tracking index back to the parent function after ACIR call execution + self.current_function_index = acir_function_caller; + let mut call_resolved_outputs = Vec::new(); for return_witness_index in acir_to_call.return_values.indices() { if let Some(return_value) = @@ -122,6 +181,7 @@ impl<'a, B: BlackBoxFunctionSolver, F: ForeignCallExecutor> ProgramExecutor<'a, } else { return Err(ExecutionError::SolvingError( OpcodeNotSolvable::MissingAssignment(return_witness_index).into(), + None, // Missing assignment errors do not supply user-facing diagnostics so we do not need to attach a call stack ) .into()); } @@ -143,11 +203,13 @@ pub fn execute_program( blackbox_solver: &B, 
foreign_call_executor: &mut F, ) -> Result { - let main = &program.functions[0]; - - let mut executor = - ProgramExecutor::new(&program.functions, blackbox_solver, foreign_call_executor); - let main_witness = executor.execute_circuit(main, initial_witness)?; + let mut executor = ProgramExecutor::new( + &program.functions, + &program.unconstrained_functions, + blackbox_solver, + foreign_call_executor, + ); + let main_witness = executor.execute_circuit(initial_witness)?; executor.witness_stack.push(0, main_witness); Ok(executor.finalize()) diff --git a/tooling/nargo/src/ops/foreign_calls.rs b/tooling/nargo/src/ops/foreign_calls.rs index ea67f17af2a..33767314a37 100644 --- a/tooling/nargo/src/ops/foreign_calls.rs +++ b/tooling/nargo/src/ops/foreign_calls.rs @@ -75,6 +75,7 @@ pub enum ForeignCall { AssertMessage, CreateMock, SetMockParams, + GetMockLastParams, SetMockReturns, SetMockTimes, ClearMock, @@ -93,6 +94,7 @@ impl ForeignCall { ForeignCall::AssertMessage => "assert_message", ForeignCall::CreateMock => "create_mock", ForeignCall::SetMockParams => "set_mock_params", + ForeignCall::GetMockLastParams => "get_mock_last_params", ForeignCall::SetMockReturns => "set_mock_returns", ForeignCall::SetMockTimes => "set_mock_times", ForeignCall::ClearMock => "clear_mock", @@ -105,6 +107,7 @@ impl ForeignCall { "assert_message" => Some(ForeignCall::AssertMessage), "create_mock" => Some(ForeignCall::CreateMock), "set_mock_params" => Some(ForeignCall::SetMockParams), + "get_mock_last_params" => Some(ForeignCall::GetMockLastParams), "set_mock_returns" => Some(ForeignCall::SetMockReturns), "set_mock_times" => Some(ForeignCall::SetMockTimes), "clear_mock" => Some(ForeignCall::ClearMock), @@ -122,6 +125,8 @@ struct MockedCall { name: String, /// Optionally match the parameters params: Option>, + /// The parameters with which the mock was last called + last_called_params: Option>, /// The result to return when this mock is called result: ForeignCallResult, /// How many times should this mock be called before it is removed @@ -134,6 +139,7 @@ impl MockedCall { id, name, params: None, + last_called_params: None, result: ForeignCallResult { values: vec![] }, times_left: None, } @@ -161,8 +167,15 @@ pub struct DefaultForeignCallExecutor { impl DefaultForeignCallExecutor { pub fn new(show_output: bool, resolver_url: Option<&str>) -> Self { let oracle_resolver = resolver_url.map(|resolver_url| { - let transport_builder = + let mut transport_builder = Builder::new().url(resolver_url).expect("Invalid oracle resolver URL"); + + if let Some(Ok(timeout)) = + std::env::var("NARGO_FOREIGN_CALL_TIMEOUT").ok().map(|timeout| timeout.parse()) + { + let timeout_duration = std::time::Duration::from_millis(timeout); + transport_builder = transport_builder.timeout(timeout_duration); + }; Client::with_transport(transport_builder.build()) }); DefaultForeignCallExecutor { @@ -185,7 +198,11 @@ impl DefaultForeignCallExecutor { Ok((id, params)) } - fn find_mock_by_id(&mut self, id: usize) -> Option<&mut MockedCall> { + fn find_mock_by_id(&self, id: usize) -> Option<&MockedCall> { + self.mocked_responses.iter().find(|response| response.id == id) + } + + fn find_mock_by_id_mut(&mut self, id: usize) -> Option<&mut MockedCall> { self.mocked_responses.iter_mut().find(|response| response.id == id) } @@ -250,15 +267,27 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { } Some(ForeignCall::SetMockParams) => { let (id, params) = Self::extract_mock_id(&foreign_call.inputs)?; - self.find_mock_by_id(id) + self.find_mock_by_id_mut(id) 
.unwrap_or_else(|| panic!("Unknown mock id {}", id)) .params = Some(params.to_vec()); Ok(ForeignCallResult::default().into()) } + Some(ForeignCall::GetMockLastParams) => { + let (id, _) = Self::extract_mock_id(&foreign_call.inputs)?; + let mock = + self.find_mock_by_id(id).unwrap_or_else(|| panic!("Unknown mock id {}", id)); + + let last_called_params = mock + .last_called_params + .clone() + .unwrap_or_else(|| panic!("Mock {} was never called", mock.name)); + + Ok(last_called_params.into()) + } Some(ForeignCall::SetMockReturns) => { let (id, params) = Self::extract_mock_id(&foreign_call.inputs)?; - self.find_mock_by_id(id) + self.find_mock_by_id_mut(id) .unwrap_or_else(|| panic!("Unknown mock id {}", id)) .result = ForeignCallResult { values: params.to_vec() }; @@ -269,7 +298,7 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { let times = params[0].unwrap_field().try_to_u64().expect("Invalid bit size of times"); - self.find_mock_by_id(id) + self.find_mock_by_id_mut(id) .unwrap_or_else(|| panic!("Unknown mock id {}", id)) .times_left = Some(times); @@ -292,6 +321,9 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { .mocked_responses .get_mut(response_position) .expect("Invalid position of mocked response"); + + mock.last_called_params = Some(foreign_call.inputs.clone()); + let result = mock.result.values.clone(); if let Some(times_left) = &mut mock.times_left { @@ -316,7 +348,10 @@ impl ForeignCallExecutor for DefaultForeignCallExecutor { Ok(parsed_response.into()) } - (None, None) => panic!("Unknown foreign call {}", foreign_call_name), + (None, None) => panic!( + "No mock for foreign call {}({:?})", + foreign_call_name, &foreign_call.inputs + ), } } } diff --git a/tooling/nargo/src/ops/optimize.rs b/tooling/nargo/src/ops/optimize.rs index cfaaf27ea98..a62f4696328 100644 --- a/tooling/nargo/src/ops/optimize.rs +++ b/tooling/nargo/src/ops/optimize.rs @@ -1,25 +1,36 @@ +use acvm::acir::circuit::Program; use iter_extended::vecmap; use noirc_driver::{CompiledContract, CompiledProgram}; - -/// TODO(https://github.com/noir-lang/noir/issues/4428): Need to update how these passes are run to account for -/// multiple ACIR functions +use noirc_errors::debug_info::DebugInfo; pub fn optimize_program(mut compiled_program: CompiledProgram) -> CompiledProgram { - let (optimized_circuit, location_map) = - acvm::compiler::optimize(std::mem::take(&mut compiled_program.program.functions[0])); - compiled_program.program.functions[0] = optimized_circuit; - compiled_program.debug.update_acir(location_map); + compiled_program.program = + optimize_program_internal(compiled_program.program, &mut compiled_program.debug); compiled_program } pub fn optimize_contract(contract: CompiledContract) -> CompiledContract { let functions = vecmap(contract.functions, |mut func| { - let (optimized_bytecode, location_map) = - acvm::compiler::optimize(std::mem::take(&mut func.bytecode.functions[0])); - func.bytecode.functions[0] = optimized_bytecode; - func.debug.update_acir(location_map); + func.bytecode = optimize_program_internal(func.bytecode, &mut func.debug); func }); CompiledContract { functions, ..contract } } + +fn optimize_program_internal(mut program: Program, debug: &mut [DebugInfo]) -> Program { + let functions = std::mem::take(&mut program.functions); + + let optimized_functions = functions + .into_iter() + .enumerate() + .map(|(i, function)| { + let (optimized_circuit, location_map) = acvm::compiler::optimize(function); + debug[i].update_acir(location_map); + optimized_circuit + }) + 
.collect::>(); + + program.functions = optimized_functions; + program +} diff --git a/tooling/nargo/src/ops/test.rs b/tooling/nargo/src/ops/test.rs index 45b1a88f99c..b216fff827d 100644 --- a/tooling/nargo/src/ops/test.rs +++ b/tooling/nargo/src/ops/test.rs @@ -84,7 +84,7 @@ fn test_status_program_compile_fail(err: CompileError, test_function: &TestFunct /// passed/failed to determine the test status. fn test_status_program_compile_pass( test_function: &TestFunction, - debug: DebugInfo, + debug: Vec, circuit_execution: Result, ) -> TestStatus { let circuit_execution_err = match circuit_execution { diff --git a/tooling/nargo/src/ops/transform.rs b/tooling/nargo/src/ops/transform.rs index 274286a46e4..b4811bd5780 100644 --- a/tooling/nargo/src/ops/transform.rs +++ b/tooling/nargo/src/ops/transform.rs @@ -1,21 +1,17 @@ -use acvm::acir::circuit::ExpressionWidth; +use acvm::acir::circuit::{ExpressionWidth, Program}; use iter_extended::vecmap; use noirc_driver::{CompiledContract, CompiledProgram}; - -/// TODO(https://github.com/noir-lang/noir/issues/4428): Need to update how these passes are run to account for -/// multiple ACIR functions +use noirc_errors::debug_info::DebugInfo; pub fn transform_program( mut compiled_program: CompiledProgram, expression_width: ExpressionWidth, ) -> CompiledProgram { - let (optimized_circuit, location_map) = acvm::compiler::compile( - std::mem::take(&mut compiled_program.program.functions[0]), + compiled_program.program = transform_program_internal( + compiled_program.program, + &mut compiled_program.debug, expression_width, ); - - compiled_program.program.functions[0] = optimized_circuit; - compiled_program.debug.update_acir(location_map); compiled_program } @@ -24,14 +20,33 @@ pub fn transform_contract( expression_width: ExpressionWidth, ) -> CompiledContract { let functions = vecmap(contract.functions, |mut func| { - let (optimized_bytecode, location_map) = acvm::compiler::compile( - std::mem::take(&mut func.bytecode.functions[0]), - expression_width, - ); - func.bytecode.functions[0] = optimized_bytecode; - func.debug.update_acir(location_map); + func.bytecode = + transform_program_internal(func.bytecode, &mut func.debug, expression_width); + func }); CompiledContract { functions, ..contract } } + +fn transform_program_internal( + mut program: Program, + debug: &mut [DebugInfo], + expression_width: ExpressionWidth, +) -> Program { + let functions = std::mem::take(&mut program.functions); + + let optimized_functions = functions + .into_iter() + .enumerate() + .map(|(i, function)| { + let (optimized_circuit, location_map) = + acvm::compiler::compile(function, expression_width); + debug[i].update_acir(location_map); + optimized_circuit + }) + .collect::>(); + + program.functions = optimized_functions; + program +} diff --git a/tooling/nargo_cli/Cargo.toml b/tooling/nargo_cli/Cargo.toml index 1629ae86edb..111caaa9c92 100644 --- a/tooling/nargo_cli/Cargo.toml +++ b/tooling/nargo_cli/Cargo.toml @@ -72,13 +72,9 @@ assert_cmd = "2.0.8" assert_fs = "1.0.10" predicates = "2.1.5" fm.workspace = true -criterion = "0.5.0" +criterion.workspace = true +pprof.workspace = true paste = "1.0.14" -pprof = { version = "0.12", features = [ - "flamegraph", - "frame-pointer", - "criterion", -] } iai = "0.1.1" test-binary = "3.0.2" diff --git a/tooling/nargo_cli/src/cli/debug_cmd.rs b/tooling/nargo_cli/src/cli/debug_cmd.rs index 4f3e2886b2e..7cb5cd7846b 100644 --- a/tooling/nargo_cli/src/cli/debug_cmd.rs +++ b/tooling/nargo_cli/src/cli/debug_cmd.rs @@ -232,7 +232,7 @@ pub(crate) fn 
debug_program( let initial_witness = compiled_program.abi.encode(inputs_map, None)?; let debug_artifact = DebugArtifact { - debug_symbols: vec![compiled_program.debug.clone()], + debug_symbols: compiled_program.debug.clone(), file_map: compiled_program.file_map.clone(), warnings: compiled_program.warnings.clone(), }; @@ -242,6 +242,7 @@ pub(crate) fn debug_program( &compiled_program.program.functions[0], debug_artifact, initial_witness, + &compiled_program.program.unconstrained_functions, ) .map_err(CliError::from) } diff --git a/tooling/nargo_cli/src/cli/execute_cmd.rs b/tooling/nargo_cli/src/cli/execute_cmd.rs index 697f6d7c1ea..a353065491f 100644 --- a/tooling/nargo_cli/src/cli/execute_cmd.rs +++ b/tooling/nargo_cli/src/cli/execute_cmd.rs @@ -149,7 +149,7 @@ pub(crate) fn execute_program( Ok(solved_witness_stack) => Ok(solved_witness_stack), Err(err) => { let debug_artifact = DebugArtifact { - debug_symbols: vec![compiled_program.debug.clone()], + debug_symbols: compiled_program.debug.clone(), file_map: compiled_program.file_map.clone(), warnings: compiled_program.warnings.clone(), }; diff --git a/tooling/nargo_cli/src/cli/info_cmd.rs b/tooling/nargo_cli/src/cli/info_cmd.rs index 72784013e17..67825362f92 100644 --- a/tooling/nargo_cli/src/cli/info_cmd.rs +++ b/tooling/nargo_cli/src/cli/info_cmd.rs @@ -97,17 +97,21 @@ pub(crate) fn run( if args.profile_info { for compiled_program in &compiled_programs { - let span_opcodes = compiled_program.debug.count_span_opcodes(); let debug_artifact = DebugArtifact::from(compiled_program.clone()); - print_span_opcodes(span_opcodes, &debug_artifact); + for function_debug in compiled_program.debug.iter() { + let span_opcodes = function_debug.count_span_opcodes(); + print_span_opcodes(span_opcodes, &debug_artifact); + } } for compiled_contract in &compiled_contracts { let debug_artifact = DebugArtifact::from(compiled_contract.clone()); let functions = &compiled_contract.functions; for contract_function in functions { - let span_opcodes = contract_function.debug.count_span_opcodes(); - print_span_opcodes(span_opcodes, &debug_artifact); + for function_debug in contract_function.debug.iter() { + let span_opcodes = function_debug.count_span_opcodes(); + print_span_opcodes(span_opcodes, &debug_artifact); + } } } } @@ -289,8 +293,11 @@ fn count_opcodes_and_gates_in_program( Ok(FunctionInfo { name: compiled_program.names[i].clone(), acir_opcodes: function.opcodes.len(), - circuit_size: backend - .get_exact_circuit_size(&Program { functions: vec![function] })?, + // Unconstrained functions do not matter to a backend circuit count so we pass nothing here + circuit_size: backend.get_exact_circuit_size(&Program { + functions: vec![function], + unconstrained_functions: Vec::new(), + })?, }) }) .collect::>()?; diff --git a/tooling/nargo_fmt/build.rs b/tooling/nargo_fmt/build.rs index 6f41768c1dc..7d5f07c43bf 100644 --- a/tooling/nargo_fmt/build.rs +++ b/tooling/nargo_fmt/build.rs @@ -49,28 +49,55 @@ fn generate_formatter_tests(test_file: &mut File, test_data_dir: &Path) { let output_source_path = outputs_dir.join(file_name).display().to_string(); let output_source = std::fs::read_to_string(output_source_path.clone()).unwrap(); + let skip_idempotent_test = + // TODO(https://github.com/noir-lang/noir/issues/4766): spurious trailing space + test_name == "array" || + // TODO(https://github.com/noir-lang/noir/issues/4767): pre-comment space + // TODO(https://github.com/noir-lang/noir/issues/4768): spurious newline + test_name == "tuple"; + write!( test_file, r##" 
-#[test] -fn format_{test_name}() {{ - let input = r#"{input_source}"#; - let expected_output = r#"{output_source}"#; + #[test] + fn format_{test_name}() {{ + let input = r#"{input_source}"#; + let expected_output = r#"{output_source}"#; - let (parsed_module, _errors) = noirc_frontend::parse_program(input); + let (parsed_module, _errors) = noirc_frontend::parse_program(input); - let config = nargo_fmt::Config::of("{config}").unwrap(); - let fmt_text = nargo_fmt::format(input, parsed_module, &config); + let config = nargo_fmt::Config::of("{config}").unwrap(); + let fmt_text = nargo_fmt::format(input, parsed_module, &config); - if std::env::var("UPDATE_EXPECT").is_ok() {{ - std::fs::write("{output_source_path}", fmt_text.clone()).unwrap(); - }} + if std::env::var("UPDATE_EXPECT").is_ok() {{ + std::fs::write("{output_source_path}", fmt_text.clone()).unwrap(); + }} - similar_asserts::assert_eq!(fmt_text, expected_output); -}} + similar_asserts::assert_eq!(fmt_text, expected_output); + }} "## ) .expect("Could not write templated test file."); + + if !skip_idempotent_test { + write!( + test_file, + r##" + #[test] + fn format_idempotent_{test_name}() {{ + let expected_output = r#"{output_source}"#; + + let (parsed_module, _errors) = noirc_frontend::parse_program(expected_output); + + let config = nargo_fmt::Config::of("{config}").unwrap(); + let fmt_text = nargo_fmt::format(expected_output, parsed_module, &config); + + similar_asserts::assert_eq!(fmt_text, expected_output); + }} + "## + ) + .expect("Could not write templated test file."); + } } } diff --git a/tooling/nargo_fmt/src/rewrite/typ.rs b/tooling/nargo_fmt/src/rewrite/typ.rs index 922337cdb74..980d02ee5dc 100644 --- a/tooling/nargo_fmt/src/rewrite/typ.rs +++ b/tooling/nargo_fmt/src/rewrite/typ.rs @@ -64,6 +64,7 @@ pub(crate) fn rewrite(visitor: &FmtVisitor, _shape: Shape, typ: UnresolvedType) | UnresolvedTypeData::Expression(_) | UnresolvedTypeData::String(_) | UnresolvedTypeData::FormatString(_, _) + | UnresolvedTypeData::Code | UnresolvedTypeData::TraitAsType(_, _) => visitor.slice(typ.span.unwrap()).into(), UnresolvedTypeData::Error => unreachable!(), } diff --git a/tooling/noir_codegen/package.json b/tooling/noir_codegen/package.json index 1eabc6a1398..569841b2c6a 100644 --- a/tooling/noir_codegen/package.json +++ b/tooling/noir_codegen/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.26.0", + "version": "0.27.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", diff --git a/tooling/noir_js/package.json b/tooling/noir_js/package.json index c8d4873e095..838f317c622 100644 --- a/tooling/noir_js/package.json +++ b/tooling/noir_js/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.26.0", + "version": "0.27.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", diff --git a/tooling/noir_js/test/node/e2e.test.ts b/tooling/noir_js/test/node/e2e.test.ts index 8921314e8ea..979841c47e6 100644 --- a/tooling/noir_js/test/node/e2e.test.ts +++ b/tooling/noir_js/test/node/e2e.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai'; import assert_lt_json from '../noir_compiled_examples/assert_lt/target/assert_lt.json' assert { type: 'json' }; import { Noir } from '@noir-lang/noir_js'; -import { BarretenbergBackend as Backend } from '@noir-lang/backend_barretenberg'; +import { BarretenbergBackend as Backend, BarretenbergVerifier as Verifier } from '@noir-lang/backend_barretenberg'; import { CompiledCircuit } from 
'@noir-lang/types'; const assert_lt_program = assert_lt_json as CompiledCircuit; @@ -47,6 +47,28 @@ it('end-to-end proof creation and verification (outer) -- Program API', async () expect(isValid).to.be.true; }); +it('end-to-end proof creation and verification (outer) -- Verifier API', async () => { + // Noir.Js part + const inputs = { + x: '2', + y: '3', + }; + + // Initialize backend + const backend = new Backend(assert_lt_program); + // Initialize program + const program = new Noir(assert_lt_program, backend); + // Generate proof + const proof = await program.generateProof(inputs); + + const verificationKey = await backend.getVerificationKey(); + + // Proof verification + const verifier = new Verifier(); + const isValid = await verifier.verifyProof(proof, verificationKey); + expect(isValid).to.be.true; +}); + // TODO: maybe switch to using assert_statement_recursive here to test both options it('end-to-end proof creation and verification (inner)', async () => { // Noir.Js part diff --git a/tooling/noir_js_backend_barretenberg/package.json b/tooling/noir_js_backend_barretenberg/package.json index 251dd80c2f4..af9e47a8e63 100644 --- a/tooling/noir_js_backend_barretenberg/package.json +++ b/tooling/noir_js_backend_barretenberg/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.26.0", + "version": "0.27.0", "packageManager": "yarn@3.5.1", "license": "(MIT OR Apache-2.0)", "type": "module", @@ -42,7 +42,7 @@ "lint": "NODE_NO_WARNINGS=1 eslint . --ext .ts --ignore-path ./.eslintignore --max-warnings 0" }, "dependencies": { - "@aztec/bb.js": "0.32.0", + "@aztec/bb.js": "0.35.1", "@noir-lang/types": "workspace:*", "fflate": "^0.8.0" }, diff --git a/tooling/noir_js_backend_barretenberg/src/backend.ts b/tooling/noir_js_backend_barretenberg/src/backend.ts new file mode 100644 index 00000000000..d07681dd8c1 --- /dev/null +++ b/tooling/noir_js_backend_barretenberg/src/backend.ts @@ -0,0 +1,143 @@ +import { decompressSync as gunzip } from 'fflate'; +import { acirToUint8Array } from './serialize.js'; +import { Backend, CompiledCircuit, ProofData, VerifierBackend } from '@noir-lang/types'; +import { BackendOptions } from './types.js'; +import { deflattenPublicInputs } from './public_inputs.js'; +import { reconstructProofWithPublicInputs } from './verifier.js'; +import { type Barretenberg } from '@aztec/bb.js'; + +// This is the number of bytes in a UltraPlonk proof +// minus the public inputs. +const numBytesInProofWithoutPublicInputs: number = 2144; + +export class BarretenbergVerifierBackend implements VerifierBackend { + // These type assertions are used so that we don't + // have to initialize `api` and `acirComposer` in the constructor. + // These are initialized asynchronously in the `init` function, + // constructors cannot be asynchronous which is why we do this. 
+ + protected api!: Barretenberg; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + protected acirComposer: any; + protected acirUncompressedBytecode: Uint8Array; + + constructor( + acirCircuit: CompiledCircuit, + protected options: BackendOptions = { threads: 1 }, + ) { + const acirBytecodeBase64 = acirCircuit.bytecode; + this.acirUncompressedBytecode = acirToUint8Array(acirBytecodeBase64); + } + + /** @ignore */ + async instantiate(): Promise { + if (!this.api) { + if (typeof navigator !== 'undefined' && navigator.hardwareConcurrency) { + this.options.threads = navigator.hardwareConcurrency; + } else { + try { + const os = await import('os'); + this.options.threads = os.cpus().length; + } catch (e) { + console.log('Could not detect environment. Falling back to one thread.', e); + } + } + const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); + const api = await Barretenberg.new(this.options); + + const [_exact, _total, subgroupSize] = await api.acirGetCircuitSizes(this.acirUncompressedBytecode); + const crs = await Crs.new(subgroupSize + 1); + await api.commonInitSlabAllocator(subgroupSize); + await api.srsInitSrs(new RawBuffer(crs.getG1Data()), crs.numPoints, new RawBuffer(crs.getG2Data())); + + this.acirComposer = await api.acirNewAcirComposer(subgroupSize); + await api.acirInitProvingKey(this.acirComposer, this.acirUncompressedBytecode); + this.api = api; + } + } + + /** @description Verifies a proof */ + async verifyProof(proofData: ProofData): Promise { + const proof = reconstructProofWithPublicInputs(proofData); + await this.instantiate(); + await this.api.acirInitVerificationKey(this.acirComposer); + return await this.api.acirVerifyProof(this.acirComposer, proof); + } + + async getVerificationKey(): Promise { + await this.instantiate(); + await this.api.acirInitVerificationKey(this.acirComposer); + return await this.api.acirGetVerificationKey(this.acirComposer); + } + + async destroy(): Promise { + if (!this.api) { + return; + } + await this.api.destroy(); + } +} + +export class BarretenbergBackend extends BarretenbergVerifierBackend implements Backend { + /** @description Generates a proof */ + async generateProof(compressedWitness: Uint8Array): Promise { + await this.instantiate(); + const proofWithPublicInputs = await this.api.acirCreateProof( + this.acirComposer, + this.acirUncompressedBytecode, + gunzip(compressedWitness), + ); + + const splitIndex = proofWithPublicInputs.length - numBytesInProofWithoutPublicInputs; + + const publicInputsConcatenated = proofWithPublicInputs.slice(0, splitIndex); + const proof = proofWithPublicInputs.slice(splitIndex); + const publicInputs = deflattenPublicInputs(publicInputsConcatenated); + + return { proof, publicInputs }; + } + + /** + * Generates artifacts that will be passed to a circuit that will verify this proof. + * + * Instead of passing the proof and verification key as a byte array, we pass them + * as fields which makes it cheaper to verify in a circuit. + * + * The proof that is passed here will have been created using a circuit + * that has the #[recursive] attribute on its `main` method. + * + * The number of public inputs denotes how many public inputs are in the inner proof. 
+ * + * @example + * ```typescript + * const artifacts = await backend.generateRecursiveProofArtifacts(proof, numOfPublicInputs); + * ``` + */ + async generateRecursiveProofArtifacts( + proofData: ProofData, + numOfPublicInputs = 0, + ): Promise<{ + proofAsFields: string[]; + vkAsFields: string[]; + vkHash: string; + }> { + await this.instantiate(); + const proof = reconstructProofWithPublicInputs(proofData); + const proofAsFields = ( + await this.api.acirSerializeProofIntoFields(this.acirComposer, proof, numOfPublicInputs) + ).slice(numOfPublicInputs); + + // TODO: perhaps we should put this in the init function. Need to benchmark + // TODO how long it takes. + await this.api.acirInitVerificationKey(this.acirComposer); + + // Note: If you don't init verification key, `acirSerializeVerificationKeyIntoFields`` will just hang on serialization + const vk = await this.api.acirSerializeVerificationKeyIntoFields(this.acirComposer); + + return { + proofAsFields: proofAsFields.map((p) => p.toString()), + vkAsFields: vk[0].map((vk) => vk.toString()), + vkHash: vk[1].toString(), + }; + } +} diff --git a/tooling/noir_js_backend_barretenberg/src/index.ts b/tooling/noir_js_backend_barretenberg/src/index.ts index bfdf1005a93..f28abb9a658 100644 --- a/tooling/noir_js_backend_barretenberg/src/index.ts +++ b/tooling/noir_js_backend_barretenberg/src/index.ts @@ -1,150 +1,7 @@ -import { decompressSync as gunzip } from 'fflate'; -import { acirToUint8Array } from './serialize.js'; -import { Backend, CompiledCircuit, ProofData } from '@noir-lang/types'; -import { BackendOptions } from './types.js'; -import { deflattenPublicInputs, flattenPublicInputsAsArray } from './public_inputs.js'; -import { type Barretenberg } from '@aztec/bb.js'; - +export { BarretenbergBackend } from './backend.js'; +export { BarretenbergVerifier } from './verifier.js'; export { publicInputsToWitnessMap } from './public_inputs.js'; -// This is the number of bytes in a UltraPlonk proof -// minus the public inputs. -const numBytesInProofWithoutPublicInputs: number = 2144; - -export class BarretenbergBackend implements Backend { - // These type assertions are used so that we don't - // have to initialize `api` and `acirComposer` in the constructor. - // These are initialized asynchronously in the `init` function, - // constructors cannot be asynchronous which is why we do this. - - private api!: Barretenberg; - // eslint-disable-next-line @typescript-eslint/no-explicit-any - private acirComposer: any; - private acirUncompressedBytecode: Uint8Array; - - constructor( - acirCircuit: CompiledCircuit, - private options: BackendOptions = { threads: 1 }, - ) { - const acirBytecodeBase64 = acirCircuit.bytecode; - this.acirUncompressedBytecode = acirToUint8Array(acirBytecodeBase64); - } - - /** @ignore */ - async instantiate(): Promise { - if (!this.api) { - if (typeof navigator !== 'undefined' && navigator.hardwareConcurrency) { - this.options.threads = navigator.hardwareConcurrency; - } else { - try { - const os = await import('os'); - this.options.threads = os.cpus().length; - } catch (e) { - console.log('Could not detect environment. 
Falling back to one thread.', e); - } - } - const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); - const api = await Barretenberg.new(this.options); - const [_exact, _total, subgroupSize] = await api.acirGetCircuitSizes(this.acirUncompressedBytecode); - const crs = await Crs.new(subgroupSize + 1); - await api.commonInitSlabAllocator(subgroupSize); - await api.srsInitSrs(new RawBuffer(crs.getG1Data()), crs.numPoints, new RawBuffer(crs.getG2Data())); - - this.acirComposer = await api.acirNewAcirComposer(subgroupSize); - await api.acirInitProvingKey(this.acirComposer, this.acirUncompressedBytecode); - this.api = api; - } - } - - /** @description Generates a proof */ - async generateProof(compressedWitness: Uint8Array): Promise { - await this.instantiate(); - // TODO: Change once `@aztec/bb.js` version is updated to use methods without isRecursive flag - const proofWithPublicInputs = await this.api.acirCreateProof( - this.acirComposer, - this.acirUncompressedBytecode, - gunzip(compressedWitness), - ); - - const splitIndex = proofWithPublicInputs.length - numBytesInProofWithoutPublicInputs; - - const publicInputsConcatenated = proofWithPublicInputs.slice(0, splitIndex); - const proof = proofWithPublicInputs.slice(splitIndex); - const publicInputs = deflattenPublicInputs(publicInputsConcatenated); - - return { proof, publicInputs }; - } - - /** - * Generates artifacts that will be passed to a circuit that will verify this proof. - * - * Instead of passing the proof and verification key as a byte array, we pass them - * as fields which makes it cheaper to verify in a circuit. - * - * The proof that is passed here will have been created using a circuit - * that has the #[recursive] attribute on its `main` method. - * - * The number of public inputs denotes how many public inputs are in the inner proof. - * - * @example - * ```typescript - * const artifacts = await backend.generateRecursiveProofArtifacts(proof, numOfPublicInputs); - * ``` - */ - async generateRecursiveProofArtifacts( - proofData: ProofData, - numOfPublicInputs = 0, - ): Promise<{ - proofAsFields: string[]; - vkAsFields: string[]; - vkHash: string; - }> { - await this.instantiate(); - const proof = reconstructProofWithPublicInputs(proofData); - const proofAsFields = ( - await this.api.acirSerializeProofIntoFields(this.acirComposer, proof, numOfPublicInputs) - ).slice(numOfPublicInputs); - - // TODO: perhaps we should put this in the init function. Need to benchmark - // TODO how long it takes. 
- await this.api.acirInitVerificationKey(this.acirComposer); - - // Note: If you don't init verification key, `acirSerializeVerificationKeyIntoFields`` will just hang on serialization - const vk = await this.api.acirSerializeVerificationKeyIntoFields(this.acirComposer); - - return { - proofAsFields: proofAsFields.map((p) => p.toString()), - vkAsFields: vk[0].map((vk) => vk.toString()), - vkHash: vk[1].toString(), - }; - } - - /** @description Verifies a proof */ - async verifyProof(proofData: ProofData): Promise { - const proof = reconstructProofWithPublicInputs(proofData); - await this.instantiate(); - await this.api.acirInitVerificationKey(this.acirComposer); - // TODO: Change once `@aztec/bb.js` version is updated to use methods without isRecursive flag - return await this.api.acirVerifyProof(this.acirComposer, proof); - } - - async destroy(): Promise { - if (!this.api) { - return; - } - await this.api.destroy(); - } -} - -function reconstructProofWithPublicInputs(proofData: ProofData): Uint8Array { - // Flatten publicInputs - const publicInputsConcatenated = flattenPublicInputsAsArray(proofData.publicInputs); - - // Concatenate publicInputs and proof - const proofWithPublicInputs = Uint8Array.from([...publicInputsConcatenated, ...proofData.proof]); - - return proofWithPublicInputs; -} - // typedoc exports -export { Backend, BackendOptions, CompiledCircuit, ProofData }; +export { Backend, CompiledCircuit, ProofData } from '@noir-lang/types'; +export { BackendOptions } from './types.js'; diff --git a/tooling/noir_js_backend_barretenberg/src/verifier.ts b/tooling/noir_js_backend_barretenberg/src/verifier.ts new file mode 100644 index 00000000000..fe9fa9cfffd --- /dev/null +++ b/tooling/noir_js_backend_barretenberg/src/verifier.ts @@ -0,0 +1,78 @@ +import { ProofData } from '@noir-lang/types'; +import { BackendOptions } from './types.js'; +import { flattenPublicInputsAsArray } from './public_inputs.js'; +import { type Barretenberg } from '@aztec/bb.js'; + +export class BarretenbergVerifier { + // These type assertions are used so that we don't + // have to initialize `api` and `acirComposer` in the constructor. + // These are initialized asynchronously in the `init` function, + // constructors cannot be asynchronous which is why we do this. + + private api!: Barretenberg; + // eslint-disable-next-line @typescript-eslint/no-explicit-any + private acirComposer: any; + + constructor(private options: BackendOptions = { threads: 1 }) {} + + /** @ignore */ + async instantiate(): Promise { + if (!this.api) { + if (typeof navigator !== 'undefined' && navigator.hardwareConcurrency) { + this.options.threads = navigator.hardwareConcurrency; + } else { + try { + const os = await import('os'); + this.options.threads = os.cpus().length; + } catch (e) { + console.log('Could not detect environment. Falling back to one thread.', e); + } + } + const { Barretenberg, RawBuffer, Crs } = await import('@aztec/bb.js'); + + // This is the number of CRS points necessary to verify a Barretenberg proof. 
+ const NUM_CRS_POINTS_FOR_VERIFICATION: number = 0; + const [api, crs] = await Promise.all([Barretenberg.new(this.options), Crs.new(NUM_CRS_POINTS_FOR_VERIFICATION)]); + + await api.commonInitSlabAllocator(NUM_CRS_POINTS_FOR_VERIFICATION); + await api.srsInitSrs( + new RawBuffer([] /* crs.getG1Data() */), + NUM_CRS_POINTS_FOR_VERIFICATION, + new RawBuffer(crs.getG2Data()), + ); + + this.acirComposer = await api.acirNewAcirComposer(NUM_CRS_POINTS_FOR_VERIFICATION); + this.api = api; + } + } + + /** @description Verifies a proof */ + async verifyProof(proofData: ProofData, verificationKey: Uint8Array): Promise { + const { RawBuffer } = await import('@aztec/bb.js'); + + await this.instantiate(); + // The verifier can be used for a variety of ACIR programs so we should not assume that it + // is preloaded with the correct verification key. + await this.api.acirLoadVerificationKey(this.acirComposer, new RawBuffer(verificationKey)); + + const proof = reconstructProofWithPublicInputs(proofData); + return await this.api.acirVerifyProof(this.acirComposer, proof); + } + + async destroy(): Promise { + if (!this.api) { + return; + } + await this.api.destroy(); + } +} + +export function reconstructProofWithPublicInputs(proofData: ProofData): Uint8Array { + // Flatten publicInputs + const publicInputsConcatenated = flattenPublicInputsAsArray(proofData.publicInputs); + + // Concatenate publicInputs and proof + const proofWithPublicInputs = Uint8Array.from([...publicInputsConcatenated, ...proofData.proof]); + + return proofWithPublicInputs; +} diff --git a/tooling/noir_js_types/package.json b/tooling/noir_js_types/package.json index eadb6f49665..316612a7c51 100644 --- a/tooling/noir_js_types/package.json +++ b/tooling/noir_js_types/package.json @@ -4,7 +4,7 @@ "The Noir Team " ], "packageManager": "yarn@3.5.1", - "version": "0.26.0", + "version": "0.27.0", "license": "(MIT OR Apache-2.0)", "homepage": "https://noir-lang.org/", "repository": { diff --git a/tooling/noir_js_types/src/types.ts b/tooling/noir_js_types/src/types.ts index 3a62d79a807..456e5a57f40 100644 --- a/tooling/noir_js_types/src/types.ts +++ b/tooling/noir_js_types/src/types.ts @@ -29,7 +29,17 @@ export type Abi = { return_witnesses: number[]; }; -export interface Backend { +export interface VerifierBackend { + /** + * @description Verifies a proof */ + verifyProof(proofData: ProofData): Promise; + + /** + * @description Destroys the backend */ + destroy(): Promise; +} + +export interface Backend extends VerifierBackend { /** * @description Generates a proof */ generateProof(decompressedWitness: Uint8Array): Promise; @@ -49,14 +59,6 @@ export interface Backend { /** @description A Field containing the verification key hash */ vkHash: string; }>; - - /** - * @description Verifies a proof */ - verifyProof(proofData: ProofData): Promise; - - /** - * @description Destroys the backend */ - destroy(): Promise; } /** diff --git a/tooling/noirc_abi/src/errors.rs b/tooling/noirc_abi/src/errors.rs index 687fecfcc1d..4209a9e218b 100644 --- a/tooling/noirc_abi/src/errors.rs +++ b/tooling/noirc_abi/src/errors.rs @@ -1,4 +1,7 @@ -use crate::{input_parser::InputValue, AbiParameter, AbiType}; +use crate::{ + input_parser::{InputTypecheckingError, InputValue}, + AbiType, +}; use acvm::acir::native_types::Witness; use thiserror::Error; @@ -38,8 +41,8 @@ impl From for InputParserError { pub enum AbiError { #[error("Received parameters not expected by ABI: {0:?}")] UnexpectedParams(Vec), - #[error("The parameter {} is expected to be a {:?} but found 
incompatible value {value:?}", .param.name, .param.typ)] - TypeMismatch { param: AbiParameter, value: InputValue }, + #[error("The value passed for parameter `{}` does not match the specified type:\n{0}", .0.path())] + TypeMismatch(#[from] InputTypecheckingError), #[error("ABI expects the parameter `{0}`, but this was not found")] MissingParam(String), #[error( diff --git a/tooling/noirc_abi/src/input_parser/mod.rs b/tooling/noirc_abi/src/input_parser/mod.rs index f66e069d487..4cf66820b8d 100644 --- a/tooling/noirc_abi/src/input_parser/mod.rs +++ b/tooling/noirc_abi/src/input_parser/mod.rs @@ -1,6 +1,7 @@ use num_bigint::{BigInt, BigUint}; use num_traits::{Num, Zero}; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashSet}; +use thiserror::Error; use acvm::FieldElement; use serde::Serialize; @@ -22,63 +23,165 @@ pub enum InputValue { Struct(BTreeMap), } +#[derive(Debug, Error)] +pub enum InputTypecheckingError { + #[error("Value {value:?} does not fall within range of allowable values for a {typ:?}")] + OutsideOfValidRange { path: String, typ: AbiType, value: InputValue }, + #[error("Type {typ:?} is expected to have length {expected_length} but value {value:?} has length {actual_length}")] + LengthMismatch { + path: String, + typ: AbiType, + value: InputValue, + expected_length: usize, + actual_length: usize, + }, + #[error("Could not find value for required field `{expected_field}`. Found values for fields {found_fields:?}")] + MissingField { path: String, expected_field: String, found_fields: Vec }, + #[error("Additional unexpected field was provided for type {typ:?}. Found field named `{extra_field}`")] + UnexpectedField { path: String, typ: AbiType, extra_field: String }, + #[error("Type {typ:?} and value {value:?} do not match")] + IncompatibleTypes { path: String, typ: AbiType, value: InputValue }, +} + +impl InputTypecheckingError { + pub(crate) fn path(&self) -> &str { + match self { + InputTypecheckingError::OutsideOfValidRange { path, .. } + | InputTypecheckingError::LengthMismatch { path, .. } + | InputTypecheckingError::MissingField { path, .. } + | InputTypecheckingError::UnexpectedField { path, .. } + | InputTypecheckingError::IncompatibleTypes { path, .. } => path, + } + } +} + impl InputValue { /// Checks whether the ABI type matches the InputValue type - /// and also their arity - pub fn matches_abi(&self, abi_param: &AbiType) -> bool { + pub(crate) fn find_type_mismatch( + &self, + abi_param: &AbiType, + path: String, + ) -> Result<(), InputTypecheckingError> { match (self, abi_param) { - (InputValue::Field(_), AbiType::Field) => true, + (InputValue::Field(_), AbiType::Field) => Ok(()), (InputValue::Field(field_element), AbiType::Integer { width, .. }) => { - field_element.num_bits() <= *width + if field_element.num_bits() <= *width { + Ok(()) + } else { + Err(InputTypecheckingError::OutsideOfValidRange { + path, + typ: abi_param.clone(), + value: self.clone(), + }) + } } (InputValue::Field(field_element), AbiType::Boolean) => { - field_element.is_one() || field_element.is_zero() + if field_element.is_one() || field_element.is_zero() { + Ok(()) + } else { + Err(InputTypecheckingError::OutsideOfValidRange { + path, + typ: abi_param.clone(), + value: self.clone(), + }) + } } (InputValue::Vec(array_elements), AbiType::Array { length, typ, .. 
}) => { if array_elements.len() != *length as usize { - return false; + return Err(InputTypecheckingError::LengthMismatch { + path, + typ: abi_param.clone(), + value: self.clone(), + expected_length: *length as usize, + actual_length: array_elements.len(), + }); } // Check that all of the array's elements' values match the ABI as well. - array_elements.iter().all(|input_value| input_value.matches_abi(typ)) + for (i, element) in array_elements.iter().enumerate() { + let mut path = path.clone(); + path.push_str(&format!("[{i}]")); + + element.find_type_mismatch(typ, path)?; + } + Ok(()) } (InputValue::String(string), AbiType::String { length }) => { - string.len() == *length as usize + if string.len() == *length as usize { + Ok(()) + } else { + Err(InputTypecheckingError::LengthMismatch { + path, + typ: abi_param.clone(), + value: self.clone(), + actual_length: string.len(), + expected_length: *length as usize, + }) + } } (InputValue::Struct(map), AbiType::Struct { fields, .. }) => { - if map.len() != fields.len() { - return false; + for (field_name, field_type) in fields { + if let Some(value) = map.get(field_name) { + let mut path = path.clone(); + path.push_str(&format!(".{field_name}")); + value.find_type_mismatch(field_type, path)?; + } else { + return Err(InputTypecheckingError::MissingField { + path, + expected_field: field_name.to_string(), + found_fields: map.keys().cloned().collect(), + }); + } } - let field_types = BTreeMap::from_iter(fields.iter().cloned()); + if map.len() > fields.len() { + let expected_fields: HashSet = + fields.iter().map(|(field, _)| field.to_string()).collect(); + let extra_field = map.keys().find(|&key| !expected_fields.contains(key)).cloned().expect("`map` is larger than the expected type's `fields` so it must contain an unexpected field"); + return Err(InputTypecheckingError::UnexpectedField { + path, + typ: abi_param.clone(), + extra_field: extra_field.to_string(), + }); + } - // Check that all of the struct's fields' values match the ABI as well. - map.iter().all(|(field_name, field_value)| { - if let Some(field_type) = field_types.get(field_name) { - field_value.matches_abi(field_type) - } else { - false - } - }) + Ok(()) } (InputValue::Vec(vec_elements), AbiType::Tuple { fields }) => { if vec_elements.len() != fields.len() { - return false; + return Err(InputTypecheckingError::LengthMismatch { + path, + typ: abi_param.clone(), + value: self.clone(), + actual_length: vec_elements.len(), + expected_length: fields.len(), + }); } - - vec_elements - .iter() - .zip(fields) - .all(|(input_value, abi_param)| input_value.matches_abi(abi_param)) + // Check that all of the array's elements' values match the ABI as well. + for (i, (element, expected_typ)) in vec_elements.iter().zip(fields).enumerate() { + let mut path = path.clone(); + path.push_str(&format!(".{i}")); + element.find_type_mismatch(expected_typ, path)?; + } + Ok(()) } // All other InputValue-AbiType combinations are fundamentally incompatible. - _ => false, + _ => Err(InputTypecheckingError::IncompatibleTypes { + path, + typ: abi_param.clone(), + value: self.clone(), + }), } } + + /// Checks whether the ABI type matches the InputValue type. 
+ pub fn matches_abi(&self, abi_param: &AbiType) -> bool { + self.find_type_mismatch(abi_param, String::new()).is_ok() + } } /// The different formats that are supported when parsing diff --git a/tooling/noirc_abi/src/lib.rs b/tooling/noirc_abi/src/lib.rs index d0dcb373963..6ad13500bdd 100644 --- a/tooling/noirc_abi/src/lib.rs +++ b/tooling/noirc_abi/src/lib.rs @@ -10,9 +10,7 @@ use acvm::{ use errors::AbiError; use input_parser::InputValue; use iter_extended::{try_btree_map, try_vecmap, vecmap}; -use noirc_frontend::{ - hir::Context, Signedness, StructType, Type, TypeBinding, TypeVariableKind, Visibility, -}; +use noirc_frontend::{hir::Context, Signedness, Type, TypeBinding, TypeVariableKind, Visibility}; use serde::{Deserialize, Serialize}; use std::ops::Range; use std::{collections::BTreeMap, str}; @@ -309,15 +307,7 @@ impl Abi { .ok_or_else(|| AbiError::MissingParam(param_name.clone()))? .clone(); - if !value.matches_abi(&expected_type) { - let param = self - .parameters - .iter() - .find(|param| param.name == param_name) - .unwrap() - .clone(); - return Err(AbiError::TypeMismatch { param, value }); - } + value.find_type_mismatch(&expected_type, param_name.clone())?; Self::encode_value(value, &expected_type).map(|v| (param_name, v)) }) @@ -515,31 +505,35 @@ fn decode_string_value(field_elements: &[FieldElement]) -> String { final_string.to_owned() } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ContractEvent { - /// Event name - name: String, - /// The fully qualified path to the event definition - path: String, - - /// Fields of the event - #[serde( - serialize_with = "serialization::serialize_struct_fields", - deserialize_with = "serialization::deserialize_struct_fields" - )] - fields: Vec<(String, AbiType)>, -} - -impl ContractEvent { - pub fn from_struct_type(context: &Context, struct_type: &StructType) -> Self { - let fields = vecmap(struct_type.get_fields(&[]), |(name, typ)| { - (name, AbiType::from_type(context, &typ)) - }); - // For the ABI, we always want to resolve the struct paths from the root crate - let path = context.fully_qualified_struct_path(context.root_crate_id(), struct_type.id); - - Self { name: struct_type.name.0.contents.clone(), path, fields } - } +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(tag = "kind", rename_all = "lowercase")] +pub enum AbiValue { + Field { + value: FieldElement, + }, + Integer { + sign: bool, + value: String, + }, + Boolean { + value: bool, + }, + String { + value: String, + }, + Array { + value: Vec, + }, + Struct { + #[serde( + serialize_with = "serialization::serialize_struct_field_values", + deserialize_with = "serialization::deserialize_struct_field_values" + )] + fields: Vec<(String, AbiValue)>, + }, + Tuple { + fields: Vec, + }, } fn range_to_vec(ranges: &[Range]) -> Vec { diff --git a/tooling/noirc_abi/src/serialization.rs b/tooling/noirc_abi/src/serialization.rs index ed838803fab..4f91d9b7dfd 100644 --- a/tooling/noirc_abi/src/serialization.rs +++ b/tooling/noirc_abi/src/serialization.rs @@ -1,8 +1,7 @@ +use crate::{AbiType, AbiValue}; use iter_extended::vecmap; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::AbiType; - // This module exposes a custom serializer and deserializer for `BTreeMap` // (representing the fields of a struct) to serialize it as a `Vec`. 
// @@ -41,6 +40,37 @@ where Ok(vecmap(fields_vector, |StructField { name, typ }| (name, typ))) } +#[derive(Serialize, Deserialize)] +struct StructFieldValue { + name: String, + value: AbiValue, +} + +pub(crate) fn serialize_struct_field_values( + fields: &[(String, AbiValue)], + s: S, +) -> Result +where + S: Serializer, +{ + let fields_vector = vecmap(fields, |(name, value)| StructFieldValue { + name: name.to_owned(), + value: value.to_owned(), + }); + + fields_vector.serialize(s) +} + +pub(crate) fn deserialize_struct_field_values<'de, D>( + deserializer: D, +) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + let fields_vector = Vec::::deserialize(deserializer)?; + Ok(vecmap(fields_vector, |StructFieldValue { name, value }| (name, value))) +} + #[cfg(test)] mod tests { use crate::{AbiParameter, AbiType, AbiVisibility, Sign}; diff --git a/tooling/noirc_abi_wasm/build.sh b/tooling/noirc_abi_wasm/build.sh index fe0b4dcbfff..58724dee02c 100755 --- a/tooling/noirc_abi_wasm/build.sh +++ b/tooling/noirc_abi_wasm/build.sh @@ -14,11 +14,17 @@ function run_or_fail { exit $status fi } +function run_if_available { + if command -v "$1" >/dev/null 2>&1; then + "$@" + else + echo "$1 is not installed. Please install it to use this feature." >&2 + fi +} require_command jq require_command cargo require_command wasm-bindgen -require_command wasm-opt self_path=$(dirname "$(readlink -f "$0")") pname=$(cargo read-manifest | jq -r '.name') @@ -42,5 +48,5 @@ BROWSER_WASM=${BROWSER_DIR}/${pname}_bg.wasm run_or_fail cargo build --lib --release --target $TARGET --package ${pname} run_or_fail wasm-bindgen $WASM_BINARY --out-dir $NODE_DIR --typescript --target nodejs run_or_fail wasm-bindgen $WASM_BINARY --out-dir $BROWSER_DIR --typescript --target web -run_or_fail wasm-opt $NODE_WASM -o $NODE_WASM -O -run_or_fail wasm-opt $BROWSER_WASM -o $BROWSER_WASM -O +run_if_available wasm-opt $NODE_WASM -o $NODE_WASM -O +run_if_available wasm-opt $BROWSER_WASM -o $BROWSER_WASM -O diff --git a/tooling/noirc_abi_wasm/package.json b/tooling/noirc_abi_wasm/package.json index 14e528c3b15..0e4aaceeae3 100644 --- a/tooling/noirc_abi_wasm/package.json +++ b/tooling/noirc_abi_wasm/package.json @@ -3,7 +3,7 @@ "contributors": [ "The Noir Team " ], - "version": "0.26.0", + "version": "0.27.0", "license": "(MIT OR Apache-2.0)", "homepage": "https://noir-lang.org/", "repository": { diff --git a/tooling/noirc_abi_wasm/src/lib.rs b/tooling/noirc_abi_wasm/src/lib.rs index ce15f6d502e..fad5abaebba 100644 --- a/tooling/noirc_abi_wasm/src/lib.rs +++ b/tooling/noirc_abi_wasm/src/lib.rs @@ -5,7 +5,7 @@ // See Cargo.toml for explanation. 
use getrandom as _; -use acvm::acir::native_types::WitnessMap; +use acvm::acir::native_types::{WitnessMap, WitnessStack}; use iter_extended::try_btree_map; use noirc_abi::{ errors::InputParserError, @@ -113,3 +113,12 @@ pub fn abi_decode(abi: JsAbi, witness_map: JsWitnessMap) -> Result::from_serde(&return_struct) .map_err(|err| err.to_string().into()) } + +#[wasm_bindgen(js_name = serializeWitness)] +pub fn serialise_witness(witness_map: JsWitnessMap) -> Result, JsAbiError> { + console_error_panic_hook::set_once(); + let converted_witness: WitnessMap = witness_map.into(); + let witness_stack: WitnessStack = converted_witness.into(); + let output = witness_stack.try_into(); + output.map_err(|_| JsAbiError::new("Failed to convert to Vec".to_string())) +} diff --git a/tooling/noirc_abi_wasm/test/browser/errors.test.ts b/tooling/noirc_abi_wasm/test/browser/errors.test.ts index 429a2d446a3..0f75ff64a3e 100644 --- a/tooling/noirc_abi_wasm/test/browser/errors.test.ts +++ b/tooling/noirc_abi_wasm/test/browser/errors.test.ts @@ -9,7 +9,7 @@ it('errors when an integer input overflows', async () => { const { abi, inputs } = await import('../shared/uint_overflow'); expect(() => abiEncode(abi, inputs)).to.throw( - 'The parameter foo is expected to be a Integer { sign: Unsigned, width: 32 } but found incompatible value Field(2³⁸)', + 'The value passed for parameter `foo` does not match the specified type:\nValue Field(2³⁸) does not fall within range of allowable values for a Integer { sign: Unsigned, width: 32 }', ); }); diff --git a/tooling/noirc_abi_wasm/test/node/errors.test.ts b/tooling/noirc_abi_wasm/test/node/errors.test.ts index 0d007e64803..fba451b4a8c 100644 --- a/tooling/noirc_abi_wasm/test/node/errors.test.ts +++ b/tooling/noirc_abi_wasm/test/node/errors.test.ts @@ -5,7 +5,7 @@ it('errors when an integer input overflows', async () => { const { abi, inputs } = await import('../shared/uint_overflow'); expect(() => abiEncode(abi, inputs)).to.throw( - 'The parameter foo is expected to be a Integer { sign: Unsigned, width: 32 } but found incompatible value Field(2³⁸)', + 'The value passed for parameter `foo` does not match the specified type:\nValue Field(2³⁸) does not fall within range of allowable values for a Integer { sign: Unsigned, width: 32 }', ); }); diff --git a/yarn.lock b/yarn.lock index a39ae9921da..e9915882fac 100644 --- a/yarn.lock +++ b/yarn.lock @@ -221,9 +221,9 @@ __metadata: languageName: node linkType: hard -"@aztec/bb.js@npm:0.32.0": - version: 0.32.0 - resolution: "@aztec/bb.js@npm:0.32.0" +"@aztec/bb.js@npm:0.35.1": + version: 0.35.1 + resolution: "@aztec/bb.js@npm:0.35.1" dependencies: comlink: ^4.4.1 commander: ^10.0.1 @@ -231,7 +231,7 @@ __metadata: tslib: ^2.4.0 bin: bb.js: dest/node/main.js - checksum: 0919957e141ae0a65cfab961dce122fa06de628a10b7cb661d31d8ed4793ce80980fcf315620ceffffa45581db941bad43c392f4b2aa9becaaf7d2faaba01ffc + checksum: 8e3551f059523d9494af4721a9219e2c6e63c8ed1df447a2d0daa9f8526a794758ae708bd1d9c9b1fbfb89c56dc867d9f0b87250dbabfcde23ec02dabbb5a32a languageName: node linkType: hard @@ -4396,7 +4396,7 @@ __metadata: version: 0.0.0-use.local resolution: "@noir-lang/backend_barretenberg@workspace:tooling/noir_js_backend_barretenberg" dependencies: - "@aztec/bb.js": 0.32.0 + "@aztec/bb.js": 0.35.1 "@noir-lang/types": "workspace:*" "@types/node": ^20.6.2 "@types/prettier": ^3
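
A minimal usage sketch of the new split prove/verify flow introduced by this diff (mirroring the "Verifier API" test added near the top of this section): `BarretenbergBackend` keeps proof generation, the new `BarretenbergVerifier` verifies any proof given an explicit verification key, and `@noir-lang/types` now splits `Backend` into `Backend extends VerifierBackend`. This is a sketch, not part of the diff; the `@noir-lang/noir_js` import for `Noir` and the way the compiled `assert_lt` artifact is obtained are assumptions not shown in this hunk.

```typescript
// Sketch of the decoupled prove/verify flow. Assumptions (not shown in this
// hunk): `Noir` is imported from `@noir-lang/noir_js`, and the compiled
// `assert_lt` artifact is supplied by the caller.
import { Noir } from '@noir-lang/noir_js';
import { BarretenbergBackend, BarretenbergVerifier } from '@noir-lang/backend_barretenberg';
import { CompiledCircuit, ProofData } from '@noir-lang/types';

declare const assert_lt_program: CompiledCircuit; // loaded elsewhere, e.g. from the compiled JSON artifact

async function proveThenVerifySeparately(): Promise<boolean> {
  // Proving side: the full backend is still needed to generate the proof.
  const backend = new BarretenbergBackend(assert_lt_program);
  const program = new Noir(assert_lt_program, backend);
  const proof: ProofData = await program.generateProof({ x: '2', y: '3' });

  // Export the verification key once; only this and the proof need to reach
  // the verifying party.
  const verificationKey = await backend.getVerificationKey();
  await backend.destroy();

  // Verifying side: `BarretenbergVerifier` is circuit-agnostic, so the
  // verification key is passed explicitly on every call.
  const verifier = new BarretenbergVerifier();
  const isValid = await verifier.verifyProof(proof, verificationKey);
  await verifier.destroy();

  return isValid;
}
```

The split also shows up in how the verifier initialises: `BarretenbergVerifier.instantiate` sets up the SRS with only the G2 point (zero G1 points), so verification-only consumers avoid the full CRS download that proving requires.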