From 6e89d53dcb6db97ff54678ac7883b7fa0b29a109 Mon Sep 17 00:00:00 2001 From: Lasse Herskind <16536249+LHerskind@users.noreply.github.com> Date: Mon, 12 Feb 2024 19:06:44 +0000 Subject: [PATCH] feat: update rollup circuits and contracts in yp (#4536) Updates the yellow paper to align with the changes made by #4250 + updates the rollup circuits to align with one kernel per base. --- .../docs/l1-smart-contracts/frontier.md | 159 +++++++ .../images/{proposal => frontier}/image-1.png | Bin .../images/{proposal => frontier}/image-2.png | Bin .../images/{proposal => frontier}/image-3.png | Bin .../images/{proposal => frontier}/image-4.png | Bin .../images/{proposal => frontier}/image-5.png | Bin .../images/{proposal => frontier}/image-6.png | Bin .../images/{proposal => frontier}/image-7.png | Bin .../{proposal/image.png => tree-order.png} | Bin yellow-paper/docs/l1-smart-contracts/index.md | 390 ++++++++++++------ .../docs/l1-smart-contracts/proposal.md | 383 ----------------- .../docs/rollup-circuits/base-rollup.md | 53 ++- yellow-paper/docs/rollup-circuits/index.md | 155 ++++--- .../docs/rollup-circuits/root-rollup.md | 38 +- yellow-paper/sidebars.js | 2 +- 15 files changed, 580 insertions(+), 600 deletions(-) create mode 100644 yellow-paper/docs/l1-smart-contracts/frontier.md rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-1.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-2.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-3.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-4.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-5.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-6.png (100%) rename yellow-paper/docs/l1-smart-contracts/images/{proposal => frontier}/image-7.png (100%) rename 
yellow-paper/docs/l1-smart-contracts/images/{proposal/image.png => tree-order.png} (100%) delete mode 100644 yellow-paper/docs/l1-smart-contracts/proposal.md diff --git a/yellow-paper/docs/l1-smart-contracts/frontier.md b/yellow-paper/docs/l1-smart-contracts/frontier.md new file mode 100644 index 00000000000..682c4b0e0a2 --- /dev/null +++ b/yellow-paper/docs/l1-smart-contracts/frontier.md @@ -0,0 +1,159 @@ +--- +title: Frontier Merkle Tree +--- + +The Frontier Merkle Tree is an append only Merkle tree that is optimized for minimal storage on chain. +By storing only the right-most non-empty node at each level of the tree we can always extend the tree with a new leaf or compute the root without needing to store the entire tree. +We call these values the frontier of the tree. +If we have the next index to insert at and the current frontier, we have everything needed to extend the tree or compute the root, with much less storage than a full merkle tree. +Note that we're not actually keeping track of the data in the tree: we only store what's minimally required in order to be able to compute the root after inserting a new element. + +We will go through a few diagrams and explanations to understand how this works. +And then a pseudo implementation is provided. + + +## Insertion +Whenever we are inserting, we need to update the "root" of the largest subtree possible. +This is done by updating the node at the level of the tree, where we have just inserted its right-most descendant. +This can sound a bit confusing, so we will go through a few examples. + +At first, say that we have the following tree, and that it is currently entirely empty. + +![alt text](images/frontier/image-1.png) + +### The first leaf + +When we are inserting the first leaf (lets call it A), the largest subtree is that leaf value itself (level 0). +In this case, we simply need to store the leaf value in `frontier[0]` and then we are done. 
+
For the sake of visualization, we will be drawing the elements in the `frontier` in blue.

![alt text](images/frontier/image-2.png)

Notice that this will be the case whenever we are inserting a leaf at an even index.

### The second leaf

When we are inserting the second leaf (let's call it B), the largest subtree will no longer be at level 0.
Instead it will be level 1, since the entire tree below it is now filled!
Therefore, we will compute the root of this subtree, `H(frontier[0],B)` and store it in `frontier[1]`.

Notice that we don't need to store the leaf B itself, since we won't be needing it for any future computations.
This is what makes the frontier tree efficient - we get away with storing very little data.

![alt text](images/frontier/image-3.png)

### Third leaf
When inserting the third leaf, we are again back to the largest subtree being filled by the insertion being itself at level 0.
The update will look similar to the first, where we only update `frontier[0]` with the new leaf.

![alt text](images/frontier/image-4.png)

### Fourth leaf

When inserting the fourth leaf, things get a bit more interesting.
Now the largest subtree getting filled by the insertion is at level 2.

To compute the new subtree root, we have to compute `F = H(frontier[0], E)` and then `G = H(frontier[1], F)`.
G is then stored in `frontier[2]`.


As before, notice that we are only updating one value in the frontier.
![alt text](images/frontier/image-5.png)

## Figuring out what to update

To figure out which level to update in the frontier, we simply need to figure out what the height is of the largest subtree that is filled by the insertion.
While this might sound complex, it is actually quite simple.
Consider the following extension of the diagram.
We have added the level to update, along with the index of the leaf in binary.
Seeing any pattern?
+

![alt text](images/frontier/image-6.png)

The level to update is simply the number of trailing ones in the binary representation of the index.
For a binary tree, we have that every `1` in the binary index represents a "right turn" down the tree.
Walking up the tree from the leaf, we can simply count the number of right turns until we hit a left turn.

## How to compute the root

Computing the root based on the frontier is also quite simple.
We can use the index at which we last inserted a leaf to figure out how high up the frontier we should start.
Then we know that anything to the right of the frontier has not yet been inserted, so all of these values are simply "zeros" values.
Zeros here are understood as the root for a subtree only containing zeros.

For example, if we take the tree from above and compute the root for it, we would see that level 2 was updated last.
Meaning that we can simply compute the root as `H(frontier[2], zeros[2])`.

![alt text](images/frontier/image-7.png)

For cases where we have built further, we simply "walk" up the tree and use either the frontier value or the zero value for the level.
+ +## Pseudo implementation +```python +class FrontierTree: + HEIGHT: immutable(uint256) + SIZE: immutable(uint256) + + frontier: HashMap[uint256, bytes32] # level => node + zeros: HashMap[uint256, uint256] # level => root of empty subtree of height level + + next_index: uint256 = 0 + + # Can entirely be removed with optimizations + def __init__(self, _height_: uint256): + self.HEIGHT = _height + self.SIZE = 2**_height + # Populate zeros + + def compute_level(_index: uint256) -> uint256: + ''' + We can get the right of the most filled subtree by + counting the number of trailing ones in the index + ''' + count = 0 + x = _index + while (x & 1 == 1): + count += 1 + x >>= 1 + return count + + def root() -> bytes32: + ''' + Compute the root of the tree + ''' + if self.next_index == 0: + return self.zeros[self.HEIGHT] + elif self.next_index == SIZE: + return self.frontier[self.HEIGHT] + else: + index = self.next_index - 1 + level = self.compute_level(index) + + temp: bytes32 = self.frontier[level] + + bits = index >> level + for i in range(level, self.HEIGHT): + is_right = bits & 1 == 1 + if is_right: + temp = sha256(frontier[i], temp) + else: + temp = sha256(temp, self.zeros[i]) + bits >>= 1 + return temp + + def insert(self, _leaf: bytes32): + ''' + Insert a leaf into the tree + ''' + level = self.compute_level(next_index) + right = _leaf + for i in range(0, level): + right = sha256(frontier[i], right) + self.frontier[level] = right + self.next_index += 1 +``` + +## Optimizations +- The `zeros` can be pre-computed and stored in the `Inbox` directly, this way they can be shared across all of the trees. 
+ + diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-1.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-1.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-1.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-1.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-2.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-2.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-2.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-2.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-3.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-3.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-3.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-3.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-4.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-4.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-4.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-4.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-5.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-5.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-5.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-5.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-6.png b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-6.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-6.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-6.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image-7.png 
b/yellow-paper/docs/l1-smart-contracts/images/frontier/image-7.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image-7.png rename to yellow-paper/docs/l1-smart-contracts/images/frontier/image-7.png diff --git a/yellow-paper/docs/l1-smart-contracts/images/proposal/image.png b/yellow-paper/docs/l1-smart-contracts/images/tree-order.png similarity index 100% rename from yellow-paper/docs/l1-smart-contracts/images/proposal/image.png rename to yellow-paper/docs/l1-smart-contracts/images/tree-order.png diff --git a/yellow-paper/docs/l1-smart-contracts/index.md b/yellow-paper/docs/l1-smart-contracts/index.md index 9767dfc07e2..a362cac4e67 100644 --- a/yellow-paper/docs/l1-smart-contracts/index.md +++ b/yellow-paper/docs/l1-smart-contracts/index.md @@ -4,34 +4,49 @@ title: Cross-chain communication This section describes what our L1 contracts do, what they are responsible for and how they interact with the circuits. -Note that the only reason that we even have any contracts is to facilitate cross-chain communication. The contracts are not required for the rollup to function, but required to bridge assets and to reduce the cost of light nodes. +Note that the only reason that we even have any contracts is to facilitate cross-chain communication. +The contracts are not required for the rollup to function, but required to bridge assets and to reduce the cost of light nodes. :::info Purpose of contracts The purpose of the L1 contracts are simple: - Facilitate cross-chain communication such that L1 liquidity can be used on L2 - Act as a validating light node for L2 that every L1 node implicitly run - ::: +::: ## Overview -When presented with a new [`ProvenBlock`](../rollup-circuits/root-rollup.md) and its proof, an Aztec node can be convinced of its validity if the proof passes and the `Header.last_archive` matches the `archive` of the node (archive here represents a root of [archive tree](../state/archive.md)). 
The `archive` used as public input is the archive after the new header is inserted (see [root rollup](./../rollup-circuits/root-rollup.md)). +When presented with a new [`ProvenBlock`](../rollup-circuits/root-rollup.md) and its proof, an Aztec node can be convinced of its validity if the proof passes and the `Header.last_archive` matches the `archive` of the node (archive here represents a root of [archive tree](../state/archive.md)). +The `archive` used as public input is the archive after the new header is inserted (see [root rollup](./../rollup-circuits/root-rollup.md)). ```python def process(block: ProvenBlock, proof: Proof): - assert self.archive == block.header.last_archive - assert proof.verify(block.header, block.archive) - assert self.inbox.consume(block.body.l1_to_l2_msgs) - for tx_effect in block.body.tx_effects: - assert self.outbox.insert(tx_effect.l2_to_l1_msgs) - + header = block.header + block_number = header.global_variables.block_number + + # Ensure that the body is available + assert block.body.compute_commitment() == header.content_commitment + + assert self.archive == header.last_archive + assert proof.verify(header, block.archive) + assert self.inbox.consume() == header.in_hash + assert self.outbox.insert( + block_number, + header.content_commitment.out_hash, + header.content_commitment.tx_tree_height + ) self.archive = block.archive + + emit BlockProcessed(block_number) ``` -While the `ProvenBlock` must be published and available for nodes to build the state of the rollup, we can build the validating light node (the contract) such that as long as the node can be _convinced_ that the data is available we can progress the state. This means our light node can be built to only require a subset of the `ProvenBlock` to be published to Ethereum L1 and use a different data availability layer for most of the block body. 
Namely, we need the cross-chain messages to be published to L1, but the rest of the block body can be published to a different data availability layer. +While the `ProvenBlock` must be published and available for nodes to build the state of the rollup, we can build the validating light node (the contract) such that as long as the node can be _convinced_ that the data is available we can progress the state. +This means our light node can be built to only require a subset of the `ProvenBlock` to be published to Ethereum L1 as calldata and use a different data availability layer for most of the block body. +Namely, we need the cross-chain messages to be published to L1, but the rest of the block body can be published to a different data availability layer. :::info Validium or Rollup -If a different data availability layer than Ethereum is used for the block body, we are effectively building a Validium. If we use Ethereum for the block body, we are building a Rollup. +If a different data availability layer than Ethereum is used for the block body, we are effectively building a Validium. +If we use Ethereum for the block body, we are building a Rollup. 
::: Using the data structures defined throughout the [rollup circuits](./../rollup-circuits/index.md) section, we can outline the validating light node structure as follows: @@ -46,11 +61,11 @@ class AvailabilityOracle { } class Inbox { - consume(entries: Fr[]) + consume(): bytes32 } class Outbox { - insert(entries: Fr[]) + insert(block_number, out_hash, height) } class Verifier { @@ -59,7 +74,7 @@ class Verifier { class StateTransitioner { archive: Snapshot - process(header: Header, archive: Fr, txs_hash: Fr, l1_to_l2_msgs: Fr[], l2_to_l1_msgs: Fr[], proof: Proof) + process(header: Header, archive: Fr, proof: Proof) } StateTransitioner --> AvailabilityOracle: is_available() StateTransitioner --> Inbox: consume() @@ -69,53 +84,46 @@ StateTransitioner --> Verifier: verify() ### State transitioner -The state transitioner is the heart of the validating light node for the L2. The contract keeps track of the current state of the L2 and progresses this state when a valid L2 block is received. It also facilitates cross-chain communication (communication between the L1 inbox and outbox contracts). +The state transitioner is the heart of the validating light node for the L2. +The contract keeps track of the current state of the L2 and progresses this state when a valid L2 block is received. +It also facilitates cross-chain communication (communication between the L1 inbox and outbox contracts). 
```python class StateTransitioner: - def __init__( - self, - verifier: Verifier, - registry: Registry, - version: Fr, - archive: Snapshot - ): - self.verifier = verifier - self.registry = registry - self.version = version - self.archive = archive - self.last_block_ts = block.timestamp - self.block_number = 0 + VERIFIER: immutable(IVerifier) + AVAILABILITY_ORACLE: immutable(IAvailabilityOracle) + INBOX: immutable(IInbox) + OUTBOX: immutable(IOutbox) + VERSION: immutable(uint256) + + archive: TreeSnapshot + block_number: uint256 + last_block_ts: uint256 + + def __init__(self, ...): + ''' + Initialize the state transitioner + ''' def process( self, header: Header, archive: Fr, - txs_hash: Fr, - l1_to_l2_msgs: Fr[], - l2_to_l1_msgs: Fr[], proof: Proof ): - assert self.body_available(header, txs_hash, l1_to_l2_msgs, l2_to_l1_msgs) + assert self.AVAILABILITY_ORACLE.is_available(txs_hash) assert self.validate_header(header) - assert self.verifier.verify(proof, header, archive) - assert self.registry.inbox.consume(l1_to_l2_msgs) - assert self.registry.outbox.insert(l2_to_l1_msgs) - self.archive = archive - - def body_available( - self, - body_hash: Fr, - txs_hash: Fr, - l1_to_l2_msgs: Fr[], - l2_to_l1_msgs: Fr[] - ) -> bool: - assert self.registry.availability_oracle.is_available(txs_hash) - in_hash = SHA256(pad(l1_to_l2_msgs)) - out_hash = MerkleTree(pad(l2_to_l1_msgs), SHA256) - return body_hash == SHA256(txs_hash, out_hash, in_hash) - ) + assert self.archive == header.last_archive + assert VERIFIER.verify(header, block.archive, proof) + assert self.INBOX.consume() == header.in_hash + assert self.OUTBOX.insert( + block_number, + header.content_commitment.out_hash, + header.content_commitment.tx_tree_height + ) + self.archive = block.archive + emit BlockProcessed(block_number) def validate_header( self, @@ -127,7 +135,6 @@ class StateTransitioner: assert header.global_variables.timestamp < block.timestamp assert header.global_variables.timestamp > self.last_block_ts 
assert header.archive == self.archive - return True ``` @@ -135,7 +142,8 @@ class StateTransitioner: The state transitioner should be connected to an oracle which addresses the availability condition. -For the case of a rollup, this "oracle" will be deriving the `TxsHash` from calldata and blobs. For a validium it should be connected to a bridge that it can use to verify that the data is available on the other chain. +For the case of a rollup, this "oracle" will be deriving the `TxsHash` from calldata and blobs. +For a validium it should be connected to a bridge that it can use to verify that the data is available on the other chain. For a generic DA that publishes data commitments to Ethereum, the oracle could be a snark proof that opens the data commitment from the bridge and computes the `TxsHash` from it. @@ -143,11 +151,20 @@ By having the availability oracle be independent from state progression we can e For more information around the requirements we have for the availability oracle, see [Data Availability](../data-publication-and-availability/index.md). +An interesting observation around the availability oracle is that the `OutHash` and `InHash` don't need to be explicitly proven available through it. +The `InHash` is already proven as part of the L1 inbox, as we will see in a second. +And the `OutHash` consists entirely of a subset of the contents of the `TxsHash`, which is already proven available. + ### Registry -To keep one location where all the core rollup contracts can be found, we have a registry contract. The registry is a contract that holds the current and historical addresses of the core rollup contracts. The addresses of a rollup deployment are contained in a snapshot, and the registry is tracking version-snapshot pairs. Depending on the upgrade scheme, it might be used to handle upgrades, or it could entirely be removed. 
It is generally the one address that a node MUST know about, as it can then tell the node where to find the remainder of the contracts. This is for example used when looking for the address new L2 blocks should be published to. +To keep one location where all the core rollup contracts can be found, we have a registry contract. +The registry is a contract that holds the current and historical addresses of the core rollup contracts. +The addresses of a rollup deployment are contained in a snapshot, and the registry is tracking version-snapshot pairs. +Depending on the upgrade scheme, it might be used to handle upgrades, or it could entirely be removed. +It is generally the one address that a node MUST know about, as it can then tell the node where to find the remainder of the contracts. +This is for example used when looking for the address new L2 blocks should be published to. ## Message Bridges @@ -158,11 +175,15 @@ To let users communicate between L1 and the L2, we are using message bridges, na :::info Naming is based from the PoV of the state transitioner. ::: -While we logically have 4 boxes, we practically only require 3 of those. The L2 inbox is not real - but only logical. This is due to the fact that they are always inserted and then consumed in the same block! Insertions require a L2 transaction, and it is then to be consumed and moved to the L1 outbox by the state transitioner in the same block. +While we logically have 4 boxes, we practically only require 3 of those. +The L2 inbox is not real - but only logical. +This is due to the fact that they are always inserted and then consumed in the same block! +Insertions require a L2 transaction, and it is then to be consumed and moved to the L1 outbox by the state transitioner in the same block. ### Portals -When deploying a contract on L2, it is possible to specify its "portal" address. 
This is an immutable variable, that can be used to constrain who the L2 contract expects messages from, and who it sends messages to. +When deploying a contract on L2, it is possible to specify its "portal" address. +This is an immutable variable, that can be used to constrain who the L2 contract expects messages from, and who it sends messages to. ### Messages @@ -184,8 +205,6 @@ struct L1ToL2Msg { L2Actor: recipient, bytes32: content, bytes32: secretHash, - uint32 deadline, // timestamp - uint64 fee, } struct L2ToL1Msg { @@ -195,82 +214,233 @@ struct L2ToL1Msg { } ``` -Beware, that while we speak of messages, we are practically passing around only their **hashes** to reduce cost. The `version` value of the `L2Actor` is the version of the rollup, which is intended to be used to specify which version of the rollup the message is intended for or sent from. This way, multiple rollup instances can use the same inbox/outbox contracts. +Beware, that while we speak of messages, we are practically passing around only their **hashes** to reduce cost. +The `version` value of the `L2Actor` is the version of the rollup, which is intended to be used to specify which version of the rollup the message is intended for or sent from. +This way, multiple rollup instances can use the same inbox/outbox contracts. :::info Why a single hash? -Compute on L1 is expensive, but storage is extremely expensive! To reduce overhead, we trade storage for computation and only commit to the messages and then "open" these for consumption later. However, since computation also bears significant we need to use a hash function that is relatively cheap on L1, while still being doable inside a snark. For this purpose a modded SHA256 was chosen, modded here meaning that it fits the output value into a single field element using the modulo operator. +Compute on L1 is expensive, but storage is extremely expensive! 
+To reduce overhead, we trade storage for computation and only commit to the messages and then "open" these for consumption later. +However, since computation also bears significant we need to use a hash function that is relatively cheap on L1, while still being doable inside a snark. +For this purpose a modded SHA256 was chosen, modded here meaning that it fits the output value into a single field element using the modulo operator. ::: Some additional discussion/comments on the message structure can be found in [The Republic](https://forum.aztec.network/t/the-republic-a-flexible-optional-governance-proposal-with-self-governed-portals/609/2#supporting-pending-messages-5). -Since any data that is moving from one chain to the other at some point will live on L1, it will be PUBLIC. While this is fine for L1 consumption (which is public in itself), we want to ensure that the L2 consumption can be private. -To support this, we use a nullifier scheme similar to what we are doing for the other [notes](./../state/note-hash-tree.md). As part of the nullifier computation we then use the `secret` which hashes to the `secretHash`, this ensures that only actors with knowledge of `secret` will be able to see when it is spent on L2. +Since any data that is moving from one chain to the other at some point will live on L1, it will be PUBLIC. +While this is fine for L1 consumption (which is public in itself), we want to ensure that the L2 consumption can be private. +To support this, we use a nullifier scheme similar to what we are doing for the other [notes](./../state/note-hash-tree.md). +As part of the nullifier computation we then use the `secret` which hashes to the `secretHash`, this ensures that only actors with knowledge of `secret` will be able to see when it is spent on L2. -Any message that is consumed on one side MUST be moved to the other side. This is to ensure that the messages exist AND are only consumed once. 
The L1 contracts can handle one side, but the circuits must handle the other. +Any message that is consumed on one side MUST be moved to the other side. +This is to ensure that the messages exist AND are only consumed once. +The L1 contracts can handle one side, but the circuits must handle the other. :::info Is `secretHash` required? -We are using the `secretHash` to ensure that the user can spend the message privately with a generic nullifier computation. However, as the nullifier computation is almost entirely controlled by the app circuit (except the siloing, see [Nullifier Tree](./../state/nullifier-tree.md) ) applications could be made to simply use a different nullifier computation and have it become part of the content. However, this reduces the developer burden and is quite easy to mess up. For those reasons we have decided to use the `secretHash` as part of the message. +We are using the `secretHash` to ensure that the user can spend the message privately with a generic nullifier computation. +However, as the nullifier computation is almost entirely controlled by the app circuit (except the siloing, see [Nullifier Tree](./../state/nullifier-tree.md) ) applications could be made to simply use a different nullifier computation and have it become part of the content. +However, this reduces the developer burden and is quite easy to mess up. +For those reasons we have decided to use the `secretHash` as part of the message. ::: + + ### Inbox When we say inbox, we are generally referring to the L1 contract that handles the L1 to L2 messages. - +The inbox takes messages from L1 contracts and inserts them into a series of message trees. +We build multiple "trees" instead of a single tree, since we are building one tree for every block and not one large with all the messages. 
+ +The reasoning is fairly straight-forward; we need to split it into epochs such that a sequencer can build a proof based on a tree that is not going to update in the middle of the proof building. Such updates would allow DOS attacks on the sequencer, which is undesirable. + +To support this, we can simply introduce a "lag" between when trees are built and when they must be included. +We can actually do this quite easily. +Say that whenever a new block is published, we start building a new tree. +Essentially meaning that at block $n$ we include tree $n$ which was created earlier (during block $n-1$). + + +Example visualized below. +Here we have that tree $n$ is "fixed" when block $n$ needs to be published. +And that tree $n+1$ is being built upon until block $n$ is being published. -The inbox is logically a [multi-set](https://en.wikipedia.org/wiki/Multiset) that builds messages based on the caller and user-provided content (multi-set meaning that repetitions are allowed). While anyone can insert messages into the inbox, only the recipient state transitioner can consume messages from it (as specified by the version). When the state transitioner is consuming a message, it MUST insert it into the "L2 outbox" ([message tree](./../state/index.md)). +![Feeding trees into the blocks](images/tree-order.png) + +When the state transitioner is consuming a tree, it MUST insert the subtree into the "L2 outbox" ([message tree](./../state/index.md)). When a message is inserted into the inbox, the inbox **MUST** fill in the `sender`: - `L1Actor.actor`: The sender of the message (the caller), `msg.sender` - `L1Actor.chainId`: The chainId of the L1 chain sending the message, `block.chainId` -We MUST populate these values in the inbox, since we cannot rely on the user providing anything meaningful. From the `L1ToL2Msg` we compute a hash of the message. This hash is what is moved by the state transitioner to the L2 outbox. 
+We MUST populate these values in the inbox, since we cannot rely on the user providing anything meaningful. +From the `L1ToL2Msg` we compute a hash of the message. +This hash is what is moved by the state transitioner to the L2 outbox. -Since message from L1 to L2 can be inserted independently of the L2 block, the message transfer (insert into inbox move to outbox) are not synchronous as it is for L2 to L1. This means that the message can be inserted into the inbox, but not yet moved to the outbox. The message will then be moved to the outbox when the state transitioner is consuming the message as part of a block. Since the sequencers are responsible for the ordering of the messages, there is not a known time for this pickup to happen, it is async. +Since message from L1 to L2 can be inserted independently of the L2 block, the message transfer (moving from inbox into outbox) is not synchronous as it is for L2 to L1 messages. +This means that the message can be inserted into the inbox, but not yet moved to the outbox. +The message will then be moved to the outbox when the state transitioner is consuming the message as part of a block. +Since the sequencers are required to move the entire subtree at once, you can be sure that the message will be moved to the outbox at some point. -This is done to ensure that the messages are not used to DOS the state transitioner. If the state transitioner was forced to pick up the messages in a specific order or at a fixed rate, it could be used to DOS the state transitioner by inserting a message just before an L2 block goes through. -While this can be addressed by having a queue of messages and let the sequencer specify the order, this require extra logic and might be difficult to price correctly. To keep this out of protocol, we simply allow the user to attach a fee to the message (see `fee` in `L1ToL2Msg` above). This way, the user can incentivize the sequencer to pick up the message faster. 
+As mentioned earlier, this is done to ensure that the messages are not used to DOS the state transitioner. -Since it is possible to land in a case where the sequencer will never pick up the message (e.g., if it is underpriced), the sender must be able to cancel the message. To ensure that this cancellation cannot happen under the feet of the sequencer we use a `deadline`, only after the deadline can it be cancelled. +Since we will be building the tree on L1, we need to use a gas-friendly hash-function such as SHA256. +However, as we need to allow users to prove inclusion in this tree, we cannot just insert the SHA256 tree into the rollup state, it requires too many constraints to be used by most small users. +Therefore, we need to "convert" the tree into a tree using a more snark-friendly hash. +This part is done in a to-be-defined circuit. +:::info TODO +Write about the `MessageCompression` circuits +::: -The contract that sent the message must decide how to handle the cancellation. It could for example ignore the cancelled message, or it could refund the user. This is up to the contract to decide. +Furthermore, to build the tree on L1, we need to put some storage on L1 such that the insertions don't need to provide a lot of merkle-related data which could be cumbersome to do and prone to race-conditions. +For example two insertions based on inclusion paths that are created at the same time will invalidate each other. +As storage costs an arm and a leg on L1, we need to be careful with how we store this. + +Luckily for us, we can use a "frontier" merkle tree to store the messages. +This is a special kind of append-only merkle tree that allows us to store very few elements in storage, but just enough for us to be able to extend it, and compute the root of the tree. +Consult [Frontier Merkle Tree](./frontier.md) for more information on this. + +Assuming that we have these trees, we can build an `inbox` utilizing them as follows.
+When a new block is published, we start building a new tree. +Notice however, that if we have entirely filled the current tree, we can start building a new one immediately, and the blocks can then "catch up". -:::info Error handling -While we have ensured that the message either arrives to the L2 outbox or is cancelled, we have not ensured that the message is consumed by the L2 contract. This is up to the L2 contract to handle. If the L2 contract does not handle the message, it will be stuck in the outbox forever. Similarly, it is up to the L1 contract to handle the cancellation. If the L1 contract does not handle the cancellation, the user might have a message that is pending forever. Error handling is entirely on the contract developer. -::: + +```python +class Inbox: + STATE_TRANSITIONER: immutable(address) + ZERO: immutable(bytes32) + + HEIGHT: immutable(uint256) + SIZE: immutable(uint256) + + trees: HashMap[uint256, FrontierTree] + + to_include: uint256 = 0 + in_progress: uint256 = 1 + + def __init__(self, _height: uint256, _zero: bytes32, _state_transitioner: address): + self.HEIGHT = _height + self.SIZE = 2**_height + self.ZERO = _zero + self.STATE_TRANSITIONER = _state_transitioner + + self.trees[1] = FrontierTree(self.HEIGHT) + + def insert(self, message: L1ToL2Message) -> bytes32: + ''' + Insert into the next FrontierTree. 
If the tree is full, creates a new one + ''' + if self.trees[self.in_progress].next_index == 2**self.HEIGHT: + self.in_progress += 1 + self.trees[self.in_progress] = FrontierTree(self.HEIGHT) + + message.sender.actor = msg.sender + message.sender.chain_id = block.chainid + + leaf = message.hash_to_field() + self.trees[self.in_progress].insert(leaf) + return leaf + + def consume(self) -> bytes32: + ''' + Consumes the current tree, and starts a new one if needed + ''' + assert msg.sender == self.STATE_TRANSITIONER + + root = self.ZERO + if self.to_include > 0: + root = self.trees[self.to_include].root() + + # If we are "catching up" we can skip the creation as it is already there + if self.to_include + 1 == self.in_progress: + self.in_progress += 1 + self.trees[self.in_progress] = FrontierTree(self.HEIGHT) + + self.to_include += 1 + + return root +``` #### L2 Inbox -While the L2 inbox is not a real contract, it is a logical contract that apply mutations to the data similar to the L1 inbox to ensure that the sender cannot fake his position. This logic is handled by the kernel and rollup circuits. +While the L2 inbox is not a real contract, it is a logical contract that apply mutations to the data similar to the L1 inbox to ensure that the sender cannot fake his position. +This logic is handled by the kernel and rollup circuits. Just like the L1 variant, we must populate the `sender`: - `L2Actor.actor`: The sender of the message (the caller) - `L2Actor.version`: The version of the L2 chain sending the message -In practice, this is done in the kernel circuit of the L2, and the message hash is a public output of the circuit that is inserted into the L1 outbox for later consumption. +In practice, this is done in the kernel circuit of the L2, and the message hashes are then aggregated into a tree as outlined in the [Rollup Circuits section](./../rollup-circuits/index.md) before it is inserted into the L1 outbox which we will address now. 
### Outbox -The outboxes are the location where a user can consume messages from. An outbox can only contain elements that have previously been removed from the paired inbox. +The outboxes are the location where a user can consume messages from. +An outbox can only contain elements that have previously been removed from the paired inbox. -Our L1 outbox is pretty simple, Like the L1 inbox, it is a multi-set. It should allow the state transitioner to insert messages and the recipient of the message can consume it (removing it from the outbox). +Our L1 outbox is pretty simple, like the L1 inbox, it is a series of trees. +The trees are built from the messages of all the transactions in the block, and the root and height is then pushed to the L1 outbox. + +Whenever a portal wishes to consume a message, it must prove that it is included in one of these roots and that it has not been consumed before. This is a very similar structure to what we are doing within the rollup for UTXO's already, so it should be familiar. + +To address the nullifier (marking it is spent), we can simply use a bitmap and flip just 1 bit per message. This shares some of the cost of consuming. + +This structure is the same as is used in many merkle airdrop contracts, and is a well known pattern. Nevertheless, it requires a bit more work from the developer's side, as they need to prepare the inclusion proof before they can consume the message. The proof can be prepared based on the published data, so with good libraries it should be very straight forward for most cases. :::info Checking sender -When consuming a message on L1, the portal contract must check that it was sent from the expected contract given that it is possible for multiple contracts on L2 to send to it. If the check is not done this could go horribly wrong. +When consuming a message on L1, the portal contract must check that it was sent from the expected contract given that it is possible for multiple contracts on L2 to send to it.
+If the check is not done this could go horribly wrong. ::: + +```python +class Outbox: + STATE_TRANSITIONER: immutable(address) + + struct RootData: + root: bytes32 + height: uint256 + nullified: HashMap[uint256, bool] + + roots: HashMap[uint256, RootData] + + def __init__(self, _state_transitioner: address): + self.STATE_TRANSITIONER = _state_transitioner + + def insert(index: uint256, root: bytes32, height: uint256): + assert msg.sender == self.STATE_TRANSITIONER + self.roots[index] = RootData(root, height, {}) + + def consume( + root_index: uint256, + leaf_index: uint256, + message: L2ToL1Message, + inclusion_proof: bytes[] + ): + leaf = message.hash_to_field() + assert merkle_verify( + self.roots[root_index].root, + self.roots[root_index].height, + leaf, + inclusion_proof + ) + assert not(self.roots[root_index].nullified[leaf_index]) + self.roots[root_index].nullified[leaf_index] = True +``` + #### L2 Outbox -The L2 outbox is quite different. It is a merkle tree that is populated with the messages moved by the state transitioner. As mentioned earlier, the messages are consumed on L2 by emitting a nullifier from the application circuit. +The L2 outbox is quite different. +It is a merkle tree that is populated with the messages moved by the state transitioner through the converted tree as seen in [Rollup Circuits](./../rollup-circuits/index.md). +As mentioned earlier, the messages are consumed on L2 by emitting a nullifier from the application circuit. -This means that all validation is done by the application circuit. The application should: +This means that all validation is done by the application circuit. +The application should: - Ensure that the message exists in the outbox (message tree) - Ensure that the message sender is the expected contract @@ -281,9 +451,11 @@ This means that all validation is done by the application circuit. 
The applicati ## Validity conditions -While there are multiple contracts, they work in unison to ensure that the rollup is valid and that messages are correctly moved between the chains. In practice this means that the contracts are to ensure that the following constraints are met in order for the validating light node to accept a block. +While there are multiple contracts, they work in unison to ensure that the rollup is valid and that messages are correctly moved between the chains. +In practice this means that the contracts are to ensure that the following constraints are met in order for the validating light node to accept a block. -Note that some conditions are marked as SHOULD, which is not strictly needed for security of the rollup, but the security of the individual applications or for UX. Also, some of the conditions are repetitions of what we saw earlier from the [state transitioner](#state-transitioner). +Note that some conditions are marked as SHOULD, which is not strictly needed for security of the rollup, but the security of the individual applications or for UX. +Also, some of the conditions are repetitions of what we saw earlier from the [state transitioner](#state-transitioner). - **Data Availability**: The block content MUST be available. To validate this, the `AvailabilityOracle` is used. 
- **Header Validation**: See the checks from the [state transitioner](#state-transitioner) @@ -297,16 +469,10 @@ Note that some conditions are marked as SHOULD, which is not strictly needed for - The `deadline` MUST be in the future, `> block.timestamp` - The `secretHash` MUST fit in a field element - The caller MAY append a `fee` to incentivize the sequencer to pick up the message -- **Message Cancellation**: To remove messages from the L1 inbox: - - The message MUST exist in the inbox - - The caller MUST be `sender.actor` - - The `deadline` MUST be in the future, `> block.timestamp` - - The `fee` SHOULD be refunded to the caller -- **Moving messages**: +- **Moving tree roots**: - Moves MUST be atomic: - Any message that is inserted into an outbox MUST be consumed from the matching inbox - Any message that is consumed from an inbox MUST be inserted into the matching outbox - - Messages MUST be moved by the state transitioner whose `version` match the `version` of the message - **Consuming messages**: for messages that are consumed from the outboxes: - L2 to L1 messages (on L1): - The consumer (caller) MUST match the `recipient.actor` @@ -320,14 +486,15 @@ Note that some conditions are marked as SHOULD, which is not strictly needed for - The consumer contract SHOULD check that the message exists in the state :::info - - For cost purposes, it can be useful to commit to the public inputs to just pass a single value into the circuit. - Time constraints might change depending on the exact sequencer selection mechanism. - ::: +::: ## Logical Execution -Below, we will outline the **LOGICAL** execution of a L2 block and how the contracts interact with the circuits. We will be executing cross-chain communication before and after the block itself. Note that in reality, the L2 inbox does not exists, and its functionality is handled by the kernel and the rollup circuits. 
+Below, we will outline the **LOGICAL** execution of a L2 block and how the contracts interact with the circuits. +We will be executing cross-chain communication before and after the block itself. +Note that in reality, the L2 inbox does not exist, and its functionality is handled by the kernel and the rollup circuits. ```mermaid sequenceDiagram @@ -340,7 +507,6 @@ sequenceDiagram participant O2 as Outbox (L2) participant R2 as Rollup (L2) participant R as Validating Light Node (L1) - participant Reg as Registry participant I as Inbox participant O as Outbox @@ -368,8 +534,8 @@ sequenceDiagram end loop msg in L2 inbox - R2->>O2: Consume msg - O2->>O2: Update state (delete) + R2->>I2: Consume msg + I2->>I2: Update state (delete) end loop msg in l1ToL2Msgs @@ -382,30 +548,25 @@ sequenceDiagram R->>R: Verify proof R->>R: Update State - R->>Reg: Where is the Inbox? - Reg->>R: Here is the address - R->>I: Consume l1ToL2Msgs from L1 - I->>I: Update state (delete) - - R->>Reg: Where is the Outbox? - Reg->>R: Here is the address + I->>I: Update state (next tree) R->>O: Insert Messages from L2 - O->>O: Update state (insert) + O->>O: Update state (insert root) end P->>O: Consume a msg O->>O: Validate msg - O->>O: Update state (delete) + O->>O: Update state (nullify) ``` -We will walk briefly through the steps of the diagram above. The numbering matches the numbering of nodes in the diagram, the start of the action. +We will walk briefly through the steps of the diagram above. +The numbering matches the numbering of nodes in the diagram, the start of the action. 1. A portal contract on L1 wants to send a message for L2 1. The L1 inbox populates the message with information of the `sender` (using `msg.sender` and `block.chainid`) -1. The L1 inbox contract inserts the message into its storage +1. The L1 inbox contract inserts the message into its tree 1. On the L2, as part of a L2 block, a transaction wish to consume a message from the L2 outbox. 1.
The L2 outbox ensures that the message is included, and that the caller is the recipient and knows the secret to spend. (This is practically done by the application circuit) 1. The nullifier of the message is emitted to privately spend the message (This is practically done by the application circuit) @@ -419,17 +580,13 @@ We will walk briefly through the steps of the diagram above. The numbering match 1. The L2 block is submitted to L1 1. The state transitioner receives the block and verifies the proof + validate constraints on block. 1. The state transitioner updates it state to the ending state of the block -1. The state transitioner ask the registry for the L1 inbox address -1. The state transitioner retrieves the L1 inbox address 1. The state transitioner consumes the messages from the L1 inbox that was specified in the block. Note that they have logically been inserted into the L2 outbox, ensuring atomicity. -1. The L1 inbox updates it local state by deleting the messages that was consumed -1. The state transitioner ask the registry for the L1 outbox address -1. The state transitioner retrieves the L1 outbox address -1. The state transitioner inserts the messages into the L1 inbox that was specified in the block. Note that they have logically been consumed from the L2 outbox, ensuring atomicity. -1. The L1 outbox updates it local state by inserting the messages -1. The portal later consumes the message from the L1 outbox +1. The L1 inbox updates its local state by marking the message tree messages as consumed +1. The state transitioner inserts the message tree root into the L1 Outbox. Note that they have logically been consumed from the L2 inbox, ensuring atomicity. +1. The L1 outbox updates its local state by inserting the message root and height +1. The portal later consumes a message from the L1 outbox 1. The L1 outbox validates that the message exists and that the caller is indeed the recipient -1.
The L1 outbox updates it local state by deleting the message +1. The L1 outbox updates its local state by nullifying the message :::info L2 inbox is not real As should be clear from above, the L2 inbox doesn't need to exist for itself, it keeps no state between blocks, as every message created in the block will also be consumed in the same block. @@ -440,9 +597,6 @@ As should be clear from above, the L2 inbox doesn't need to exist for itself, it - Sequencer selection contract(s) - Relies on the sequencer selection scheme being more explicitly defined - Relies on being able to validate the sequencer selection scheme -- Improve public inputs hash computation - - Currently it is using calldata and blocks to be passed along with the proof, but it should be adapted to better allow other DA layers. - - Modularize the computation such that the state transitioner need not know the exact computation but merely use a separate contract as an oracle. - Governance/upgrade contract(s) - Relies on the governance/upgrade scheme being more explicitly defined - Forced transaction inclusion diff --git a/yellow-paper/docs/l1-smart-contracts/proposal.md b/yellow-paper/docs/l1-smart-contracts/proposal.md deleted file mode 100644 index 6971fe2ebfc..00000000000 --- a/yellow-paper/docs/l1-smart-contracts/proposal.md +++ /dev/null @@ -1,383 +0,0 @@ ---- -title: Message Boxes Proposal ---- - -Proposal to update the message boxes for improved costs but with reduced usability. - - -## Background - -The current cross-chain communication mechanism was optimized for a few things: -- easy to consume messages from the L1 at a portal level -- giving the sequencer agency to decide what to include in the L2 - -This was achieved by having a `one-message-one-slot` design where each message is stored separately and the sequencer can decide which to include in the L2. -As part of a new block being inserted, it would need to "move" messages from one layer to the others.
-This is done by updating the slot of the message. -For incoming messages (L1 -> L2) the moved messages are inserted into a merkle tree in the L2 state as part of the block proof. - -This design has two main drawbacks: -- you must bribe the sequencer to have your L1 -> L2 message included (adding another fee market) -- because messages need to update one slot per message, the number of messages constrains the block-size of the L2 - -## Introduction - -In this document, we propose 2 changes that can be applied independently. -Namely we propose a change to the inbox and a change to the outbox. - -The inbox change will primarily change how the sequencer builds his block, while the outbox change will change how the portal consumes messages. - -The changes have **no** impact on how the aztec contracts are written, but will alter the portal consumption on L1 with slightly increased complexity. - -### Ideas at a glimpse - -Instead of updating individual slots at the time we transition to a new state, we can build trees of messages ahead of time, and move just the roots of the trees (in L1 storage). - -This allows us to move a lot of messages at once without having to update a lot of storage — making it cheaper to move messages independent on the number of messages. - -For the incoming messages, this means that we can get rid of the fee market that was induced by the need for the sequencer to choose the messages to include messages in the L2. -And instead force it to move a root of a message tree. - -For outgoing, this means that we only have to insert one root into the L1 storage, and that the consumer will need to provide an inclusion proof (or have prepared the inclusion already). - -## The Inbox - -The inbox changes requires a larger remodelling of the current design. -Namely, it needs to update the L1 contract to build a merkle root of messages to be included, and the circuits need to be altered to correctly insert messages from this update. 
- -### The L1 Contract - -The L1 contract (Inbox) needs to build trees of incoming messages. -We say "trees" since we will be building a tree for each block, instead of one large collective tree. - -The reasoning is fairly straight-forward; we need to split it into epochs such that a sequencer can build a proof based on a tree that is not going to update in the middle of the proof building. -Such updates would allow DOS attacks on the sequencer, which is undesirable. - -To support this, we can simply introduce a "lag" between when trees are built and when they must be included. -We can actually do this quite easily. -Say that whenever a new block is published, we start building a new tree. -Essentially meaning that at block $n$ we include tree $n$ which was created earlier (during block $n-1$). - -Example visualized below. -Here we have that tree $n$ is "fixed" when block $n$ needs to be published. -And that tree $n+1$ is being built upon until block $n$ is being published. - -![Feeding trees into the blocks](images/proposal/image.png) - -Since we will be building the tree on L1, we need to use a gas-friendly hash-function such as SHA256. -However, as we need to allow users to prove inclusion in this tree, we cannot just insert the SHA256 tree into the rollup state, it requires too many constraints to be used by most small users. -Therefore, we need to "convert" the tree into a tree using a more snark-friendly hash. - -Furthermore, to build the tree on L1, we need to put some storage on L1 such that the insertions don't need to provide a lot of merkle-related data which could be cumbersome to do and prone to race-conditions. -For example two insertions based on inclusion paths that are created at the same time will invalidate each other. -As storage costs an arm and a leg on L1, we need to be careful with how we store this. - -Luckily for us, we can use a "frontier" merkle tree to store the messages. 
-This is a special kind of append-only merkle tree that allows us to store very few elements in storage, but just enough for us to be able to extend it, and compute the root of the tree. -Consult [Frontier Merkle Tree](#frontier-merkle-tree]) for more information on this. - -Assuming that we have these trees, we can build an `inbox` utilizing them as follows. -When a new block is published, we start building a new tree. -Notice however, that if we have entirely filled the current tree, we can start building a new one immediately, and the blocks can then "catch up". - - -```python -class Inbox: - STATE_TRANSITIONER: immutable(address) - ZERO: immutable(bytes32) - - HEIGHT: immutable(uint256) - SIZE: immutable(uint256) - - trees: HashMap[uint256, FrontierTree] - - to_include: uint256 = 0 - in_progress: uint256 = 1 - - def __init__(self, _height: uint256, _zero: bytes32, _state_transitioner: address): - self.HEIGHT = _height - self.SIZE = 2**_height - self.ZERO = _zero - self.STATE_TRANSITIONER = _state_transitioner - - self.trees[1] = FrontierTree(self.HEIGHT) - - def insert(self, message: L1ToL2Message) -> bytes32: - ''' - Insert into the next FrontierTree. 
If the tree is full, creates a new one - ''' - if self.trees[self.in_progress].next_index == 2**self.HEIGHT: - self.in_progress += 1 - self.trees[self.in_progress] = FrontierTree(self.HEIGHT) - - message.sender.actor = msg.sender - message.sender.chain_id = block.chainid - - leaf = message.hash_to_field() - self.trees[self.in_progress].insert(leaf) - return leaf - - def consume(self) -> bytes32: - ''' - Consumes the current tree, and starts a new one if needed - ''' - assert msg.sender == self.STATE_TRANSITIONER - - root = self.ZERO - if self.to_include > 0: - root = self.trees[self.to_include].root() - - # If we are "catching up" we can skip the creation as it is already there - if self.to_include + 1 == self.in_progress: - self.in_progress += 1 - self.trees[self.in_progress] = FrontierTree(self.HEIGHT) - - self.to_include += 1 - - return root -``` - - -Briefly our idea is as follows: -1. Build a SHA256 merkle tree of the messages on L1 for every block -2. Store the root of the tree in the L1 contract -3. Generate a new block which includes the "pending" messages into the L2 state -4. Publish this block, which loads in the pending root and starts the next epoch. -5. Repeat - - - -### Circuits -The exact mechanism of inserting the messages into the L2 state is not fully decided, but it will essentially be one of the following: -- Split the work across the existing base-rollup circuits -- Create a new circuit that only handles the insertion of the messages - -Both solutions will compute and update the snark-friendly L1 to L2 message tree that is part of the [rollup state](./../state/index.md). - -#### Divide and Conquer - -Divide the tree into smaller sub-trees, and have every base-rollup circuit convert a sub-tree into a snark-friendly sub-tree. -Then at every merge we merge two sub-trees into a larger sub-tree, exactly as most other trees in the rollup circuits. - -The main issue of this solution is that it doesn't fit nicely with small blocks. 
-For example, as the tree size will likely be fixed, you will have to construct at minimum enough base rollups to include those. -For smaller blocks this means that the overhead of the message tree could be quite large, and larger than doing it separately. - -However, a benefit is that we don't need a separate circuit type that feeds into the current ones, so the structure is kept simple. - -#### New Circuit - -The idea is simple. Create a new circuit (or circuits) that deals with converting the message tree into a snark-friendly tree. -The cost of producing a block (compute wise) will be more stable in this case, but we need to coordinate it separately and it will require additional circuits to be combined with the current ones. - - -## The Outbox - -The outbox change is actually pretty straight forward. As part of our block proof we are already constructing an `out_hash` as defined in [Rollup Circuits](./../rollup-circuits/index.md#state-availability). - -This hash is a merkle root of all the messages that are to be added to the outbox and we can simply insert it and its height into the outbox when processing a block. -We use the height to ensure that the merkle inclusion proofs are of the correct length. - -Whenever a portal wishes to consume a message, it must prove that it is included in one of these roots and that it has not been consumed before. -This is a very similar structure to what we are doing within the rollup for UTXO's already, so it should be familiar. - -To address the nullifier (marking it is spent), we can simply use a bitmap and flip just 1 bit per message. -This shares some of the cost of consuming. - -This structure is the same as is used in many merkle airdrop contracts, and is a well known pattern. -Nevertheless, it require a bit more work from the developers side, as they need to prepare the inclusion proof before they can consume the message. 
-The proof can be prepared based on the published data, so with good libraries it should be very straight forward for most cases. - -```python -class Outbox: - STATE_TRANSITIONER: immutable(address) - - struct RootData: - root: bytes32 - height: uint256 - nullified: HashMap[uint256, bool] - - roots: HashMap[uint256, RootData] - - def __init__(self, _state_transitioner: address): - self.STATE_TRANSITIONER = _state_transitioner - - def insert(index: uint256, root: bytes32, height: uint256): - assert msg.sender == self.STATE_TRANSITIONER - self.roots[index] = RootData(root, height, {}) - - def consume( - root_index: uint256, - leaf_index: uint256, - message: L2ToL1Message, - inclusion_proof: bytes[] - ): - leaf = message.hash_to_field() - assert merkle_verify( - self.roots[root_index].root, - self.roots[root_index].height, - leaf, - inclusion_proof - ) - assert not(self.roots[root_index].nullified[leaf_index]) - self.roots[root_index].nullified[leaf_index] = True -``` - ---- - -## Frontier Merkle Tree - -The Frontier Merkle Tree is an append only Merkle tree that is optimized for minimal storage on chain. -By storing only the right-most non-empty node at each level of the tree we can always extend the tree with a new leaf or compute the root without needing to store the entire tree. -We call these values the frontier of the tree. -If we have the next index to insert at and the current frontier, we have everything we need to extend the tree or compute the root, with much less storage than a full merkle tree. - -We will go through a few diagrams and explanations to understand how this works. -And then a pseudo implementation is provided. - - -### Insertion -Whenever we are inserting, we need to update the "root" of the largest subtree possible. -This is done by updating the node at the level of the tree, where we have just inserted its right-most descendant. -This can sound a bit confusing, so we will go through a few examples. 
- -At first, say that we have the following tree, and that it is currently entirely empty. - -![alt text](images/proposal/image-1.png) - -#### The first leaf - -When we are inserting the first leaf (lets call it A), the largest subtree is that leaf value itself (level 0). -In this case, we simply need to store the leaf value in `frontier[0]` and then we are done. -For the sake of visualization, we will be drawing the elements in the `frontier` in blue. - -![alt text](images/proposal/image-2.png) - -Notice that this will be the case whenever we are inserting a leaf at an even index. - -#### The second leaf - -When we are inserting the second leaf (lets call it B), the largest subtree will not longer be at level 0. -Instead it will be level 1, since the entire tree below it is now filled! -Therefore, we will compute the root of this subtree, `H(frontier[0],B)` and store it in `frontier[1]`. - -Notice, that we don't need to store the leaf B itself, since we won't be needing it for any future computations. -This is what makes the frontier tree efficient - we get away with storing very little data. - -![alt text](images/proposal/image-3.png) - -#### Third leaf -When inserting the third leaf, we are again back to the largest subtree being filled by the insertion being itself at level 0. -The update will look similar to the first, where we only update `frontier[0]` with the new leaf. - -![alt text](images/proposal/image-4.png) - -#### Fourth leaf - -When inserting the fourth leaf, things get a bit more interesting. -Now the largest subtree getting filled by the insertion is at level 2. - -To compute the new subtree root, we have to compute `F = H(frontier[0], E)` and then `G = H(frontier[1], F)`. -G is then stored in `frontier[2]`. - - -As before, notice that we are only updating one value in the frontier. 
-![alt text](images/proposal/image-5.png) - -### Figuring out what to update - -To figure out which level to update in the frontier, we simply need to figure out what the height is of the largest subtree that is filled by the insertion. -While this might sound complex, it is actually quite simple. -Consider the following extension of the diagram. -We have added the level to update, along with the index of the leaf in binary. -Seeing any pattern? - -![alt text](images/proposal/image-6.png) - -The level to update is simply the number of trailing ones in the binary representation of the index. -For a binary tree, we have that every `1` in the binary index represents a "right turn" down the tree. -Walking up the tree from the leaf, we can simply count the number of right turns until we hit a left-turn. - -### How to compute the root - -Computing the root based on the frontier is also quite simple. -We can use the last index inserted a leaf at to figure out how high up the frontier we should start. -Then we know that anything that is at the right of the frontier has not yet been inserted, so all of these values are simply "zeros" values. -Zeros here are understood as the root for a subtree only containing zeros. - -For example, if we take the tree from above and compute the root for it, we would see that level 2 was updated last. -Meaning that we can simply compute the root as `H(frontier[2], zeros[2])`. - -![alt text](images/proposal/image-7.png) - -For cases where we have built further, we simply "walk" up the tree and use either the frontier value or the zero value for the level. 
- -### Pseudo implementation -```python -class FrontierTree: - HEIGHT: immutable(uint256) - SIZE: immutable(uint256) - - frontier: HashMap[uint256, bytes32] # level => node - zeros: HashMap[uint256, uint256] # level => root of empty subtree of height level - - next_index: uint256 = 0 - - # Can entirely be removed with optimizations - def __init__(self, _height_: uint256): - self.HEIGHT = _height - self.SIZE = 2**_height - # Populate zeros - - def compute_level(_index: uint256) -> uint256: - ''' - We can get the right of the most filled subtree by - counting the number of trailing ones in the index - ''' - count = 0 - x = _index - while (x & 1 == 1): - count += 1 - x >>= 1 - return count - - def root() -> bytes32: - ''' - Compute the root of the tree - ''' - if self.next_index == 0: - return self.zeros[self.HEIGHT] - elif self.next_index == SIZE: - return self.frontier[self.HEIGHT] - else: - index = self.next_index - 1 - level = self.compute_level(index) - - temp: bytes32 = self.frontier[level] - - bits = index >> level - for i in range(level, self.HEIGHT): - is_right = bits & 1 == 1 - if is_right: - temp = sha256(frontier[i], temp) - else: - temp = sha256(temp, self.zeros[i]) - bits >>= 1 - return temp - - def insert(self, _leaf: bytes32): - ''' - Insert a leaf into the tree - ''' - level = self.compute_level(next_index) - right = _leaf - for i in range(0, level): - right = sha256(frontier[i], right) - self.frontier[level] = right - self.next_index += 1 -``` - -### Optimizations -- The `zeros` can be pre-computed and stored in the `Inbox` directly, this way they can be shared across all of the trees. 
- - diff --git a/yellow-paper/docs/rollup-circuits/base-rollup.md b/yellow-paper/docs/rollup-circuits/base-rollup.md index 140f62fd30c..1dbb48d1ddc 100644 --- a/yellow-paper/docs/rollup-circuits/base-rollup.md +++ b/yellow-paper/docs/rollup-circuits/base-rollup.md @@ -42,13 +42,21 @@ class GlobalVariables { fee_recipient: Address } +class ContentCommitment { + tx_tree_height: Fr + txs_hash: Fr[2] + in_hash: Fr[2] + out_hash: Fr[2] +} + class Header { last_archive: Snapshot - body_hash: Fr[2] + content_commitment: ContentCommitment state: StateReference global_variables: GlobalVariables } -Header *.. Body : body_hash +Header *.. Body : txs_hash +Header *-- ContentCommitment: content_commitment Header *-- StateReference : state Header *-- GlobalVariables : global_variables @@ -81,7 +89,6 @@ TxEffect *-- "m" PublicDataWrite: public_writes TxEffect *-- Logs : logs class Body { - l1_to_l2_messages: List~Fr~ tx_effects: List~TxEffect~ } Body *-- "m" TxEffect @@ -195,12 +202,12 @@ class StateDiffHints { } class BaseRollupInputs { - historical_header_membership_witnesses: List~HeaderMembershipWitness~ - kernel_data: List~KernelData~ + historical_header_membership_witnesses: HeaderMembershipWitness + kernel_data: KernelData partial: PartialStateReference state_diff_hints: StateDiffHints } -BaseRollupInputs *-- "m" KernelData : kernelData +BaseRollupInputs *-- KernelData : kernelData BaseRollupInputs *-- PartialStateReference : partial BaseRollupInputs *-- StateDiffHints : state_diff_hints BaseRollupInputs *-- ConstantRollupData : constants @@ -229,28 +236,21 @@ Fee structs and contract deployment structs will need to be revised, in line wit ```python def BaseRollupCircuit( state_diff_hints: StateDiffHints, - historical_header_membership_witnesses: HeaderMembershipWitness[], - kernel_data: KernelData[], + historical_header_membership_witnesses: HeaderMembershipWitness, + kernel_data: KernelData, partial: PartialStateReference, constants: ConstantRollupData, ) -> 
BaseOrMergeRollupPublicInputs: - tx_hashes = Fr[][2] - contracts = Fr[] public_data_tree_root = partial.public_data_tree - for i in len(kernel_data): - tx_hash, _c, public_data_tree_root = kernel_checks( - kernel_data[i], - constants, - public_data_tree_root, - historical_header_membership_witnesses[i], - ) - tx_hashes.push(tx_hash) - contracts.push_array(_c) - - note_hash_subtree = MerkleTree( - [...note_hashes for kernel_data.public_inputs.end.note_hashes in kernel_data] + tx_hash, contracts, public_data_tree_root = kernel_checks( + kernel_data, + constants, + public_data_tree_root, + historical_header_membership_witnesses, ) + + note_hash_subtree = MerkleTree(kernel_data.public_inputs.end.note_hashes) note_hash_snapshot = merkle_insertion( partial.note_hash_tree.root, note_hash_subtree.root, @@ -263,7 +263,7 @@ def BaseRollupCircuit( # The sorting can be checked with a permutation nullifier_snapshot = successor_merkle_batch_insertion( partial.nullifier_tree.root, - [...nullifiers for kernel_data.public_inputs.end.nullifiers in kernel_data], + kernel_data.public_inputs.end.nullifiers, state_diff_hints.sorted_nullifiers, state_diff_hints.sorted_nullifier_indexes, state_diff_hints.nullifier_subtree_sibling_path, @@ -282,16 +282,13 @@ def BaseRollupCircuit( CONTRACTS_TREE_HEIGHT, ) - txs_hash = SHA256(tx_hashes) - out_hash = SHA256( - [...l2_to_l1_messages for kernel_data.public_inputs.end.l2_to_l1_messages in kernel_data] - ) + out_hash = SHA256(kernel_data.public_inputs.end.l2_to_l1_messages) return BaseOrMergeRollupPublicInputs( type=0, height_in_block_tree=0, aggregation_object= - txs_hash=txs_hash + txs_hash=tx_hash out_hash=out_hash start=partial, end=PartialStateReference( diff --git a/yellow-paper/docs/rollup-circuits/index.md b/yellow-paper/docs/rollup-circuits/index.md index 397b16a2941..d09ceec0f71 100644 --- a/yellow-paper/docs/rollup-circuits/index.md +++ b/yellow-paper/docs/rollup-circuits/index.md @@ -6,16 +6,21 @@ title: Rollup Circuits Together with 
the [validating light node](../l1-smart-contracts/index.md) the rollup circuits must ensure that incoming blocks are valid, that state is progressed correctly and that anyone can rebuild the state. -To support this, we construct a single proof for the entire block, which is then verified by the validating light node. This single proof is constructed by recursively merging proofs together in a binary tree structure. This structure allows us to keep the workload of each individual proof small, while making it very parallelizable. This works very well for the case where we want many actors to be able to participate in the proof generation. +To support this, we construct a single proof for the entire block, which is then verified by the validating light node. +This single proof is constructed by recursively merging proofs together in a binary tree structure. +This structure allows us to keep the workload of each individual proof small, while making it very parallelizable. +This works very well for the case where we want many actors to be able to participate in the proof generation. -The tree structure is outlined below, but the general idea is that we have a tree where all the leaves are transactions (kernel proofs) and through $\log(n)$ steps we can then "compress" them down to just a single root proof. Note that we have two different types of "merger" circuit, namely: +The tree structure is outlined below, but the general idea is that we have a tree where all the leaves are transactions (kernel proofs) and through $\log(n)$ steps we can then "compress" them down to just a single root proof. +Note that we have two different types of "merger" circuit, namely: - The merge rollup - Merges two base rollup proofs OR two merge rollup proofs - The root rollup - Merges two merge rollup proofs -In the diagram the size of the tree is limited for show, but a larger tree will have more layers of merge rollups proofs. 
Circles mark the different types of proofs, while squares mark the different circuit types. +In the diagram the size of the tree is limited for show, but a larger tree will have more layers of merge rollups proofs. +Circles mark the different types of proofs, while squares mark the different circuit types. ```mermaid graph BT @@ -43,7 +48,6 @@ graph BT B2_p --> M1_c B3_p --> M1_c - B0_c[Base 0] B1_c[Base 1] B2_c[Base 2] @@ -57,18 +61,10 @@ graph BT K1((Kernel 1)) K2((Kernel 2)) K3((Kernel 3)) - K4((Kernel 4)) - K5((Kernel 5)) - K6((Kernel 6)) - K7((Kernel 7)) K0 --> B0_c - K1 --> B0_c - K2 --> B1_c - K3 --> B1_c - K4 --> B2_c - K5 --> B2_c - K6 --> B3_c - K7 --> B3_c + K1 --> B1_c + K2 --> B2_c + K3 --> B3_c style R_p fill:#1976D2; style M0_p fill:#1976D2; @@ -81,18 +77,15 @@ graph BT style K1 fill:#1976D2; style K2 fill:#1976D2; style K3 fill:#1976D2; - style K4 fill:#1976D2; - style K5 fill:#1976D2; - style K6 fill:#1976D2; - style K7 fill:#1976D2; ``` To understand what the circuits are doing and what checks they need to apply it is useful to understand what data is going into the circuits and what data is coming out. -Below is a figure of the data structures thrown around for the block proof creation. Note that the diagram does not include much of the operations for kernels, but mainly the data structures that are used for the rollup circuits. +Below is a figure of the data structures thrown around for the block proof creation. +Note that the diagram does not include much of the operations for kernels, but mainly the data structures that are used for the rollup circuits. - + @@ -123,13 +116,21 @@ class GlobalVariables { fee_recipient: Address } +class ContentCommitment { + tx_tree_height: Fr + txs_hash: Fr[2] + in_hash: Fr[2] + out_hash: Fr[2] +} + class Header { last_archive: Snapshot - body_hash: Fr[2] + content_commitment: ContentCommitment state: StateReference global_variables: GlobalVariables } -Header *.. Body : body_hash +Header *.. 
Body : txs_hash +Header *-- ContentCommitment: content_commitment Header *-- StateReference : state Header *-- GlobalVariables : global_variables @@ -162,7 +163,6 @@ TxEffect *-- "m" PublicDataWrite: public_writes TxEffect *-- Logs : logs class Body { - l1_to_l2_messages: List~Fr~ tx_effects: List~TxEffect~ } Body *-- "m" TxEffect @@ -276,12 +276,12 @@ class StateDiffHints { } class BaseRollupInputs { - historical_header_membership_witnesses: List~HeaderMembershipWitness~ - kernel_data: List~KernelData~ + historical_header_membership_witnesses: HeaderMembershipWitness + kernel_data: KernelData partial: PartialStateReference state_diff_hints: StateDiffHints } -BaseRollupInputs *-- "m" KernelData : kernelData +BaseRollupInputs *-- KernelData : kernelData BaseRollupInputs *-- PartialStateReference : partial BaseRollupInputs *-- StateDiffHints : state_diff_hints BaseRollupInputs *-- ConstantRollupData : constants @@ -313,9 +313,8 @@ class MergeRollupInputs { MergeRollupInputs *-- ChildRollupData: left MergeRollupInputs *-- ChildRollupData: right - class RootRollupInputs { - l1_to_l2_msgs: List~Fr~ + l1_to_l2_roots: MessageCompressionBaseOrMergePublicInputs l1_to_l2_msgs_sibling_path: List~Fr~ parent: Header, parent_sibling_path: List~Fr~ @@ -323,10 +322,27 @@ class RootRollupInputs { left: ChildRollupData right: ChildRollupData } +RootRollupInputs *-- MessageCompressionBaseOrMergePublicInputs: l1_to_l2_roots RootRollupInputs *-- ChildRollupData: left RootRollupInputs *-- ChildRollupData: right RootRollupInputs *-- Header : parent +class MessageCompressionBaseInputs { + l1_to_l2_msgs: List~Fr~ +} + +class MessageCompressionBaseOrMergePublicInputs { + sha_root: Fr[2] + converted_root: Fr +} + +class MessageCompressionMergeInputs { + left: MessageCompressionBaseInputs + right: MessageCompressionBaseInputs +} +MessageCompressionMergeInputs *-- MessageCompressionBaseOrMergePublicInputs: left +MessageCompressionMergeInputs *-- MessageCompressionBaseOrMergePublicInputs: right 
+ class RootRollupPublicInputs { aggregation_object: AggregationObject @@ -337,7 +353,8 @@ RootRollupPublicInputs *--Header : header ``` :::info CombinedAccumulatedData -Note that the `CombinedAccumulatedData` contains elements that we won't be using throughout the rollup circuits. However, as the data is used for the kernel proofs (when it is build recursively), we will include it here anyway. +Note that the `CombinedAccumulatedData` contains elements that we won't be using throughout the rollup circuits. +However, as the data is used for the kernel proofs (when it is build recursively), we will include it here anyway. ::: :::warning TODO @@ -348,13 +365,18 @@ Since the diagram can be quite overwhelming, we will go through the different da ### Higher-level tasks -Before looking at the circuits individually, it can however be a good idea to recall the reason we had them in the first place. For this, we are especially interested in the tasks that span multiple circuits and proofs. +Before looking at the circuits individually, it can however be a good idea to recall the reason we had them in the first place. +For this, we are especially interested in the tasks that span multiple circuits and proofs. #### State consistency -While the individual kernels are validated on their own, they might rely on state changes earlier in the block. For the block to be correctly validated, this means that when validating kernel $n$, it must be executed on top of the state after all kernels $ B3 ``` -The roots of these trees, together with incoming messages, makes up the `body_hash`. 
- -```mermaid graph BT - R[body_hash] - M0[TxsHash] - M1[OutHash] - M2[InHash] - - M3[l1_to_l2_messages] + R[InHash] + M0[Hash 0-1] + M1[Hash 2-3] + B0[Hash 0.0-0.1] + B1[Hash 1.0-1.1] + B2[Hash 2.0-2.1] + B3[Hash 3.0-3.1] + K0[l1_to_l2_msgs 0.0] + K1[l1_to_l2_msgs 0.1] + K2[l1_to_l2_msgs 1.0] + K3[l1_to_l2_msgs 1.1] + K4[l1_to_l2_msgs 2.0] + K5[l1_to_l2_msgs 2.1] + K6[l1_to_l2_msgs 3.0] + K7[l1_to_l2_msgs 3.1] M0 --> R M1 --> R - M2 --> R - M3 --> M2 -``` - -```python -def body_hash(body: Body): - txs_hash = merkle_tree(body.txs, SHA256).root - out_hash = merkle_tree([tx.l1_to_l2_msgs for tx in body.txs], SHA256).root - in_hash = SHA256(body.l1_to_l2_messages) - return SHA256(txs_hash, out_hash, in_hash) + B0 --> M0 + B1 --> M0 + B2 --> M1 + B3 --> M1 + K0 --> B0 + K1 --> B0 + K2 --> B1 + K3 --> B1 + K4 --> B2 + K5 --> B2 + K6 --> B3 + K7 --> B3 ``` -:::info SHA256 -SHA256 is used since as the hash function since it will likely be reconstructed outside the circuit in a resource constrained environment (Ethereum L1). -::: +While the `TxsHash` merely requires the data to be published and known to L1, the `InHash` and `OutHash` need to be computable on L1 as well. +For this reason they need to be efficiently computable on L1 while still being reasonably cheap inside a snark - leading us to rely on SHA256. ## Next Steps diff --git a/yellow-paper/docs/rollup-circuits/root-rollup.md b/yellow-paper/docs/rollup-circuits/root-rollup.md index c75efad220a..31e5e6cba0b 100644 --- a/yellow-paper/docs/rollup-circuits/root-rollup.md +++ b/yellow-paper/docs/rollup-circuits/root-rollup.md @@ -42,13 +42,21 @@ class GlobalVariables { coinbase: EthAddress fee_recipient: Address} +class ContentCommitment { + tx_tree_height: Fr + txs_hash: Fr[2] + in_hash: Fr[2] + out_hash: Fr[2] +} + class Header { last_archive: Snapshot - body_hash: Fr[2] + content_commitment: ContentCommitment state: StateReference global_variables: GlobalVariables } -Header *.. 
Body : txs_hash +Header *-- ContentCommitment: content_commitment Header *-- StateReference : state Header *-- GlobalVariables : global_variables @@ -81,7 +89,6 @@ TxEffect *-- "m" PublicDataWrite: public_writes TxEffect *-- Logs : logs class Body { - l1_to_l2_messages: List~Fr~ tx_effects: List~TxEffect~ } Body *-- "m" TxEffect @@ -123,8 +130,13 @@ class ChildRollupData { } ChildRollupData *-- BaseOrMergeRollupPublicInputs: public_inputs +class MessageCompressionBaseOrMergePublicInputs { + sha_root: Fr[2] + converted_root: Fr +} + class RootRollupInputs { - l1_to_l2_msgs: List~Fr~ + l1_to_l2_roots: MessageCompressionBaseOrMergePublicInputs l1_to_l2_msgs_sibling_path: List~Fr~ parent: Header, parent_sibling_path: List~Fr~ @@ -132,9 +144,10 @@ class RootRollupInputs { left: ChildRollupData right: ChildRollupData } -RootRollupInputs *-- Header : parent +RootRollupInputs *-- MessageCompressionBaseOrMergePublicInputs: l1_to_l2_roots RootRollupInputs *-- ChildRollupData: left RootRollupInputs *-- ChildRollupData: right +RootRollupInputs *-- Header : parent class RootRollupPublicInputs { aggregation_object: AggregationObject @@ -148,7 +161,7 @@ RootRollupPublicInputs *--Header : header ```python def RootRollupCircuit( - l1_to_l2_msgs: List[Fr], + l1_to_l2_roots: MessageCompressionBaseOrMergePublicInputs, l1_to_l2_msgs_sibling_path: List[Fr], parent: Header, parent_sibling_path: List[Fr], @@ -175,21 +188,22 @@ def RootRollupCircuit( ) # Update the l1 to l2 msg tree - l1_to_l2_msg_subtree = MerkleTree(l1_to_l2_msgs) l1_to_l2_msg_tree = merkle_insertion( parent.state.l1_to_l2_message_tree, - l1_to_l2_msg_subtree.root, + l1_to_l2_roots.converted_root, l1_to_l2_msgs_sibling_path, L1_TO_L2_SUBTREE_HEIGHT, L1_To_L2_HEIGHT ) - txs_hash = SHA256(left.public_inputs.txs_hash | right.public_inputs.txs_hash) - out_hash = SHA256(left.public_inputs.txs_hash | right.public_inputs.out_hash) - header = Header( last_archive = left.public_inputs.constants.last_archive, - body_hash = 
SHA256(txs_hash | out_hash | SHA256(l1_to_l2_msgs)), + content_commitment: ContentCommitment( + tx_tree_height = left.public_inputs.height_in_block_tree + 1, + txs_hash = SHA256(left.public_inputs.txs_hash | right.public_inputs.txs_hash), + in_hash = l1_to_l2_roots.sha_root, + out_hash = SHA256(left.public_inputs.out_hash | right.public_inputs.out_hash), + ), state = StateReference( l1_to_l2_message_tree = l1_to_l2_msg_tree, partial = right.public_inputs.end, diff --git a/yellow-paper/sidebars.js b/yellow-paper/sidebars.js index 4d3ae29ae54..b15773ba563 100644 --- a/yellow-paper/sidebars.js +++ b/yellow-paper/sidebars.js @@ -116,7 +116,7 @@ const sidebars = { type: "category", link: { type: "doc", id: "l1-smart-contracts/index" }, items: [ - "l1-smart-contracts/proposal", + "l1-smart-contracts/frontier", ], }, {