diff --git a/da-contracts/contracts/IL1DAValidator.sol b/da-contracts/contracts/IL1DAValidator.sol index c22e9c557..cb2b640e5 100644 --- a/da-contracts/contracts/IL1DAValidator.sol +++ b/da-contracts/contracts/IL1DAValidator.sol @@ -17,7 +17,7 @@ struct L1DAValidatorOutput { interface IL1DAValidator { /// @notice The function that checks the data availability for the given batch input. /// @param _chainId The chain id of the chain that is being committed. - /// @param _chainId The batch number for which the data availability is being checked. + /// @param _batchNumber The batch number for which the data availability is being checked. /// @param _l2DAValidatorOutputHash The hash of that was returned by the l2DAValidator. /// @param _operatorDAInput The DA input by the operator provided on L1. /// @param _maxBlobsSupported The maximal number of blobs supported by the chain. diff --git a/docs/Overview.md b/docs/Overview.md index 161b2e745..2bd2d8182 100644 --- a/docs/Overview.md +++ b/docs/Overview.md @@ -1,4 +1,5 @@ # Overview + [back to readme](./README.md) Ethereum's future is rollup-centric. This means breaking with the current paradigm of isolated EVM chains to infrastructure that is focused on an ecosystem of interconnected zkEVMs/zkVMs, (which we name ZK chain). This ecosystem will be grounded on Ethereum, requiring the appropriate L1 smart contracts. Here we outline our ZK Stack approach for these contracts, their interfaces, the needed changes to the existing architecture, as well as future features to be implemented. @@ -15,7 +16,6 @@ We want to create a system where: - Interop is seamless and enables unified liquidity for assets across the ecosystem. - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture. - ZKsync Era is a permissionless general-purpose ZK rollup. Similar to many L1 blockchains and sidechains it enables deployment and interaction with Turing-complete smart contracts. diff --git a/docs/README.md b/docs/README.md index 22d9cdc0e..97edab33b 100644 --- a/docs/README.md +++ b/docs/README.md @@ -5,48 +5,48 @@ The order of the files here only roughly represents the order of reading. 
A lot - [Glossary](./glossary.md) - [Overview](./overview.md) - Contracts of an individual chain - - [ZK Chain basics](./settlement_contracts/zkchain_basics.md) - - Data availability - - [Custom DA support](./settlement_contracts/data_availability/custom_da.md) - - [Rollup DA support](./settlement_contracts/data_availability/rollup_da.md) - - [Standard pubdata format](./settlement_contracts/data_availability/standard_pubdata_format.md) - - [State diff compression v1 spec](./settlement_contracts/data_availability/state_diff_compression_v1_spec.md) - - L1->L2 transaction handling - - [Processing of L1->L2 transactions](./settlement_contracts/priority_queue/processing_of_l1->l2_txs.md) - - [Priority queue](./settlement_contracts/priority_queue/priority-queue.md) + - [ZK Chain basics](./settlement_contracts/zkchain_basics.md) + - Data availability + - [Custom DA support](./settlement_contracts/data_availability/custom_da.md) + - [Rollup DA support](./settlement_contracts/data_availability/rollup_da.md) + - [Standard pubdata format](./settlement_contracts/data_availability/standard_pubdata_format.md) + - [State diff compression v1 spec](./settlement_contracts/data_availability/state_diff_compression_v1_spec.md) + - L1->L2 transaction handling + - [Processing of L1->L2 transactions](./settlement_contracts/priority_queue/processing_of_l1->l2_txs.md) + - [Priority queue](./settlement_contracts/priority_queue/priority-queue.md) - Chain Management - - [Chain type manager](./chain_management/chain_type_manager.md) - - [Admin role](./chain_management/admin_role.md) - - [Chain genesis](./chain_management/chain_genesis.md) - - [Standard Upgrade process](./chain_management/upgrade_process.md) + - [Chain type manager](./chain_management/chain_type_manager.md) + - [Admin role](./chain_management/admin_role.md) + - [Chain genesis](./chain_management/chain_genesis.md) + - [Standard Upgrade process](./chain_management/upgrade_process.md) - Bridging - - Bridgehub - - [Overview of the bridgehub functionality](./bridging/bridgehub/overview.md) - - [Asset Router](./bridging/asset_router/Overview.md) + - Bridgehub + - [Overview of the bridgehub functionality](./bridging/bridgehub/overview.md) + - [Asset Router](./bridging/asset_router/Overview.md) - L2 System Contracts - - [System contracts bootloader description](./l2_system_contracts/system_contracts_bootloader_description.md) - - [Batches and blocks on ZKsync](./l2_system_contracts/batches_and_blocks_on_zksync.md) - - [Elliptic curve precompiles](./l2_system_contracts/elliptic_curve_precompiles.md) - - [ZKsync fee model](./l2_system_contracts/zksync_fee_model.md) + - [System contracts bootloader description](./l2_system_contracts/system_contracts_bootloader_description.md) + - [Batches and blocks on ZKsync](./l2_system_contracts/batches_and_blocks_on_zksync.md) + - [Elliptic curve precompiles](./l2_system_contracts/elliptic_curve_precompiles.md) + - [ZKsync fee model](./l2_system_contracts/zksync_fee_model.md) - Gateway - - [General overview](./gateway/overview.md) - - [Chain migration](./gateway/chain_migration.md) - - [L1->L3 messaging via gateway](./gateway/messaging_via_gateway.md) - - [L3->L1 messaging via gateway](./gateway/nested_l3_l1_messaging.md) - - [Gateway protocol versioning](./gateway/gateway_protocol_upgrades.md) - - [DA handling on Gateway](./gateway/gateway_da.md) + - [General overview](./gateway/overview.md) + - [Chain migration](./gateway/chain_migration.md) + - [L1->L3 messaging via gateway](./gateway/messaging_via_gateway.md) + - [L3->L1 
messaging via gateway](./gateway/nested_l3_l1_messaging.md)
+  - [Gateway protocol versioning](./gateway/gateway_protocol_upgrades.md)
+  - [DA handling on Gateway](./gateway/gateway_da.md)
- Upgrade history
-  - [Gateway upgrade diff](./upgrade_history/gateway_upgrade/gateway_diff_review.md)
-  - [Gateway upgrade process](./upgrade_history/gateway_upgrade/upgrade_process.md)
+  - [Gateway upgrade diff](./upgrade_history/gateway_upgrade/gateway_diff_review.md)
+  - [Gateway upgrade process](./upgrade_history/gateway_upgrade/upgrade_process.md)

![Reading order](./img/reading_order.png)

-# Invariants/tricky places to look out for
+## For auditors: Invariants/tricky places to look out for

This section is for auditors of the codebase. It includes some of the important invariants that the system relies on and which, if broken, could have bad consequences.

- Assuming that the accepting CTM is correct & efficient, the L1→GW part of the L1→GW→L3 transaction never fails. It is assumed that the provided max amount for gas is always enough for any transaction that can realistically come from L1.
-- GW → L1 migration never fails. If it is possible to get into a state where the migration is not possible to finish, then the chain is basically lost. There are some exceptions where for now it is the expected behavior. (check out the “Migration invariants & protocol upgradability” section)
+- GW → L1 migration never fails. If it is possible to get into a state where the migration cannot be finished, then the chain is basically lost. There are some exceptions where, for now, this is the expected behavior (check out the “Migration invariants & protocol upgradability” section).
- The general consistency of chains when migration between different settlement layers is done. Including the feasibility of emergency upgrades, etc. I.e. whether the whole system is thought-through.
- Preimage attacks in the L3→L1 tree, we apply special prefixes to ensure that the tree structure is fixed, i.e. all logs are 88 bytes long (this is for backwards compatibility reasons). For batch leafs and chain id leafs we use special prefixes.
-- Data availability guarantees. Whether rollup users can always restore all their storage slots, etc. An example of a potential tricky issue can be found in “Security notes for Gateway-based rollups” [in this document](./gateway/gateway_da.md).
\ No newline at end of file
+- Data availability guarantees. Whether rollup users can always restore all their storage slots, etc. An example of a potential tricky issue can be found in “Security notes for Gateway-based rollups” [in this document](./gateway/gateway_da.md).
diff --git a/docs/bridging/asset_router/Overview.md b/docs/bridging/asset_router/Overview.md
index 3da01cfbd..3ca2c650e 100644
--- a/docs/bridging/asset_router/Overview.md
+++ b/docs/bridging/asset_router/Overview.md
@@ -1,4 +1,5 @@
# Overview of Custom Asset Bridging with the Asset Router
+
[back to readme](../../README.md)

Bridges are completely separate contracts from the ZKChains and system contracts. They are a wrapper for L1 <-> L2 communication on both L1 and L2. Upon locking assets on one layer, a request is sent to mint these bridged assets on the other layer.
@@ -6,13 +7,13 @@ Upon burning assets on one layer, a request is sent to unlock them on the other.

Custom asset bridging is a new bridging model that allows to:

-1. Minimize the effort needed by custom tokens to be able to become part of the elastic chain ecosystem.
Before, each custom token would have to build its own bridge, but now just custom asset deployment trackers / asset handler is needed. This is achieved by building a modular bridge which separates the logic of L1<>L2 messaging from the holding of the asset.
+1. Minimize the effort needed by custom tokens to become part of the elastic chain ecosystem. Before, each custom token would have to build its own bridge, but now only a custom asset deployment tracker / asset handler is needed. This is achieved by building a modular bridge which separates the logic of L1<>L2 messaging from the holding of the asset.
2. Unify the interfaces between L1 and L2 bridge contracts, paving the way for easy cross chain bridging. It will especially become valuable once interop is enabled.

#### New concepts

- assetId => identifier to track bridged assets across chains. This is used to link messages to specific asset handlers in the AssetRouters.
-- AssetHandler => contract that manages liquidity (burns/mints, locks/unlocks, etc.) for specific token (or a set of them) on a chain. Every asset
+- AssetHandler => contract that manages liquidity (burns/mints, locks/unlocks, etc.) for a specific token (or a set of them) on a chain. Every asset
- AssetDeploymentTracker => contract that manages the deployment of asset handlers across chains. This is the contract that registers these asset handlers in the AssetRouters.

### Normal flow

diff --git a/docs/bridging/asset_router/asset_router.md b/docs/bridging/asset_router/asset_router.md
index 8333b2254..99f678f58 100644
--- a/docs/bridging/asset_router/asset_router.md
+++ b/docs/bridging/asset_router/asset_router.md
@@ -1,4 +1,5 @@
# AssetRouters (L1/L2) and NativeTokenVault
+
[back to readme](../../README.md)

The main job of the asset router is to be the central point of coordination for bridging. All crosschain token bridging is done between asset routers only, and once the message reaches the asset router, it then routes it to the corresponding asset handler.

@@ -10,7 +11,6 @@ The endgame is to have L1 asset router have the same functionality as the L2 one

The specifics of the L2AssetRouter are the need to interact with the previously deployed L2SharedBridgeLegacy if it was already present. It has less “rights” than the L1AssetRouter: at the moment it is assumed that all asset deployment trackers are from L1, so the only way to register an asset handler on L2 is to make an L1→L2 transaction.

> Note, that today registering new asset deployment trackers will be permissioned, but the plan is to make it permissionless in the future
->

The specifics of the L1AssetRouter come from the need to be backwards compatible with the old L1SharedBridge. Yes, it will not share the same storage, but it will inherit the need to be backwards compatible with the current SDK. Also, L1AssetRouter needs to facilitate L1-only operations, such as recovering from failed deposits.

@@ -18,15 +18,15 @@ Also, L1AssetRouter is the only base token bridge contract that can participate

### L1Nullifier

-While the endgoal is to unify L1 and L2 asset routers, in reality, it may not be that easy: while L2 asset routers get called by L1→L2 transactions, L1 ones don't and require manual finalization of transactions, which involves proof verification, etc. To move this logic outside of the L1AssetRouter, it was moved into a separate L1Nullifier contract.
+While the end goal is to unify L1 and L2 asset routers, in reality, it may not be that easy: while L2 asset routers get called by L1→L2 transactions, L1 ones don't and require manual finalization of transactions, which involves proof verification, etc. To move this logic outside of the L1AssetRouter, it was moved into a separate L1Nullifier contract.

-*This is the contract the previous L1SharedBridge will be upgraded to, so it should have the backwards compatible storage.*
+_This is the contract the previous L1SharedBridge will be upgraded to, so it should have the backwards compatible storage._

### NativeTokenVault (L1/L2)

NativeTokenVault is an asset handler that is available on all chains and is also predeployed. It provides the functionality of the most basic bridging: locking funds on one chain and minting the bridged equivalent on the other one. On L2 chains NTV is predeployed at the `0x10004` address.

-The L1 and L2 versions of the NTV are almost identical in functionality, the main differences come from the differences of the deployment functionality in L1 and L2 envs, where the former uses standard CREATE2 and the latter uses low level calls to `CONTRACT_DEPLOYER`system contract.
+The L1 and L2 versions of the NTV are almost identical in functionality; the main differences come from the differences in the deployment functionality in the L1 and L2 envs, where the former uses standard CREATE2 and the latter uses low-level calls to the `CONTRACT_DEPLOYER` system contract.

Also, the L1NTV has the following specifics:

@@ -43,5 +43,5 @@ This contract is never deployed for new chains.

### Summary

![image.png](./img/bridge_contracts.png)
+
> New bridge contracts
->
diff --git a/docs/bridging/bridgehub/overview.md b/docs/bridging/bridgehub/overview.md
index 83183797b..e43d80880 100644
--- a/docs/bridging/bridgehub/overview.md
+++ b/docs/bridging/bridgehub/overview.md
@@ -1,4 +1,5 @@
# BridgeHub & Asset Routers
+
[back to readme](../../README.md)

## Bridgehub as the main chain registry

@@ -28,7 +29,7 @@ For the purpose of this document, it is enough to treat the Asset Router as a bl

### Handling base tokens

-On L2, *a base token* (not to be consfused with a *native token*, i.e. an ERC20 token with a main contract on the chain) is the one that is used for `msg.value` and it is managed at `L2BaseToken` system contract. We need its logic to be strictly defined in `L2BaseToken`, since the base asset is expected to behave the exactly the same as ether on EVM. For now this token contract does not support base minting and burning of the asset, nor further customization.
+On L2, _a base token_ (not to be confused with a _native token_, i.e. an ERC20 token with a main contract on the chain) is the one that is used for `msg.value` and it is managed by the `L2BaseToken` system contract. We need its logic to be strictly defined in `L2BaseToken`, since the base asset is expected to behave exactly the same as ether on EVM. For now this token contract does not support base minting and burning of the asset, nor further customization.

In other words, in the current release base assets can only be transferred through `msg.value`. They can also only be minted when they are backed 1-1 on L1.
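To make the 1-1 backing invariant concrete, here is a hypothetical sketch (names and layout are assumptions for illustration, not the actual contract code) of the kind of per-chain escrow accounting that keeps base tokens minted on L2 backed by funds held on L1:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// Hypothetical sketch, not the production implementation: per-chain
/// escrow accounting that enforces 1-1 backing of base tokens.
abstract contract BaseTokenBackingSketch {
    /// chainId => assetId => amount of the asset escrowed on L1 for that chain.
    mapping(uint256 => mapping(bytes32 => uint256)) public chainBalance;

    /// Called when a deposit escrows base tokens for `_chainId` on L1;
    /// only after this may the equivalent amount be minted on L2.
    function _recordDeposit(uint256 _chainId, bytes32 _assetId, uint256 _amount) internal {
        chainBalance[_chainId][_assetId] += _amount;
    }

    /// Called when a withdrawal is finalized on L1. A chain can never
    /// withdraw more than was deposited into it, so every base token
    /// circulating on L2 stays backed by the L1 escrow.
    function _recordWithdrawal(uint256 _chainId, bytes32 _assetId, uint256 _amount) internal {
        require(chainBalance[_chainId][_assetId] >= _amount, "not enough escrowed funds");
        chainBalance[_chainId][_assetId] -= _amount;
    }
}
```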
@@ -42,42 +43,41 @@ To request an L1→L2 transaction, the `BridgeHub.requestL2TransactionDirect` fu

```solidity
struct L2TransactionRequestDirect {
-    uint256 chainId;
-    uint256 mintValue;
-    address l2Contract;
-    uint256 l2Value;
-    bytes l2Calldata;
-    uint256 l2GasLimit;
-    uint256 l2GasPerPubdataByteLimit;
-    bytes[] factoryDeps;
-    address refundRecipient;
+  uint256 chainId;
+  uint256 mintValue;
+  address l2Contract;
+  uint256 l2Value;
+  bytes l2Calldata;
+  uint256 l2GasLimit;
+  uint256 l2GasPerPubdataByteLimit;
+  bytes[] factoryDeps;
+  address refundRecipient;
}
```

Most of the params are self-explanatory & replicate the logic of zkSync Era. The only non-trivial fields are:

-- `mintValue` is the total amount of the base tokens that should be minted on L2 as the result of this transaction. The requirement is that `request.mintValue >= request.l2Value + request.l2GasLimit * derivedL2GasPrice(...)`, where `derivedL2GasPrice(...)` is the gas price to be used by this L1→L2 transaction. The exact price is defined by the ZKChain.
-
-Here is a quick guide on how this transaction is routed through the bridgehub.
+- `mintValue` is the total amount of the base tokens that should be minted on L2 as the result of this transaction. The requirement is that `request.mintValue >= request.l2Value + request.l2GasLimit * derivedL2GasPrice(...)`, where `derivedL2GasPrice(...)` is the gas price to be used by this L1→L2 transaction. The exact price is defined by the ZKChain.

-1. The bridgehub retrieves the `baseTokenAssetId` of the chain with the corresponding `chainId` and calls `L1AssetRouter.bridgehubDepositBaseToken` method. The `L1AssetRouter` will then use standard token depositing mechanism to burn/escrow the respective amount of the `baseTokenAssetId`. You can read more about it in [the asset router doc](../asset_router/Overview.md).
+Here is a quick guide on how this transaction is routed through the bridgehub.

-This step ensures that the baseToken will be backed 1-1 on L1.
+1. The bridgehub retrieves the `baseTokenAssetId` of the chain with the corresponding `chainId` and calls the `L1AssetRouter.bridgehubDepositBaseToken` method. The `L1AssetRouter` will then use the standard token depositing mechanism to burn/escrow the respective amount of the `baseTokenAssetId`. You can read more about it in [the asset router doc](../asset_router/Overview.md). This step ensures that the baseToken will be backed 1-1 on L1.
2. After that, it just routes the corresponding call to the ZKChain with the corresponding `chainId`. It is now the responsibility of the ZKChain to validate that the transaction is correct and can be accepted by it. This validation includes, but is not limited to:

-- The fact that the user paid enough funds for the transaction (basically `request.l2GasLimit * derivedL2GasPrice(...) + request.l2Value >= request.mintValue`.
-- The fact the transaction is always executable (the `request.l2GasLimit` is not high enough).
-- etc.
+  - The fact that the user paid enough funds for the transaction (basically that `request.mintValue >= request.l2GasLimit * derivedL2GasPrice(...) + request.l2Value`).
+  - The fact that the transaction is always executable (i.e. that the `request.l2GasLimit` is not too high).
+  - etc.
+
3. After the ZKChain validates the tx, it includes it in its priority queue. Once the operator executes this transaction on L2, the `mintValue` of the baseToken will be minted on L2. The `derivedL2GasPrice(...) * gasUsed` will be given to the operator’s balance.
The other funds can be routed in either of the following ways:

-If the transaction is successful, the `request.l2Value` will be minted on the `request.l2Contract` address (it can potentially transfer these funds within the transaction). The rest are minted to the `request.refundRecipient` address. In case the transaction is not successful, all of the base token will be minted to the `request.refundRecipient` address. These are the same rules as for the zkSync Era.
+If the transaction is successful, the `request.l2Value` will be minted to the `request.l2Contract` address (it can potentially transfer these funds within the transaction). The rest is minted to the `request.refundRecipient` address. In case the transaction is not successful, all of the base token will be minted to the `request.refundRecipient` address. These are the same rules as for zkSync Era.

-***Diagram of the L1→L2 transaction flow on L1 for direct user calls, the baseToken can be ETH or an ERC20:***
+**_Diagram of the L1→L2 transaction flow on L1 for direct user calls; the baseToken can be ETH or an ERC20:_**

![requestL2TransactionDirect (ETH) (2).png](./img/requestL2TransactionDirect.png)

-***Diagram of the L1→L2 transaction flow on L2 (it is the same regardless of the baseToken):***
+**_Diagram of the L1→L2 transaction flow on L2 (it is the same regardless of the baseToken):_**

![requestL2TransactionTwoBridges](./img/requestL2TransactionTwoBridges_token.png)

@@ -85,7 +85,7 @@

### Limitations of custom base tokens in the current release

-zkSync Era uses ETH as a base token. Upon creation of an ZKChain other chains may want to use their own custom base tokens. Note, that for the current release all the possible base tokens are whitelisted. The other limitation is that all the base tokens must be backed 1-1 on L1 as well as they are solely implemented with `L2BaseToken` contract. In other words:
+zkSync Era uses ETH as a base token. Upon creation of a ZKChain, other chains may want to use their own custom base tokens. Note that for the current release all the possible base tokens are whitelisted. The other limitation is that all the base tokens must be backed 1-1 on L1 and implemented solely with the `L2BaseToken` contract. In other words:

- No custom logic is allowed on L2 for base tokens
- Base tokens can not be minted on L2 without being backed by the corresponding L1 amount.

@@ -100,9 +100,9 @@ Once the chain is created, its L2AssetRouter will be automatically deployed upon

`L1AssetRouter` is used as the main "glue" for value bridging across chains. Whenever a token that is not native needs to be bridged between two chains, an L1<>L2 transaction on behalf of an AssetRouter needs to be performed. For more details, check out the [asset router documentation](../asset_router/Overview.md). But for this section it is enough to understand that we need to somehow make a transaction on behalf of `L1AssetRouter` to its L2 counterpart to deliver the message about a certain amount of an asset being bridged.

-> In the next paragraphs we will often refer to `L1AssetRouter` as performing something. It is good enough for understanding of how bridgehub functionality works. Under the hood though, it mainly serves as common entry that calls various asset handlers that are chosen based on asset id. You can read more about it in the [asset router documentation](../asset_router/asset_router.md).
+> In the next paragraphs we will often refer to `L1AssetRouter` as performing something. This is good enough for understanding how the bridgehub functionality works. Under the hood, though, it mainly serves as a common entry point that calls various asset handlers chosen based on the asset id. You can read more about it in the [asset router documentation](../asset_router/asset_router.md).

-Let’s say that a ZKChain has ETH as its base token. Let’s say that the depositor wants to bridge USDC to that chain. We can not use `BridgeHub.requestL2TransactionDirect`, because it only takes base token `mintValue` and then starts an L1→L2 transaction rightaway out of the name of the user and not the `L1AssetRouter`.
+Let’s say that a ZKChain has ETH as its base token, and that the depositor wants to bridge USDC to that chain. We cannot use `BridgeHub.requestL2TransactionDirect`, because it only takes the base token `mintValue` and then starts an L1→L2 transaction right away on behalf of the user and not the `L1AssetRouter`.

We need some way to atomically deposit both ETH and USDC to the shared bridge + start a transaction from `L1AssetRouter`. For that we have a separate function on `Bridgehub`: `BridgeHub.requestL2TransactionTwoBridges`. The reason behind the name “two bridges” is a bit historical: the transaction was supposed to compose actions involving two bridges: the bridge responsible for base tokens and the second bridge responsible for any other token.

@@ -112,47 +112,45 @@ When calling `BridgeHub.requestL2TransactionTwoBridges` the following struct nee

```solidity
struct L2TransactionRequestTwoBridgesOuter {
-    uint256 chainId;
-    uint256 mintValue;
-    uint256 l2Value;
-    uint256 l2GasLimit;
-    uint256 l2GasPerPubdataByteLimit;
-    address refundRecipient;
-    address secondBridgeAddress;
-    uint256 secondBridgeValue;
-    bytes secondBridgeCalldata;
-}
+  uint256 chainId;
+  uint256 mintValue;
+  uint256 l2Value;
+  uint256 l2GasLimit;
+  uint256 l2GasPerPubdataByteLimit;
+  address refundRecipient;
+  address secondBridgeAddress;
+  uint256 secondBridgeValue;
+  bytes secondBridgeCalldata;
+}
```

The first few fields are the same as for the simple L1→L2 transaction case. However, there are three new fields:

- `secondBridgeAddress` is the address of the bridge (or contract in general) which will need to perform the L1->L2 transaction. In this case it should be the same `L1AssetRouter`
-- `secondBridgeValue` is the `msg.value` to be sent to the bridge which is responsible for the asset being deposited (in this case it is `L1AssetRouter` ). This can be used to deposit ETH to ZKChains that have base token that is not ETH.
-- `secondBridgeCalldata` is the data to pass to the second contract. `L1AssetRouter` supports multiple formats of calldata, the list can be seen in the `bridgehubDeposit` function of the `L1AssetRouter`.
+- `secondBridgeValue` is the `msg.value` to be sent to the bridge which is responsible for the asset being deposited (in this case it is `L1AssetRouter`). This can be used to deposit ETH to ZKChains that have a base token that is not ETH.
+- `secondBridgeCalldata` is the data to pass to the second contract. `L1AssetRouter` supports multiple formats of calldata; the list can be seen in the `bridgehubDeposit` function of the `L1AssetRouter`.

The function will do the following:

-**L1**
+#### L1

-1. It will deposit the `request.mintValue` of the ZKChain’s base token the same way as during a simple L1→L2 transaction. These funds will be used for funding the `l2Value` and the fee to the operator.
+1. 
It will deposit the `request.mintValue` of the ZKChain’s base token the same way as during a simple L1→L2 transaction. These funds will be used for funding the `l2Value` and the fee to the operator.
2. It will call the `secondBridgeAddress` (`L1AssetRouter`) once again and this time it will deposit the funds to the `L1AssetRouter`, but this deposit is not to pay the fees; rather, it is for the sake of bridging the desired token.
-This call will return the parameters to call the l2 contract with (the address of the L2 bridge counterpart, the calldata and factory deps to call it with).
-3. After the BridgeHub will call the ZKChain to add the corresponding L1→L2 transaction to the priority queue.
-4. The BridgeHub will call the `SharedBridge` once again so that it can remember the hash of the corresponding deposit transaction. [This is needed in case the deposit fails](#claiming-failed-deposits).
+This call will return the parameters to call the l2 contract with (the address of the L2 bridge counterpart, the calldata and factory deps to call it with).
+3. Afterwards, the BridgeHub will call the ZKChain to add the corresponding L1→L2 transaction to the priority queue.
+4. The BridgeHub will call the `SharedBridge` once again so that it can remember the hash of the corresponding deposit transaction. [This is needed in case the deposit fails](#claiming-failed-deposits).

-**L2**
+#### L2

1. After some time, the corresponding L1→L2 transaction is created.
-2. The L2AssetRouter will receive the message and re-route it to the asset handler of the bridged token. To read more about how it works, check out the [asset router documentation](../asset_router/Overview.md).
+2. The L2AssetRouter will receive the message and re-route it to the asset handler of the bridged token. To read more about how it works, check out the [asset router documentation](../asset_router/Overview.md).

-***Diagram of a depositing ETH onto a chain with USDC as the baseToken. Note that some contract calls (like `USDC.transerFrom` are omitted for the sake of consiceness):***
+**_Diagram of depositing ETH onto a chain with USDC as the baseToken. Note that some contract calls (like `USDC.transferFrom`) are omitted for the sake of conciseness:_**

![requestL2TransactionTwoBridges (SharedBridge) (1).png](./img/requestL2TransactionTwoBridges_depositEthToUSDC.png)

## Generic usage of `BridgeHub.requestL2TransactionTwoBridges`

-`L1AssetRouter` is the only bridge that can handle base tokens. However, the `BridgeHub.requestL2TransactionTwoBridges` could be used by `secondBridgeAddress` on L1. A notable example of how it is done is how our [CTMDeploymentTracker](../../l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol) uses it to register the correct CTM address on Gateway. You can read more about how Gateway works in [its documentation](../../gateway/overview.md).
+`L1AssetRouter` is the only bridge that can handle base tokens. However, `BridgeHub.requestL2TransactionTwoBridges` can be used with any `secondBridgeAddress` on L1. A notable example is how our [CTMDeploymentTracker](../../l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol) uses it to register the correct CTM address on Gateway. You can read more about how Gateway works in [its documentation](../../gateway/overview.md).
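For illustration, here is a hypothetical caller-side sketch of the deposit flow above. The interface shape, gas values, and calldata encoding are assumptions made for the example, not the exact production API; in particular, `bridgehubDeposit` of the `L1AssetRouter` accepts several calldata formats, and the one to use is passed in opaquely here:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// Assumed, simplified interface for the example; the real Bridgehub API
/// may differ in details.
interface IBridgehubLike {
    struct L2TransactionRequestTwoBridgesOuter {
        uint256 chainId;
        uint256 mintValue;
        uint256 l2Value;
        uint256 l2GasLimit;
        uint256 l2GasPerPubdataByteLimit;
        address refundRecipient;
        address secondBridgeAddress;
        uint256 secondBridgeValue;
        bytes secondBridgeCalldata;
    }

    function requestL2TransactionTwoBridges(
        L2TransactionRequestTwoBridgesOuter calldata _request
    ) external payable returns (bytes32 canonicalTxHash);
}

contract TwoBridgesDepositExample {
    IBridgehubLike public immutable BRIDGE_HUB;
    address public immutable L1_ASSET_ROUTER;

    constructor(IBridgehubLike _bridgeHub, address _l1AssetRouter) {
        BRIDGE_HUB = _bridgeHub;
        L1_ASSET_ROUTER = _l1AssetRouter;
    }

    /// Deposits a non-base token to an ETH-based chain. `msg.value` funds the
    /// base-token `mintValue` (operator fee only, since `l2Value` is 0).
    function deposit(uint256 _chainId, bytes calldata _secondBridgeCalldata) external payable {
        BRIDGE_HUB.requestL2TransactionTwoBridges{value: msg.value}(
            IBridgehubLike.L2TransactionRequestTwoBridgesOuter({
                chainId: _chainId,
                mintValue: msg.value, // must cover l2GasLimit * derivedL2GasPrice(...)
                l2Value: 0,
                l2GasLimit: 1_000_000, // placeholder value
                l2GasPerPubdataByteLimit: 800, // placeholder value
                refundRecipient: msg.sender,
                secondBridgeAddress: L1_ASSET_ROUTER,
                secondBridgeValue: 0, // non-zero only when the bridged asset is ETH itself
                secondBridgeCalldata: _secondBridgeCalldata // one of the formats accepted by `bridgehubDeposit`
            })
        );
    }
}
```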
Let’s do a quick recap on how it works: @@ -160,49 +158,49 @@ When calling `BridgeHub.requestL2TransactionTwoBridges` the following struct nee ```solidity struct L2TransactionRequestTwoBridgesOuter { - uint256 chainId; - uint256 mintValue; - uint256 l2Value; - uint256 l2GasLimit; - uint256 l2GasPerPubdataByteLimit; - address refundRecipient; - address secondBridgeAddress; - uint256 secondBridgeValue; - bytes secondBridgeCalldata; -} + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; +} ``` - `secondBridgeAddress` is the address of the L1 contract that needs to perform the L1->L2 transaction. -- `secondBridgeValue` is the `msg.value` to be sent to the `secondBridgeAddress`. +- `secondBridgeValue` is the `msg.value` to be sent to the `secondBridgeAddress`. - `secondBridgeCalldata` is the data to pass to the `secondBridgeAddress`. This can be interpreted any way it wants. -1. Firstly, the Bridgehub will deposit the `request.mintValue` the same way as during a simple L1→L2 transaction. These funds will be used for funding the `l2Value` and the fee to the operator. +1. Firstly, the Bridgehub will deposit the `request.mintValue` the same way as during a simple L1→L2 transaction. These funds will be used for funding the `l2Value` and the fee to the operator. 2. After that, the `secondBridgeAddress.bridgehubDeposit` with the following signature is called ```solidity struct L2TransactionRequestTwoBridgesInner { - // Should be equal to a constant `keccak256("TWO_BRIDGES_MAGIC_VALUE")) - 1` - bytes32 magicValue; - // The L2 contract to call - address l2Contract; - // The calldata to call it with - bytes l2Calldata; - // The factory deps to call it with - bytes[] factoryDeps; - // Just some 32-byte value that can be used for later processing - // It is called `txDataHash` as it *should* be used as a way to facilitate - // reclaiming failed deposits. - bytes32 txDataHash; + // Should be equal to a constant `keccak256("TWO_BRIDGES_MAGIC_VALUE")) - 1` + bytes32 magicValue; + // The L2 contract to call + address l2Contract; + // The calldata to call it with + bytes l2Calldata; + // The factory deps to call it with + bytes[] factoryDeps; + // Just some 32-byte value that can be used for later processing + // It is called `txDataHash` as it *should* be used as a way to facilitate + // reclaiming failed deposits. + bytes32 txDataHash; } function bridgehubDeposit( - uint256 _chainId, - // The actual user that does the deposit - address _prevMsgSender, - // The msg.value of the L1->L2 transaction to be created - uint256 _l2Value, - // Custom bridge-specific data - bytes calldata _data + uint256 _chainId, + // The actual user that does the deposit + address _prevMsgSender, + // The msg.value of the L1->L2 transaction to be created + uint256 _l2Value, + // Custom bridge-specific data + bytes calldata _data ) external payable returns (L2TransactionRequestTwoBridgesInner memory request); ``` @@ -212,23 +210,23 @@ Ultimately, the correctly processed `bridgehubDeposit` function basically grants Aside from the magical constant, the method should also return the information an L1→L2 transaction will start its call with: the `l2Contract` , `l2Calldata`, `factoryDeps`. It also should return the `txDataHash` field. The meaning `txDataHash` will be needed in the next paragraphs. But generally it can be any 32-byte value the bridge wants. -1. 
After that, an L1→L2 transaction is invoked. Note, that the “trusted” `L1AssetRouter` has enforced that the baseToken was deposited correctly (again, the step (1) can *only* be handled by the `L1AssetRouter`), while the second bridge can provide any data to call its L2 counterpart with.
+1. After that, an L1→L2 transaction is invoked. Note that the “trusted” `L1AssetRouter` has enforced that the baseToken was deposited correctly (again, step (1) can _only_ be handled by the `L1AssetRouter`), while the second bridge can provide any data to call its L2 counterpart with.
2. As a final step, the following function is called:

```solidity
function bridgehubConfirmL2Transaction(
-    // `chainId` of the ZKChain
-    uint256 _chainId,
-    // the same value that was returned by `bridgehubDeposit`
-    bytes32 _txDataHash,
-    // the hash of the L1->L2 transaction
-    bytes32 _txHash
+  // `chainId` of the ZKChain
+  uint256 _chainId,
+  // the same value that was returned by `bridgehubDeposit`
+  bytes32 _txDataHash,
+  // the hash of the L1->L2 transaction
+  bytes32 _txHash
) external;
```

-This function is needed for whatever actions are needed to be done after the L1→L2 transaction has been invoked.
+This function performs whatever actions need to be done after the L1→L2 transaction has been invoked.

-On `L1AssetRouter` it is used to remember the hash of each deposit transaction, so that later on, the funds could be returned to user if the `L1->L2` transaction fails. The `_txDataHash` is stored so that the whenever the users will want to reclaim funds from a failed deposit, they would provide the token and the amount as well as the sender to send the money to.
+On `L1AssetRouter` it is used to remember the hash of each deposit transaction, so that later on, the funds can be returned to the user if the `L1->L2` transaction fails. The `_txDataHash` is stored so that whenever users want to reclaim funds from a failed deposit, they can provide the token and the amount, as well as the sender to send the money to.

## Claiming failed deposits

diff --git a/docs/chain_management/admin_role.md b/docs/chain_management/admin_role.md
index 5f1e78e96..30bee167b 100644
--- a/docs/chain_management/admin_role.md
+++ b/docs/chain_management/admin_role.md
@@ -1,4 +1,5 @@
# Safe ChainAdmin management
+
[back to readme](../README.md)

While the ecosystem has a [decentralized trusted governance](https://blog.zknation.io/introducing-zk-nation/), each chain has its own Chain Admin. While the upgrade parameters are chosen by the governance, the chain admin is still a powerful role and should be managed carefully.

@@ -7,11 +8,11 @@ In this document we will explore what are the abilities of the ChainAdmin, how d

## General guidelines

-The system does not restrict in any way how the admin of the chain should be implemented. However special caution should be taken to keep it safe.
+The system does not restrict in any way how the admin of the chain should be implemented. However, special caution should be taken to keep it safe.

-The general guideline is that an admin of a ZK chain should be *at least* a well-distributed multisig. Having it as an EOA is definitely a bad idea since having this address stolen can lead to [chain being permanently frozen](#setting-da-layer).
+The general guideline is that an admin of a ZK chain should be _at least_ a well-distributed multisig. Having it as an EOA is definitely a bad idea since having this address stolen can lead to [the chain being permanently frozen](#setting-da-layer).
-Additional measures may be taken [to self-restrict](#proposed-modular-chainadmin-implementation) the ChainAdmin to ensure that some operations can be only done in safe fasion.
+Additional measures may be taken [to self-restrict](#proposed-modular-chainadmin-implementation) the ChainAdmin to ensure that some operations can only be done in a safe fashion.

Generally, all the functionality of the chain admin should be treated with maximal security and caution, with separate hot-key roles reserved for rare circumstances, e.g. to call `setTokenMultiplier` in case of an ERC-20 based chain.

@@ -27,7 +28,7 @@ In case the malicious block has not been executed yet, it can be reverted.

### Setting DA layer

-This is one of the most powerful settings that a chain can have: setting a custom DA layer. The dangers of doing this wrong are obvious: lack of proper data availability solution may lead to funds being frozen. (Note: that funds can never be *stolen* due to ZKP checks of the VM execution).
+This is one of the most powerful settings that a chain can have: setting a custom DA layer. The dangers of doing this wrong are obvious: the lack of a proper data availability solution may lead to funds being frozen. (Note that funds can never be _stolen_ due to ZKP checks of the VM execution.)

Sometimes, users may need assurances that a chain will never become frozen even under a malicious chain admin. A general though unstable approach is discussed [here](#proposed-modular-chainadmin-implementation), however this release comes with a solution specially tailored for rollups: the `isPermanentRollup` setting.

@@ -37,13 +38,11 @@ Chain also exposes the `AdminFacet.makePermanentRollup` function. It will turn a

This functionality is obviously dangerous in a sense that it is permanent and revokes the right of the chain to change its DA layer. On the other hand, it ensures perpetual safety for users. This is the option that zkSync Era plans to use.

-FIXME: may add links about the migration process
-
-This setting is preserved even when migrating to gateway. If this setting was set while chain is on top of Gateway, and it migrates back to L1, it will keep this status, i.e. it is fully irrevocable.
+This setting is preserved even when migrating to [gateway](../gateway/overview.md). If this setting was set while the chain is on top of Gateway, and it migrates back to L1, it will keep this status, i.e. it is fully irrevocable.

### `changeFeeParams` method

-This method allows to change how the fees are charged for priority operations.
+This method allows changing how the fees are charged for priority operations.

The worst impact of setting this value wrongly is having L1->L2 transactions underpriced.

@@ -67,17 +66,18 @@ This method allows to set a transaction filterer, i.e. an additional validator f

### Migration to another settlement layer

-The upgrade can start migration of a chain to another settlement layer. Currently all the settlement layers are whitelisted, so generally this operation is harmless (except for the inconvenience in case the migration was unplanned).
+The admin can start a migration of a chain to another settlement layer. Currently, all the settlement layers are whitelisted, so generally this operation is harmless (except for the inconvenience in case the migration was unplanned).

However, some caution needs to be applied to migrate properly as described in the section below.

## Chain admin when migrating to gateway

When a chain migrates to gateway, it provides the address of the new admin on L2.
The following rules apply:
+
- If a ZK chain has already been deployed on a settlement layer, its admin stays the same.
- If a ZK chain has not been deployed yet, then the new admin is set.

-The above means that in the current release the admin of the chain on the new settlement layer is "detached" from the admin on L1. It is the responsibility of the chain to set the L2 admin correctly: either it should have the same signers or, even better in the long run, put the aliased L1 admin to have most of the abilities inside the L2 chain admin.
+The above means that in the current release the admin of the chain on the new settlement layer is "detached" from the admin on L1. It is the responsibility of the chain to set the L2 admin correctly: either it should have the same signers or, even better in the long run, it should give the aliased L1 admin most of the abilities inside the L2 chain admin.

Since most of the Admin's functionality above is related to L1->L2 operations, the L1 chain admin will continue playing a crucial role even after the chain migrates to Gateway. However, some of the new functionality is relevant to the chain admin on the settlement layer only:

@@ -96,7 +96,7 @@ Overall **very special care** needs to be taken when selecting an admin for the

> **Warning**. The proposed implementation here will likely **not** be used by the Matter Labs team for zkSync Era due to the issues listed in the issues section. This code, however, is still in scope of the audit and may serve as a future basis of a more long term solution.

-In order to ensure that the architecture here flexible enough for future other chains to use, it uses a modular architecture to ensure that other chains could fit it to their needs. By default, this contract is not even `Ownable`, and anyone can execute transactions out of the name of it. In order to add new features such as restricting calling dangerous methods and access control, *restrictions* should be added there. Each restriction is a contract that implements the `IRestriction` interface. The following restrictions have been implemented so far:
+In order to ensure that the architecture here is flexible enough for other chains to use in the future, it follows a modular design that other chains can fit to their needs. By default, this contract is not even `Ownable`, and anyone can execute transactions in its name. In order to add new features such as restricting calls to dangerous methods and access control, _restrictions_ should be added there. Each restriction is a contract that implements the `IRestriction` interface. The following restrictions have been implemented so far:

- `AccessControlRestriction` that allows to specify which addresses can call which methods. In the case of Era, only the `DEFAULT_ADMIN_ROLE` will be able to call any methods. Other chains with non-ETH base token may need an account that would periodically call the L1 contract to update the ETH price there. They may create the `SET_TOKEN_MULTIPLIER_ROLE` role that is required to update the token price and give its rights to some hot private key.

@@ -112,4 +112,3 @@ The approach above does not only helps to protect the chain, but also provides c

Due to specifics of [migration to another settlement layers](#migration-to-another-settlement-layer) (i.e. that migrations do not overwrite the admin), maintaining the same `PermanentRestriction` becomes hard in case a restriction has been added on top of the chain admin inside one chain, but not the other.
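To illustrate the restriction pattern described above, here is a hedged sketch; the interface shape is assumed for the example, and the actual `IRestriction` definition in the repository may differ:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// Assumed call representation for the example.
struct Call {
    address target;
    uint256 value;
    bytes data;
}

/// Assumed restriction interface: a restriction vetoes a call by reverting.
interface IRestriction {
    function validateCall(Call calldata _call, address _invoker) external view;
}

/// Sketch of a ChainAdmin-style executor: not Ownable by default, but every
/// registered restriction must approve every call before it is executed.
contract ChainAdminSketch {
    address[] public restrictions;

    constructor(address[] memory _restrictions) {
        restrictions = _restrictions;
    }

    function execute(Call calldata _call) external payable {
        uint256 len = restrictions.length;
        for (uint256 i = 0; i < len; ++i) {
            // Any reverting restriction blocks the whole operation.
            IRestriction(restrictions[i]).validateCall(_call, msg.sender);
        }
        (bool success, ) = _call.target.call{value: _call.value}(_call.data);
        require(success, "ChainAdmin: call failed");
    }
}
```

An `AccessControlRestriction` would then implement `validateCall` by checking role-based permissions for the selector of `_call.data`, while a `PermanentRestriction`-style contract would revert on any attempt to change settings it has marked as permanent.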
While very flexible, this modular approach should still be polished enough before recommending it as a generic solution for everyone. However, the provided new [ChainAdmin](../../l1-contracts/contracts/governance/ChainAdmin.sol) can still be helpful for new chains as with the `AccessControlRestriction` it provides a ready-to-use framework for role-based managing of the chain. Using `PermanentRestriction` for now is discouraged however. - diff --git a/docs/chain_management/chain_genesis.md b/docs/chain_management/chain_genesis.md index 9105ac2c4..8431868a9 100644 --- a/docs/chain_management/chain_genesis.md +++ b/docs/chain_management/chain_genesis.md @@ -1,7 +1,8 @@ # Creating new chains with BridgeHub + [back to readme](../README.md) -The main contract of the whole hyperchain ecosystem is called *`BridgeHub`*. It contains: +The main contract of the whole hyperchain ecosystem is called _`BridgeHub`_. It contains: - the registry from chainId to CTMs that is responsible for that chainId - the base token for each chainId. @@ -36,7 +37,7 @@ function createNewChain( ) external ``` -BridgeHub will check that the CTM as well as the base token are whitelisted and route the call to the State +BridgeHub will check that the CTM as well as the base token are whitelisted and route the call to the State ![newChain (2).png](./img/create_new_chain.png) @@ -44,14 +45,14 @@ BridgeHub will check that the CTM as well as the base token are whitelisted and In the future, ST creation will be permissionless. A securely random `chainId` will be generated for each chain to be registered. However, generating 32-byte chainId is not feasible with the current SDK expectations on EVM and so for now chainId is of type `uint48`. And so it has to be chosen by the admin of `BridgeHub`. Also, for the current release we would want to avoid chains being able to choose their own initialization parameter to prevent possible malicious input. -For this reason, there will be an entity called `admin` which is basically a hot key managed by us and it will be used to deploy new STs. +For this reason, there will be an entity called `admin` which is basically a hot key managed by us and it will be used to deploy new STs. So the flow for deploying their own ST for users will be the following: -1. Users tell us that they want to deploy a ST with certain governance, CTM (we’ll likely allow only one for now), and baseToken. +1. Users tell us that they want to deploy a ST with certain governance, CTM (we’ll likely allow only one for now), and baseToken. 2. Our server will generate a chainId not reserved by any other major chain and the `admin` will call the `BridgeHub.createNewChain` . This will call the `CTM.createNewChain` that will deploy the instance of the rollup as well as initialize the first transaction there — the system upgrade transaction needed to set the chainId on L2. -After that, the ST is ready to be used. Note, that the admin of the newly created chain (this will be the organization that will manage this chain from now on) will have to conduct certain configurations before the chain can be used securely (FIXME: link). +After that, the ST is ready to be used. Note, that the admin of the newly created chain (this will be the organization that will manage this chain from now on) will have to conduct certain configurations before the chain [can be used securely](../chain_management/admin_role.md). 
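As an illustration of the whitelist checks mentioned above, here is a hypothetical sketch; all names and shapes are assumptions for the example and not the actual `BridgeHub` code:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// Hypothetical sketch of BridgeHub's chain registry checks on creation.
contract BridgehubRegistrySketch {
    address public immutable ADMIN; // the hot key that deploys new STs for now
    mapping(address => bool) public ctmIsRegistered;     // whitelisted CTMs
    mapping(bytes32 => bool) public assetIdIsRegistered; // whitelisted base tokens
    mapping(uint256 => address) public chainTypeManager; // chainId => CTM

    constructor() {
        ADMIN = msg.sender;
    }

    function createNewChain(uint256 _chainId, address _ctm, bytes32 _baseTokenAssetId) external {
        require(msg.sender == ADMIN, "not admin");
        require(ctmIsRegistered[_ctm], "CTM not whitelisted");
        require(assetIdIsRegistered[_baseTokenAssetId], "base token not whitelisted");
        require(chainTypeManager[_chainId] == address(0), "chainId already taken");
        chainTypeManager[_chainId] = _ctm;
        // The real implementation would now route the call to the CTM, which
        // deploys the chain's contracts and queues the first (system upgrade)
        // transaction that sets the chainId on L2.
    }
}
```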
## Built-in contracts and their initialization @@ -64,8 +65,8 @@ Each single ZK Chain has a set of the following contracts that, while not belong To reuse as much code as possible from L1 and also to allow easier initialization, most of these contracts are not initialized as just part of the genesis storage root. Instead, the data for their initialization is part of the original diamondcut for the chain. In the same initial upgrade transaction when the chainId is initialized, these contracts are force-deployed and initialized also. An important part in it plays the new `L2GenesisUpgrade` contract, which is pre-deployed in a user-space contract, but it is delegate-called by the `ComplexUpgrader` system contract (already exists as part of genesis and existed before this upgrade). -# Additional limitations for the current version +## Additional limitations for the current version -In the current version creating new chains will not be permissionless. That is needed to ensure that no malicious input can be provided there. +In the current version creating new chains will not be permissionless. That is needed to ensure that no malicious input can be provided there. Also, since in the current release, there will be little benefits from shared liquidity, i.e. the there will be no direct ZKChain<>ZKChain transfers supported, as a measure of additional security we’ll also keep track of balances for each individual ZKChain and will not allow it to withdraw more than it has deposited into the system. diff --git a/docs/chain_management/chain_type_manager.md b/docs/chain_management/chain_type_manager.md index d9a55b809..39c158130 100644 --- a/docs/chain_management/chain_type_manager.md +++ b/docs/chain_management/chain_type_manager.md @@ -1,17 +1,18 @@ -## Chain Type Manager (CTM) +# Chain Type Manager (CTM) + [back to readme](../README.md) > If someone is already familiar with the [previous version](https://github.com/code-423n4/2024-03-zksync) of zkSync architecture, this contract was previously known as "State Transition Manager (CTM)". Currently bridging between different zk rollups requires the funds to pass through L1. This is slow & expensive. -The vision of seamless internet of value requires transfers of value to be *both* seamless and trustless. This means that for instance different STs need to share the same L1 liquidity, i.e. a transfer of funds should never touch L1 in the process. However, it requires some sort of trust between two chains. If a malicious (or broken) rollup becomes a part of the shared liquidity pool it can steal all the funds. +The vision of seamless internet of value requires transfers of value to be _both_ seamless and trustless. This means that for instance different STs need to share the same L1 liquidity, i.e. a transfer of funds should never touch L1 in the process. However, it requires some sort of trust between two chains. If a malicious (or broken) rollup becomes a part of the shared liquidity pool it can steal all the funds. However, can two instances of the same zk rollup trust each other? The answer is yes, because no new additions of rollups introduce new trust assumptions. Assuming there are no bugs in circuits, the system will work as intended. -How can two rollups know that they are two different instances of the same system? We can create a factory of such contracts (and so we would know that each new rollup created by this instance is correct one). But just creating correct contracts is not enough. 
Ethereum changes, new bugs may be found in the original system & so an instance that does not keep itself up-to-date with the upgrades may exploit some bug from the past and jeopardize the entire system. Just deploying is not enough. We need to constantly make sure that all STs are up to date and maintain whatever other invariants are needed for these STs to trust each other. +How can two rollups know that they are two different instances of the same system? We can create a factory of such contracts (and so we would know that each new rollup created by this instance is correct one). But just creating correct contracts is not enough. Ethereum changes, new bugs may be found in the original system & so an instance that does not keep itself up-to-date with the upgrades may exploit some bug from the past and jeopardize the entire system. Just deploying is not enough. We need to constantly make sure that all STs are up to date and maintain whatever other invariants are needed for these STs to trust each other. -Let’s define as *Chain Type Manager* (CTM) **as a contract that is responsible for the following: +Let’s define as _Chain Type Manager_ (CTM) \*\*as a contract that is responsible for the following: - It serves as a factory to deploy STs (new ZK chains) - It is responsible for ensuring that all the STs deployed by it are up-to-date. @@ -22,23 +23,23 @@ In the long term vision STs deployment will be permissionless, however CTM will ## Configurability in the current release -For now, only one CTM will be supported — the one that deploys instances of zkSync Era, possibly using other DA layers. To read more about different DA layers, check out this document (FIXME link). +For now, only one CTM will be supported — the one that deploys instances of zkSync Era, possibly using other DA layers. To read more about different DA layers, check out [this document](../settlement_contracts/data_availability/custom_da.md). -The exact process of deploying & registering a ST will be described in [sections below](#creating-new-chains-with-bridgehub). Overall, each ST in the current release will have the following parameters: +The exact process of deploying & registering a ST can be [read here](./chain_genesis.md). Overall, each ST in the current release will have the following parameters: -| ST parameter | Updatability | Comment | -| --- | --- | --- | -| chainId | Permanent | Permanent identifier of the ST. Due to wallet support reasons, for now chainId has to be small (48 bits). This is one of the reasons why for now we’ll deploy STs manually, to prevent STs from having the same chainId as some another popular chain. In the future it will be trustlessly assigned as a random 32-byte value.| -| baseTokenAssetId | Permanent | Each ST can have their own custom base token (i.e. token used for paying the fees). It is set once during creation and can never be changed. Note, that we refer to and "asset id" here instead of an L1 address. To read more about what is assetId and how it works check out the document for custom asset bridging (FIXME: link) | -| chainTypeManager | Permanent | The CTM that deployed the ST. In principle, it could be possible to migrate between CTMs (assuming both CTMs support that). However, in practice it may be very hard and as of now such functionality is not supported. | -| admin | By admin of ST | The admin of the ST. It has some limited powers to govern the chain. 
To read more about which powers are available to a chain admin and which precautions should be taken, check out this document (FIXME: link to document about admin precauotions) | -| validatorTimelock | CTM | For now, we want all the chains to use the same 21h timelock period before their batches are finalized. Only CTM can update the address that can submit state transitions to the rollup (that is, the validatorTimelock). | -| validatorTimelock.validator | By admin of ST | The admin of ST can choose who can submit new batches to the ValidatorTimelock. | -| priorityTx FeeParams | By admin of ST | The admin of a ZK chain can amend the priority transaction fee params. | -| transactionFilterer | By admin of ST | A chain may put an additional filter to the incoming L1->L2 transactions. This may be needed by a permissioned chain (e.g. a Validium bank-lile corporate chain). | -| DA validation / permanent rollup status | By admin of ST | A chain can decide which DA layer to use. You check out more about safe DA management here (FIXME: link to admin doc) | -| executing upgrades | By admin of ST | While exclusively CTM governance can set the content of the upgrade, STs will typically be able to choose suitable time for them to actually execute it. In the current release, STs will have to follow our upgrades. | -| settlement layer | By admin of ST | The admin of the chain can enact migrations to other settlement layers. | +| ST parameter | Updatability | Comment | +| --------------------------------------- | -------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| chainId | Permanent | Permanent identifier of the ST. Due to wallet support reasons, for now chainId has to be small (48 bits). This is one of the reasons why for now we’ll deploy STs manually, to prevent STs from having the same chainId as some another popular chain. In the future it will be trustlessly assigned as a random 32-byte value. | +| baseTokenAssetId | Permanent | Each ST can have their own custom base token (i.e. token used for paying the fees). It is set once during creation and can never be changed. Note, that we refer to and "asset id" here instead of an L1 address. To read more about what is assetId and how it works check out the document for [asset router](../bridging/asset_router/Overview.md) | +| chainTypeManager | Permanent | The CTM that deployed the ST. In principle, it could be possible to migrate between CTMs (assuming both CTMs support that). However, in practice it may be very hard and as of now such functionality is not supported. | +| admin | By admin of ST | The admin of the ST. It has some limited powers to govern the chain. To read more about which powers are available to a chain admin and which precautions should be taken, check [out this document](../chain_management/admin_role.md) | +| validatorTimelock | CTM | For now, we want all the chains to use the same 21h timelock period before their batches are finalized. Only CTM can update the address that can submit state transitions to the rollup (that is, the validatorTimelock). | +| validatorTimelock.validator | By admin of ST | The admin of ST can choose who can submit new batches to the ValidatorTimelock. 
+| priorityTx FeeParams | By admin of ST | The admin of a ZK chain can amend the priority transaction fee params. |
+| transactionFilterer | By admin of ST | A chain may put an additional filter on the incoming L1->L2 transactions. This may be needed by a permissioned chain (e.g. a Validium bank-like corporate chain). |
+| DA validation / permanent rollup status | By admin of ST | A chain can decide which DA layer to use. You can read more about [safe DA management here](./admin_role.md) |
+| executing upgrades | By admin of ST | While exclusively CTM governance can set the content of the upgrade, STs will typically be able to choose a suitable time for them to actually execute it. In the current release, STs will have to follow our upgrades. |
+| settlement layer | By admin of ST | The admin of the chain can enact migrations to other settlement layers. |

> Note, that if we take a look at the access control for the corresponding functions inside the [AdminFacet](../../l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol), one may see that a lot of the methods from above that are marked as "By admin of ST" could in theory be amended by the ChainTypeManager. However, this sort of action requires approval from decentralized governance. Also, in case of an urgent high risk situation, the decentralized governance might force upgrade the contract via CTM.

@@ -49,17 +50,17 @@ In the current release, each chain will be an instance of zkSync Era and so the

1. Firstly, the governance of the CTM will publish the server (including sequencer, prover, etc) that supports the new version. This is done offchain. Enough time should be given to various zkStack devs to update their version.
2. The governance of the CTM will publish the upgrade onchain by automatically executing the following three transactions:

-- `setChainCreationParams` ⇒ to ensure that new chains will be created with the version
-- `setValidatorTimelock` (if needed) ⇒ to ensure that the new chains will use the new validator timelock right-away
-- `setNewVersionUpgrade` ⇒ to save the upgrade information that each ST will need to follow to conduct the upgrade on their side.
+   - `setChainCreationParams` ⇒ to ensure that new chains will be created with the new version
+   - `setValidatorTimelock` (if needed) ⇒ to ensure that the new chains will use the new validator timelock right away
+   - `setNewVersionUpgrade` ⇒ to save the upgrade information that each ST will need to follow to conduct the upgrade on their side.

-3. After that, each ChainAdmin can upgrade to the new version in suitable time for them.
+3. After that, each ChainAdmin can upgrade to the new version at a time suitable for them.

> Note, that while the governance does try to give the maximal possible time for chains to upgrade, the governance will typically put restrictions (aka deadlines) on the time by which the chain has to be upgraded. If the deadline is passed, the chain can not commit new batches until the upgrade is executed.

### Emergency upgrade

-In case of an emergency, the [security council](https://blog.zknation.io/introducing-zk-nation/) has the ability to freeze the ecosystem and conduct an emergency upgrade (FIXME: link to governance doc).
+In case of an emergency, the [security council](https://blog.zknation.io/introducing-zk-nation/) has the ability to freeze the ecosystem and conduct an emergency upgrade.

In case we are aware that some of the committed batches on an ST are dangerous to be executed, the CTM can call `revertBatches` on that ST.
For faster reaction, the admin of the ChainTypeManager has the ability to do so without waiting for governance approval, which may take a lot of time. This action does not lead to funds being lost, so it is considered suitable for the partially trusted role of the admin of the ChainTypeManager.

diff --git a/docs/chain_management/upgrade_process.md b/docs/chain_management/upgrade_process.md
index d662f9749..de8177e0b 100644
--- a/docs/chain_management/upgrade_process.md
+++ b/docs/chain_management/upgrade_process.md
@@ -1,67 +1,41 @@
-# TODO
# Upgrade process document
+
+[back to readme](../README.md)

## Intro

+This document assumes that you have an understanding of [the structure](../settlement_contracts/zkchain_basics.md) of an individual chain's L1 contracts.
+
Upgrading the ecosystem of ZKChains is a complicated process. ZKSync is a complex ecosystem with many chains and contracts and each upgrade is unique, but there are some steps that repeat for most upgrades. These are mostly how we interact with the CTM, the diamond facets, the L1→L2 upgrade, and how we update the verification keys. Each upgrade consists of two parameters:

- Facet cuts - change of the internal implementation of the diamond proxy
-- Diamond Initialization - delegate call to the specified address with specified data
+- Diamond Initialization - delegate call to the specified address with specified data

-The second parameter is very powerful and flexible enough to move majority of upgrade logic there. However, until this day we had no ready or semi-finished template for the diamond initialization and now we are did the template for the upgrades with the most common and more likely needs.
+The second parameter is very powerful and flexible enough to move the majority of the upgrade logic there.

## Upgrade structure

-There are two contracts for this,

-1. [BaseZkSyncUpgrade](https://github.com/matter-labs/zksync-2-contracts/blob/sb-new-upgrade-system/ethereum/contracts/upgrades/BaseZkSyncUpgrade.sol) - Generic template with function that can be useful for upgrades
-2. [DefaultUpgrade](https://github.com/matter-labs/zksync-2-contracts/blob/sb-new-upgrade-system/ethereum/contracts/upgrades/DefaultUpgrade.sol) - Default implementation of the [BaseZkSyncUpgrade](https://github.com/matter-labs/zksync-2-contracts/blob/sb-new-upgrade-system/ethereum/contracts/upgrades/BaseZkSyncUpgrade.sol), contract that is most often planned to be used as diamond intialization when doing upgrades.

+Upgrade information is composed in the form of a [DiamondCutData](../../l1-contracts/contracts/state-transition/libraries/Diamond.sol#L75) struct. During the upgrade, the chain's DiamondProxy will delegateCall the `initAddress` with the provided `initCalldata`, while the facets of the `DiamondProxy` will be changed according to the `facetCuts`. This scheme is very powerful and allows changing anything in the contract. However, we typically have a very specific set of changes that we need to make. To facilitate these, two contracts have been created:

-While usually every upgrade is different, a common part can be distinguished, that’s their job.

+1. [BaseZkSyncUpgrade](../../l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol) - A generic template with functions that can be useful for upgrades
+2. [DefaultUpgrade](../../l1-contracts/contracts/upgrades/DefaultUpgrade.sol) - The default implementation of `BaseZkSyncUpgrade`, the contract that is most often planned to be used as the diamond initialization when doing upgrades.
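To make the structure concrete, here is a simplified sketch of the `DiamondCutData` payload described above (the field names follow the struct referenced in `Diamond.sol`, but the exact definitions in the library may differ slightly):

```solidity
// Simplified sketch of the upgrade payload described above; the exact
// definitions live in the Diamond library of the l1-contracts repository.
enum Action {
    Add,
    Replace,
    Remove
}

struct FacetCut {
    address facet; // implementation contract providing the selectors below
    Action action; // whether to add, replace or remove the selectors
    bool isFreezable; // whether the facet can be frozen
    bytes4[] selectors; // function selectors affected by this cut
}

struct DiamondCutData {
    FacetCut[] facetCuts; // changes to apply to the DiamondProxy's facets
    address initAddress; // contract delegate-called during the upgrade (e.g. DefaultUpgrade)
    bytes initCalldata; // calldata for that delegate call
}
```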
+
+> Note, that the Gateway upgrade will be more complex than the usual ones and so a similar, but separate [process](../upgrade_history/gateway_upgrade/upgrade_process.md) will be used for it. It will also use its own custom implementation of the `BaseZkSyncUpgrade`: [GatewayUpgrade](../../l1-contracts/contracts/upgrades/GatewayUpgrade.sol).

### Protocol version

For tracking upgrade versions on different networks (private testnet, public testnet, mainnet) we use a protocol version, which is basically just a number denoting the deployed version. The protocol version is different from the Diamond Cut `proposalId`, since `proposalId` only shows how many upgrade proposals were proposed/executed, but nothing about the content of the upgrades, while the protocol version is needed to understand what version is deployed.

-In the [BaseZkSyncUpgrade](https://github.com/matter-labs/zksync-2-contracts/blob/sb-new-upgrade-system/ethereum/contracts/upgrades/BaseZkSyncUpgrade.sol) & [DefaultUpgrade](https://github.com/matter-labs/zksync-2-contracts/blob/sb-new-upgrade-system/ethereum/contracts/upgrades/DefaultUpgrade.sol) we allow to arbitrarily increase the proposal version while upgrading a system, but only increase it. We are doing that since we can skip some protocol versions if for example found a bug there (but it was deployed on another network already).
-
-
-
-#
-
-This upgrade tx:
-
-- force deploys and updates base system contracts
-- updates base system contracts, bootloader, default AA
-
-
-
-## STM interactions
-
-- Context: all upgrade txs are sent via the Governance contract via a scheduleTransparent and execute operations. This can call any of our contracts, the Governance is the owner of all of them.
-- Previously we called the DiamondProxy directly with the diamond cut. After v0.24.1 we call the STM which records the diamondCut hash in a mapping(protocolVersion ⇒ upgradeCutHash) . After this the chainAdmin can call the Admin facet of their chain with the same diamondCut data, and this executes the diamondCut ( the provided diamondCut is checked against the upgradeCutHash in the STM)

+In the [BaseZkSyncUpgrade](../../l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol) & [DefaultUpgrade](../../l1-contracts/contracts/upgrades/DefaultUpgrade.sol) we allow the protocol version to be increased arbitrarily while upgrading a system, but never decreased. We do that since we may skip some protocol versions if, for example, a bug was found in one of them (but it was already deployed on another network).

-## Contracts involved in the L1→L2 tx:

## Protocol upgrade transaction

-### L1

+During an upgrade, we typically need to update not only the L1 contracts, but also the L2 ones. This is achieved by creating an upgrade transaction. More details on how those are processed inside the system can be read [here](../settlement_contracts/priority_queue/processing_of_l1->l2_txs.md).

-- The L1→L2 upgrade tx is set via the DiamondCut via the DefaultUpgrade / BaseZkSyncUpgrade. The server picks the diamondCut up similarly to how it pick up the PQ transaction, and executes it. The upgrade tx is special, it has its own tx type, an L2→L1 system log is sent with the upgrade tx hash, and this L2→L1 log is compared in the Executor facet with the currently recorded upgrade tx hash when the batch is executed. If the hash is incorrect the upgrade fails.
+## Whitelisting and executing upgrade

-### L2

+Note, that due to how powerful the upgrades are, if we allowed any [chain admin](../chain_management/admin_role.md) to enact any upgrade it wants, it could allow malicious chains to potentially break some of the ecosystem invariants. Because of that, any upgrade must first be whitelisted by the decentralized governance through calling the `setNewVersionUpgrade` function of the [ChainTypeManager](../../l1-contracts/contracts/state-transition/ChainTypeManager.sol).

-- ComplexUpgrader.sol
-  - This is the general L2 System Contract that can execute any upgrade by delegateCalling an implementation upgrade contract. This implementation can change for each upgrade, it might inherit the ForceDeployUpgrader.sol
-- ForceDeployUpgrader.sol
-  - Is a standard L2 implementation. It is used to call the ContractDeployer to force deploy contracts. In itself it is not really useful ( as the FORCE_DEPLOYER address can also do this), but if a custom upgrade implementation inherits it then it is useful.
-- ContractDeployer.sol
-  - The contract that deploys all contracts. It also supports ForceDeployments when called from the ComplexUpgrader or the FORCE_DEPLOYER address ( which is not the address of the ForceDeployUpgrader) .
-- GenesisUpgrade (WIP for Gateway). This will be the upgrade used in the genesis upgrade to deploy the Bridgehub. ( we need to use it as the BH has a constructor). We will also use this in the future to deploy the custom base token contract.
\ No newline at end of file
+In order to execute the upgrade, the chain admin would call the `upgradeChainFromVersion` function from the [Admin](../../l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol) facet.

diff --git a/docs/gateway/chain_migration.md b/docs/gateway/chain_migration.md
index 9003800cf..2fe254e7b 100644
--- a/docs/gateway/chain_migration.md
+++ b/docs/gateway/chain_migration.md
@@ -1,4 +1,5 @@
# Chain migration
+
[back to readme](../README.md)

## Ecosystem Setup

@@ -9,8 +10,9 @@ CTMDeployer is a very lightweight contract used to facilitate chain migration. I

- Assign bridgehub as the asset handler for the “asset” of the CTM on the supported settlement layer.

-Currently, it can only be done by the owner of the CTMDeployer, but in the future, this method can become either permissionless or callable by the CTM owner.
-- Tell bridgehub which address on the L2 should serve as the L2 representation of the CTM on L1. Currently, it can only be done by the owner of the CTMDeployer, but in the future, this method can become callable by the CTM owner.
+Currently, it can only be done by the owner of the CTMDeployer, but in the future, this method can become either permissionless or callable by the CTM owner.
+
+- Tell bridgehub which address on the L2 should serve as the L2 representation of the CTM on L1. Currently, it can only be done by the owner of the CTMDeployer, but in the future, this method can become callable by the CTM owner.

![image.png](./img/ctm_gw_registration.png)

@@ -20,11 +22,11 @@

## Chain migration GW → L1

-Chain migration from from L1 to GW works similar to how NFT bridging from L1 to another chain would work. Migrating back will use the same mechanism as for withdrawals.
+Chain migration from L1 to GW works similarly to how NFT bridging from L1 to another chain would work. Migrating back will use the same mechanism as for withdrawals.
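To make the burn/mint analogy concrete, below is a schematic sketch of the flow (all names and signatures here are illustrative assumptions for exposition, not the real interfaces; the actual steps go through the asset router and bridgehub contracts and are listed in the notes of the gateway protocol upgrade document):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Schematic sketch of chain migration as burn/mint; names are assumptions.
contract ChainMigrationSketch {
    // chainId => settlement layer that currently "owns" the chain
    mapping(uint256 => address) public settlementLayer;
    // chainId => protocol version of the chain's contracts
    mapping(uint256 => uint256) public protocolVersion;

    // "Burn" on the source layer: record the new settlement layer and snapshot
    // the data the destination needs; the chain becomes "inactive" here.
    function startMigration(uint256 chainId, address targetSL) external returns (bytes memory payload) {
        settlementLayer[chainId] = targetSL;
        payload = abi.encode(chainId, protocolVersion[chainId]);
    }

    // "Mint" on the destination layer: only accept the chain if the local CTM
    // runs the same protocol version as the snapshot taken at burn time.
    function finishMigration(bytes calldata payload, uint256 localCtmVersion) external {
        (uint256 chainId, uint256 version) = abi.decode(payload, (uint256, uint256));
        require(version == localCtmVersion, "protocol version mismatch");
        settlementLayer[chainId] = address(this);
        protocolVersion[chainId] = version;
    }
}
```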
-Note, that for L2→L1 withdrawals via bridges we never provide a recovery mechanism. The same is the case with GW → L1 messaging, i.e. it is assumed that such migrations are always executable on L1. +Note, that for L2→L1 withdrawals via bridges we never provide a recovery mechanism. The same is the case with GW → L1 messaging, i.e. it is assumed that such migrations are always executable on L1. -You can read more about how the safety is ensured in the “Migration invariants & protocol upgradability” section. +You can read more about how the safety is ensured in the “Migration invariants & protocol upgradability” section. ![image.png](./img/migrate_from_gw.png) @@ -34,7 +36,7 @@ In this release we plan to only support a single whitelisted settlement layer, b ## Chain migration invariants & protocol upgradability -Note, that once a chain migrates to a new settlement layer, there are two deployments of contracts for the same ZKChain. What’s more, the L1 part will always be used. +Note, that once a chain migrates to a new settlement layer, there are two deployments of contracts for the same ZKChain. What’s more, the L1 part will always be used. There is a need to ensure that the chains work smoothly during migration and there are not many issues during the protocol upgrade. diff --git a/docs/gateway/gateway_da.md b/docs/gateway/gateway_da.md index 93e685e28..67bd2081b 100644 --- a/docs/gateway/gateway_da.md +++ b/docs/gateway/gateway_da.md @@ -1,4 +1,5 @@ # Custom DA layers + [back to readme](../README.md) ## Prerequisites @@ -7,7 +8,7 @@ To better understand this document, it is better to have grasp on how [custom DA ## Rollup DA -If a chain intends to be a rollup, it needs to relay its pubdata to L1 via L1Messenger system contract. Thus, typically the L1DAValidator will be different from the one that they used on Ethereum. +If a chain intends to be a rollup, it needs to relay its pubdata to L1 via L1Messenger system contract. Thus, typically the L1DAValidator will be different from the one that they used on Ethereum. For chains that use our [standard pubdata format](../settlement_contracts/data_availability/rollup_da.md), we provide the [following relayed L1 DA validator](../../l1-contracts/contracts/state-transition/data-availability/RelayedSLDAValidator.sol) that relays all the data to L1. diff --git a/docs/gateway/gateway_protocol_upgrades.md b/docs/gateway/gateway_protocol_upgrades.md index 4fa0816d2..11558d533 100644 --- a/docs/gateway/gateway_protocol_upgrades.md +++ b/docs/gateway/gateway_protocol_upgrades.md @@ -1,13 +1,14 @@ # Gateway protocol versioning and upgradability + [back to readme](../README.md) -One of the hardest part about gateway (GW) is how do we synchronize interaction between L1 and L2 parts that can potentially have different versions of contracts. This synchronization should be compatible with any future CTM that may be present on the gateway. +One of the hardest part about gateway (GW) is how do we synchronize interaction between L1 and L2 parts that can potentially have different versions of contracts. This synchronization should be compatible with any future CTM that may be present on the gateway. -Here we describe various scenarios of standard/emergency upgrades and how will those play out in the gateway setup. +Here we describe various scenarios of standard/emergency upgrades and how will those play out in the gateway setup. ## General idea -We do not enshrine any particular approach on the protocol level of the GW. 
The following is the approach used by the standard Era CTM, which also manages GW.
+We do not enshrine any particular approach on the protocol level of the GW. The following is the approach used by the standard Era CTM, which also manages GW.

Upgrades will be split into two parts:

@@ -18,7 +19,7 @@ In other words:

`active upgrade = inactive upgrade + bootloader changes + setting upgrade tx`

-The other difference is that while “active chain upgrades” are usually always needed to be forced in order to ensure that contracts/protocol are up to date, the “inactive chain upgrades” typically involve changes in the facets’ bytecode and will only be needed before migration is complete to ensure that contracts are compatible.
+The other difference is that while “active chain upgrades” usually always need to be forced in order to ensure that contracts/protocol are up to date, the “inactive chain upgrades” typically involve changes in the facets’ bytecode and will only be needed before migration is complete to ensure that contracts are compatible.

To reduce the boilerplate / make management of the upgrades easier, the abstraction will basically be implemented at the upgrade implementation level, which will check `if block.chainid == s.settlementLayer { ... perform active upgrade stuff } else { ... perform inactive upgrade stuff, typically nothing }.`

@@ -32,8 +33,7 @@ When a chain starts its migration to a new settlement layer (regardless of wheth

2. The `s.settlementLayer` will be set for the chain. Now the chain becomes inactive and it can only take “inactive” upgrades.
3. When migration finishes, it will be double checked that the `protocolVersion` is the same as the one in the target chains’ CTM.

-If the chain has already been deployed there, it will be checked that the `protocolVersion` of the deployed contracts there is the same as the one of the chain that is being moved.
-4. All “inactive” instances of a chain can receive “inactive” upgrades of a chain. The single “active” instance of a chain (the one on the settlement layer) can receive only active upgrades.
+If the chain has already been deployed there, it will be checked that the `protocolVersion` of the deployed contracts there is the same as the one of the chain that is being moved. 4. All “inactive” instances of a chain can receive “inactive” upgrades of a chain. The single “active” instance of a chain (the one on the settlement layer) can receive only active upgrades.

In case step (3) fails (or for any other reason the chain fails), the migration recovery process should be available (the `L1AssetRouter.bridgeRecoverFailedTransfer` method). Recovering a chain is basically just changing its `settlementLayerId` to the current `block.chainid`. It will be double checked that the chain has not conducted any inactive upgrades in the meantime, i.e. the `protocolVersion` of the chain is the same as the one when the chain started its migration.

@@ -47,11 +47,13 @@ The only unrecoverable state that a chain can achieve is:

- While the migration was happening, an “inactive” upgrade was conducted.
- Now recovery of the chain is not possible as the “protocol version” check will fail.

-This is considered to be a rare event, but it will be strongly recommended that before conducting any inactive upgrades the migration transaction should be finalized. (TODO: we could actively force it, but it is a separate feature, i.e. require confirmation of a successful migration before any upgrades on a migrated chain could be done).
+This is considered to be a rare event, but it will be strongly recommended that the migration transaction be finalized before conducting any inactive upgrades.
+
+In the future, we could actively force it, i.e. require confirmation of a successful migration before any upgrades on a migrated chain could be done.

## Safety guards for GW→L1 migrations

-Migrations from GW to L1 do not have any chain recovery mechanism, i.e. if the step (3) from the above fails for some reason (e.g. a new protocol version id is available on the CTM), then the chain is basically lost.
+Migrations from GW to L1 do not have any chain recovery mechanism, i.e. if step (3) from the above fails for some reason (e.g. a new protocol version id is available on the CTM), then the chain is basically lost.

### Protocol version safety guards

@@ -59,16 +61,17 @@ Migrations from GW to L1 do not have any chain recovery mechanism, i.e. if the s

- Assuming that no new protocol versions are published to CTM during the migration, the migration must succeed, since both the CTM on GW and on L1 will have the same version and so the checks will work fine.
- The finalization of any chain withdrawal is permissionless and so in the short term the team could help finalize the outstanding migrations to prevent funds loss.

-> The approach above is somewhat tricky as it requires careful coordination with the governance to ensure that at the time of when the new protocol version is published to CTM, there are no outstanding migrations.
+> The approach above is somewhat tricky as it requires careful coordination with the governance to ensure that at the time when the new protocol version is published to the CTM, there are no outstanding migrations.

In the future we will either make it more robust or add a recovery mechanism for failed GW → L1 migrations.
->
+
+>

### Batch number safety guards

-Another potential place that may lead for a chain to not be migratable to L1 is if the number of outstanding batches is very high, which can lead to migration to cost too much gas and being not executable no L1.
+Another situation that may leave a chain unable to migrate to L1 is if the number of outstanding batches is very high, which can cause the migration to cost too much gas and not be executable on L1.

-To prevent that, it is required for chains that migrate from GW that all their batches are executed. This ensures that the number of batches’ hashes to be copied on L1 is constant (i.e. just 1 last batch).
+To prevent that, chains that migrate from GW are required to have all their batches executed. This ensures that the number of batches’ hashes to be copied to L1 is constant (i.e. just the 1 last batch).

## Motivation

@@ -77,7 +80,7 @@ The job of this proposal is to reduce the number of potential states in which th

- Need to be able to migrate to a chain that has contracts from a different protocol version
- The CTM needs to be able to support migration of chains with different versions. Only `bridgeRecoverFailedTransfer` has to be supported for all the versions, but its logic is very trivial.

-The reason why we can not conduct “active” upgrades everywhere on both L1 and L2 part is that for the settlement layer we need to write the new protocol upgrade tx, while NOT allowing to override it. On other hand, for the “inactive” chain contracts, we need to ignore the upgrade transaction.
+The reason why we can not conduct “active” upgrades everywhere on both the L1 and L2 parts is that for the settlement layer we need to write the new protocol upgrade tx, while NOT allowing it to be overridden. On the other hand, for the “inactive” chain contracts, we need to ignore the upgrade transaction.

## Forcing “active chain upgrade”

@@ -87,7 +90,7 @@ The admin of the CTM (GW) will call the CTM (GW) with the new protocol version

### Case of malicious Gateway operator

-In the future, malicious Gateway operator may try to exploit a known vulnerability in an CTM.
+In the future, a malicious Gateway operator may try to exploit a known vulnerability in a CTM.

The recommended approach here is the following:

@@ -96,10 +99,11 @@ The recommended approach here is the following:

> The approach above basically states that “if the operator is censoring, we’ll be able to use the standard censorship-resistance mechanism of a chain to bypass it”. The freezing part is just a way to avoid telling the world about the issue before all chains are safe from exploits.

-It is the responsibility of the CTM to ensure that all the supported settlement layers are trusted enough to uphold to the above protocol. Using any sort of Validiums will be especially discouraged, since in theory those could get frozen forever without any true censorship resistance mechanisms.
+It is the responsibility of the CTM to ensure that all the supported settlement layers are trusted enough to uphold the above protocol. Using any sort of Validiums will be especially discouraged, since in theory those could get frozen forever without any true censorship resistance mechanisms.

Also, note that the freezing period should be long enough to ensure that censorship resistance mechanisms have enough time to kick in
->
+
+>

## Forcing “inactive chain upgrade”

@@ -112,9 +116,9 @@ In case such situation does happen however, the current plan is just to:

## Backwards compatibility

-With this proposal the protocol version on the L1 part and on the settlement layer part is completely out of sync. This means that all new mailboxes need to support both accepting and sending all versions of relayed (L1 → GW → L2) transactions.
+With this proposal, the protocol version on the L1 part and on the settlement layer part is completely out of sync. This means that all new mailboxes need to support both accepting and sending all versions of relayed (L1 → GW → L2) transactions.

-For now, this is considered okay. In the future, some stricter versioning could apply.
+For now, this is considered okay. In the future, some stricter versioning could apply.

## Notes

@@ -130,50 +134,26 @@ Definition:

1. check that `ZKChain(X).protocol_version == CTM(X).protocol_version` on chain Y.
2. Start ‘burn’ process (on chain Y)
-   1. collect `‘payload’` from `ZKChain(X)` and `CTM(X)` and `protocol_version` on chain Y.
-   2. set `ZKChain(X).settlement_layer` to `address(ZKChain(Z))` on chain Y.
+   1. collect `‘payload’` from `ZKChain(X)` and `CTM(X)` and `protocol_version` on chain Y.
+   2. set `ZKChain(X).settlement_layer` to `address(ZKChain(Z))` on chain Y.
3. Start ‘mint’ process (on chain Z)
-   1. check that `CTM(X).protocol_version == payload.protocol_version`
-   2. Create new `ZKChain(X)` on chain Z and register in the local bridgehub & CTM.
-   3. pass `payload` to `ZKChain(X)` and `CTM(X)` to initialize the state.
+   1. check that `CTM(X).protocol_version == payload.protocol_version`
+   2. Create new `ZKChain(X)` on chain Z and register in the local bridgehub & CTM.
+   3.
pass `payload` to `ZKChain(X)` and `CTM(X)` to initialize the state. 4. If ‘mint’ fails - recover (on chain Y) - 1. check that `ZKChain(X).protocol_version == payload.protocol_version` - 1. important, here we’re actually looking at the ‘HYPERCHAIN’ protocol version and not necessarily CTM protocol version. - 2. set `ZKChain(X).settlement_layer` to `0` on chain Y. - 3. pass `payload` to `IZKChain(X)` and `CTM(X)` to initialize the state. + 1. check that `ZKChain(X).protocol_version == payload.protocol_version` + 1. important, here we’re actually looking at the ‘HYPERCHAIN’ protocol version and not necessarily CTM protocol version. + 2. set `ZKChain(X).settlement_layer` to `0` on chain Y. + 3. pass `payload` to `IZKChain(X)` and `CTM(X)` to initialize the state. -### ‘Reverse’ chain migration - moving chain X ‘back’ from Z to Y. +### ‘Reverse’ chain migration - moving chain X ‘back’ from Z to Y - (moving back from gateway to L1). +(moving back from gateway to L1). 1. Same as above (check protocol version - but on chain Z) 2. Same as above (start burn process - but on chain Z) - 1. same as above - 2. TODO: should we ‘remove’ the IZKChain from Z completely? (’parent’ chain Y doesn’t really have an address on Z). 3. Same as above (start ‘mint’ - but on chain Y) - 1. same as above - 2. creation is probably not needed - as the contract was already there in a first place. - 3. same as above - but the state is ‘re-initialized’ + 1. same as above + 2. creation is probably not needed - as the contract was already there in a first place. + 3. same as above - but the state is ‘re-initialized’ 4. Same as above - but on chain ‘Z’ - -### What can go wrong: - -**Check 1 - protocol version** - -- chain is on the older protocol version before the migration start -- resolution: don’t allow the migration, tell protocol to upgrade itself first. - -**Check 3a — protocol version on destination chain** - -- destination chain CTM is on the OLDER version than the payload - - resolution: fail the transfer - seems that CTMs were not upgraded. -- destination chain CTM is on the NEWER version that then payload - - For simplicity - we could fail the transfer here too. - -**Check 4a — protocol version on the source chain in case of transfer failure** - -- source IZKChain is on the ‘older’ protocol version than the payload - - in theory - impossible, as this means that the IZKChain protocol version was ‘reverted’. -- source IZKChain is on the ‘newer’ protocol version than the payload - - This is the **main** worst case scenario - as this means that the IZKChain was updated (via ‘inactive’ update) while the protocol transfer was ongoing. - - This is the ‘Stuck state’ case described in the paragraph above. \ No newline at end of file diff --git a/docs/gateway/messaging_via_gateway.md b/docs/gateway/messaging_via_gateway.md index 08ff67821..fc6681ad3 100644 --- a/docs/gateway/messaging_via_gateway.md +++ b/docs/gateway/messaging_via_gateway.md @@ -1,9 +1,10 @@ # Messaging via Gateway + [back to readme](../README.md) ## Deeper dive into MessageRoot contract and how L3→L1 communication works -Before, when were just settling on L1, a chain’s message root was just the merkle tree of L2→L1 logs that were sent within this batch. However, this new model will have to be amended to be able to perform messages to L1 coming from an L3 that settles on top of Gateway. +Before, when were just settling on L1, a chain’s message root was just the merkle tree of L2→L1 logs that were sent within this batch. 
However, this new model will have to be amended to be able to perform messages to L1 coming from an L3 that settles on top of Gateway. The description of how L3→L1 messages are aggregated in the MessageRoots and proved on L1 can be read in the [nested l3 l1 messaging](./nested_l3_l1_messaging.md) section. @@ -11,10 +12,8 @@ The description of how L3→L1 messages are aggregated in the MessageRoots and p As a recap, here is how messaging works for chains that settle on L1: - ![Direct L1->L2 messaging](./img/l1_l2_messaging.png) - - The user calls the bridgehub, which routes the message to the chain. - The operator eventually sees the transaction via an event on L1 and it will process it on L2. @@ -47,5 +46,5 @@ To simplify things, for now, we provide the L1→GW with a large amount of gas ( - Creating a large transaction on L1 that would cause the L1→GW part to fail is not possible due to high L1 gas costs that would be required to create such a tx. Both of the assumptions above will be removed in subsequent releases, but for now this is how things are. -> +> diff --git a/docs/gateway/nested_l3_l1_messaging.md b/docs/gateway/nested_l3_l1_messaging.md index 9a7cb198e..75d9b399d 100644 --- a/docs/gateway/nested_l3_l1_messaging.md +++ b/docs/gateway/nested_l3_l1_messaging.md @@ -1,4 +1,5 @@ # Nested L3→L1 messages tree design for Gateway + [back to readme](../README.md) ## Introduction @@ -10,14 +11,15 @@ This document assumes that the reader is already aware of what SyncLayer (or how > Note: -“Multiple arrows” from `AggregatedRoot` to `chainIdRoot` and from each `chainIdRoot` to `batchRoot` are for illustrational purposes only. +“Multiple arrows” from `AggregatedRoot` to `chainIdRoot` and from each `chainIdRoot` to `batchRoot` are for illustrational purposes only. In fact, the tree above will be a binary merkle tree, where the `AggregatedRoot` will be the root of the tree of `chainIdRoot`, while `chainIdRoot` is the merkle root of a binary merkle tree of `batchRoot`. -> + +> For each chain that settles on L1, the root will have the following format: -`settledMessageRoot = keccak256(LocalRoot, AggregatedRoot)` +`settledMessageRoot = keccak256(LocalRoot, AggregatedRoot)` where `localRoot` is the root of the tree of messages that come from the chain itself, while the `AggregatedRoot` is the root of aggregated messages from all of the chains that settle on top of the chain. @@ -38,62 +40,60 @@ In other words, we get the recursive structure, where for leaves of it, i.e. cha ## Appending new batch root leafs -At the execution stage of every batch, the ZK Chain would call the `MessageRoot.addChainBatchRoot` function, while providing the `SettledRootOfBatch` for the chain. Then, the `BatchRootLeaf` will be calculated and appended to the incremental merkle tree with which the `ChainIdRoot` & `ChainIdLeaf` is calculated, which will be updated in the merkle tree of `ChainIdLeafs`. +At the execution stage of every batch, the ZK Chain would call the `MessageRoot.addChainBatchRoot` function, while providing the `SettledRootOfBatch` for the chain. Then, the `BatchRootLeaf` will be calculated and appended to the incremental merkle tree with which the `ChainIdRoot` & `ChainIdLeaf` is calculated, which will be updated in the merkle tree of `ChainIdLeafs`. At the end of the batch, the L1Messenger system contract would query the MessageRoot contract for the total aggregated root, i.e. the root of all `ChainIdLeafs` . 
It will then calculate the settled root `settledMessageRoot = keccak256(LocalRoot, AggregatedRoot)` and propagate it to L1. Only the final aggregated root will be stored on L1.

-# Proving that a message belongs to a chain on top of SyncLayer
+## Proving that a message belongs to a chain on top of SyncLayer

The process will consist of two steps:

1. Construct the needed `SettledRootOfBatch` for the current chain’s batch.
-2. Prove that it belonged to the gateway.
+2. Prove that it belonged to the gateway.

-If the depth of recursion is larger than 1, then step (1) could be repeated multiple times.
+If the depth of recursion is larger than 1, then step (1) could be repeated multiple times.

Right now for proving logs the following interface is exposed on L1 side:

```solidity
-    struct L2Log {
-        uint8 l2ShardId;
-        bool isService;
-        uint16 txNumberInBatch;
-        address sender;
-        bytes32 key;
-        bytes32 value;
-    }
-
-    function proveL2LogInclusion(
-        uint256 _chainId,
-        uint256 _batchNumber,
-        uint256 _index,
-        L2Log calldata _log,
-        bytes32[] calldata _proof
-    ) external view override returns (bool) {
-        address hyperchain = getHyperchain(_chainId);
-        return IZkSyncHyperchain(hyperchain).proveL2LogInclusion(_batchNumber, _index, _log, _proof);
-    }
+struct L2Log {
+    uint8 l2ShardId;
+    bool isService;
+    uint16 txNumberInBatch;
+    address sender;
+    bytes32 key;
+    bytes32 value;
+}
+
+function proveL2LogInclusion(
+    uint256 _chainId,
+    uint256 _batchNumber,
+    uint256 _index,
+    L2Log calldata _log,
+    bytes32[] calldata _proof
+) external view override returns (bool) {
+    address hyperchain = getHyperchain(_chainId);
+    return IZkSyncHyperchain(hyperchain).proveL2LogInclusion(_batchNumber, _index, _log, _proof);
+}
```

Let’s define a new function:

```solidity
-    function proveL2LeafInclusion(
-        uint256 _chainId,
-        uint256 _batchNumber,
-        uint256 _mask,
-        bytes32 _leaf,
-        bytes32[] calldata _proof
-    ) external view override returns (bool) {
-    }
+function proveL2LeafInclusion(
+    uint256 _chainId,
+    uint256 _batchNumber,
+    uint256 _mask,
+    bytes32 _leaf,
+    bytes32[] calldata _proof
+) external view override returns (bool) {}
```

-This function will prove that a certain 32-byte leaf belongs to the tree. Note, that the fact that the `leaf` is 32-bytes long means that the function could work successfully for internal leaves also. To prevent this it will be the callers responsibility to ensure that the preimage of the leaf is larger than 32-bytes long and/or use other ways to ensuring that the function will be called securely.
+This function will prove that a certain 32-byte leaf belongs to the tree. Note that the fact that the `leaf` is 32 bytes long means the function could also succeed for internal nodes of the tree. To prevent this, it is the caller's responsibility to ensure that the preimage of the leaf is longer than 32 bytes, and/or to use other ways of ensuring that the function is called securely.
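For intuition, the leaf values used in the aggregated tree are domain-separated with dedicated padding constants, which is what keeps honestly constructed leaves distinct from internal nodes: a leaf preimage is always longer than 32 bytes, while an internal node hashes exactly two 32-byte children. Below is a sketch of the leaf computations referenced in this document (the padding values here are placeholders, not the real constants):

```solidity
// Sketch of the leaf computations used in the aggregated tree; the actual
// padding constants in the production code differ from these placeholders.
bytes32 constant BATCH_LEAF_HASH_PADDING = keccak256("BatchLeaf"); // placeholder value
bytes32 constant CHAIN_ID_LEAF_PADDING = keccak256("ChainIdLeaf"); // placeholder value

// BatchRootLeaf = keccak256(BATCH_LEAF_HASH_PADDING, SettledRootOfBatch, batchNumber)
function batchRootLeaf(bytes32 settledRootOfBatch, uint256 batchNumber) pure returns (bytes32) {
    return keccak256(abi.encodePacked(BATCH_LEAF_HASH_PADDING, settledRootOfBatch, batchNumber));
}

// ChainIdLeaf = keccak256(CHAIN_ID_LEAF_PADDING, chainIdRoot, chainId)
function chainIdLeaf(bytes32 chainIdRoot, uint256 chainId) pure returns (bytes32) {
    return keccak256(abi.encodePacked(CHAIN_ID_LEAF_PADDING, chainIdRoot, chainId));
}
```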
-This function will be internally used by the existing `_proveL2LogInclusion` function to prove that a certain log existed +This function will be internally used by the existing `_proveL2LogInclusion` function to prove that a certain log existed We want to avoid breaking changes to SDKs, so we will modify the `zks_getL2ToL1LogProof` to return the data in the following format (the results of it are directly passed into the `proveL2LeafInclusion` method, so returned value must be supported by the contract): @@ -111,10 +111,10 @@ If the chain is not a settlement layer of itself, we then need to calculate: - `BatchRootLeaf = keccak256(BATCH_LEAF_HASH_PADDING, SettledRootOfBatch, batch_number).` - Consume one element from the `_proofs` array to get the mask for the merkle path of the batch leaf in the chain id tree. -- Consume `batchLeafProofLen` elements to construct the `ChainIdRoot` +- Consume `batchLeafProofLen` elements to construct the `ChainIdRoot` - After that, we calculate the `chainIdLeaf = keccak256(CHAIN_ID_LEAF_PADDING, chainIdRoot, chainId` -Now, we have the *supposed* `chainIdRoot` for the chain inside its settlement layer. The only thing left to prove is that this root belonged to some batch of the settlement layer. +Now, we have the _supposed_ `chainIdRoot` for the chain inside its settlement layer. The only thing left to prove is that this root belonged to some batch of the settlement layer. Then, the following happens: @@ -130,7 +130,7 @@ Now, we can call the function to verify that the batch belonged to the settlemen chainIdLeaf, // Basically pass the rest of the `_proof` array extractSliceUntilEnd(_proof, ptr) - ); + ); ``` The other slice of the `_proof` array is expected to have the same structure as before: @@ -141,7 +141,7 @@ The other slice of the `_proof` array is expected to have the same structure as ## Trust assumptions -Note, that the `_proof` field is provided by potentially malicious users. The only part that really checks anything with L1 state is the final step of the aggregated proof verification, i.e. that the settled root of batch of the final top layer was present on L1. +Note, that the `_proof` field is provided by potentially malicious users. The only part that really checks anything with L1 state is the final step of the aggregated proof verification, i.e. that the settled root of batch of the final top layer was present on L1. It puts a lot of trust in the settlement layers as it can steal funds from chains and “verify” incorrect L3→L1 logs if it wants to. It is the job of the chain itself to ensure that it trusts the aggregation layer. It is also the job of the STM to ensure that the settlement layers that are used by its chains are secure. @@ -164,21 +164,21 @@ Another notable example of the redundancy of data, is that we also have total `M We want to maintain the security invariant that users can always withdraw their funds from rollup chains. In other words, all L3→L1 logs that come from rollups should be eventually propagated to L1, and also regardless of how other chains behave an honest chain should always provide the ability for their users to withdraw. -Firstly, unless the chain settles on L1, this requires a trusted settlement layer. That is, not trusted operator of the gateway, but it works properly, i.e. appends messages correctly, publishes the data that it promises to publish, etc. 
This is already the case for the Gateway as it is a ZK rollup fork of Era, and while the operator may censor transactions, it can not lie and is always forced to publish all state diffs.
+Firstly, unless the chain settles on L1, this requires a trusted settlement layer: the operator of the gateway is not trusted, but the settlement layer itself must work properly, i.e. append messages correctly, publish the data that it promises to publish, etc. This is already the case for the Gateway, as it is a ZK rollup fork of Era: while the operator may censor transactions, it can not lie and is always forced to publish all state diffs.

Secondly, we guarantee that all the stored `ChainIdLeafs` are published on L1, even for Validiums. Publishing a single 32-byte value per relatively big Gateway batch has little cost for Validiums, but it ensures that the settlement root of the gateway can always be constructed. And, assuming that the preimage for the chain root can be constructed, this gives the ability to recover the proof for any L3→L1 log coming from a rollup.

-But how can one reconstruct the total chain tree for a particular rollup chain? A rollup would relay all of its pubdata to L1, meaning that by observing L1, the observer would know all the L3→L1 logs that happened in a particular batch. It means that for each batch it can restore the `LocalRoot` (in case the `AggregatedRoot` is non-zero, it could be read from e.g. the storage which is available via the standard state diffs). This allows to calculate the `BatchRootLeaf` for the chain. The only thing missing is understanding which batches were finalized on gateway in order to construct the merkle path to the `ChainRootLeaf`.
+But how can one reconstruct the total chain tree for a particular rollup chain? A rollup would relay all of its pubdata to L1, meaning that by observing L1, the observer would know all the L3→L1 logs that happened in a particular batch. It means that for each batch it can restore the `LocalRoot` (in case the `AggregatedRoot` is non-zero, it could be read from e.g. the storage, which is available via the standard state diffs). This allows one to calculate the `BatchRootLeaf` for the chain. The only thing missing is understanding which batches were finalized on gateway in order to construct the merkle path to the `ChainRootLeaf`.

-To understand which SL was used by a batch for finalization, one could simply brute force over all settlement layers ever used to find out where the settledBatchRoot is stored.. This number is expected to be rather small.
+To understand which SL was used by a batch for finalization, one could simply brute force over all settlement layers ever used to find out where the settledBatchRoot is stored. This number is expected to be rather small.

## Legacy support

-In order to ease the server migration, we support legacy format of L2→L1 logs proving, i.e. just provide a proof that assumes that stored `settledMessageRoot` is identical to local root, i.e. the hash of logs in the batch.
+In order to ease the server migration, we support the legacy format of L2→L1 log proving, i.e. we just provide a proof that assumes that the stored `settledMessageRoot` is identical to the local root, i.e. the hash of logs in the batch.

To differentiate between the legacy format and the new one, the following approach is used:

- Except for the first 3 bytes, the first word in the new format contains 0s, which is unlikely in the old format, where leaves are hashed.
- I.e. if the last 29 bytes are zeroes, then it is assumed to be the new format and vice versa.
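As a sketch, the format detection described above boils down to a single mask check (illustrative only; the real implementation may be structured differently):

```solidity
// Sketch of the proof-format detection rule described above: in the new
// format only the first 3 bytes of the first proof word may be non-zero,
// which is astronomically unlikely in the old format, where the first
// word is a hashed leaf.
function isNewProofFormat(bytes32 firstProofWord) pure returns (bool) {
    // Keep only the last 29 bytes (232 bits); they must all be zero.
    return uint256(firstProofWord) & ((1 << 232) - 1) == 0;
}
```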
-In the next release the old format will be removed. \ No newline at end of file +In the next release the old format will be removed. diff --git a/docs/gateway/overview.md b/docs/gateway/overview.md index 2863f22d6..ea50dc459 100644 --- a/docs/gateway/overview.md +++ b/docs/gateway/overview.md @@ -1,4 +1,5 @@ # Gateway + [back to readme](../README.md) Gateway is a proof aggregation layer, created to solve the following problems: @@ -6,10 +7,9 @@ Gateway is a proof aggregation layer, created to solve the following problems: - Fast interop (interchain communication) would require quick proof generation and verification. The latter can be very expensive on L1. Gateway provides an L1-like interface for chains, while giving a stable price for compute. - Generally proof aggregation can reduce costs for users, if there are multiple chains settling on top of the same layer. It can reduce the costs of running a Validium even further. -In this release, Gateway is basically a fork of Era, that will be deployed within the same CTM as other ZK Chains. This allows us to reuse most of the existing code for Gateway. +In this release, Gateway is basically a fork of Era, that will be deployed within the same CTM as other ZK Chains. This allows us to reuse most of the existing code for Gateway. > In some places in code you can meet words such as “settlement layer” or the abbreviation “sl”. “Settlement layer” is a general term that describes a chain that other chains can settle to. Right now, the list of settlement layers is whitelisted and only Gateway will be allowed to be a settlement layer (along with L1). -> ## High level gateway architecture diff --git a/docs/glossary.md b/docs/glossary.md index 22a8c7d65..3c6f3cdd0 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -1,4 +1,5 @@ # Glossary + [back to readme](./README.md) - **Governor** - a privileged address that controls the upgradability of the network and sets other privileged @@ -10,4 +11,4 @@ L2 blocks. - **Facet** - implementation contract. The word comes from the EIP-2535. - **Gas** - a unit that measures the amount of computational effort required to execute specific operations on the - ZKsync Era network. \ No newline at end of file + ZKsync Era network. diff --git a/docs/l2_system_contracts/batches_and_blocks_on_zksync.md b/docs/l2_system_contracts/batches_and_blocks_on_zksync.md index 7830a1e9e..777b1709d 100644 --- a/docs/l2_system_contracts/batches_and_blocks_on_zksync.md +++ b/docs/l2_system_contracts/batches_and_blocks_on_zksync.md @@ -1,10 +1,11 @@ # Batches & L2 blocks on zkSync + [back to readme](../README.md) ## Glossary - Batch - a set of transactions that the bootloader processes (`commitBatches`, `proveBatches`, and `executeBatches` work with it). A batch consists of multiple transactions. -- L2 blocks - non-intersecting sub-sets of consecutively executed transactions in a batch. This is the kind of block you see in the API. This is the one that is used for `block.number`/`block.timestamp`/etc. +- L2 blocks - non-intersecting sub-sets of consecutively executed transactions in a batch. This is the kind of block you see in the API. This is the one that is used for `block.number`/`block.timestamp`/etc. > Note that sometimes in code you can see notion of "virtual blocks". In the past, we returned batch information for `block.number`/`block.timestamp`. However due to DevEx issues we decided to move to returned these values for L2 blocks. Virtual blocks were used during migration, but are not used anymore. 
You can consider that there is one virtual block per L2 block and it has exactly the same properties.

@@ -22,25 +23,21 @@ In order to get the returned value for `block.number`, `block.timestamp`, `block

These return values for L2 blocks.

-# Blocks’ processing and consistency checks
+## Blocks’ processing and consistency checks

Our `SystemContext` contract allows us to get information about batches and L2 blocks. Some of the information is hard to calculate onchain, for instance, time. The timing information (for both batches and L2 blocks) is provided by the operator. In order to check that the operator provided some realistic values, certain checks are done on L1. Generally though, we try to check as much as we can on L2.

-## Initializing L1 batch
-
-FIXME: correct bootloader code link
-
-At the start of the batch, the operator [provides](https://github.com/code-423n4/2024-03-zksync/blob/e8527cab32c9fe2e1be70e414d7c73a20d357550/code/system-contracts/bootloader/bootloader.yul#L3867) the timestamp of the batch, its number and the hash of the previous batch. The root hash of the Merkle tree serves as the root hash of the batch.
+### Initializing L1 batch

-The SystemContext can immediately check whether the provided number is the correct batch number. It also immediately sends the previous batch hash to L1, where it will be checked during the commit operation. Also, some general consistency checks are performed. This logic can be found [here](https://github.com/code-423n4/2024-03-zksync/blob/e8527cab32c9fe2e1be70e414d7c73a20d357550/code/system-contracts/contracts/SystemContext.sol#L466).
+At the start of the batch, the operator [provides](../../system-contracts/bootloader/bootloader.yul#L3935) the timestamp of the batch, its number and the hash of the previous batch. The root hash of the Merkle tree serves as the root hash of the batch.

-## L2 blocks processing and consistency checks
+The SystemContext can immediately check whether the provided number is the correct batch number. It also immediately sends the previous batch hash to L1, where it will be checked during the commit operation. Also, some general consistency checks are performed. This logic can be found [here](../../system-contracts/contracts/SystemContext.sol#L469).

-### `setL2Block`
+### L2 blocks processing and consistency checks

-FIXME: fix link
+#### `setL2Block`

-Before each transaction, we call `setL2Block` [method](https://github.com/code-423n4/2024-03-zksync/blob/e8527cab32c9fe2e1be70e414d7c73a20d357550/code/system-contracts/bootloader/bootloader.yul#L2825). There we will provide some data about the L2 block that the transaction belongs to:
+Before each transaction, we call the `setL2Block` [method](../../system-contracts/bootloader/bootloader.yul#L2884). There we provide some data about the L2 block that the transaction belongs to:

- `_l2BlockNumber` The number of the new L2 block.
- `_l2BlockTimestamp` The timestamp of the new L2 block.

@@ -50,9 +47,9 @@ Before each transaction, we call `setL2Block` [method](https://github.com/code-4

If two transactions belong to the same L2 block, only the first one may have non-zero `_maxVirtualBlocksToCreate`. The rest of the data must be the same.

-The `setL2Block` [performs](https://github.com/code-423n4/2024-03-zksync/blob/e8527cab32c9fe2e1be70e414d7c73a20d357550/code/system-contracts/contracts/SystemContext.sol#L341) a lot of similar consistency checks to the ones for the L1 batch.
+The `setL2Block` [performs](../../system-contracts/contracts/SystemContext.sol#L355) a lot of similar consistency checks to the ones for the L1 batch. -### L2 blockhash calculation and storage +#### L2 blockhash calculation and storage Unlike L1 batch’s hash, the L2 blocks’ hashes can be checked on L2. @@ -70,7 +67,7 @@ Since zkSync is a state-diff based rollup, there is no way to deduce the hashes We store only the last 257 blocks, since the EVM requires only 256 previous ones and we use 257 as a safe margin. -### Legacy blockhash +#### Legacy blockhash For L2 blocks that were created before we switched to the formulas from above, we use the following formula for their hash: @@ -78,7 +75,7 @@ For L2 blocks that were created before we switched to the formulas from above, w These are only very old blocks on zkSync Era and other ZK chains don't have such blocks. -### Timing invariants +#### Timing invariants While the timestamp of each L2 block is provided by the operator, there are some timing invariants that the system preserves: @@ -88,7 +85,7 @@ While the timestamp of each L2 block is provided by the operator, there are some - The timestamp of a batch must be ≥ the timestamp of the latest L2 block which belonged to the previous batch. - The timestamp of the last miniblock in batch can not go too far into the future. This is enforced by publishing an L2→L1 log, with the timestamp which is then checked on L1. -## Fictive L2 block & finalizing the batch +### Fictive L2 block & finalizing the batch At the end of the batch, the bootloader calls the `setL2Block` [one more time](../../system-contracts/bootloader/bootloader.yul#L4110) to allow the operator to create a new empty block. This is done purely for some of the technical reasons inside the node, where each batch ends with an empty L2 block. @@ -103,4 +100,4 @@ In the past, we had to apply different formulas based on whether or not the migr - When the block is out of the readable range. - When it is a normal L2 block and so its hash has to be used. -The only edge case is when we ask for miniblock block number for which the base hash is returned. This edge case will be removed in future releases. \ No newline at end of file +The only edge case is when we ask for miniblock block number for which the base hash is returned. This edge case will be removed in future releases. diff --git a/docs/l2_system_contracts/elliptic_curve_precompiles.md b/docs/l2_system_contracts/elliptic_curve_precompiles.md index b47735803..7a939bf96 100644 --- a/docs/l2_system_contracts/elliptic_curve_precompiles.md +++ b/docs/l2_system_contracts/elliptic_curve_precompiles.md @@ -1,4 +1,5 @@ # Elliptic curve precompiles + [back to readme](../README.md) Precompiled contracts for elliptic curve operations are required in order to perform zkSNARK verification. @@ -24,7 +25,7 @@ The BN254 (also known as alt-BN128) is an elliptic curve defined by the equation The arithmetic is carried out with the field elements encoded in the Montgomery form. This is done not only because operating in the Montgomery form speeds up the computation but also because the native modular multiplication, which is carried out by Yul's `mulmod` opcode, is very inefficient. -Instructions set on zkSync and EVM are different, so the performance of the same Yul/Solidity code can be efficient on EVM, but not on zkEVM and opposite. +Instructions set on zkSync and EVM are different, so the performance of the same Yul/Solidity code can be efficient on EVM, but not on zkEVM and opposite. 
One such very inefficient command is `mulmod`. On EVM there is a native opcode that performs modular multiplication and it costs only 8 gas, which compared to the costs of other opcodes is only 2-3 times more expensive. On zkEVM we don’t have a native `mulmod` opcode; instead, the compiler does full-width multiplication (e.g. it multiplies two `uint256`s and gets a `uint512` as a result). Then the compiler performs long division for the reduction (but only the remainder is kept), which in the generic form is an expensive operation that costs many opcode executions and can’t be compared to the cost of a single opcode execution. The worst thing is that `mulmod` is used a lot for the modular inversion, so optimizing this one opcode gives a huge benefit to the precompiles.

@@ -44,14 +45,13 @@ The exponentiation was carried out using the square and multiply algorithm, whic

## Montgomery Form

-Let’s take a number `R`, such that `gcd(N, R) == 1` and `R` is a number by which we can efficiently divide and take module over it (for example power of two or better machine word, aka 2^256). Then transform every number to the form of `x * R mod N` / `y * R mod N` and then we get efficient modulo addition and multiplication. The only thing is that before working with numbers we need to transform them to the form from `x mod N` to the `x * R mod N` and after performing operations transform the form back.
+Let’s take a number `R`, such that `gcd(N, R) == 1` and `R` is a number by which we can efficiently divide and take the modulo (for example a power of two, or better, the machine word, aka 2^256). Then we transform every number to the form `x * R mod N` / `y * R mod N`, and we get efficient modular addition and multiplication. The only thing is that before working with the numbers we need to transform them from the form `x mod N` to the form `x * R mod N`, and after performing the operations transform them back.

-For the latter, we will assume that `N` is the module that we use in computations, and `R` is $2^{256}$, since we can efficiently divide and take module over this number and it practically satisfies the property of `gcd(N, R) == 1`.
+For the latter, we will assume that `N` is the modulus that we use in the computations, and `R` is $2^{256}$, since we can efficiently divide and take the modulo over this number, and it practically satisfies the property `gcd(N, R) == 1`.

### Montgomery Reduction Algorithm (REDC)

-> Reference: https://en.wikipedia.org/wiki/Montgomery_modular_multiplication#The_REDC_algorithm
->
+> Reference: <https://en.wikipedia.org/wiki/Montgomery_modular_multiplication#The_REDC_algorithm>

```solidity
/// @notice Implementation of the Montgomery reduction algorithm (a.k.a. REDC).

@@ -162,13 +162,13 @@ To compute $2P$ (or $P+P$), there are three cases:

- If $P = O$, then $2P = O$.
- Else $P = (x, y)$
-  - If $y = 0$, then $2P = O$.
-  - Else $y≠0$, then
-
-  $$
-  \begin{gather*} \lambda = \frac{3x_{p}^{2} + a}{2y_{p}} \\ x_{r} = \lambda^{2} - 2x_{p} \\ y_{r} = \lambda(x_{p} - x_{r}) - y_{p}\end{gather*}
-  $$
-
+
+  - If $y = 0$, then $2P = O$.
+  - Else $y≠0$, then
+
+  $$
+  \begin{gather*} \lambda = \frac{3x_{p}^{2} + a}{2y_{p}} \\ x_{r} = \lambda^{2} - 2x_{p} \\ y_{r} = \lambda(x_{p} - x_{r}) - y_{p}\end{gather*}
+  $$

The complicated case involves approximately 6 multiplications, 4 additions/subtractions, and 1 division. There could also be 4 multiplications, 6 additions/subtractions, and 1 division, and if you want you could trade a multiplication for 2 more additions.

@@ -179,15 +179,15 @@ To compute $P + Q$ where $P \neq Q$, there are four cases:

- If $P = 0$ and $Q \neq 0$, then $P + Q = Q$.
- If $Q = 0$ and $P \neq 0$, then $P + Q = P$. - Else $P = (x_{p},\ y_{p})$ and$Q = (x_{q},\ y_{q})$ - - If $x_{p} = x_{q}$ (and necessarily $y_{p} \neq y_{q}$), then $P + Q = O$. - - Else $x_{p} \neq x_{q}$, then - - $$ - \begin{gather*} \lambda = \frac{y_{2} - y_{1}}{x_{2} - x_{1}} \\ x_{r} = \lambda^{2} - x_{p} - x_{q} \\ y_{r} = \lambda(x_{p} - x_{r}) - y_{p}\end{gather*} - $$ - - and $P + Q = R = (x_{r},\ y_{r})$. - + + - If $x_{p} = x_{q}$ (and necessarily $y_{p} \neq y_{q}$), then $P + Q = O$. + - Else $x_{p} \neq x_{q}$, then + + $$ + \begin{gather*} \lambda = \frac{y_{2} - y_{1}}{x_{2} - x_{1}} \\ x_{r} = \lambda^{2} - x_{p} - x_{q} \\ y_{r} = \lambda(x_{p} - x_{r}) - y_{p}\end{gather*} + $$ + + and $P + Q = R = (x_{r},\ y_{r})$. The complicated case involves approximately 2 multiplications, 6 additions/subtractions, and 1 division. diff --git a/docs/l2_system_contracts/system_contracts_bootloader_description.md b/docs/l2_system_contracts/system_contracts_bootloader_description.md index e6b9c6275..e1fd960e4 100644 --- a/docs/l2_system_contracts/system_contracts_bootloader_description.md +++ b/docs/l2_system_contracts/system_contracts_bootloader_description.md @@ -1,4 +1,5 @@ # System contracts/bootloader description (VM v1.5.0) + [back to readme](../README.md) ## Bootloader @@ -9,25 +10,25 @@ On standard Ethereum clients, the workflow for executing blocks is the following 2. Gather the state changes (if the transaction has not reverted), apply them to the state. 3. Go back to step (1) if the block gas limit has not been yet exceeded. -However, having such flow on zkSync (i.e. processing transaction one-by-one) would be too inefficient, since we have to run the entire proving workflow for each individual transaction. That’s what we need the *bootloader* for: instead of running N transactions separately, we run the entire batch (set of blocks, more can be found [here](./Batches%20&%20L2%20blocks%20on%20zkSync.md)) as a single program that accepts the array of transactions as well as some other batch metadata and processes them inside a single big “transaction”. The easiest way to think about bootloader is to think in terms of EntryPoint from EIP4337: it also accepts the array of transactions and facilitates the Account Abstraction protocol. +However, having such flow on zkSync (i.e. processing transaction one-by-one) would be too inefficient, since we have to run the entire proving workflow for each individual transaction. That’s what we need the _bootloader_ for: instead of running N transactions separately, we run the entire batch (set of blocks, more can be found [here](./Batches%20&%20L2%20blocks%20on%20zkSync.md)) as a single program that accepts the array of transactions as well as some other batch metadata and processes them inside a single big “transaction”. The easiest way to think about bootloader is to think in terms of EntryPoint from EIP4337: it also accepts the array of transactions and facilitates the Account Abstraction protocol. The hash of the code of the bootloader is stored on L1 and can only be changed as a part of a system upgrade. Note, that unlike system contracts, the bootloader’s code is not stored anywhere on L2. That’s why we may sometimes refer to the bootloader’s address as formal. It only exists for the sake of providing some value to `this` / `msg.sender`/etc. When someone calls the bootloader address (e.g. to pay fees) the EmptyContract’s code is actually invoked. 
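+To make the “single big transaction” idea concrete, here is a minimal illustrative sketch of the loop the bootloader conceptually runs. It is only a sketch: the real bootloader is written in Yul, and `_processTransaction` is a hypothetical placeholder for the validation/execution flow described in the sections below.
+
+```solidity
+// Purely illustrative sketch; `BootloaderTxDescription` mirrors the struct
+// described in the "Structure of the bootloader's memory" section below.
+struct BootloaderTxDescription {
+    uint256 txDataOffset;
+    uint256 txExecutionMeta;
+}
+
+contract BootloaderSketch {
+    function runBatch(BootloaderTxDescription[] memory txs) internal {
+        for (uint256 i = 0; i < txs.length; ++i) {
+            // A zero `txExecutionMeta` means no more transactions should be executed.
+            if (txs[i].txExecutionMeta == 0) {
+                break;
+            }
+            _processTransaction(txs[i].txDataOffset);
+        }
+        // ...then publish the pubdata and send the collected fees to the operator.
+    }
+
+    function _processTransaction(uint256 txDataOffset) internal {
+        // validate -> charge the fee -> execute -> refund (see the sections below)
+    }
+}
+```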
## System contracts

-While most of the primitive EVM opcodes can be supported out of the box (i.e. zero-value calls, addition/multiplication/memory/storage management, etc), some of the opcodes are not supported by the VM by default and they are implemented via “system contracts” — these contracts are located in a special *kernel space,* i.e. in the address space in range `[0..2^16-1]`, and they have some special privileges, which users’ contracts don’t have. These contracts are pre-deployed at the genesis and updating their code can be done only via system upgrade, managed from L1.
+While most of the primitive EVM opcodes can be supported out of the box (e.g. zero-value calls, addition/multiplication/memory/storage management, etc), some of the opcodes are not supported by the VM by default and are implemented via “system contracts” — these contracts are located in a special _kernel space_, i.e. in the address space in the range `[0..2^16-1]`, and they have some special privileges which users’ contracts don’t have. These contracts are pre-deployed at genesis, and updating their code can be done only via a system upgrade, managed from L1.

The use of each system contract will be explained below.

### Pre-deployed contracts

-Some of the contracts need to be predeployed at the genesis, but they do not need the kernel space rights. To give them minimal permissiones, we predeploy them at consequtive addressess that start right at the `2^16`. These will be described in the following sections (FIXME).
+Some of the contracts need to be predeployed at genesis, but they do not need kernel space rights. To give them minimal permissions, we predeploy them at consecutive addresses that start right at `2^16`. These will be described in the following sections.

-# zkEVM internals
+## zkEVM internals

Full specification of the zkEVM is beyond the scope of this document. However, this section will give you most of the details needed for understanding the L2 system smart contracts & the basic differences between EVM and zkEVM.

-## Registers and memory management
+### Registers and memory management

On EVM, during transaction execution, the following memory areas are available:

@@ -36,9 +37,9 @@ On EVM, during transaction execution, the following memory areas are available:

- `returndata` the immutable slice returned by the latest call to another contract.
- `stack` where the local variables are stored.

-Unlike EVM, which is stack machine, zkEVM has 16 registers. Instead of receiving input from `calldata`, zkEVM starts by receiving a *pointer* in its first register *(*basically a packed struct with 4 elements: the memory page id, start and length of the slice to which it points to*)* to the calldata page of the parent. Similarly, a transaction can receive some other additional data within its registers at the start of the program: whether the transaction should invoke the constructor ([more about deployments here](#contractdeployer--immutablesimulator)), whether the transaction has `isSystem` flag, etc. The meaning of each of these flags will be expanded further in this section.
+Unlike the EVM, which is a stack machine, the zkEVM has 16 registers. Instead of receiving input from `calldata`, the zkEVM starts by receiving in its first register a _pointer_ (basically a packed struct with 4 elements: the memory page id, and the start and length of the slice to which it points) to the calldata page of the parent. Similarly, a transaction can receive some other additional data within its registers at the start of the program: whether the transaction should invoke the constructor ([more about deployments here](#contractdeployer--immutablesimulator)), whether the transaction has the `isSystem` flag, etc. The meaning of each of these flags will be expanded on further in this section.

-*Pointers* are separate type in the VM. It is only possible to:
+_Pointers_ are a separate type in the VM. It is only possible to:

- Read some value within a pointer.
- Shrink the pointer by reducing the slice to which it points.

@@ -47,20 +48,20 @@

- A pointer can be converted to the u256 integer representing it, but an integer can not be converted to a pointer, to prevent unallowed memory access.
- It is not possible to return a pointer that points to a memory page with an id smaller than the one of the current page. What this means is that it is only possible to `return` a pointer to the memory of the current frame, or one of the pointers returned by the subcalls of the current frame.

-### Memory areas in zkEVM
+#### Memory areas in zkEVM

For each frame, the following memory areas are allocated:

-- *Heap* (plays the same role as `memory` on Ethereum).
-- *AuxHeap* (auxiliary heap). It has the same properties as Heap, but it is used for the compiler to encode calldata/copy the returndata from the calls to system contracts to not interfere with the standard Solidity memory alignment.
-- *Stack*. Unlike Ethereum, stack is not the primary place to get arguments for opcodes. The biggest difference between stack on zkEVM and EVM is that on zkSync stack can be accessed at any location (just like memory). While users do not pay for the growth of stack, the stack can be fully cleared at the end of the frame, so the overhead is minimal.
-- *Code*. The memory area from which the VM executes the code of the contract. The contract itself can not read the code page, it is only done implicitly by the VM.
+- _Heap_ (plays the same role as `memory` on Ethereum).
+- _AuxHeap_ (auxiliary heap). It has the same properties as the Heap, but it is used by the compiler to encode calldata and copy the returndata from calls to system contracts, so as not to interfere with the standard Solidity memory alignment.
+- _Stack_. Unlike on Ethereum, the stack is not the primary place to get arguments for opcodes. The biggest difference between the stack on the zkEVM and on the EVM is that on zkSync the stack can be accessed at any location (just like memory). While users do not pay for the growth of the stack, the stack can be fully cleared at the end of the frame, so the overhead is minimal.
+- _Code_. The memory area from which the VM executes the code of the contract. The contract itself can not read the code page; it is only read implicitly by the VM.

Also, as mentioned in the previous section, the contract receives the pointer to the calldata.

-### Managing returndata & calldata
+#### Managing returndata & calldata

-Whenever a contract finishes its execution, the parent’s frame receives a *pointer* as `returndata`. This pointer may point to the child frame’s Heap/AuxHeap or it can even be the same `returndata` pointer that the child frame received from some of its child frames.
+Whenever a contract finishes its execution, the parent’s frame receives a _pointer_ as `returndata`. This pointer may point to the child frame’s Heap/AuxHeap, or it can even be the same `returndata` pointer that the child frame received from some of its child frames.

The same goes for the `calldata`. Whenever a contract starts its execution, it receives the pointer to the calldata. The parent frame can provide any valid pointer as the calldata, which means it can either be a pointer to a slice of the parent frame’s memory (heap or auxHeap), or it can be some valid pointer that the parent frame has received before as calldata/returndata.

@@ -78,13 +79,13 @@ A ← B ← C

There is no need to copy returned data if B returns a slice of the returndata returned by C.

-Note, that you can *not* use the pointer that you received via calldata as returndata (i.e. return it at the end of the execution frame). Otherwise, it would be possible that returndata points to the memory slice of the active frame and allow editing the `returndata`. It means that in the examples above, C could not return a slice of its calldata without memory copying.
+Note, that you can _not_ use the pointer that you received via calldata as returndata (i.e. return it at the end of the execution frame). Otherwise, the returndata could point to a memory slice of the active frame, which would allow editing the `returndata`. It means that in the examples above, C could not return a slice of its calldata without memory copying.

Note, that the rule above is implemented by the principle “it is not possible to return a slice of data with a memory page id lower than the memory page id of the current heap”, since a memory page with a smaller id could only have been created before the call. That’s why a user contract can usually safely return a slice of previously returned returndata (since it is guaranteed to have a higher memory page id). However, system contracts have an exemption from the rule above. It is needed in particular for the correct functionality of the `CodeOracle` system contract. You can read more about it [here](#codeoracle). So the rule of thumb is that returndata from `CodeOracle` should never be passed along.

-Some of these memory optimizations can be seen utilized in the [EfficientCall](https://github.com/code-423n4/2024-03-zksync/blob/7e85e0a997fee7a6d75cadd03d3233830512c2d2/code/system-contracts/contracts/libraries/EfficientCall.sol#L32) library that allows to perform a call while reusing the slice of calldata that the frame already has, without memory copying.
+Some of these memory optimizations can be seen utilized in the [EfficientCall](../../system-contracts/contracts/libraries/EfficientCall.sol#L34) library, which allows performing a call while reusing the slice of calldata that the frame already has, without memory copying.

-### Returndata & precompiles
+#### Returndata & precompiles

Some of the operations which are opcodes on Ethereum have become calls to some of the system contracts. The most notable examples are `Keccak256`, `SystemContext`, etc. Note, that, if done naively, the following lines of code would work differently on zkSync and Ethereum:

@@ -96,60 +97,60 @@
returndatacopy(...)
```

Since the call to the keccak precompile would modify the `returndata`. To avoid this, our compiler does not override the latest `returndata` pointer after calls to such opcode-like precompiles.

-## zkSync specific opcodes
+### zkSync specific opcodes

While some Ethereum opcodes are not supported out of the box, some new opcodes were added to facilitate the development of the system contracts.

-Note, that this lists does not aim to be specific about the internals, but rather explain methods in the [SystemContractHelper.sol](https://github.com/code-423n4/2024-03-zksync/blob/7e85e0a997fee7a6d75cadd03d3233830512c2d2/code/system-contracts/contracts/libraries/SystemContractHelper.sol#L41)
+Note, that this list does not aim to be specific about the internals, but rather to explain the methods in [SystemContractHelper.sol](../../system-contracts/contracts/libraries/SystemContractHelper.sol#L44).

-### **Only for kernel space**
+#### **Only for kernel space**

These opcodes are allowed only for contracts in kernel space (i.e. system contracts). If executed in other places they result in `revert(0,0)`.

- `mimic_call`. The same as a normal `call`, but it can alter the `msg.sender` field of the transaction.
-- `to_l1`. Sends a system L2→L1 log to Ethereum. The structure of this log can be seen [here](https://github.com/code-423n4/2024-03-zksync/blob/7e85e0a997fee7a6d75cadd03d3233830512c2d2/code/contracts/ethereum/contracts/common/Messaging.sol#L23).
+- `to_l1`. Sends a system L2→L1 log to Ethereum. The structure of this log can be seen [here](../../l1-contracts/contracts/common/Messaging.sol#L23).
- `event`. Emits an L2 log to zkSync. Note, that L2 logs are not equivalent to Ethereum events. Each L2 log can emit 64 bytes of data (the actual size is 88 bytes, because it includes the emitter address, etc). A single Ethereum event is represented by multiple `event` logs. This opcode is only used by the `EventWriter` system contract.
- `precompile_call`. This is an opcode that accepts two parameters: the uint256 representing the packed parameters for it, as well as the ergs to burn. Besides the price for the precompile call itself, it burns the provided ergs and executes the precompile. The action it performs depends on `this` during execution:
-  - If it is the address of the `ecrecover` system contract, it performs the ecrecover operation
-  - If it is the address of the `sha256`/`keccak256` system contracts, it performs the corresponding hashing operation.
-  - It does nothing (i.e. just burns ergs) otherwise. It can be used to burn ergs needed for L2→L1 communication or publication of bytecodes onchain.
+  - If it is the address of the `ecrecover` system contract, it performs the ecrecover operation.
+  - If it is the address of the `sha256`/`keccak256` system contracts, it performs the corresponding hashing operation.
+  - It does nothing (i.e. just burns ergs) otherwise. It can be used to burn the ergs needed for L2→L1 communication or for the publication of bytecodes onchain.
- `setValueForNextFarCall` sets `msg.value` for the next `call`/`mimic_call`. Note, that it does not mean that the value will actually be transferred. It just sets the corresponding `msg.value` context variable. The transferring of ETH should be done via other means by the system contract that uses this parameter. Note, that this method has no effect on `delegatecall`, since `delegatecall` inherits the `msg.value` of the previous frame.
- `increment_tx_counter` increments the counter of the transactions within the VM. The transaction counter is used mostly for the VM’s internal tracking of events. It is used only in the bootloader, after the end of each transaction.
- `decommit` will return a pointer to a slice with the corresponding bytecode hash preimage. If this bytecode has been unpacked before, the memory page where it was unpacked will be reused. If it has never been unpacked before, it will be unpacked into the current heap.
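+As a rough illustration of how one of these opcodes is reached from Solidity, the sketch below mirrors the call-simulation pattern described in the “Simulations via our compiler” section further down. It is a sketch only: the name `PRECOMPILE_CALL_ADDRESS` and the exact packing of `_rawParams` are assumptions here, not an authoritative API.
+
+```solidity
+// Illustrative sketch of invoking `precompile_call` via a compiler simulation
+// (compare the `getCodeAddress` example below). `PRECOMPILE_CALL_ADDRESS` is
+// the reserved constant the compiler is assumed to recognize and replace
+// with the native opcode.
+function rawPrecompileCall(uint256 _rawParams, uint32 _gasToBurn) internal view returns (bool success) {
+    address callAddr = PRECOMPILE_CALL_ADDRESS;
+    assembly {
+        // Assumption: `_rawParams` packs the precompile's input/output memory
+        // locations, while `_gasToBurn` ergs are burned by the call.
+        success := staticcall(_rawParams, callAddr, _gasToBurn, 0xFFFF, 0, 0)
+    }
+}
+```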
-Note, that currently we do not have access to the `tx_counter` within VM (i.e. for now it is possible to increment it and it will be automatically used for logs such as `event`s as well as system logs produced by `to_l1`, but we can not read it). We need to read it to publish the *user* L2→L1 logs, so `increment_tx_counter` is always accompanied by the corresponding call to the [SystemContext](#systemcontext) contract.
+Note, that currently we do not have access to the `tx_counter` within the VM (i.e. for now it is possible to increment it, and it will be automatically used for logs such as `event`s as well as the system logs produced by `to_l1`, but we can not read it). We need to read it to publish the _user_ L2→L1 logs, so `increment_tx_counter` is always accompanied by a corresponding call to the [SystemContext](#systemcontext) contract.

-More on the difference between system and user logs can be read [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/Handling%20pubdata.md).
+More on the difference between system and user logs can be read [here](../settlement_contracts/data_availability/standard_pubdata_format.md).

-### **Generally accessible**
+#### **Generally accessible**

Here are the opcodes that can be generally accessed by any contract. Note that while the VM allows access to these methods, it does not mean that this is easy: the compiler might not have convenient support for some use-cases yet.

- `near_call`. It is basically a “framed” jump to some location of the code of your contract. The differences between a `near_call` and an ordinary jump are:
-  1. It is possible to provide an ergsLimit for it. Note, that unlike “`far_call`”s (i.e. calls between contracts) the 63/64 rule does not apply to them.
-  2. If the near call frame panics, all state changes made by it are reversed. Please note, that the memory changes will **not** be reverted.
-- `getMeta`. Returns an u256 packed value of [ZkSyncMeta](https://github.com/code-423n4/2024-03-zksync/blob/7e85e0a997fee7a6d75cadd03d3233830512c2d2/code/system-contracts/contracts/libraries/SystemContractHelper.sol#L15) struct. Note that this is not tight packing. The struct is formed by the [following rust code](https://github.com/matter-labs/era-zkevm_opcode_defs/blob/7bf8016f5bb13a73289f321ad6ea8f614540ece9/src/definitions/abi/meta.rs#L4).
+  1. It is possible to provide an ergsLimit for it. Note, that unlike “`far_call`”s (i.e. calls between contracts) the 63/64 rule does not apply to them.
+  2. If the near call frame panics, all state changes made by it are reversed. Please note, that the memory changes will **not** be reverted.
+- `getMeta`. Returns a u256-packed value of the [ZkSyncMeta](../../system-contracts/contracts/libraries/SystemContractHelper.sol#L18) struct. Note that this is not tight packing. The struct is formed by the [following rust code](https://github.com/matter-labs/era-zkevm_opcode_defs/blob/7bf8016f5bb13a73289f321ad6ea8f614540ece9/src/definitions/abi/meta.rs#L4).
- `getCodeAddress` — receives the address of the executed code. This is different from `this`, since in the case of delegatecalls `this` is preserved, but `codeAddress` is not.

-### Flags for calls
+#### Flags for calls

Besides the calldata, it is also possible to provide additional information to the callee when doing `call`, `mimic_call`, `delegate_call`. The called contract will receive the following information in its first 12 registers at the start of execution:

-- *r1* — the pointer to the calldata.
-- *r2* — the pointer with flags of the call. This is a mask, where each bit is set only if certain flags have been set to the call. Currently, two flags are supported: 0-th bit: `isConstructor` flag. This flag can only be set by system contracts and denotes whether the account should execute its constructor logic. Note, unlike Ethereum, there is no separation on constructor & deployment bytecode. More on that can be read [here](#contractdeployer--immutablesimulator). 1-st bit: `isSystem` flag. Whether the call intends a system contracts’ function. While most of the system contracts’ functions are relatively harmless, accessing some with calldata only may break the invariants of Ethereum, e.g. if the system contract uses `mimic_call`: no one expects that by calling a contract some operations may be done out of the name of the caller. This flag can be only set if the callee is in kernel space.
+- _r1_ — the pointer to the calldata.
+- _r2_ — the pointer with the flags of the call. This is a mask, where each bit is set only if the corresponding flag has been set for the call. Currently, two flags are supported. The 0-th bit: the `isConstructor` flag. This flag can only be set by system contracts and denotes whether the account should execute its constructor logic. Note, unlike Ethereum, there is no separation into constructor & deployment bytecode. More on that can be read [here](#contractdeployer--immutablesimulator). The 1-st bit: the `isSystem` flag. Whether the call is intended to invoke a system contract’s function. While most of the system contracts’ functions are relatively harmless, accessing some of them with calldata only may break the invariants of Ethereum, e.g. if the system contract uses `mimic_call`: no one expects that by calling a contract some operations may be done in the name of the caller. This flag can only be set if the callee is in kernel space.
- The rest of the registers (r3..r12) are non-empty only if the `isSystem` flag is set. Arbitrary values may be passed in them, which we call `extraAbiParams`.

In the compiler implementation, these flags are remembered by the contract and can be accessed later during execution via special [simulations](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/VM%20Section/How%20compiler%20works/instructions/extensions/overview.md).

If the caller provides inappropriate flags (i.e. tries to set the `isSystem` flag when the callee is not in kernel space), the flags are ignored.

-### `onlySystemCall` modifier
+#### `onlySystemCall` modifier

Some of the system contracts can act on behalf of the user or have a very important impact on the behavior of the account. That’s why we wanted to make it clear that users can not invoke potentially dangerous operations by doing a simple EVM-like `call`. Whenever a user wants to invoke some of the operations which we consider dangerous, they must provide the `isSystem` flag with the call. The `onlySystemCall` modifier checks that the call was either done with the `isSystem` flag provided, or that it was done by another system contract (since Matter Labs is fully aware of the system contracts).

-### Simulations via our compiler
+#### Simulations via our compiler

In the future, we plan to introduce our “extended” version of Solidity with more supported opcodes than the original one. However, right now this is beyond the capacity of the team, so in order to represent accessing zkSync-specific opcodes, we use the `call` opcode with certain constant parameters that will be automatically replaced by the compiler with the zkEVM native opcode.

@@ -157,15 +158,13 @@ Example:

```solidity
function getCodeAddress() internal view returns (address addr) {
-    address callAddr = CODE_ADDRESS_CALL_ADDRESS;
-    assembly {
-        addr := staticcall(0, callAddr, 0, 0xFFFF, 0, 0)
-    }
+    address callAddr = CODE_ADDRESS_CALL_ADDRESS;
+    assembly {
+        addr := staticcall(0, callAddr, 0, 0xFFFF, 0, 0)
+    }
}
```

-(FIXME -- shall we update links for the compiler??).
-

In the example above, the compiler will detect that the static call is done to the constant `CODE_ADDRESS_CALL_ADDRESS` and so it will replace it with the opcode for getting the code address of the current execution.

The full list of opcode simulations can be found [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/VM%20Section/How%20compiler%20works/instructions/extensions/call.md).

@@ -174,7 +173,7 @@ We also use [verbatim-like](https://github.com/code-423n4/2024-03-zksync/blob/ma

All the usages of the simulations in our Solidity code are implemented in the [SystemContractHelper](../../system-contracts/contracts/libraries/SystemContractHelper.sol) library and the [SystemContractsCaller](../../system-contracts/contracts/libraries/SystemContractsCaller.sol) library.

-**Simulating** `near_call` **(in Yul only)**
+#### Simulating `near_call` (in Yul only)

In order to use `near_call`, i.e. to call a local function while providing a limit of ergs (gas) that this function can use, the following syntax is used:

@@ -182,17 +181,17 @@ The function should contain `ZKSYNC_NEAR_CALL` string in its name and accept at

Whenever a `near_call` panics, the `ZKSYNC_CATCH_NEAR_CALL` function is called.

-*Important note:* the compiler behaves in a way that if there is a `revert` in the bootloader, the `ZKSYNC_CATCH_NEAR_CALL` is not called and the parent frame is reverted as well. The only way to revert only the `near_call` frame is to trigger VM’s *panic* (it can be triggered with either invalid opcode or out of gas error).
+_Important note:_ the compiler behaves in a way that if there is a `revert` in the bootloader, the `ZKSYNC_CATCH_NEAR_CALL` is not called and the parent frame is reverted as well. The only way to revert only the `near_call` frame is to trigger the VM’s _panic_ (it can be triggered with either an invalid opcode or an out-of-gas error).

-*Important note 2:* The 63/64 rule does not apply to `near_call`. Also, if 0 gas is provided to the near call, then actually all of the available gas will go to it.
+_Important note 2:_ The 63/64 rule does not apply to `near_call`. Also, if 0 gas is provided to the near call, then all of the available gas will go to it.

-**Notes on security**
+#### Notes on security

To prevent unintended substitution, the compiler requires the `--system-mode` flag to be passed during compilation for the above substitutions to work.

> Note, that in more recent compiler versions the `--system-mode` flag has been renamed to `enable_eravm_extensions` (this can be seen e.g. in our [foundry.toml](../../l1-contracts/foundry.toml))

-## Bytecode hashes
+### Bytecode hashes

On zkSync the bytecode hashes are stored in the following format:

@@ -203,7 +202,7 @@

The bytes are ordered in little-endian order (i.e. the same way as for `bytes32`).

-### Bytecode validity
+#### Bytecode validity

A bytecode is valid if it:

@@ -215,17 +214,17 @@ Note, that it does not have to consist of only correct opcodes. In case the VM e

A call to a contract with invalid bytecode can not be proven. That is why it is **essential** that no contract with invalid bytecode is ever deployed on zkSync. It is the job of the [KnownCodesStorage](#knowncodestorage) to ensure that all allowed bytecodes in the system are valid.

-# Account abstraction
+## Account abstraction

One of the other important features of zkSync is the support of account abstraction. It is highly recommended to read the documentation on our AA protocol here: [https://era.zksync.io/docs/reference/concepts/account-abstraction.html#introduction](https://era.zksync.io/docs/reference/concepts/account-abstraction.html#introduction)

-### Account versioning
+#### Account versioning

Each account can also specify which version of the account abstraction protocol it supports. This is needed to allow breaking changes of the protocol in the future.

Currently, two versions are supported: `None` (i.e. it is a simple contract and it should never be used as the `from` field of a transaction), and `Version1`.

-### Nonce ordering
+#### Nonce ordering

Accounts can also signal to the operator which nonce ordering it should expect from them: `Sequential` or `Arbitrary`.

@@ -235,29 +234,29 @@ Accounts can also signal to the operator which nonce ordering it should expect f

Note, that this is not enforced by system contracts in any way. Some sanity checks may be present, but the accounts are allowed to behave however they like. It is more of a suggestion to the operator on how to manage the mempool.

-### Returned magic value
+#### Returned magic value

Now, both accounts and paymasters are required to return a certain magic value upon validation. This magic value will be enforced to be correct on the mainnet, but will be ignored during fee estimation. Unlike on Ethereum, the signature verification and the fee charging/nonce increment are not included as part of the intrinsic costs of the transaction. These are paid as part of the execution, and so they need to be estimated as part of the transaction’s cost estimation.

Generally, the accounts are recommended to perform as many operations as during normal validation, but only return the invalid magic at the end of the validation. This allows estimating the price for the validation of the account correctly (or at least as correctly as possible).

-# Bootloader
+## Bootloader

The bootloader is the program that accepts an array of transactions and executes the entire zkSync batch. This section will expand on its invariants and methods.

-## Playground bootloader vs proved bootloader
+### Playground bootloader vs proved bootloader

-For convenience, we use the same implementation of the bootloader both in the mainnet batches and for emulating ethCalls or other testing activities. *Only* *proved* bootloader is ever used for batch-building and thus this document describes only it.
+For convenience, we use the same implementation of the bootloader both in the mainnet batches and for emulating ethCalls or other testing activities. _Only_ the _proved_ bootloader is ever used for batch-building, and thus this document describes only it.

-## Start of the batch
+### Start of the batch

It is enforced by the ZKPs that the state of the bootloader is equivalent to the state of a contract transaction with empty calldata. The only difference is that it starts with all the possible memory pre-allocated (to avoid costs for memory expansion).

-For additional efficiency (and our convenience), the bootloader receives its parameters inside its memory. This is the only point of non-determinism: the bootloader *starts with its memory pre-filled with any data the operator wants*. That’s why it is responsible for validating the correctness of it and it should never rely on the initial contents of the memory to be correct & valid.
+For additional efficiency (and our convenience), the bootloader receives its parameters inside its memory. This is the only point of non-determinism: the bootloader _starts with its memory pre-filled with any data the operator wants_. That’s why it is responsible for validating its correctness, and it should never rely on the initial contents of the memory being correct & valid.

For instance, for each transaction, we check that it is [properly ABI-encoded](../../system-contracts/bootloader/bootloader.yul#L3278) and that the transactions [go exactly one after another](../../system-contracts/bootloader/bootloader.yul#L3974). We also ensure that transactions do not exceed the limits of the memory space allowed for transactions.

-## Transaction types & their validation
+### Transaction types & their validation

While the main transaction format is the internal `Transaction` [format](../../system-contracts/contracts/libraries/TransactionHelper.sol#L25), it is a struct that is used to represent various kinds of transaction types. It contains a lot of `reserved` fields that could be used by future transaction types without the need for AA contracts to change their interfaces.

@@ -277,11 +276,11 @@ You can also read more on L1->L2 transactions and upgrade transacitons [here](./

However, as already stated, the bootloader’s memory is not deterministic and the operator is free to put anything it wants there. For all of the transaction types above, restrictions are imposed in the following [method](../../system-contracts/bootloader/bootloader.yul#L3048), which is called before the processing of the transaction starts.

-## Structure of the bootloader’s memory
+### Structure of the bootloader’s memory

The bootloader expects the following structure of the memory (here by word we denote 32 bytes, the same machine word as on EVM):

-### **Batch information**
+#### **Batch information**

The first 8 words are reserved for the batch information provided by the operator.

@@ -289,50 +288,45 @@ The first 8 words are reserved for the batch information provided by the operato

- `1` word — the hash of the previous batch. Its validation will be explained later on.
- `2` word — the timestamp of the current batch. Its validation will be explained later on.
- `3` word — the number of the new batch.
-- `4` word — the fair pubdata price. More on how our pubdata is calculated can be read [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/zkSync%20fee%20model.md#recommended-calculation-of-fair_l2_gas_pricefair_pubdata_price).
+- `4` word — the fair pubdata price. More on how our pubdata is calculated can be read [here](../l2_system_contracts/zksync_fee_model.md).
- `5` word — the “fair” price for L2 gas, i.e. the price below which the `baseFee` of the batch should not fall. For now, it is provided by the operator, but in the future it may become hardcoded.
- `6` word — the base fee for the batch that is expected by the operator. While the base fee is deterministic, it is still provided to the bootloader just to make sure that the data that the operator has coincides with the data provided by the bootloader.
- `7` word — reserved word. Unused on proved batch.
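+For illustration, and assuming that batch-information word `i` lives at byte offset `32 * i` of the bootloader’s heap, reading these slots conceptually looks like the following sketch (the real bootloader is written in Yul; this is not its actual code):
+
+```solidity
+// Sketch only: batch-information word i is assumed to live at byte offset
+// 32 * i of the bootloader's heap.
+function readBatchInfoSketch()
+    internal
+    pure
+    returns (uint256 prevBatchHash, uint256 newBatchTimestamp, uint256 newBatchNumber)
+{
+    assembly {
+        prevBatchHash := mload(32)     // word 1: hash of the previous batch
+        newBatchTimestamp := mload(64) // word 2: timestamp of the current batch
+        newBatchNumber := mload(96)    // word 3: number of the new batch
+        // words 4..6: fair pubdata price, "fair" L2 gas price, expected baseFee
+    }
+}
+```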
The batch information slots [are used at the beginning of the batch](../../system-contracts/bootloader/bootloader.yul#L3858). Once read, these slots can be used for temporary data.

-### **Temporary data for debug & transaction processing purposes**
-
-FIXME: constants here need to be checked
+#### **Temporary data for debug & transaction processing purposes**

- `[8..39]` – reserved slots for debugging purposes
- `[40..72]` – slots for holding the paymaster context data for the current transaction. The role of the paymaster context is similar to the [EIP4337](https://eips.ethereum.org/EIPS/eip-4337)’s one. You can read more about it in the account abstraction documentation.
- `[73..74]` – slots for the signed and explorer transaction hashes of the currently processed L2 transaction.
-- `[75..110]` – 36 slots for the calldata for the KnownCodesContract call.
-- `[111..10110]` – 10000 slots for the refunds for the transactions.
-- `[10111..20110]` – 10000 slots for the overhead for batch for the transactions. This overhead is suggested by the operator, i.e. the bootloader will still double-check that the operator does not overcharge the user.
-- `[20111..30110]` – slots for the “trusted” gas limits by the operator. The user’s transaction will have at its disposal `min(MAX_TX_GAS(), trustedGasLimit)`, where `MAX_TX_GAS` is a constant guaranteed by the system. Currently, it is equal to 80 million gas. In the future, this feature will be removed.
-- `[30111..70114]` – slots for storing L2 block info for each transaction. You can read more on the difference L2 blocks and batches [here](./Batches%20&%20L2%20blocks%20on%20zkSync.md).
-- `[70115..135650]` – slots used for compressed bytecodes each in the following format:
-  - 32 bytecode hash
-  - 32 zeroes (but then it will be modified by the bootloader to contain 28 zeroes and then the 4-byte selector of the `publishCompressedBytecode` function of the `BytecodeCompresor`)
-  - The calldata to the bytecode compressor (without the selector).
-- `[135651..135652]` – slots where the hash and the number of current priority ops is stored. More on it in the priority operations [section](./Handling%20L1→L2%20ops%20on%20zkSync.md).
+- `[75..142]` – 68 slots for the calldata for the KnownCodesContract call.
+- `[143..10142]` – 10000 slots for the refunds for the transactions.
+- `[10143..20142]` – 10000 slots for the batch overhead for the transactions. This overhead is suggested by the operator, i.e. the bootloader will still double-check that the operator does not overcharge the user.
+- `[20143..30142]` – slots for the “trusted” gas limits provided by the operator. The user’s transaction will have at its disposal `min(MAX_TX_GAS(), trustedGasLimit)`, where `MAX_TX_GAS` is a constant guaranteed by the system. Currently, it is equal to 80 million gas. In the future, this feature will be removed.
+- `[30143..70146]` – slots for storing L2 block info for each transaction. You can read more on the difference between L2 blocks and batches [here](./batches_and_blocks_on_zksync.md).
+- `[70147..266754]` – slots used for compressed bytecodes, each in the following format:
+  - the 32-byte bytecode hash
+  - 32 zeroes (but then it will be modified by the bootloader to contain 28 zeroes and then the 4-byte selector of the `publishCompressedBytecode` function of the `BytecodeCompressor`)
+  - The calldata to the bytecode compressor (without the selector).
+- `[266755..266756]` – slots where the hash and the number of the current priority ops are stored. More on it in the priority operations [section](../settlement_contracts/priority_queue/processing_of_l1->l2_txs.md).

-### L1Messenger Pubdata
+#### L1Messenger Pubdata

-FIXME: constants here need to be checked
+- `[266757..1626756]` – slots where the final batch pubdata is supplied to be verified by the [L2DAValidator](../settlement_contracts/data_availability/custom_da.md).

-- `[135653..586652]` – slots where the final batch pubdata is supplied to be verified by the L1Messenger. More on how the L1Messenger system contracts handles the pubdata can be read [here](./Handling%20pubdata.md).
+But briefly, this space is used for the calldata to the L1Messenger’s `publishPubdataAndClearState` function, which accepts the address of the L2DAValidator as well as the pubdata for it to check. The L2DAValidator is a contract that is responsible for ensuring efficiency [when handling pubdata](../settlement_contracts/data_availability/custom_da.md). Typically, the calldata to the `L2DAValidator` would include the uncompressed preimages for bytecodes, L2->L1 messages, L2->L1 logs, etc, together with their compressed counterparts. However, the exact implementation may vary across various ZK chains.

-But briefly, this space is used for the calldata to the L1Messenger’s `publishPubdataAndClearState` function, which accepts the address of the L2DAValidator as well as the pubdata for it to check. The L2DAValidator is a contract that is responsible to ensure efficiency when handling pubdata (FIXME: provide the link). Typically, the calldata `L2DAValidator` would include uncompressed preimages for bytecodes, L2->L1 messages, L2->L1 logs, etc as their compressed counterparts). However, the exact implementation may vary across various ZK chains.
+Note, that while the realistic amount of pubdata that can be published in a batch is ~780kb, the size of the calldata to the L1Messenger may be a lot larger due to the fact that this method also accepts the original uncompressed state diff entries. These will not be published to L1, but will be used to verify the correctness of the compression.

-FIXME: constants below are not correct
+One of the “worst case” scenarios for the number of state diffs in a batch is when 780kb of pubdata is spent on repeated writes (4 bytes to encode each key) that are all zeroed out (1 byte to encode each value). In this case, the number of diffs is 780kb / 5 = 156k. Each such write is represented as a 272-byte uncompressed state diff entry, so we will have to accommodate 156k * 272 = 42432000 bytes of calldata for the uncompressed state diffs. Adding 780kb on top leaves us with roughly 43212000 bytes needed for calldata. 1350375 slots are needed to accommodate this amount of data. We round up to 1360000 slots just in case.

-Note, that while the realistic number of pubdata that can be published in a batch is ~260kb, the size of the calldata to L1Messenger may be a lot larger due to the fact that this method also accepts the original uncompressed state diff entries. These will not be published to L1, but will be used to verify the correctness of the compression. The worst-case number of bytes that may be needed for this scratch space is if all the pubdata consists of repeated writes (i.e. we’ll need only 4 bytes to include key) that turn into 0 (i.e. they’ll need only 1 byte to describe it). However, each of these writes in the uncompressed form will be represented as 272 byte state diff entry and so we get the number of diffs is `260kb / 5 = 52k`. This means that they will have
-accoomdate `52k * 272 = 14144000` bytes of calldata for the uncompressed state diffs. Adding 260k on top leaves us with roughly `14404000` bytes needed for calldata.
-`450125` slots are needed to accomodate this amount of data.
-We round up to `451000` slots to give space for constant-size factors for ABI-encoding, like offsets, lengths, etc.
+In theory, though, much more calldata could be used (if, for instance, 1 byte is used for the enum index). It is the responsibility of the operator to ensure that it can form the correct calldata for the L1Messenger.

-In theory though much more calldata could be used (if for instance 1 byte is used for enum index). It is the responsibility of the operator to ensure that it can form the correct calldata for the L1Messenger.

-### **Transaction’s meta descriptions**
+#### **Transaction’s meta descriptions**

-- `[586653..606652]` words — 20000 slots for 10000 transaction’s meta descriptions (their structure is explained below).
+- `[1626757..1646756]` words — 20000 slots for 10000 transactions’ meta descriptions (their structure is explained below).

For internal reasons related to possible future integrations of zero-knowledge proofs about some of the contents of the bootloader’s memory, the array of the transactions is not passed as the ABI-encoding of the array of transactions, but:

@@ -341,75 +335,77 @@ For internal reasons related to possible future integrations of zero-knowledge p

```solidity
struct BootloaderTxDescription {
-  // The offset by which the ABI-encoded transaction's data is stored
-  uint256 txDataOffset;
-  // Auxilary data on the transaction's execution. In our internal versions
-  // of the bootloader it may have some special meaning, but for the
-  // bootloader used on the mainnet it has only one meaning: whether to execute
-  // the transaction. If 0, no more transactions should be executed. If 1, then
-  // we should execute this transaction and possibly try to execute the next one.
-  uint256 txExecutionMeta;
+  // The offset at which the ABI-encoded transaction's data is stored
+  uint256 txDataOffset;
+  // Auxiliary data on the transaction's execution. In our internal versions
+  // of the bootloader it may have some special meaning, but for the
+  // bootloader used on the mainnet it has only one meaning: whether to execute
+  // the transaction. If 0, no more transactions should be executed. If 1, then
+  // we should execute this transaction and possibly try to execute the next one.
+  uint256 txExecutionMeta;
}
```

-### **Reserved slots for the calldata for the paymaster’s postOp operation**
+#### **Reserved slots for the calldata for the paymaster’s postOp operation**

-- `[606653..606692]` words — 40 slots which could be used for encoding the calls for postOp methods of the paymaster.
+- `[1646757..1646796]` words — 40 slots which could be used for encoding the calls to the postOp methods of the paymaster.

To avoid additional copying of transactions for the calls for the account abstraction, we reserve some of the slots which can then be used to form the calldata for the `postOp` call without having to copy the entire transaction’s data.

-### **The actual transaction’s descriptions**
+#### **The actual transactions’ descriptions**

-- `[606693..927496]`
+- `[1646797..1967600]`

Starting from word 1646797, the actual descriptions of the transactions start. (The struct can be found at this [link](../../system-contracts/contracts/libraries/TransactionHelper.sol#L25)). The bootloader enforces that:

- They are correctly ABI-encoded representations of the struct above.
- They are located without any gaps in memory (the first transaction starts at the beginning of this area and each subsequent transaction follows the previous one without gaps).
- The contents of the currently processed transaction (and the ones that will be processed later on) are untouched. Note, that we do allow overwriting data from the already processed transactions, as it helps to preserve efficiency by not having to copy the contents of the `Transaction` each time we need to encode a call to the account.

-### **VM hook pointers**
+#### **VM hook pointers**

-- `[927497..927499]`
+- `[1967601..1967603]`

These are memory slots that are used purely for debugging purposes (when the VM writes to these slots, the server side can catch these writes and surface important information for debugging issues).

-### **Result ptr pointer**
+#### **Result ptr pointer**

-- `[927500..937499]`
+- `[1967604..1977603]`

These are memory slots that are used to track the success status of a transaction. If the transaction with number `i` succeeded, the slot `1977603 - 10000 + i` will be marked as 1, and as 0 otherwise.

-## General flow of the bootloader’s execution
+### General flow of the bootloader’s execution

1. At the start of the batch it [reads the initial batch information](../../system-contracts/bootloader/bootloader.yul#L3858) and [sends the information](../../system-contracts/bootloader/bootloader.yul#L3912) about the current batch to the SystemContext system contract.
2. It goes through each of the [transaction descriptions](../../system-contracts/bootloader/bootloader.yul#L3954) and checks whether the `execute` field is set. If not, it ends the processing of the transactions and ends the execution of the batch. If the execute field is non-zero, the transaction will be executed and it goes to step 3.
3. Based on the transaction’s type, it decides whether the transaction is an L1 or L2 transaction and processes it accordingly. More on the processing of the L1 transactions can be read [here](#l1-l2-transactions). More on L2 transactions can be read [here](#l2-transactions).

-## L2 transactions
+### L2 transactions

-On zkSync, every address is a contract. Users can start transactions from their EOA accounts, because every address that does not have any contract deployed on it implicitly contains the code defined in the [DefaultAccount.sol](https://github.com/code-423n4/2024-03-zksync/blob/main/code/system-contracts/contracts/DefaultAccount.sol) file. Whenever anyone calls a contract that is not in kernel space (i.e. the address is ≥ 2^16) and does not have any contract code deployed on it, the code for `DefaultAccount` will be used as the contract’s code.
+On zkSync, every address is a contract. Users can start transactions from their EOA accounts, because every address that does not have any contract deployed on it implicitly contains the code defined in the [DefaultAccount.sol](../../system-contracts/contracts/DefaultAccount.sol) file. Whenever anyone calls a contract that is not in kernel space (i.e. the address is ≥ 2^16) and does not have any contract code deployed on it, the code for `DefaultAccount` will be used as the contract’s code.

Note, that if you call an account that is in kernel space and does not have any code deployed there, right now, the transaction will revert.
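+The address-to-code resolution rule above can be summarized with the following sketch. It is illustrative only: `storedCodeHashOf` and `DEFAULT_ACCOUNT_CODE_HASH` are hypothetical names, and the real lookup is performed by the VM together with the [AccountCodeStorage](#accountcodestorage) system contract described below.
+
+```solidity
+// Illustrative sketch of how the VM decides which code to run for `callee`.
+function resolveCodeHash(address callee) internal view returns (bytes32) {
+    bytes32 storedHash = storedCodeHashOf(callee); // hypothetical AccountCodeStorage lookup
+    if (storedHash != bytes32(0)) {
+        return storedHash; // a contract is deployed at `callee`
+    }
+    // Kernel-space address without code: such calls currently revert.
+    require(uint160(callee) >= 2 ** 16, "kernel space address without code");
+    // User-space address without code: the DefaultAccount code is substituted.
+    return DEFAULT_ACCOUNT_CODE_HASH;
+}
+```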
-FIXME: the link below is broken - -We process the L2 transactions according to our account abstraction protocol: [https://v2-docs.zksync.io/dev/tutorials/custom-aa-tutorial.html#prerequisite](https://v2-docs.zksync.io/dev/tutorials/custom-aa-tutorial.html#prerequisite). +We process the L2 transactions according to our account abstraction protocol: [https://docs.zksync.io/build/developer-reference/account-abstraction](https://docs.zksync.io/build/developer-reference/account-abstraction). 1. We [deduct](../../system-contracts/bootloader/bootloader.yul#L1163) the transaction’s upfront payment for the overhead for the block’s processing. You can read more on how that works in the fee model [description](./zkSync%20fee%20model.md). 2. Then we calculate the gasPrice for these transactions according to the EIP1559 rules. 3. We [conduct the validation step](../../system-contracts/bootloader/bootloader.yul#L1278) of the AA protocol: -- We calculate the hash of the transaction. -- If enough gas has been provided, we near_call the validation function in the bootloader. It sets the tx.origin to the address of the bootloader, sets the ergsPrice. It also marks the factory dependencies provided by the transaction as marked and then invokes the validation method of the account and verifies the returned magic. -- Calls the accounts and, if needed, the paymaster to receive the payment for the transaction. Note, that accounts may not use `block.baseFee` context variable, so they have no way to know what exact sum to pay. That’s why the accounts typically firstly send `tx.maxFeePerErg * tx.ergsLimit` and the bootloader [refunds](../../system-contracts/bootloader/bootloader.yul#L787) for any excess funds sent. + + - We calculate the hash of the transaction. + - If enough gas has been provided, we near_call the validation function in the bootloader. It sets the tx.origin to the address of the bootloader, sets the ergsPrice. It also marks the factory dependencies provided by the transaction as marked and then invokes the validation method of the account and verifies the returned magic. + - Calls the accounts and, if needed, the paymaster to receive the payment for the transaction. Note, that accounts may not use `block.baseFee` context variable, so they have no way to know what exact sum to pay. That’s why the accounts typically firstly send `tx.maxFeePerErg * tx.ergsLimit` and the bootloader [refunds](../../system-contracts/bootloader/bootloader.yul#L787) for any excess funds sent. + 4. [We perform the execution of the transaction](../../system-contracts/bootloader/bootloader.yul#L1343). Note, that if the sender is an EOA, tx.origin is set equal to the `from` the value of the transaction. During the execution of the transaction, the publishing of the compressed bytecodes happens: for each factory dependency if it has not been published yet and its hash is currently pointed to in the compressed bytecodes area of the bootloader, a call to the bytecode compressor is done. Also, at the end the call to the KnownCodeStorage is done to ensure all the bytecodes have indeed been published. 5. We [refund](../../system-contracts/bootloader/bootloader.yul#L1553) the user for any excess funds he spent on the transaction: -- Firstly, the `postTransaction` operation is called to the paymaster. -- The bootloader asks the operator to provide a refund. During the first VM run without proofs the provide directly inserts the refunds in the memory of the bootloader. 
During the run for the proved batches, the operator already knows what which values have to be inserted there. You can read more about it in the [documentation](./zkSync%20fee%20model.md) of the fee model. -- The bootloader refunds the user. -1. We notify the operator about the [refund](../../system-contracts/bootloader/bootloader.yul#L1211) that was granted to the user. It will be used for the correct displaying of gasUsed for the transaction in explorer. -## L1->L2 transactions + - Firstly, the `postTransaction` operation is called to the paymaster. + - The bootloader asks the operator to provide a refund. During the first VM run without proofs the provide directly inserts the refunds in the memory of the bootloader. During the run for the proved batches, the operator already knows what which values have to be inserted there. You can read more about it in the [documentation](./zkSync%20fee%20model.md) of the fee model. + - The bootloader refunds the user. + +6. We notify the operator about the [refund](../../system-contracts/bootloader/bootloader.yul#L1211) that was granted to the user. It will be used for the correct displaying of gasUsed for the transaction in explorer. + +### L1->L2 transactions L1->L2 transactions are transactions that were initiated on L1. We assume that `from` has already authorized the L1→L2 transactions. It also has its L1 pubdata price as well as ergsPrice set on L1. @@ -424,7 +420,7 @@ There are two kinds of L1->L2 transactions: You can read more about differences between those in the corresponding [document](./Handling%20L1%E2%86%92L2%20ops%20on%20zkSync.md). -## End of the batch +### End of the batch At the end of the batch we set `tx.origin` and `tx.gasprice` context variables to zero to save L1 gas on calldata and send the entire bootloader balance to the operator, effectively sending fees to him. @@ -434,13 +430,13 @@ After that, we publish the hash as well as the number of priority operations in Then, we call the L1Messenger system contract for it to compose the pubdata to be published on L1. You can read more about the pubdata processing [here](./Handling%20pubdata.md). -# System contracts +## System contracts Most of the details on the implementation and the requirements for the execution of system contracts can be found in the doc-comments of their respective code bases. This chapter serves only as a high-level overview of such contracts. All the codes of system contracts (including `DefaultAccount`s) are part of the protocol and can only be change via a system upgrade through L1. -## SystemContext +### SystemContext This contract is used to support various system parameters not included in the VM by default, i.e. `chainId`, `origin`, `ergsPrice`, `blockErgsLimit`, `coinbase`, `difficulty`, `baseFee`, `blockhash`, `block.number`, `block.timestamp.` @@ -448,21 +444,21 @@ It is important to note that the constructor is **not** run for this contract up This contract is also responsible for ensuring validity and consistency of batches, L2 blocks. The implementation itself is rather straightforward, but to better understand this contract, please take a look at the [page](./Batches%20&%20L2%20blocks%20on%20zkSync.md) about the block processing on zkSync. -## AccountCodeStorage +### AccountCodeStorage The code hashes of accounts are stored inside the storage of this contract. 
Whenever a VM calls a contract with address `address` it retrieves the value under storage slot `address` of this system contract, if this value is non-zero, it uses this as the code hash of the account. Whenever a contract is called, the VM asks the operator to provide the preimage for the codehash of the account. That is why data availability of the code hashes is paramount. -### Constructing vs Non-constructing code hash +#### Constructing vs Non-constructing code hash In order to prevent contracts from being able to call a contract during its construction, we set the marker (i.e. second byte of the bytecode hash of the account) as `1`. This way, the VM will ensure that whenever a contract is called without the `isConstructor` flag, the bytecode of the default account (i.e. EOA) will be substituted instead of the original bytecode. -## BootloaderUtilities +### BootloaderUtilities This contract contains some of the methods which are needed purely for the bootloader functionality but were moved out from the bootloader itself for the convenience of not writing this logic in Yul. -## DefaultAccount +### DefaultAccount Whenever a contract that does **not** both: @@ -471,7 +467,7 @@ Whenever a contract that does **not** both: The code of the default account is used. The main purpose of this contract is to provide EOA-like experience for both wallet users and contracts that call it, i.e. it should not be distinguishable (apart of spent gas) from EOA accounts on Ethereum. -## Ecrecover +### Ecrecover The implementation of the ecrecover precompile. It is expected to be used frequently, so written in pure yul with a custom memory layout. @@ -484,25 +480,25 @@ It also validates the input by the same rules as the EVM precompile: After that, it makes a precompile call and returns empty bytes if the call failed, and the recovered address otherwise. -## Empty contracts +### Empty contracts Some of the contracts are relied upon to have EOA-like behaviour, i.e. they can be always called and get the success value in return. An example of such address is 0 address. We also require the bootloader to be callable so that the users could transfer ETH to it. For these contracts, we insert the `EmptyContract` code upon genesis. It is basically a noop code, which does nothing and returns `success=1`. -## SHA256 & Keccak256 +### SHA256 & Keccak256 -Note that, unlike Ethereum, keccak256 is a precompile (*not an opcode*) on zkSync. +Note that, unlike Ethereum, keccak256 is a precompile (_not an opcode_) on zkSync. These system contracts act as wrappers for their respective crypto precompile implementations. They are expected to be used frequently, especially keccak256, since Solidity computes storage slots for mapping and dynamic arrays with its help. That's why we wrote contracts on pure yul with optimizing the short input case. In the past both `sha256` and `keccak256` performed padding within the smart contracts, this is no longer true with `sha256` performing padding in the smart contracts and `keccak256` in the zk-circuits. Hashing is then completed for both within the zk-circuits. It's important to note that the crypto part of the `sha256` precompile expects to work with padded data. This means that a bug in applying padding may lead to an unprovable transaction. -## EcAdd & EcMul +### EcAdd & EcMul These precompiles simulate the behaviour of the EVM's EcAdd and EcMul precompiles and are fully implemented in Yul without circuit counterparts. 
You can read more about them [here](./Elliptic%20curve%20precompiles.md). -## L2BaseToken & MsgValueSimulator +### L2BaseToken & MsgValueSimulator Unlike Ethereum, the zkEVM does not have a notion of a special native token. That’s why we have to simulate operations with the native token (in which fees are charged) via two contracts: `L2BaseToken` & `MsgValueSimulator`. @@ -516,11 +512,11 @@ Whenever anyone wants to do a non-zero value call, they need to call `MsgValueSi More information on the extraAbiParams can be read [here](#flags-for-calls). -### Support for `.send/.transfer` +#### Support for `.send/.transfer` -On Ethereum, whenever a call with non-zero value is done, some additional gas is charged from the caller's frame and in return a `2300` gas stipend is given out to the callee frame. This stipend is usually enough to emit a small event, but it is enforced that it is not possible to change storage within these `2300` gas. This also means that in practice some users might opt to do `call` with 0 gas provided, relying on the `2300` stipend to be passed to the callee. This is the case for `.call/.transfer`. +On Ethereum, whenever a call with non-zero value is done, some additional gas is charged from the caller's frame and in return a `2300` gas stipend is given out to the callee frame. This stipend is usually enough to emit a small event, but it is enforced that it is not possible to change storage within these `2300` gas. This also means that in practice some users might opt to do `call` with 0 gas provided, relying on the `2300` stipend to be passed to the callee. This is the case for `.send/.transfer`. -While using `.send/.transfer` is generally not recommended, as a step towards better EVM compatibility, since vm1.5.0 a *partial* support of these functions is present with zkSync Era. It is the done via the following means: +While using `.send/.transfer` is generally not recommended, as a step towards better EVM compatibility, since vm1.5.0 a _partial_ support of these functions is present with zkSync Era. It is done via the following means: - Whenever a call is done to the `MsgValueSimulator` system contract, `27000` gas is deducted from the caller's frame and it is passed to the `MsgValueSimulator` on top of whatever gas the user has originally provided. The number was chosen to cover the execution of transferring the balances as well as other constant-size operations by the `MsgValueSimulator`. Note that since it will be the frame of `MsgValueSimulator` that will actually call the callee, the constant must also include the cost for decommitting the code of the callee. Decommitting bytecode of any size would be prohibitively expensive, so we support only callees of size up to `100000` bytes. - `MsgValueSimulator` ensures that no more than `2300` out of the stipend above gets to the callee, ensuring the reentrancy protection invariant for these functions holds. @@ -528,18 +524,20 @@ While using `.send/.transfer` is generally not recommended, as a step towards be Note that, unlike EVM, any unused gas from such calls will be refunded. The system preserves the following guarantees about `.send/.transfer`: -- No more than `2300` gas will be received by the callee. Note, [that a smaller, but a close amount](https://github.com/code-423n4/2024-03-zksync/blob/3165e07bab249591404fff36e4802f9921ef168c/code/system-contracts/contracts/test-contracts/TransferTest.sol#L33) may be passed. -- It is not possible to do any storage changes within this stipend.
This is enforced by having cold write cost more than `2300` gas. Also, cold write cost always has to be prepaid whenever executing storage writes. More on it can be read [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/zkSync%20fee%20model.md#io-pricing). + +- No more than `2300` gas will be received by the callee. Note [that a smaller, but close, amount](../../system-contracts/contracts/test-contracts/TransferTest.sol#L33) may be passed. +- It is not possible to do any storage changes within this stipend. This is enforced by having cold writes cost more than `2300` gas. Also, the cold write cost always has to be prepaid whenever executing storage writes. More on it can be read [here](../l2_system_contracts/zksync_fee_model.md#io-pricing). - Any callee with bytecode size of up to `100000` bytes will work. The system does not guarantee the following: -- That callees with bytecode size larger than `100000` will work. Note, that a malicious operator can fail any call to a callee with large bytecode even if it has been decommitted before. More on it can be read [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/zkSync%20fee%20model.md#io-pricing). -As a conclusion, using `.send/.transfer` should be generally avoided, but when avoiding is not possible it should be used with small callees, e.g. EOAs, which implement [DefaultAccount](https://github.com/code-423n4/2024-03-zksync/blob/main/code/system-contracts/contracts/DefaultAccount.sol). +- That callees with bytecode size larger than `100000` will work. Note that a malicious operator can fail any call to a callee with large bytecode even if it has been decommitted before. More on it can be read [here](../l2_system_contracts/zksync_fee_model.md#io-pricing). + +As a conclusion, using `.send/.transfer` should generally be avoided, but when avoiding it is not possible, it should be used with small callees, e.g. EOAs, which implement [DefaultAccount](../../system-contracts/contracts/DefaultAccount.sol). -## KnownCodeStorage +### KnownCodeStorage -This contract is used to store whether a certain code hash is “known”, i.e. can be used to deploy contracts. On zkSync, the L2 stores the contract’s code *hashes* and not the codes themselves. Therefore, it must be part of the protocol to ensure that no contract with unknown bytecode (i.e. hash with an unknown preimage) is ever deployed. +This contract is used to store whether a certain code hash is “known”, i.e. can be used to deploy contracts. On zkSync, the L2 stores the contract’s code _hashes_ and not the codes themselves. Therefore, it must be part of the protocol to ensure that no contract with unknown bytecode (i.e. hash with an unknown preimage) is ever deployed. The factory dependencies field provided by the user for each transaction contains the list of the contract’s bytecode hashes to be marked as known. We cannot simply trust the operator to “know” these bytecode hashes, as the operator might be malicious and hide the preimage. We ensure the availability of the bytecode in the following way: @@ -550,13 +548,13 @@ It is the responsibility of the [ContractDeployer](#contractdeployer--immutables The KnownCodesStorage contract is also responsible for ensuring that all the “known” bytecode hashes are also [valid](#bytecode-validity). -## ContractDeployer & ImmutableSimulator +### ContractDeployer & ImmutableSimulator `ContractDeployer` is a system contract responsible for deploying contracts on zkSync.
It is best understood in the context of how contract deployment works on zkSync. Unlike Ethereum, where `create`/`create2` are opcodes, on zkSync these are implemented by the compiler via calls to the ContractDeployer system contract. For additional security, we also distinguish the deployment of normal contracts and accounts. That’s why the main methods that will be used by the user are `create`, `create2`, `createAccount`, `create2Account`, which simulate the CREATE-like and CREATE2-like behavior for deploying normal and account contracts respectively. -### **Address derivation** +#### **Address derivation** Each rollup that supports L1→L2 communications needs to make sure that the addresses of contracts on L1 and L2 do not overlap during such communication (otherwise it would be possible that some evil proxy on L1 could mutate the state of the L2 contract). Generally, rollups solve this issue in two ways: @@ -567,11 +565,11 @@ You can see the rules for our address derivation in `getNewAddressCreate2`/ `get Note that we still add a certain constant to the addresses during L1→L2 communication in order to allow ourselves some way to support EVM bytecodes in the future. -### **Deployment nonce** +#### **Deployment nonce** On Ethereum, the same nonce is used for CREATE for accounts and EOA wallets. On zkSync this is not the case: we use a separate nonce called “deploymentNonce” to track the nonces for accounts. This was done mostly for consistency with custom accounts and for having a multicall feature in the future. -### **General process of deployment** +#### **General process of deployment** - After incrementing the deployment nonce, the contract deployer must ensure that the bytecode that is being deployed is available. - After that, it puts the bytecode hash with a [special constructing marker](#constructing-vs-non-constructing-code-hash) as code for the address of the to-be-deployed contract. @@ -582,7 +580,7 @@ Note how it is different from the EVM approach: on EVM when the contract is deployed, it executes the initCode and returns the deployedCode. On zkSync, contracts only have the deployed code and can set immutables as storage variables returned by the constructor. -### **Constructor** +#### **Constructor** On Ethereum, the constructor is only part of the initCode that gets executed during the deployment of the contract and returns the deployment code of the contract. On zkSync, there is no separation between deployed code and constructor code. The constructor is always a part of the deployment code of the contract. In order to protect it from being called, the compiler-generated contracts invoke the constructor only if the `isConstructor` flag is provided (it is only available for the system contracts). You can read more about flags [here](#flags-for-calls). After execution, the constructor must return an array of: ```solidity struct ImmutableData { - uint256 index; - bytes32 value; + uint256 index; + bytes32 value; } ``` basically denoting an array of immutables passed to the contract. -### **Immutables** +#### **Immutables** Immutables are stored in the `ImmutableSimulator` system contract. The way the `index` of each immutable is defined is part of the compiler specification. This contract treats it simply as a mapping from index to value for each particular address.
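For illustration, a minimal sketch of such a simulator is shown below. This is hypothetical simplified code, not the actual system contract: the real `ImmutableSimulator` additionally restricts who may set values (only the ContractDeployer, during deployment) and lives at a fixed kernel-space address.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

struct ImmutableData {
    uint256 index;
    bytes32 value;
}

// Simplified sketch: immutables are just a per-contract mapping from the
// compiler-assigned index to the 32-byte value.
contract ImmutableSimulatorSketch {
    mapping(address => mapping(uint256 => bytes32)) internal immutableValues;

    /// @notice Returns the immutable with the given index for the given contract.
    function getImmutable(address _dest, uint256 _index) external view returns (bytes32) {
        return immutableValues[_dest][_index];
    }

    /// @notice Stores the immutables returned by the constructor of `_dest`.
    /// In the real system, only the ContractDeployer may call this.
    function setImmutables(address _dest, ImmutableData[] calldata _immutables) external {
        for (uint256 i = 0; i < _immutables.length; ++i) {
            immutableValues[_dest][_immutables[i].index] = _immutables[i].value;
        }
    }
}
```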
Whenever a contract needs to access the value of some immutable, it calls `ImmutableSimulator.getImmutable(getCodeAddress(), index)`. Note that on zkSync it is possible to get the current execution address (you can read more about `getCodeAddress()` [here](#zksync-specific-opcodes)). -### **Return value of the deployment methods** +#### **Return value of the deployment methods** If the call succeeded, the address of the deployed contract is returned. If the deployment fails, the error bubbles up. -## DefaultAccount +### DefaultAccount The implementation of the default account abstraction. This is the code that is used by default for all addresses that are not in kernel space and have no contract deployed on them. This address: -- Contains minimal implementation of our account abstraction protocol. Note that it supports the [built-in paymaster flows](https://v2-docs.zksync.io/dev/developer-guides/aa.html#paymasters). +- Contains a minimal implementation of our account abstraction protocol. Note that it supports the [built-in paymaster flows](https://docs.zksync.io/build/developer-reference/account-abstraction/paymasters). - When anyone (except bootloader) calls it, it behaves in the same way as a call to an EOA, i.e. it always returns `success = 1, returndatasize = 0` for calls from anyone except for the bootloader. -## L1Messenger +### L1Messenger A contract used for sending arbitrary length L2→L1 messages from zkSync to L1. While zkSync natively supports a rather limited number of L1→L2 logs, which can transfer only roughly 64 bytes of data at a time, we allowed sending nearly-arbitrary length L2→L1 messages with the following trick: The L1 messenger receives a message, hashes it and sends only its hash as well as the original sender via L2→L1 log. Then, it is the duty of the L1 smart contracts to make sure that the operator has provided the full preimage of this hash in the commitment of the batch. -Note, that L1Messenger is calls the L2DAValidator and plays an important role in facilitating the DA validation protocol. (FIXME: provide link) +Note that L1Messenger calls the L2DAValidator and plays an important role in facilitating the [DA validation protocol](../settlement_contracts/data_availability/custom_da.md). -## NonceHolder +### NonceHolder Serves as storage for nonces for our accounts. Besides making it easier for the operator to order transactions (i.e. by reading the current nonces of accounts), it also serves a separate purpose: making sure that the pair (address, nonce) is always unique. @@ -635,13 +633,13 @@ Note that nonces do not necessarily have to be monotonic (this is needed to supp Upon creation, accounts can also specify which type of nonce ordering they want: Sequential (i.e. it should be expected that the nonces grow one by one, just like for an EOA) or Arbitrary (i.e. the nonces may have any values). This ordering is not enforced in any way by system contracts, but it is more of a suggestion to the operator on how it should order the transactions in the mempool. -## EventWriter +### EventWriter A system contract responsible for emitting events. It accepts in its 0-th extra abi data param the number of topics. In the rest of the extraAbiParams it accepts the topics for the event to emit. Note that in reality the first topic of the event contains the address of the account. Generally, the users should not interact with this contract directly, but only through the Solidity syntax of `emit`-ting new events, as illustrated below.
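A hypothetical example (the contract and event names are made up): no special API is needed on the user side, since the compiler lowers a plain Solidity `emit` into a call to the EventWriter system contract.

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

contract Ticker {
    // One indexed parameter: on EVM this event carries two topics (the event
    // signature hash and `price`); on zkSync the emission is routed through
    // the EventWriter system contract behind the scenes.
    event PriceUpdated(uint256 indexed price);

    function update(uint256 newPrice) external {
        // No direct interaction with EventWriter is needed here.
        emit PriceUpdated(newPrice);
    }
}
```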
-## Compressor +### Compressor One of the most expensive resources for a rollup is data availability, so in order to reduce costs for the users we compress the published pubdata in several ways: @@ -651,17 +649,17 @@ One of the most expensive resource for a rollup is data availability, so in orde The contract provides two methods: - `publishCompressedBytecode` that verifies the correctness of the bytecode compression and publishes it in the form of a message to the DA layer. -- `verifyCompressedStateDiffs` that can verify the correctness of our standard state diff compression. This method can be used by common L2DAValidators and it is for instance utilized by the [RollupL2DAValidator](../../l2-contracts/contracts/data-availability/RollupL2DAValidator.sol). +- `verifyCompressedStateDiffs` that can verify the correctness of our standard state diff compression. This method can be used by common L2DAValidators and it is for instance utilized by the [RollupL2DAValidator](../../l2-contracts/contracts/data-availability/RollupL2DAValidator.sol). -(FIXME: provide link to da providers doc) +You can read more about how custom DA is handled [here](../settlement_contracts/data_availability/custom_da.md). -## Pubdata Chunk Publisher +### Pubdata Chunk Publisher -This contract is responsible for separating pubdata into chunks that each fit into a [4844 blob](./Rollup%20DA.md) and calculating the hash of the preimage of said blob. If a chunk's size is less than the total number of bytes for a blob, we pad it on the right with zeroes as the circuits will require that the chunk is of exact size. +This contract is responsible for separating pubdata into chunks that each fit into a [4844 blob](./Rollup%20DA.md) and calculating the hash of the preimage of said blob. If a chunk's size is less than the total number of bytes for a blob, we pad it on the right with zeroes, as the circuits require that the chunk is of exact size. This contract can be utilized by L2DAValidators, e.g. [RollupL2DAValidator](../../l2-contracts/contracts/data-availability/RollupL2DAValidator.sol) uses it to compress the pubdata into blobs. -## CodeOracle +### CodeOracle It is a contract that accepts the versioned hash of a bytecode and returns the preimage of it. It is similar to the `extcodecopy` functionality on Ethereum. @@ -670,61 +668,61 @@ It works the following way: 1. It accepts a versioned hash and double checks that it is marked as “known”, i.e. the operator must know the preimage for such a hash. 2. After that, it uses the `decommit` opcode, which accepts the versioned hash and the number of ergs to spend, which is proportional to the length of the preimage. If the preimage has been decommitted before, the requested cost will be refunded to the user. -Note, that the decommitment process does not only happen using the `decommit` opcode, but during calls to contracts. Whenever a contract is called, its code is decommitted into a memory page dedicated to contract code. We never decommit the same preimage twice, regardless of whether it was decommitted via an explicit opcode or during a call to another contract, the previous unpacked bytecode memory page will be reused. When executing `decommit` inside the `CodeOracle` contract, the user will be firstly precharged with maximal possilbe price and then it will be refunded in case the bytecode has been decommitted before. + Note that the decommitment process does not happen only via the `decommit` opcode, but also during calls to contracts.
Whenever a contract is called, its code is decommitted into a memory page dedicated to contract code. We never decommit the same preimage twice; regardless of whether it was decommitted via an explicit opcode or during a call to another contract, the previously unpacked bytecode memory page will be reused. When executing `decommit` inside the `CodeOracle` contract, the user will first be precharged with the maximal possible price and then refunded in case the bytecode has been decommitted before. 3. The `decommit` opcode returns a pointer to the slice of the decommitted bytecode. Note that the returned pointer always has a length of 2^21 bytes, regardless of the length of the actual bytecode. So it is the job of the `CodeOracle` system contract to shrink the length of the returned data. -## P256Verify +### P256Verify This contract exhibits the same behavior as the P256Verify precompile from [RIP-7212](https://github.com/ethereum/RIPs/blob/master/RIPS/rip-7212.md). Note that since Era has a different gas schedule, we do not comply with the gas costs, but otherwise the interface is identical. -## GasBoundCaller +### GasBoundCaller This is not a system contract, but it will be predeployed on a fixed user space address. This contract allows users to set an upper bound of how much pubdata a subcall can take, regardless of the gas per pubdata. More on how pubdata works on zkSync can be read [here](./zkSync%20fee%20model.md). Note that it is a deliberate decision not to deploy this contract in the kernel space, since it can relay calls to any contracts and so may break the assumption that all system contracts can be trusted. -## ComplexUpgrader +### ComplexUpgrader -Usually an upgrade is performed by calling the `forceDeployOnAddresses` function of ContractDeployer out of the name of the `FORCE_DEPLOYER` constant address. However some upgrades may require more complex iteractions, e.g. query something from a contract to determine which calls to make etc. +Usually an upgrade is performed by calling the `forceDeployOnAddresses` function of ContractDeployer in the name of the `FORCE_DEPLOYER` constant address. However, some upgrades may require more complex interactions, e.g. querying something from a contract to determine which calls to make, etc. For cases like this, the `ComplexUpgrader` contract has been created. The assumption is that the implementation of the upgrade is predeployed and the `ComplexUpgrader` would delegatecall to it. -> Note, that while `ComplexUpgrader` existed even in the previous upgrade, it lacked `forceDeployAndUpgrade` function. This caused some serious limitations. More on that can be read here (FIXME: link to the upgrade process). +> Note that while `ComplexUpgrader` existed even in the previous upgrade, it lacked the `forceDeployAndUpgrade` function. This caused some serious limitations. More on how the gateway upgrade process will look can be read [here](../upgrade_history/gateway_upgrade/upgrade_process.md). -# Predeployed contracts +## Predeployed contracts -There are some contracts need to predeployed, but having kernel space rights is not desirable for them. Such contracts are usuall predeployed at sequential addresses starting from `2^16`. +There are some contracts that need to be predeployed, but for which having kernel space rights is not desirable. Such contracts are usually predeployed at sequential addresses starting from `2^16`. -## Create2Factory +### Create2Factory Just a built-in Create2Factory.
It allows deterministically deploying contracts to the same address on multiple chains. -## L2GenesisUpgrade +### L2GenesisUpgrade -A contract that is responsible for facilitating initialization of a newly created chain. This is part of a chain creation flow (TODO link). +A contract that is responsible for facilitating the initialization of a newly created chain. This is part of a [chain creation flow](../chain_management/chain_genesis.md). -## Bridging-related contracts +### Bridging-related contracts `L2Bridgehub`, `L2AssetRouter`, `L2NativeTokenVault`, as well as `L2MessageRoot`. -These contracts are used to facilitate cross-chain communication as well value bridging. You can read more about then in the custom asset bridging spec (FIXME link). +These contracts are used to facilitate cross-chain communication as well as value bridging. You can read more about them in [the asset router spec](../bridging/asset_router/Overview.md). Note that [L2AssetRouter](../../l1-contracts/contracts/bridge/asset-router/L2AssetRouter.sol) and [L2NativeTokenVault](../../l1-contracts/contracts/bridge/ntv/L2NativeTokenVault.sol) have unique code, while the L2Bridgehub and L2MessageRoot share the same source code with their L1 counterparts, i.e. the L2Bridgehub has [this](../../l1-contracts/contracts/bridgehub/Bridgehub.sol) code and L2MessageRoot has [this](../../l1-contracts/contracts/bridgehub/MessageRoot.sol) code. -## SloadContract +### SloadContract During the L2GatewayUpgrade, the system contracts will need to read the storage of some other contracts, despite those lacking getters. How it is implemented can be seen in the `forcedSload` function of the [SystemContractHelper](../../system-contracts/contracts/libraries/SystemContractHelper.sol) contract. While it is only used for the upgrade, it was decided to leave it as a predeployed contract for future use-cases as well. -## L2WrappedBaseTokenImplementation +### L2WrappedBaseTokenImplementation -While bridging wrapped base tokens (e.g. WETH) is not yet supported. The address of it is enshrined within the native token vault (both the L1 and L2 one). For consistency with other networks, our WETH token is deployed as a TransparentUpgradeableProxy. To have the deployment process easier, we predeploy the implementation. +While bridging wrapped base tokens (e.g. WETH) is not yet supported, the address of the token is enshrined within the native token vault (both the L1 and the L2 one). For consistency with other networks, our WETH token is deployed as a TransparentUpgradeableProxy. To make the deployment process easier, we predeploy the implementation. -# Known issues to be resolved +## Known issues to be resolved The protocol, while conceptually complete, contains some known issues which will be resolved in the short to medium term. -- Fee modeling is yet to be improved. More on it in the [document](./zkSync%20fee%20model.md) on the fee model. +- Fee modeling is yet to be improved. More on it in the [document](./zksync_fee_model.md) on the fee model. - We may add some kind of default implementation for the contracts in the kernel space (i.e. if called, they wouldn’t revert but behave like an EOA).
diff --git a/docs/l2_system_contracts/zksync_fee_model.md b/docs/l2_system_contracts/zksync_fee_model.md index 1868e9ffb..ffb039146 100644 --- a/docs/l2_system_contracts/zksync_fee_model.md +++ b/docs/l2_system_contracts/zksync_fee_model.md @@ -1,27 +1,28 @@ # zkSync fee model + [back to readme](../README.md) This document will assume that you already know how gas & fees work on Ethereum. On Ethereum, all the computational, as well as storage costs, are represented via one unit: gas. Each operation costs a certain amount of gas, which is generally constant (though it may change during [upgrades](https://blog.ethereum.org/2021/03/08/ethereum-berlin-upgrade-announcement)). -# Main differences from EVM +## Main differences from EVM zkSync, as well as other L2s, cannot adopt the Ethereum model as-is: the main reason is the requirement to publish the pubdata on Ethereum. This means that prices for L2 transactions will depend on the volatile L1 gas prices and cannot simply be hard coded. Also, zkSync, being a zkRollup, is required to prove every operation with zero-knowledge proofs. That comes with a few nuances. -## Different opcode pricing +### Different opcode pricing The operations tend to have different “complexity”/”pricing” in zero-knowledge proof terms than in standard CPU terms. For instance, `keccak256`, which was optimized for CPU performance, will cost more to prove. That’s why you will find the prices for operations on zkSync quite different from the ones on Ethereum. -## I/O pricing +### I/O pricing On Ethereum, whenever a storage slot is read/written to for the first time, a certain amount of gas is charged for the fact that the slot has been accessed for the first time. A similar mechanism is used for accounts: whenever an account is accessed for the first time, a certain amount of gas is charged for reading the account's data. On EVM, an account's data includes its nonce, balance, and code. We use a similar mechanism but with a few differences. -### Storage costs +#### Storage costs Just like EVM, we also support "warm" and "cold" storage slots. However, the flow is a bit different: @@ -31,51 +32,52 @@ Just like EVM, we also support "warm" and "cold" storage slots. However, the flo In other words, unlike EVM, the user should always have enough gas for the worst case (even if the storage slot is "warm"). Also, the control of the refunds is currently enforced by the operator only and not by the circuits. -### Code decommitment and account access costs +#### Code decommitment and account access costs Unlike EVM, our storage does not couple accounts' balances, nonces, and bytecodes. Balance, nonce, and code hash are three separate storage variables that use standard storage "warm" and "cold" mechanisms. A different approach is used for accessing bytecodes though. -We call the process of unpacking the bytecode as, *code decommitment*, since it is a process of transforming a commitment to code (i.e., the versioned code hash) into its preimage. Whenever a contract with a certain code hash is called, the following logic is executed: +We call the process of unpacking the bytecode _code decommitment_, since it is a process of transforming a commitment to code (i.e., the versioned code hash) into its preimage. Whenever a contract with a certain code hash is called, the following logic is executed: 1. The operator is asked whether this is the first time this bytecode has been decommitted. 2.
If the operator returns "yes", then the user is charged the full cost. Otherwise, the user does not pay for the decommit. 3. If needed, the code is decommitted to the code page. -Unlike storage interactions, the correctness of this process is *partially* enforced by circuits, i.e., if step (3) is reached, i.e., the code is being decommitted, it will be proven that the operator responded correctly on step (1). However, if the program runs out of gas on step (2), the correctness of the first statement won't be proven. The reason for that is it is hard to prove in circuits at the time the decommitment is invoked whether it is indeed the first decommitment or not. +Unlike storage interactions, the correctness of this process is _partially_ enforced by circuits: if step (3) is reached (i.e., the code is being decommitted), it will be proven that the operator responded correctly in step (1). However, if the program runs out of gas in step (2), the correctness of the first statement won't be proven. The reason is that it is hard to prove in circuits, at the time the decommitment is invoked, whether it is indeed the first decommitment or not. Note that in the case of an honest operator, this approach offers a better UX, since there is no need to be precharged with the full cost beforehand. However, no program should rely on this fact. -### Conclusion +#### Conclusion As a conclusion, zkSync Era supports a similar "cold"/"warm" mechanism to EVM, but for now, these are only enforced by the operator, i.e., the users of the applications should not rely on these. The execution is guaranteed to be correct as long as the user has enough gas to pay for the worst, i.e. "cold", scenario. -## Memory pricing +### Memory pricing zkSync Era has different memory pricing rules: - Whenever a user contract is called, `2^12` bytes of memory are given out for free, before starting to charge users linearly according to its length. - Whenever a kernel space (i.e., a system) contract is called, `2^21` bytes of memory are given out for free, before starting to charge users linearly according to the length. -Note that, unlike EVM, we never use a quadratic component of the price for memory expansion. + Note that, unlike EVM, we never use a quadratic component of the price for memory expansion. -## Different intrinsic costs +### Different intrinsic costs -Unlike Ethereum, where the intrinsic cost of transactions (`21000` gas) is used to cover the price of updating the balances of the users, the nonce and signature verification, on zkSync these prices are *not* included in the intrinsic costs for transactions, due to the native support of account abstraction, meaning that each account type may have their own transaction cost. In theory, some may even use more zk-friendly signature schemes or other kinds of optimizations to allow cheaper transactions for their users. +Unlike Ethereum, where the intrinsic cost of transactions (`21000` gas) is used to cover the price of updating the users' balances, the nonce, and signature verification, on zkSync these prices are _not_ included in the intrinsic costs for transactions, due to the native support of account abstraction, meaning that each account type may have its own transaction cost. In theory, some may even use more zk-friendly signature schemes or other kinds of optimizations to allow cheaper transactions for their users.
That being said, zkSync transactions do come with some small intrinsic costs, but they are mostly used to cover costs related to the processing of the transaction by the bootloader, which cannot be easily measured in code in real time. These are measured via testing and are hard coded. -## Charging for pubdata +### Charging for pubdata + +An important cost factor for users is the pubdata. zkSync Era is a state diff-based rollup, meaning that the pubdata is published not for the transaction data, but for the state changes: modified storage slots, deployed bytecodes, L2->L1 messages. This allows applications that modify the same storage slot multiple times, such as oracles, to update the storage slots multiple times while maintaining a constant footprint on L1 pubdata. Correctly charging for pubdata in a state diff rollup requires a special solution; it is explored in the next section. -An important cost factor for users is the pubdata. zkSync Era is a state diff-based rollup, meaning that the pubdata is published not for the transaction data, but for the state changes: modified storage slots, deployed bytecodes, L2->L1 messages. This allows for applications that modify the same storage slot multiple times such as oracles, to update the storage slots multiple times while maintaining a constant footprint on L1 pubdata. Correctly a state diff rollups requires a special solution to charging for pubdata. It is explored in the next section. -# How L2 gas price works +## How L2 gas price works -## Batch overhead & limited resources of the batch +### Batch overhead & limited resources of the batch + To process the batch, the zkSync team has to pay for proving the batch, committing to it, etc. Processing a batch involves some operational costs as well. All of these values together we call the “batch overhead”. It consists of two parts: - The L2 requirements for proving the circuits (denoted in L2 gas). - The L1 requirements for the proof verification as well as general batch processing (denoted in L1 gas). -We generally try to aggregate as many transactions as possible and each transaction pays for the batch overhead proportionally to how close the transaction brings the batch to being *sealed,* i.e. closed and prepared for proof verification and submission on L1. A transaction gets closer to sealing a batch by using the batch’s *limited resources*. +We generally try to aggregate as many transactions as possible and each transaction pays for the batch overhead proportionally to how close the transaction brings the batch to being _sealed_, i.e. closed and prepared for proof verification and submission on L1. A transaction gets closer to sealing a batch by using the batch’s _limited resources_. While on Ethereum, the main reason for the existence of a batch gas limit is to keep the system decentralized & the load low, i.e. assuming the existence of the correct hardware, time would be the only constraint for a batch. In the case of zkSync batches, there are some limited resources the batch should manage: @@ -88,11 +90,11 @@ Each transaction spends the batch overhead proportionally to how closely it cons Note that before the transaction is executed, the system cannot know how many of the limited system resources the transaction will take, so we need to charge for the worst case and provide the refund at the end of the transaction.
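To make the proportional charging concrete, here is a minimal sketch with hypothetical names and only two resources; the real bootloader tracks more resources and uses its own constants, so this is illustrative only:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

library BatchOverheadSketch {
    /// @notice Illustrative only: a transaction's share of the batch overhead
    /// is proportional to the fraction of each limited batch resource it
    /// consumes; the binding resource (the one bringing the batch closest to
    /// being sealed) determines the charge.
    function overheadShare(
        uint256 totalOverhead,   // full batch overhead, denominated in L2 gas
        uint256 gasUsed,         // computation consumed by the transaction
        uint256 batchGasLimit,   // computation budget of the whole batch
        uint256 slotsUsed,       // transaction slots consumed (1 per tx)
        uint256 batchSlotsLimit  // maximal number of transactions per batch
    ) internal pure returns (uint256) {
        // ceilDiv(totalOverhead * used, limit) for every resource...
        uint256 byGas = (totalOverhead * gasUsed + batchGasLimit - 1) / batchGasLimit;
        uint256 bySlots = (totalOverhead * slotsUsed + batchSlotsLimit - 1) / batchSlotsLimit;
        // ...and the maximum across resources is what the transaction pays.
        return byGas > bySlots ? byGas : bySlots;
    }
}
```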
-## `MAX_TRANSACTION_GAS_LIMIT` +### `MAX_TRANSACTION_GAS_LIMIT` -A recommended maximal amount of gas that a transaction can spend on computation is `MAX_TRANSACTION_GAS_LIMIT`. But in case the operator trusts the user, the operator may provide the [trusted gas limit](https://github.com/code-423n4/2024-03-zksync/blob/e8527cab32c9fe2e1be70e414d7c73a20d357550/code/system-contracts/bootloader/bootloader.yul#L1236), i.e. the limit which exceeds `MAX_TRANSACTION_GAS_LIMIT` assuming that the operator knows what he is doing. This can be helpful in the case of a hyperchain with different parameters. +A recommended maximal amount of gas that a transaction can spend on computation is `MAX_TRANSACTION_GAS_LIMIT`. But in case the operator trusts the user, the operator may provide the [trusted gas limit](../../system-contracts/bootloader/bootloader.yul#L1242), i.e. a limit which exceeds `MAX_TRANSACTION_GAS_LIMIT`, assuming that the operator knows what they are doing. This can be helpful in the case of a hyperchain with different parameters. -## Derivation of `baseFee` and `gasPerPubdata` +### Derivation of `baseFee` and `gasPerPubdata` At the start of each batch, the operator provides the following two parameters: @@ -113,9 +115,9 @@ gasPerPubdata := ceilDiv(pubdataPrice, baseFee) While the way we [charge for pubdata](#how-we-charge-for-pubdata) in theory allows for any `gasPerPubdata`, some SDKs expect the `gasLimit` of a transaction to be a uint64 number. We would prefer `gasLimit` for transactions to stay within JS's safe "number" range in case someone uses `number` type to denote gas there. For this reason, we will bind the `MAX_L2_GAS_PER_PUBDATA` to `2^20` gas per 1 pubdata byte. The number is chosen such that `MAX_L2_GAS_PER_PUBDATA * 2^32` is a safe JS integer. The `2^32` part is the maximal possible value of the pubdata counter that could in theory be used. It is unrealistic that this value will ever appear under an honest operator, but it is needed just in case. -Note, however, that it means that the total under high L1 gas prices `gasLimit` may be larger than `u32::MAX` and it is recommended that no more than `2^20` bytes of pubdata can be published within a transaction. +Note, however, that this means that under high L1 gas prices the total `gasLimit` may be larger than `u32::MAX`, and it is recommended that no more than `2^20` bytes of pubdata be published within a transaction. -### Recommended calculation of `FAIR_L2_GAS_PRICE`/`FAIR_PUBDATA_PRICE` +#### Recommended calculation of `FAIR_L2_GAS_PRICE`/`FAIR_PUBDATA_PRICE` Let's define the following constants: @@ -137,7 +139,7 @@ Then: For L1→L2 transactions, the `MAX_GAS_PER_BATCH` variable is equal to `L2_TX_MAX_GAS_LIMIT` (since this amount of gas is enough to publish the maximal amount of pubdata in the batch). Also, for additional security, for L1->L2 transactions the `COMPUTE_OVERHEAD_PART = PUBDATA_OVERHEAD_PART = 1`, since we are not sure what exactly will be the reason for closing the batch. For L2 transactions, typically `COMPUTE_OVERHEAD_PART = 0`, since, unlike L1→L2 transactions, in case of an attack, the operator can simply censor bad transactions or increase the `FAIR_L2_GAS_PRICE` and so the operator can use average values for better UX. -### Note on operator’s responsibility +#### Note on operator’s responsibility To reiterate, the formulas above are used for L1→L2 transactions on L1 to protect the operator from malicious transactions.
However, for L2 transactions, it is solely the responsibility of the operator to provide the correct values. It is designed this way for more fine-grained control over the system for the zkStack operators (including Validiums, maybe Era on top of another L1, etc). @@ -145,9 +147,9 @@ This fee model also provides a very high degree of flexibility to the operator & In the long run, the consensus will ensure the correctness of these values on the main zkSync Era (or maybe we’ll transition to a different system). -### Overhead for transaction slot and memory +#### Overhead for transaction slot and memory -We also have a limit on the number of memory that can be consumed within a batch as well as the number of transactions that can be included there. +We also have a limit on the amount of memory that can be consumed within a batch, as well as on the number of transactions that can be included there. To simplify the codebase we've chosen the following constants: @@ -163,15 +165,15 @@ We've used roughly the following formulae to derive these values: Future work will focus on removing the limit on the number of transaction slots completely as well as increasing the memory limit. -### Note on L1→L2 transactions +#### Note on L1→L2 transactions The formulas above apply to L1→L2 transactions. However, note that the `gas_per_pubdata` is still kept constant at `800`. This means that a higher `baseFee` could be used for L1->L2 transactions to ensure that `gas_per_pubdata` remains at that value regardless of the price of the pubdata. -### Refunds +#### Refunds -Note, that the used constants for the fee model are probabilistic, i.e. we never know in advance the exact reason why a batch is going to be sealed. These constants are meant to cover the expenses of the operator over a longer period so we do not refund the fact that the transaction might've been charged for overhead above the level at which the transaction has brought the batch to being closed, since these funds are used to cover transactions that did not pay in full for the limited batch's resources that they used. +Note that the constants used for the fee model are probabilistic, i.e. we never know in advance the exact reason why a batch is going to be sealed. These constants are meant to cover the expenses of the operator over a longer period, so we do not refund the overhead a transaction might've been charged above the level to which it actually brought the batch toward being closed, since these funds are used to cover transactions that did not pay in full for the limited batch resources that they used. -### Refunds for repeated writes +#### Refunds for repeated writes zkSync Era is a state diff-based rollup, i.e. the pubdata is published not for transactions, but for storage changes. This means that whenever a user writes into a storage slot, it incurs a certain amount of pubdata. However, not all writes are equal: @@ -179,20 +181,21 @@ zkSync Era is a state diff-based rollup, i.e. the pubdata is published not for t - Depending on the `value` written into a slot, various compression optimizations could be used and so we should reflect that too. - Maybe the slot has already been written to in this batch, so we don’t have to charge anything for it (see the sketch below).
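For example (an illustrative contract, not part of the system), an oracle that overwrites one slot many times within a batch produces a single state diff and thus a constant pubdata footprint:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

// Illustrative only: under the state diff model, only the final value of
// `price` for the batch reaches L1, so many pushes within one batch cost
// the same pubdata as a single push (the repeated writes are refunded).
contract PushOracle {
    uint256 public price;

    function push(uint256 newPrice) external {
        price = newPrice; // repeated writes to one slot => one state diff per batch
    }
}
```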
You can read more about how we treat the pubdata [here](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/Handling%20pubdata.md). +You can read more about how we treat the pubdata [here](../settlement_contracts/data_availability/standard_pubdata_format.md). The important part here is that while such refunds are inlined (i.e. unlike the refunds for overhead they happen in place during execution and not after the whole transaction has been processed), they are enforced by the operator. Right now, the operator is the one who decides what refund to provide. -# How we charge for pubdata +## How we charge for pubdata -zkSync Era is a state diff-based rollup. It means that it is not possible to know how much pubdata a transaction will take before its execution. We *could* charge for pubdata the following way: whenever a user does an operation that emits pubdata (writes to storage, publishes an L2->L1 message, etc.), we charge `pubdata_bytes_published * gas_per_pubdata` directly from the context of the execution. +zkSync Era is a state diff-based rollup. It means that it is not possible to know how much pubdata a transaction will take before its execution. We _could_ charge for pubdata the following way: whenever a user does an operation that emits pubdata (writes to storage, publishes an L2->L1 message, etc.), we charge `pubdata_bytes_published * gas_per_pubdata` directly from the context of the execution. However, such an approach has the following disadvantages: + - This would inherently make execution very divergent from EVM. - It is prone to unneeded overhead. For instance, in the case of reentrancy locks, the user will still have to pay the initial price for marking the lock as used. The price will get refunded in the end, but it still worsens the UX. - If we want to impose any sort of limit on how much computation a transaction could take (let's call this limit `MAX_TX_GAS_LIMIT`), it would mean that no more than `MAX_TX_GAS_LIMIT / gas_per_pubdata` could be published in a transaction, making this limit either too small or forcing us to increase `baseFee` to prevent the number from growing too much. -To avoid the issues above we need to somehow decouple the gas spent on pubdata from the gas spent on execution. While calldata-based rollups precharge for calldata, we cannot do it, since the exact state diffs are known only after the transaction is finished. We'll use the approach of *post-charging.* Basically, we'll keep a counter that tracks how much pubdata has been spent and charge the user for the calldata at the end of the transaction. +To avoid the issues above we need to somehow decouple the gas spent on pubdata from the gas spent on execution. While calldata-based rollups precharge for calldata, we cannot do it, since the exact state diffs are known only after the transaction is finished. We'll use the approach of _post-charging_. Basically, we'll keep a counter that tracks how much pubdata has been spent and charge the user for the pubdata at the end of the transaction. A problem with post-charging is that the user may spend all their gas within the transaction so we'll have no gas to charge for pubdata from. Note, however, that if the transaction is reverted, all the state changes that were related to it will be reverted too. That's why whenever we need to charge the user for pubdata, but they don't have enough gas, the transaction will get reverted. The user will pay for the computation, but no state changes (and thus, pubdata) will be produced by the transaction. @@ -200,7 +203,7 @@ So it will work the following way: 1. Firstly, we fix the amount of pubdata published so far.
Let's denote it as `basePubdataSpent`. 2. We execute the validation of the transaction. -3. We check whether `(getPubdataSpent() - basePubdataSpent) * gasPerPubdata <= gasLeftAfterValidation`. If it is not, then the transaction does not cover enough funds for itself, so it should be *rejected* (unlike revert, which means that the transaction is not even included in the block). +3. We check whether `(getPubdataSpent() - basePubdataSpent) * gasPerPubdata <= gasLeftAfterValidation`. If it is not, then the transaction does not cover enough funds for itself, so it should be _rejected_, which, unlike a revert, means that the transaction is not even included in the block. 4. We execute the transaction itself. 5. We do the same check as in (3), but now if the transaction does not have enough gas for pubdata, it is reverted, i.e., the user still pays the fee to cover the computation for its transaction. 6. (optional, in case a paymaster is used). We repeat steps (4-5), but now for the `postTransaction` method of the paymaster. @@ -213,17 +216,17 @@ On the internal level, the pubdata counter is modified in the following way: The approach with post-charging removes the unneeded overhead and decouples the gas used for the execution from the gas used for data availability, which removes any caps on `gasPerPubdata`. -## Security considerations for protocol +### Security considerations for protocol Now it has become easier for a transaction to use up more pubdata than what can be published within a batch. In such a case, we'll revert the transaction as well. -## Security considerations for users +### Security considerations for users -The approach with post-charging introduces one distinctive feature: it is not trivial to know the final price for a transaction at the time of its execution. When a user does `.call{gas: some_gas}` the final impact on the price of the transaction may be higher than `some_gas` since the pubdata counter will be incremented during the execution and charged only at the end of the transaction. +The approach with post-charging introduces one distinctive feature: it is not trivial to know the final price for a transaction at the time of its execution. When a user does `.call{gas: some_gas}`, the final impact on the price of the transaction may be higher than `some_gas`, since the pubdata counter will be incremented during the execution and charged only at the end of the transaction. While for the average user this limitation is not relevant, some specific applications may face certain issues. -### Example for a queue of withdrawals +#### Example for a queue of withdrawals Imagine that there is the following contract: @@ -239,10 +242,10 @@ uint256 lastProcessed; function processNWithdrawals(uint256 N) external nonReentrant { uint256 current = lastProcessed + 1; - uint256 lastToProcess = current + N - 1; - + uint256 lastToProcess = current + N - 1; + while(current <= lastToProcess) { - // If the user provided some bad token that takes more than MAX_WITHDRAWAL_GAS + // If the user provided some bad token that takes more than MAX_WITHDRAWAL_GAS // to transfer, it is the problem of the user and it will stall the queue, so // the `_success` value is ignored. Withdrawal storage currentQueue = queue[current]; @@ -253,55 +256,54 @@ function processNWithdrawals(uint256 N) external nonReentrant { } ``` -The contract above supports a queue of withdrawals. This queue supports any type of token, including potentially malicious ones.
However, the queue will never get stuck, since the `MAX_WITHDRAWAL_GAS` ensures that even if the malicious token does a lot of computation, it will be bound by this number and so the caller of the `processNWithdrawals` won't spend more than `MAX_WITHDRAWAL_GAS` per token. +The contract above supports a queue of withdrawals. This queue supports any type of token, including potentially malicious ones. However, the queue will never get stuck, since the `MAX_WITHDRAWAL_GAS` ensures that even if the malicious token does a lot of computation, it will be bound by this number and so the caller of the `processNWithdrawals` won't spend more than `MAX_WITHDRAWAL_GAS` per token. -The above assumptions work in the pre-charge model (calldata based rollups) or pay-as-you-go model (pre-1.5.0 Era). However, in the post-charge model, the `MAX_WITHDRAWAL_GAS`` limits the amount of computation that can be done within the transaction, but it does not limit the amount of pubdata that can be published. Thus, if such a function publishes a very large L1→L2 message, it might make the entire top transaction fail. This effectively means that such a queue would be stalled. +The above assumptions work in the pre-charge model (calldata based rollups) or pay-as-you-go model (pre-1.5.0 Era). However, in the post-charge model, the `MAX_WITHDRAWAL_GAS` limits the amount of computation that can be done within the transaction, but it does not limit the amount of pubdata that can be published. Thus, if such a function publishes a very large L2→L1 message, it might make the entire top transaction fail. This effectively means that such a queue would be stalled. -### How to prevent this issue on the users' side +#### How to prevent this issue on the users' side If a user really needs to limit the amount of gas that the subcall takes, all the subcalls should be routed through a special contract that will guarantee that the total cost of the subcall won't be larger than the gas provided (by reverting if needed). -An implementation of this special contract can be seen [here](https://github.com/code-423n4/2024-03-zksync/blob/main/code/system-contracts/contracts/GasBoundCaller.sol). Note, that this contract is *not* a system one and it will be deployed on some fixed, but not kernel space address. +An implementation of this special contract can be seen [here](../../gas-bound-caller/contracts/GasBoundCaller.sol). Note that this contract is _not_ a system one and it will be deployed on some fixed, but not kernel space, address. -### 1. Case of when a malicious contract consumes a large, but processable amount of pubdata** +#### 1. Case when a malicious contract consumes a large but processable amount of pubdata In this case, the topmost transaction will be able to sponsor such subcalls. When a transaction is processed, at most 80M gas is allowed to be passed to the execution. The rest can only be spent on pubdata during the post-charging. -### 2. Case of when a malicious contract consumes an unprocessable amount of pubdata** +#### 2. Case when a malicious contract consumes an unprocessable amount of pubdata -In this case, the malicious callee published so much pubdata, that such a transaction can not be included into a batch. This effectively means that no matter how much money the topmost transaction willing to pay, the queue is stalled. +In this case, the malicious callee published so much pubdata that such a transaction cannot be included in a batch.
This effectively means that no matter how much money the topmost transaction is willing to pay, the queue is stalled. -The only way how it is combated is by setting some minimal amount of ergs that still have to be consumed with each emission of pubdata (basically to make sure that it is not possible to publish large chunks of pubdata while using negligible computation). Unfortunately, setting this minimal amount to cover the worst possible case (i.e. 80M ergs spent with maximally 100k of pubdata available, leading to 800 L2 gas / pubdata byte) would likely be too harsh and will negatively impact average UX. Overall, this *is* the way to go, however for now the only guarantee will be that a subcall of 1M gas is always processable, which will mean that at least 80 gas will have to be spent for each published pubdata byte. Even if higher than real L1 gas costs, it is reasonable even in the long run, since all the things that are published as pubdata are state-related and so they have to be well-priced for long-term storage. +The only way to combat this is by setting some minimal amount of ergs that still has to be consumed with each emission of pubdata (basically to make sure that it is not possible to publish large chunks of pubdata while using negligible computation). Unfortunately, setting this minimal amount to cover the worst possible case (i.e. 80M ergs spent with maximally 100k of pubdata available, leading to 800 L2 gas / pubdata byte) would likely be too harsh and would negatively impact the average UX. Overall, this _is_ the way to go; however, for now the only guarantee is that a subcall of 1M gas is always processable, which means that at least 80 gas will have to be spent for each published pubdata byte. Even if this is higher than the real L1 gas costs, it is reasonable even in the long run, since all the things that are published as pubdata are state-related and so they have to be well-priced for long-term storage. In the future, we will guarantee the processability of subcalls of larger size by increasing the amount of pubdata that can be published per batch. -## Limiting the `gas_per_pubdata` +### Limiting the `gas_per_pubdata` As already mentioned, the transactions on zkSync depend on volatile L1 gas costs to publish the pubdata for the batch, verify proofs, etc. For this reason, zkSync-specific EIP712 transactions contain the `gas_per_pubdata_limit` field, denoting the maximum `gas_per_pubdata` that the operator can charge the user for a single byte of pubdata. For Ethereum transactions (which do not contain this field), the block's `gas_per_pubdata` is used. - -# Improvements in the upcoming releases +## Improvements in the upcoming releases The fee model explained above, while fully functional, has some known issues. These will be tackled with the following upgrades. -## L1->L2 transactions do not pay for their execution on L1 +### L1->L2 transactions do not pay for their execution on L1 The `executeBatches` operation on L1 is executed in `O(N)` where N is the number of priority ops that we have in the batch. Each executed priority operation will be popped and so it incurs costs for storage modifications. As of now, we do not charge for it.
The `constant overhead` cost is evenly distributed among L2 transactions in the L1 commit transaction, but only at higher transaction loads. As for the `price of pubdata`, it is known how much pubdata each L2 transaction consumed, therefore, they are charged directly for that. Multiple L1 batches can be included in a single commit transaction. - - L1 Prove Batches - - Once the off-chain proof is generated, it is submitted to L1 to make the rollup batch final. Currently, each proof contains only one L1 batch. - - L1 Execute Batches - - The execute batches transaction processes L2 -> L1 messages and marks executed priority operations as such. Multiple L1 batches can be included in a single execute transaction. - - L1 Finalize Withdrawals - - While not strictly part of the L1 fees, the cost to finalize L2 → L1 withdrawals are covered by Matter Labs. The finalize withdrawals transaction processes user token withdrawals from zkSync Era to Ethereum. Multiple L2 withdrawal transactions are included in each finalize withdrawal transaction. + - L1 Commit Batches + - The commit batch transaction submits pubdata (which is the list of updated storage slots) to L1. The cost of a commit transaction is calculated as `constant overhead + price of pubdata`. The `constant overhead` cost is evenly distributed among L2 transactions in the L1 commit transaction, but only at higher transaction loads. As for the `price of pubdata`, it is known how much pubdata each L2 transaction consumed; therefore, each is charged directly for that. Multiple L1 batches can be included in a single commit transaction. + - L1 Prove Batches + - Once the off-chain proof is generated, it is submitted to L1 to make the rollup batch final. Currently, each proof contains only one L1 batch. + - L1 Execute Batches + - The execute batches transaction processes L2 -> L1 messages and marks executed priority operations as such. Multiple L1 batches can be included in a single execute transaction. + - L1 Finalize Withdrawals + - While not strictly part of the L1 fees, the cost to finalize L2 → L1 withdrawals is covered by Matter Labs. The finalize withdrawals transaction processes user token withdrawals from zkSync Era to Ethereum. Multiple L2 withdrawal transactions are included in each finalize withdrawal transaction. - On-Chain L2 Revenue - - L2 Transaction Fee - - This fee is what the user pays to complete a transaction on zkSync Era. It is calculated as `gasLimit x baseFeePerGas - refundedGas x baseFeePerGas`, or more simply, `gasUsed x baseFeePerGas`. + - L2 Transaction Fee + - This fee is what the user pays to complete a transaction on zkSync Era. It is calculated as `gasLimit x baseFeePerGas - refundedGas x baseFeePerGas`, or more simply, `gasUsed x baseFeePerGas`.
- Profit = L2 Revenue - L1 Costs - Off-Chain Infrastructure Costs diff --git a/docs/settlement_contracts/data_availability/custom_da.md b/docs/settlement_contracts/data_availability/custom_da.md index d29d11dbc..493453e06 100644 --- a/docs/settlement_contracts/data_availability/custom_da.md +++ b/docs/settlement_contracts/data_availability/custom_da.md @@ -1,4 +1,5 @@ # Custom DA support + [back to readme](../../README.md) ## Intro diff --git a/docs/settlement_contracts/data_availability/rollup_da.md b/docs/settlement_contracts/data_availability/rollup_da.md index d99198283..03d9ba7ac 100644 --- a/docs/settlement_contracts/data_availability/rollup_da.md +++ b/docs/settlement_contracts/data_availability/rollup_da.md @@ -1,32 +1,36 @@ # Rollup DA + [back to readme](../../README.md) FIXME: run a spellchecker -# EIP4844 support +## Prerequisites + +Before reading this document, it is better to first understand how [custom DA](./custom_da.md) works in general. + +## EIP4844 support EIP-4844, commonly known as Proto-Danksharding, is an upgrade to the Ethereum protocol that introduces a new data availability solution embedded in layer 1. More information about it can be found [here](https://ethereum.org/en/roadmap/danksharding/). To facilitate EIP4844 blob support, our circuits allow providing two arrays in our public input to the circuit: -- `blobCommitments` -- this is the commitment that helps to check the correctness of the blob content. The formula on how it is computed will be explained below in the document (FIXME: link). -- `blobHash` -- the `keccak256` hash of the inner contents of the blob. +- `blobCommitments` -- this is the commitment that helps to check the correctness of the blob content. The formula on how it is computed will be explained below in the document. +- `blobHash` -- the `keccak256` hash of the inner contents of the blob. Note, that our circuits require that each blob contains exactly `4096 * 31` bytes. The maximal number of blobs that are supported by our proving system is 16, but the system contracts support only 6 blobs at most for now. -When committing a batch, the L1DAValidator (FIXME: link to the description of pubdata processing) is called with the data provided by the operator and it should return the two arrays described above. These arrays be put inside the batch commitment and then the correctness of the commitments will be verified at the proving stage. +When committing a batch, the L1DAValidator is called with the data provided by the operator and it should return the two arrays described above. These arrays will be put inside the batch commitment and then the correctness of the commitments will be verified at the proving stage. -Note, that the `Executor.sol` (and the contract itself) is not responsible for checking that the provided `blobHash` and `blobCommitments` in any way correspond to the pubdata inside the batch as it is the job of the DA Validator pair (FIXME: link). +Note, that the `Executor.sol` (and the contract itself) is not responsible for checking that the provided `blobHash` and `blobCommitments` in any way correspond to the pubdata inside the batch as it is the job of the DA Validator pair. -# Publishing pubdata to L1 +## Publishing pubdata to L1 Let’s see an example of how the approach above works in rollup DA validators. -## RollupL2DAValidator +### RollupL2DAValidator ![RollupL2DAValidator.png](./L1%20smart%20contracts/Rollup_DA.png) - `RollupL2DAValidator` accepts the preimages for the data to publish as well as their compressed format. 
After verifying the compression, it forms the `_totalPubdata` bytes array, which represents the entire blob of data that should be published to L1. It calls the `PubdataChunkPublisher` system contract to split this pubdata into multiple "chunks" of size `4096 * 31` bytes and return the `keccak256` hash of each. These will be the `blobHash` values from the section before. @@ -37,7 +41,7 @@ To give the flexibility of checking different DA, we send the following data to - The hash of the `_totalPubdata`. In case the size of pubdata is small, it will allow the operator to also use just standard Ethereum calldata for the DA. - Send the `blobHash` array. -## RollupL1DAValidator +### RollupL1DAValidator When committing the batch, the operator will provide the preimage of the fields that the RollupL2DAValidator has sent before, and also some `l1DaInput` along with it. This `l1DaInput` will be used to prove that the pubdata was indeed provided in this batch. @@ -71,6 +75,6 @@ assert uint256(res[32:]) == BLS_MODULUS The final `blobCommitment` is calculated as the hash of the `blobVersionedHash`, `opening point` and the `claimed value`. The zero knowledge circuits will verify that the opening point and the claimed value were calculated correctly and correspond to the data that was hashed under the `blobHash`. -# Structure of the pubdata +## Structure of the pubdata Rollups maintain the same structure of pubdata and apply the same rules for compression as those that were used in the previous versions of the system. These can be read [here](./Handling%20pubdata.md). diff --git a/docs/settlement_contracts/data_availability/standard_pubdata_format.md b/docs/settlement_contracts/data_availability/standard_pubdata_format.md index a64af3371..4edc8c083 100644 --- a/docs/settlement_contracts/data_availability/standard_pubdata_format.md +++ b/docs/settlement_contracts/data_availability/standard_pubdata_format.md @@ -1,9 +1,10 @@ # Standard pubdata format + [back to readme](../../README.md) -While with the introduction of custom DA validators (FIXME LINK), any pubdata logic could be applied for each chain (including calldata-based pubdata), ZK chains are generally optimized for using state-diffs based rollup model. +While with the introduction of [custom DA validators](./custom_da.md), any pubdata logic could be applied for each chain (including calldata-based pubdata), ZK chains are generally optimized for using the state-diff-based rollup model. -This document will describe how the standard pubdata format looks like. This is the format that is enforced for permanent rollup chains (FIXME: link to permanent rollup description). +This document will describe what the standard pubdata format looks like. This is the format that is enforced for [permanent rollup chains](../../chain_management/admin_role.md#ispermanentrollup-setting). Pubdata in zkSync can be divided up into 4 different categories: @@ -14,19 +15,19 @@ Pubdata in zkSync can be divided up into 4 different categories: Using data corresponding to these 4 facets, across all executed batches, we’re able to reconstruct the full state of L2. To restore the state we just need to filter all of the transactions to the L1 zkSync contract for only the `commitBatches` transactions where the proposed block has been referenced by a corresponding `executeBatches` call (the reason for this is that a committed or even proven block can be reverted but an executed one cannot). 
Once we have all the committed batches that have been executed, we will then pull the transaction input and the relevant fields, applying them in order to reconstruct the current state of L2. -# L2→L1 communication +## L2→L1 communication We will implement the calculation of the Merkle root of the L2→L1 messages via a system contract as part of the `L1Messenger`. Basically, whenever a new log that needs to be Merklized is created by users, the `L1Messenger` contract will append it to its rolling hash and then at the end of the batch, during the formation of the blob, it will receive the original preimages from the operator, verify their consistency, and send those to the L2DAValidator to facilitate the DA protocol. -We will now call the logs that are created by users and are Merklized *user* logs and the logs that are emitted by natively by VM *system* logs. Here is a short comparison table for better understanding: +We will now call the logs that are created by users and are Merklized _user_ logs and the logs that are emitted natively by the VM _system_ logs. Here is a short comparison table for better understanding: -| System logs | User logs | -| --- | --- | -| Emitted by VM via an opcode. | VM knows nothing about them. | +| System logs                                       | User logs                                          | +| ------------------------------------------------- | -------------------------------------------------- | +| Emitted by VM via an opcode.                      | VM knows nothing about them.                       | | Consistency and correctness is enforced by the verifier on L1 (i.e. their hash is part of the block commitment). | Consistency and correctness is enforced by the L1Messenger system contract. The correctness of the behavior of the L1Messenger is enforced implicitly by the prover in a sense that it proves the correctness of the execution overall. | -| We don’t calculate their Merkle root. | We calculate their Merkle root on the L1Messenger system contract. | -| We have constant small number of those. | We can have as much as possible as long as the commitBatches function on L1 remains executable (it is the job of the operator to ensure that only such transactions are selected) | -| In EIP4844 they will remain part of the calldata. | In EIP4844 they will become part of the blobs. | +| We don’t calculate their Merkle root.             | We calculate their Merkle root on the L1Messenger system contract. | +| We have a constant, small number of those.        | We can have as many as we want, as long as the commitBatches function on L1 remains executable (it is the job of the operator to ensure that only such transactions are selected) | +| In EIP4844 they will remain part of the calldata. | In EIP4844 they will become part of the blobs.     | ### Backwards-compatibility @@ -34,12 +35,12 @@ Note, that to maintain a unified interface with the previous version of the prot ```solidity struct L2Log { - uint8 l2ShardId; - bool isService; - uint16 txNumberInBlock; - address sender; - bytes32 key; - bytes32 value; + uint8 l2ShardId; + bool isService; + uint16 txNumberInBlock; + address sender; + bytes32 key; + bytes32 value; } ``` @@ -63,13 +64,13 @@ The L1Messenger contract will maintain a rolling hash of all the L2ToL1 logs `ch Note, that the user is charged for the necessary future computation that will be needed to calculate the final merkle root. 
It is roughly 4x higher than the cost to calculate the hash of the leaf, since the eventual tree might have 4x the number of nodes. In any case, this will likely be a relatively negligible part compared to the cost of the pubdata. -At the end of the execution, the bootloader will [provide](../../system-contracts/bootloader/bootloader.yul#L2621) (FIXME: check link) a list of all the L2ToL1 logs (this will be provided by the operator in the memory of the bootloader). The L1Messenger checks that the rolling hash from the provided logs is the same as in the `chainedLogsHash` and calculate the merkle tree of the provided messages. Right now, we always build the Merkle tree of size `16384`, but we charge the user as if the tree was built dynamically based on the number of leaves in there. The implementation of the dynamic tree has been postponed until the later upgrades. +At the end of the execution, the bootloader will [provide](../../../system-contracts/bootloader/bootloader.yul#L2676) a list of all the L2ToL1 logs (this will be provided by the operator in the memory of the bootloader). The L1Messenger checks that the rolling hash from the provided logs is the same as in the `chainedLogsHash` and calculates the merkle tree of the provided messages. Right now, we always build the Merkle tree of size `16384`, but we charge the user as if the tree was built dynamically based on the number of leaves in there. The implementation of the dynamic tree has been postponed until the later upgrades. -> Note, that unlike most other parts of pubdata, the user L2->L1 must always be validated by the trusted `L1Messenger` system contract. If we moved this responsibility to L2DAValidator it would be possible that a malicious operator provided incorrect data and forged transactions out of names of certain users. +> Note, that unlike most other parts of pubdata, the user L2->L1 logs must always be validated by the trusted `L1Messenger` system contract. If we moved this responsibility to L2DAValidator it would be possible that a malicious operator provided incorrect data and forged transactions in the name of certain users. ### Long L2→L1 messages & bytecodes -If the user wants to send an L2→L1 message, its preimage is [appended](../../system-contracts/contracts/L1Messenger.sol#L122) to the message’s rolling hash too `chainedMessagesHash = keccak256(chainedMessagesHash, keccak256(message))`. +If the user wants to send an L2→L1 message, its preimage is [appended](../../../system-contracts/contracts/L1Messenger.sol#L126) to the message’s rolling hash too `chainedMessagesHash = keccak256(chainedMessagesHash, keccak256(message))`. A very similar approach for bytecodes is used, where their rolling hash is calculated and then the preimages are provided at the end of the batch to form the full pubdata for the batch.
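To make the rolling-hash formula above concrete, here is a minimal Python sketch of the accumulation (illustrative only; the real logic lives in the `L1Messenger` system contract, and the `eth-hash` package plus the helper name are assumptions):

```python
# Rolling hash for long L2->L1 messages:
# chainedMessagesHash = keccak256(chainedMessagesHash, keccak256(message))
from eth_hash.auto import keccak  # assumes the eth-hash package is installed

def append_message(chained_messages_hash: bytes, message: bytes) -> bytes:
    """Fold one more message preimage into the rolling hash."""
    return keccak(chained_messages_hash + keccak(message))
```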
@@ -77,7 +78,7 @@ Note, that in for backward compatibility, just like before any long message or b ### Using system L2→L1 logs vs the user logs -The content of the L2→L1 logs by the L1Messenger will go to the blob of EIP4844. Meaning, that all the data that belongs to the tree by L1Messenger’s L2→L1 logs should not be needed during block commitment. Also, note that in the future we will remove the calculation of the Merkle root of the built-in L2→L1 messages. +The content of the L2→L1 logs by the L1Messenger will go to the blob of EIP4844. Meaning, that all the data that belongs to the tree by L1Messenger’s L2→L1 logs should not be needed during block commitment. Also, note that in the future we will remove the calculation of the Merkle root of the built-in L2→L1 messages. The only places where the built-in L2→L1 messaging should continue to be used: @@ -87,35 +88,35 @@ The only places where the built-in L2→L1 messaging should continue to be used: ### Obtaining `txNumberInBlock` -To have the same log format, the `txNumberInBlock` must be obtained. While it is internally counted in the VM, there is currently no opcode to retrieve this number. We will have a public variable `txNumberInBlock` in the `SystemContext`, which will be incremented with each new transaction and retrieve this variable from there. It is [zeroed out](https://github.com/code-423n4/2024-03-zksync/blob/7e85e0a997fee7a6d75cadd03d3233830512c2d2/code/system-contracts/contracts/SystemContext.sol#L486) at the end of the batch. +To have the same log format, the `txNumberInBlock` must be obtained. While it is internally counted in the VM, there is currently no opcode to retrieve this number. We will have a public variable `txNumberInBlock` in the `SystemContext`, which will be incremented with each new transaction, and we will retrieve this variable from there. It is [zeroed out](../../../system-contracts/contracts/SystemContext.sol#L515) at the end of the batch. -## Bootloader implementation +### Bootloader implementation The bootloader has a memory segment dedicated to the ABI-encoded data of the L1ToL2Messenger to perform the `publishPubdataAndClearState` call. -At the end of the execution of the batch, the operator should provide the corresponding data into the bootloader memory, i.e user L2→L1 logs, long messages, bytecodes, etc. After that, the [call](../../system-contracts/bootloader/bootloader.yul#L2635) is performed to the `L1Messenger` system contract, that would call the L2DAValidator that should check the adherence of the pubdata to the specified format. +At the end of the execution of the batch, the operator should provide the corresponding data into the bootloader memory, i.e. user L2→L1 logs, long messages, bytecodes, etc. After that, the [call](../../../system-contracts/bootloader/bootloader.yul#L2676) is performed to the `L1Messenger` system contract, which would call the L2DAValidator to check the adherence of the pubdata to the specified format. -# Bytecode Publishing +## Bytecode Publishing Within pubdata, bytecodes are published in 1 of 2 ways: (1) uncompressed as part of the bytecodes array and (2) compressed via long l2 → l1 messages. -## Uncompressed Bytecode Publishing +### Uncompressed Bytecode Publishing Uncompressed bytecodes are included within the `totalPubdata` bytes and have the following format: `number of bytecodes || forEachBytecode (length of bytecode(n) || bytecode(n))`. -## Compressed Bytecode Publishing +### Compressed Bytecode Publishing -Unlike uncompressed bytecode which are published as part of `factoryDeps`, compressed bytecodes are published as long l2 → l1 messages which can be seen [here](../../system-contracts/contracts/Compressor.sol#L73). +Unlike uncompressed bytecodes, which are published as part of `factoryDeps`, compressed bytecodes are published as long l2 → l1 messages which can be seen [here](../../../system-contracts/contracts/Compressor.sol#L78). -### Bytecode Compression Algorithm — Server Side +#### Bytecode Compression Algorithm — Server Side This is the part that is responsible for taking bytecode that has already been chunked into 8-byte words, performing validation, and compressing it. 
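For illustration, the chunking step assumed above could look like the following Python sketch (a hypothetical helper, not part of the actual server code):

```python
def chunk_bytecode(bytecode: bytes) -> list[bytes]:
    """Split bytecode into the 8-byte words that the compressor operates on."""
    assert len(bytecode) % 8 == 0, "chunked bytecode must be a multiple of 8 bytes"
    return [bytecode[i : i + 8] for i in range(0, len(bytecode), 8)]
```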
-Each 8 byte word from the chunked bytecode is assigned a 2 byte index (constraint on size of dictionary of chunk → index is 2^16 - 1 elements). The length of the dictionary, dictionary entries (index assumed through order), and indexes are all concatenated together to yield the final compressed version. +Each 8 byte word from the chunked bytecode is assigned a 2 byte index (constraint on size of dictionary of chunk → index is 2^16 - 1 elements). The length of the dictionary, dictionary entries (index assumed through order), and indexes are all concatenated together to yield the final compressed version. For bytecode to be considered valid it must satisfy the following: -1. Bytecode length must be less than 2097120 ((2^16 - 1) * 32) bytes. +1. Bytecode length must be less than 2097120 ((2^16 - 1) \* 32) bytes. 2. Bytecode length must be a multiple of 32. 3. Number of 32-byte words cannot be even. @@ -127,10 +128,10 @@ dictionary: Map[chunk, index] encoded_data: List[index] for position, chunk in chunked_bytecode: - if chunk is in statistic: - statistic[chunk].count += 1 - else: - statistic[chunk] = (count=1, first_pos=pos) + if chunk is in statistic: + statistic[chunk].count += 1 + else: + statistic[chunk] = (count=1, first_pos=pos) # We want the more frequently used bytes to have smaller ids to save on calldata (zero bytes cost less) statistic.sort(primary=count, secondary=first_pos, order=desc) @@ -139,14 +140,14 @@ for index, chunk in enumerated(sorted_statistics): dictionary[chunk] = index for chunk in chunked_bytecode: - encoded_data.append(dictionary[chunk]) + encoded_data.append(dictionary[chunk]) return [len(dictionary), dictionary.keys(order=index asc), encoded_data] ``` -### Verification And Publishing — L2 Contract +#### Verification And Publishing — L2 Contract -The function `publishCompressBytecode` takes in both the original `_bytecode` and the `_rawCompressedData` , the latter of which comes from the output of the server’s compression algorithm. Looping over the encoded data, derived from `_rawCompressedData` , the corresponding chunks are pulled from the dictionary and compared to the original byte code, reverting if there is a mismatch. After the encoded data has been verified, it is published to L1 and marked accordingly within the `KnownCodesStorage` contract. +The function `publishCompressBytecode` takes in both the original `_bytecode` and the `_rawCompressedData`, the latter of which comes from the output of the server’s compression algorithm. Looping over the encoded data, derived from `_rawCompressedData`, the corresponding chunks are pulled from the dictionary and compared to the original bytecode, reverting if there is a mismatch. After the encoded data has been verified, it is published to L1 and marked accordingly within the `KnownCodesStorage` contract. 
Pseudo-code implementation: @@ -156,33 +157,33 @@ dictionary = _rawCompressedData[2:2 + length_of_dict * 8] # need to offset by by encoded_data = _rawCompressedData[2 + length_of_dict * 8:] assert(len(dictionary) % 8 == 0) # each element should be 8 bytes -assert(num_entries(dictionary) <= 2^16) +assert(num_entries(dictionary) <= 2^16) assert(len(encoded_data) * 4 == len(_bytecode)) # given that each chunk is 8 bytes and each index is 2 bytes they should differ by a factor of 4 for (index, dict_index) in list(enumerate(encoded_data)): - encoded_chunk = dictionary[dict_index] - real_chunk = _bytecode.readUint64(index * 8) # need to pull from index * 8 to account for difference in element size - verify(encoded_chunk == real_chunk) + encoded_chunk = dictionary[dict_index] + real_chunk = _bytecode.readUint64(index * 8) # need to pull from index * 8 to account for difference in element size + verify(encoded_chunk == real_chunk) # Sending the compressed bytecode to L1 for data availability sendToL1(_rawCompressedBytecode) markAsPublished(hash(_bytecode)) ``` -# Storage diff publishing +## Storage diff publishing zkSync is a statediff-based rollup and so publishing the correct state diffs plays an integral role in ensuring data availability. -## Difference between initial and repeated writes. +### Difference between initial and repeated writes zkSync publishes state changes that happened within the batch instead of transactions themselves. Meaning, that if for instance some storage slot `S` under account `A` has changed to value `V`, we could publish a triple of `A,S,V`. Users, by observing all the triples, could restore the state of zkSync. However, note that our tree, unlike Ethereum’s, is not account based (i.e. there is no first layer of depth 160 of the merkle tree corresponding to accounts and second layer of depth 256 of the merkle tree corresponding to users). Our tree is “flat”, i.e. a slot `S` under account `A` is just stored in the leaf number `H(S,A)`. Our tree is of depth 256 + 8 (the 256 is for these hashed account/key pairs and 8 is for potential shards in the future, we currently have only one shard and it is irrelevant for the rest of the document). -We call this `H(S,A)` *derived key*, because it is derived from the address and the actual key in the storage of the account. Since our tree is flat, whenever a change happens, we can publish a pair `DK, V`, where `DK=H(S,A)`. +We call this `H(S,A)` _derived key_, because it is derived from the address and the actual key in the storage of the account. Since our tree is flat, whenever a change happens, we can publish a pair `DK, V`, where `DK=H(S,A)`. However, there is an optimization that could be done: -- Whenever a change to a key is used for the first time, we publish a pair of `DK,V` and we assign some sequential id to this derived key. This is called an *initial write*. It happens for the first time and that’s why we must publish the full key. -- If this storage slot is published in some of the subsequent batches, instead of publishing the whole `DK`, we can use the sequential id instead. This is called a *repeated write*. +- Whenever a change to a key is used for the first time, we publish a pair of `DK,V` and we assign some sequential id to this derived key. This is called an _initial write_. It happens for the first time and that’s why we must publish the full key. +- If this storage slot is published in some of the subsequent batches, instead of publishing the whole `DK`, we can use the sequential id instead. This is called a _repeated write_.
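A toy Python sketch of this bookkeeping (the names and data shapes here are illustrative assumptions, not the actual implementation):

```python
def publish_writes(diffs, enum_index):
    """diffs: list of (derived_key, value) pairs; enum_index: persistent derived_key -> id map."""
    published = []
    for derived_key, value in diffs:
        if derived_key not in enum_index:
            # Initial write: publish the full 32-byte derived key and assign the next sequential id.
            enum_index[derived_key] = len(enum_index) + 1
            published.append((derived_key, value))
        else:
            # Repeated write: the short sequential id replaces the 32-byte derived key.
            published.append((enum_index[derived_key], value))
    return published
```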
For instance, if the slots `A`,`B` (I’ll use latin letters instead of 32-byte hashes for readability) changed their values to `12`,`13` respectively, in the batch where it happened they will be published in the following format: @@ -194,13 +195,13 @@ Let’s say that in the next block, they changes their values to `13`,`14`. Then The id is permanently assigned to each storage key that was ever published. While in the description above it may not seem like a huge boost, each `DK` is 32 bytes long and the id is at most 8 bytes long. -We call this id *enumeration_index*. +We call this id _enumeration_index_. Note, that the enumeration indexes are assigned in the order of the sorted array of (address, key), i.e. they are internally sorted. The enumeration indexes are part of the state merkle tree, so it is **crucial** that the initial writes are published in the correct order, so that anyone could restore the correct enum indexes for the storage slots. In addition, an enumeration index of `0` indicates that the storage write is an initial write. -## State diffs structure +### State diffs structure -Firstly, let’s define what we mean by *state diffs*. A *state diff* is an element of the following structure. +Firstly, let’s define what we mean by _state diffs_. A _state diff_ is an element of the following structure. [State diff structure](https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/encodings/state_diff_record.rs#L8). @@ -209,41 +210,41 @@ Basically, it contains all the values which might interest us about the state di - `address` where the storage has been changed. - `key` (the original key inside the address) - `derived_key` — `H(key, address)` as described in the previous section. - - Note, the hashing algorithm currently used here is `Blake2s` + - Note, the hashing algorithm currently used here is `Blake2s` - `enumeration_index` — Enumeration index as explained above. It is equal to 0 if the write is initial and contains the non-zero enumeration index if it is the repeated write (indexes are numerated starting from 1). - `initial_value` — The value that was present in the key at the start of the batch - `final_value` — The value that the key has changed to by the end of the batch. -We will consider `stateDiffs` an array of such objects, sorted by (address, key). +We will consider `stateDiffs` an array of such objects, sorted by (address, key). This is the internal structure that is used by the circuits to represent the state diffs. The most basic “compression” algorithm is the one described above: -- For initial writes, write the pair of (`derived_key`, `final_value`) +- For initial writes, write the pair of (`derived_key`, `final_value`) - For repeated writes write the pair of (`enumeration_index`, `final_value`). Note, that values like `initial_value`, `address` and `key` are not used in the "simplified" algorithm above, but they will be helpful for the more advanced compression algorithms in the future. The [algorithm](#state-diff-compression-format) for Boojum already utilizes the difference between the `initial_value` and `final_value` for saving up on pubdata. -## How the new pubdata verification works +### How the new pubdata verification works -**L2** +#### **L2** 1. The operator provides both full `stateDiffs` (i.e. the array of the structs above) and the compressed state diffs (i.e. 
the array which contains the state diffs, compressed by the algorithm explained [below](#state-diff-compression-format)). -2. The L2DAValidator must verify that the compressed version is consistent with the original stateDiffs and send the the *hash* of the original state diff to its L1 counterpart. It will also include the compressed state diffs into the totalPubdata to be published onto L1. +2. The L2DAValidator must verify that the compressed version is consistent with the original stateDiffs and send the _hash_ of the original state diff to its L1 counterpart. It will also include the compressed state diffs into the totalPubdata to be published onto L1. -**L1** +#### **L1** -1. During committing the block, the standard DA protocol follows and the L1DAValidator is responsible to check that the operator has provided the preimage for the `_totalPubdata`. More on how this is checked can be seen [here](./Rollup%20DA.md). -2. The block commitment [includes](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L550) *the hash of the `stateDiffs`. Thus, during ZKP verification will fail if the provided stateDiff hash is not correct. +1. When committing the block, the standard DA protocol is followed, and the L1DAValidator is responsible for checking that the operator has provided the preimage for the `_totalPubdata`. More on how this is checked can be seen [here](./Rollup%20DA.md). +2. The block commitment [includes](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L550) the hash of the `stateDiffs`. Thus, the ZKP verification will fail if the provided stateDiff hash is not correct. -It is a secure construction because the proof can be verified only if both the execution was correct and the hash of the provided hash of the `stateDiffs` is correct. This means that the L2DAValidator indeed received the array of correct `stateDiffs` and, assuming the L2DAValidator is working correctly, double-checked that the compression is of the correct format, while L1 contracts on the commit stage double checked that the operator provided the preimage for the compressed state diffs. +It is a secure construction because the proof can be verified only if both the execution was correct and the provided hash of the `stateDiffs` is correct. This means that the L2DAValidator indeed received the array of correct `stateDiffs` and, assuming the L2DAValidator is working correctly, double-checked that the compression is of the correct format, while L1 contracts on the commit stage double checked that the operator provided the preimage for the compressed state diffs. -## State diff compression format +### State diff compression format The following algorithm is used for the state diff compression: [State diff compression v1 spec](./state_diff_compression_v1_spec.md) -# General pubdata format +## General pubdata format The `totalPubdata` has the following structure: @@ -271,15 +272,15 @@ The interface for committing batches is the following one: /// @param systemLogs concatenation of all L2 -> L1 system logs in the batch /// @param totalL2ToL1Pubdata Total pubdata committed to as part of bootloader run. 
Contents are: l2Tol1Logs <> l2Tol1Messages <> publishedBytecodes <> stateDiffs struct CommitBatchInfo { - uint64 batchNumber; - uint64 timestamp; - uint64 indexRepeatedStorageChanges; - bytes32 newStateRoot; - uint256 numberOfLayer1Txs; - bytes32 priorityOperationsHash; - bytes32 bootloaderHeapInitialContentsHash; - bytes32 eventsQueueStateHash; - bytes systemLogs; - bytes totalL2ToL1Pubdata; + uint64 batchNumber; + uint64 timestamp; + uint64 indexRepeatedStorageChanges; + bytes32 newStateRoot; + uint256 numberOfLayer1Txs; + bytes32 priorityOperationsHash; + bytes32 bootloaderHeapInitialContentsHash; + bytes32 eventsQueueStateHash; + bytes systemLogs; + bytes totalL2ToL1Pubdata; } ``` diff --git a/docs/settlement_contracts/data_availability/state_diff_compression_v1_spec.md b/docs/settlement_contracts/data_availability/state_diff_compression_v1_spec.md index 8edcf7fb7..39f2404e4 100644 --- a/docs/settlement_contracts/data_availability/state_diff_compression_v1_spec.md +++ b/docs/settlement_contracts/data_availability/state_diff_compression_v1_spec.md @@ -1,4 +1,5 @@ # State diff compression v1 spec + [back to readme](../../README.md) The most basic strategy to publish state diffs is to publish those in either of the following two forms: @@ -10,13 +11,13 @@ This compression strategy will utilize a similar idea for treating keys and valu ## Keys -Keys will be packed in the same way as they were before. The only change is that we’ll avoid using the 8-byte enumeration index and will pack it to the minimal necessary number of bytes. This number will be part of the pubdata. Once a key has been used, it can already use the 4 or 5 byte enumeration index and it is very hard to have something cheaper for keys that has been used already. The opportunity comes when remembering the ids for accounts to spare some bytes on nonce/balance key, but ultimately the complexity may not be worth it. +Keys will be packed in the same way as they were before. The only change is that we’ll avoid using the 8-byte enumeration index and will pack it to the minimal necessary number of bytes. This number will be part of the pubdata. Once a key has been used, it can already use the 4 or 5 byte enumeration index and it is very hard to have something cheaper for keys that have been used already. The opportunity comes when remembering the ids for accounts to spare some bytes on nonce/balance key, but ultimately the complexity may not be worth it. There is some room for optimization of the keys that are being written for the first time, however, optimizing those is more complex and achieves only a one-time effect (when the key is published for the first time), so they may be in scope of the future upgrades. ## Values -Values are much easier to compress since they usually contain only zeroes. Also, we can leverage the nature of how those values are changed. For instance, if nonce has been increased only by 1, we do not need to write the entire 32-byte new value, we can just tell that the slot has been *increased* and then supply only the 1-byte value by which it was increased. This way instead of 32 bytes we need to publish only 2 bytes: first byte to denote which operation has been applied and the second by to denote the number by which the addition has been made. +Values are much easier to compress since they usually contain only zeroes. Also, we can leverage the nature of how those values are changed. For instance, if the nonce has been increased only by 1, we do not need to write the entire 32-byte new value; we can just tell that the slot has been _increased_ and then supply only the 1-byte value by which it was increased. This way instead of 32 bytes we need to publish only 2 bytes: the first byte to denote which operation has been applied and the second byte to denote the number by which the addition has been made.
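As a rough Python sketch of the `Add` case just described (a hypothetical helper; the normative encoding is defined by the rest of this spec):

```python
def pack_add(initial_value: int, final_value: int) -> bytes | None:
    """Pack a value change as a small positive delta if it fits into one byte."""
    delta = final_value - initial_value
    if 0 < delta < 256:
        return delta.to_bytes(1, "big")
    return None  # does not fit; fall back to another packing type
```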
We have the following 4 types of changes: `Add`, `Sub`, `Transform`, `NoCompression` where: @@ -39,18 +40,18 @@ So the format of the pubdata is the following: - `` - the number of initial writes. Since each initial write publishes at least 32 bytes for key, then `2^16 * 32 = 2097152` will be enough for a lot of time (right now with the limit of 120kb it will take more than 15 L1 txs to use up all the space there). - Then for each `` pair for each initial write: - - print key as 32-byte derived key. - - packing type as a 1 byte value, which consists of 5 bits to denote the length of the packing and 3 bits to denote the type of the packing (either `Add`, `Sub`, `Transform` or `NoCompression`). - - The packed value itself. + - print key as 32-byte derived key. + - packing type as a 1 byte value, which consists of 5 bits to denote the length of the packing and 3 bits to denote the type of the packing (either `Add`, `Sub`, `Transform` or `NoCompression`). + - The packed value itself. **Part 3. Repeated writes.** Note, that there is no need to write the number of repeated writes, since we know that until the end of the pubdata, all the writes will be repeated ones. - For each `` pair for each repeated write: - - print key as derived key by using the number of bytes provided in the header. - - packing type as a 1 byte value, which consists of 5 bits to denote the length of the packing and 3 bits to denote the type of the packing (either `Add`, `Sub`, `Transform` or `NoCompression`). - - The packed value itself. + - print key as derived key by using the number of bytes provided in the header. + - packing type as a 1 byte value, which consists of 5 bits to denote the length of the packing and 3 bits to denote the type of the packing (either `Add`, `Sub`, `Transform` or `NoCompression`); see the sketch after this list. + - The packed value itself.
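The 1-byte packing type mentioned in the lists above could be assembled as in the following sketch (the bit layout and the numeric type ids here are illustrative assumptions, not the normative encoding):

```python
PACKING_TYPE = {"NoCompression": 0, "Add": 1, "Sub": 2, "Transform": 3}  # illustrative ids

def packing_type_byte(kind: str, packed_length: int) -> int:
    """5 bits for the length of the packed value, 3 bits for the packing type."""
    assert 0 <= packed_length < 32, "length must fit into 5 bits"
    return (packed_length << 3) | PACKING_TYPE[kind]
```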
## Impact @@ -68,21 +69,21 @@ The worst case scenario for such packing is when we have to pack a completely ra ## Why do we need to repeat the same packing method id -You might have noticed that for each pair `` to describe value we always first write the packing type and then write the packed value. However, the reader might ask, it is more efficient to just supply the packing id once and then list all the pairs `` which use such packing. +You might have noticed that for each pair `` to describe value we always first write the packing type and then write the packed value. However, the reader might ask whether it would be more efficient to just supply the packing id once and then list all the pairs `` which use such packing. -I.e. instead of listing -(key = 0, type = 1, value = 1), (key = 1, type = 1, value = 3), (key = 2, type = 1, value = 4), … +(key = 0, type = 1, value = 1), (key = 1, type = 1, value = 3), (key = 2, type = 1, value = 4), … Just write: -type = 1, (key = 0, value = 1), (key = 1, value = 3), (key = 2, value = 4), … +type = 1, (key = 0, value = 1), (key = 1, value = 3), (key = 2, value = 4), … There are two reasons for it: - A minor reason: sometimes it is less efficient in case the packing is used for very few slots (since for correct unpacking we need to provide the number of slots for each packing type). -- A fundamental reason: currently enum indeces are stored directly in the merkle tree & have very strict order of incrementing enforced by the circuits and (they are given in order by pairs `(address, key)`), which are generally not accessible from pubdata. +- A fundamental reason: currently enum indices are stored directly in the merkle tree & have a very strict order of incrementing enforced by the circuits (they are given in order by pairs `(address, key)`), which are generally not accessible from pubdata. All this means that we are not allowed to change the order of “first writes” above, so indexes for them are directly recoverable from their order, and so we can not permute them. If we were to reorder keys without supplying the new enumeration indices for them, the state would be unrecoverable. Always supplying the new enum index may add additional 5 bytes for each key, which might negate the compression benefits in a lot of cases. Even if the compression will still be beneficial, the added complexity may not be worth it. -That being said, we *could* rearange those for *repeated* writes, but for now we stick to the same value compression format for simplicity. +That being said, we _could_ rearrange those for _repeated_ writes, but for now we stick to the same value compression format for simplicity. diff --git a/docs/settlement_contracts/priority_queue/priority-queue.md b/docs/settlement_contracts/priority_queue/priority-queue.md index 975c5185b..7b2af8392 100644 --- a/docs/settlement_contracts/priority_queue/priority-queue.md +++ b/docs/settlement_contracts/priority_queue/priority-queue.md @@ -1,4 +1,5 @@ # Priority Queue to Merkle Tree + [back to readme](../../README.md) ## Overview of the current implementation diff --git a/docs/settlement_contracts/priority_queue/processing_of_l1->l2_txs.md b/docs/settlement_contracts/priority_queue/processing_of_l1->l2_txs.md index 3674d7d66..6f6aa7967 100644 --- a/docs/settlement_contracts/priority_queue/processing_of_l1->l2_txs.md +++ b/docs/settlement_contracts/priority_queue/processing_of_l1->l2_txs.md @@ -1,4 +1,5 @@ # Handling L1→L2 ops on zkSync + [back to readme](../../README.md) The transactions on zkSync can be initiated not only on L2, but also on L1. There are two types of transactions that can be initiated on L1: @@ -10,17 +11,17 @@ The transactions on zkSync can be initiated not only on L2, but also on L1. Ther Please read the full [article](../../l2_system_contracts/system_contracts_bootloader_description.md) on the general system contracts / bootloader structure as well as the pubdata structure to understand [the difference](../data_availability/standard_pubdata_format.md) between system and user logs. -# Priority operations +## Priority operations -## Initiation +### Initiation A new priority operation can be appended by calling the `requestL2TransactionDirect` or `requestL2TransactionTwoBridges` methods on the `BridgeHub` smart contract. 
`BridgeHub` will ensure that the base token is deposited via `L1AssetRouter` and send the transaction request to the specified state transition contract (selected by the chainID). The state transition contract will perform several checks for the transaction, making sure that it is processable and provides enough fee to compensate the operator for this transaction. Then, this transaction will be [appended](../../l1-contracts/contracts/state-transition/chain-deps/facets/Mailbox.sol#569) to the priority tree (and optionally to the legacy priority queue). -> In the previous system, priority operations were structured in a queue. However, now they will be stored in an incremental merkle tree. The motivation for the tree structure will be displayed in sections below (FIXME: link). +> In the previous system, priority operations were structured in a queue. However, now they will be stored in an incremental merkle tree. The motivation for the tree structure can be read [here](./priority-queue.md). -The difference between `requestL2TransactionDirect` and `requestL2TransactionTwoBridges` is that the `msg.sender` on the L2 Transaction is the second bridge in the `requestL2TransactionTwoBridges` case, while it is the `msg.sender` of the `requestL2TransactionDirect` in the first case. For more details read the [L1 ecosystem contracts](./L1%20ecosystem%20contracts.md) +The difference between `requestL2TransactionDirect` and `requestL2TransactionTwoBridges` is that the `msg.sender` on the L2 Transaction is the second bridge in the `requestL2TransactionTwoBridges` case, while it is the `msg.sender` of the `requestL2TransactionDirect` call in the first case. For more details read the [bridgehub documentation](../../bridging/bridgehub/overview.md) -## Bootloader +### Bootloader Whenever an operator sees a priority operation, it can include the transaction into the batch. While for a normal L2 transaction the account abstraction protocol will ensure that the `msg.sender` has indeed agreed to start a transaction in its name, for L1→L2 transactions there is no signature verification. In order to verify that the operator includes only transactions that were indeed requested on L1, the bootloader [maintains](../../system-contracts/bootloader/bootloader.yul#L1052-L1053) two variables: @@ -29,26 +30,26 @@ Whenever a priority transaction is processed, the `numberOfPriorityTransactions` gets incremented by 1, while `priorityOperationsRollingHash` is assigned to `keccak256(priorityOperationsRollingHash, processedPriorityOpHash)`, where `processedPriorityOpHash` is the hash of the priority operation that has just been processed. -Also, for each priority transaction, we [emit](../../system-contracts/bootloader/bootloader.yul#L1046) a user L2→L1 log with its hash and result, which basically means that it will get Merklized and users will be able to prove on L1 that a certain priority transaction has succeeded or failed (which can be helpful to reclaim your funds from bridges if the L2 part of the deposit has failed). +Also, for each priority transaction, we [emit](../../../system-contracts/bootloader/bootloader.yul#L1046) a user L2→L1 log with its hash and result, which basically means that it will get Merklized and users will be able to prove on L1 that a certain priority transaction has succeeded or failed (which can be helpful to reclaim your funds from bridges if the L2 part of the deposit has failed). 
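A compact Python model of this bookkeeping (a sketch only; the real logic is implemented in the bootloader's Yul code, and the `eth-hash` package plus the initial value are assumptions):

```python
from eth_hash.auto import keccak  # assumes the eth-hash package is installed

def replay_priority_ops(processed_op_hashes: list[bytes]) -> tuple[int, bytes]:
    """Recompute numberOfPriorityTransactions and priorityOperationsRollingHash."""
    count = 0
    rolling_hash = b"\x00" * 32  # illustrative initial value
    for op_hash in processed_op_hashes:
        count += 1
        # priorityOperationsRollingHash = keccak256(priorityOperationsRollingHash, processedPriorityOpHash)
        rolling_hash = keccak(rolling_hash + op_hash)
    return count, rolling_hash
```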
-Then, at the end of the batch, we [submit](../../system-contracts/bootloader/bootloader.yul#L4117-L4118) 2 L2→L1 log system log with these values. +Then, at the end of the batch, we [submit](../../../system-contracts/bootloader/bootloader.yul#L4117-L4118) 2 system L2→L1 logs with these values. -## Batch commit +### Batch commit During batch commit, the contract will remember those values, but not validate them in any way. -## Batch execution +### Batch execution During batch execution, the contract will check that the `priorityOperationsRollingHash` provided before was correct. There are two ways to do it: -- [Legacy one that uses priority queue](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L397). We will pop `numberOfPriorityTransactions` from the top of priority queue and verify that the hashes match. -- [The new one that uses priority tree](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L397). The operator would have to provide the hashes of these priority operations in an array, as well as proof that this entire segment belongs to the merkle tree. After it is verified that this array of leaves is correct, it will be checked whether the rolling hash of those is equal to the `priorityOperationsRollingHash`. +- [Legacy one that uses priority queue](../../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L397). We will pop `numberOfPriorityTransactions` from the top of priority queue and verify that the hashes match. +- [The new one that uses priority tree](../../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L397). The operator would have to provide the hashes of these priority operations in an array, as well as a proof that this entire segment belongs to the merkle tree. After it is verified that this array of leaves is correct, it will be checked whether the rolling hash of those is equal to the `priorityOperationsRollingHash`. -# Upgrade transactions +## Upgrade transactions -## Initiation +### Initiation -Upgrade transactions can only be created during a system upgrade. It is done if the `DiamondProxy` delegatecalls to the implementation that manually puts this transaction into the storage of the DiamondProxy, this could happen on calling `upgradeChainFromVersion` function in `Admin.sol` on the State Transition contract. Note, that since it happens during the upgrade, there is no “real” checks on the structure of this transaction. We do have [some validation](../../l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol#L193), but it is purely on the side of the implementation which the `DiamondProxy` delegatecalls to and so may be lifted if the implementation is changed. +Upgrade transactions can only be created during a system upgrade. It is done if the `DiamondProxy` delegatecalls to the implementation that manually puts this transaction into the storage of the DiamondProxy; this can happen when calling the `upgradeChainFromVersion` function in `Admin.sol` on the State Transition contract. Note, that since it happens during the upgrade, there are no “real” checks on the structure of this transaction. We do have [some validation](../../../l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol#L193), but it is purely on the side of the implementation which the `DiamondProxy` delegatecalls to and so may be lifted if the implementation is changed. The hash of the currently required upgrade transaction is stored under the `l2SystemContractsUpgradeTxHash` variable. 
@@ -56,34 +57,34 @@ We will also track the batch where the upgrade has been committed in the `l2Syst We cannot support multiple upgrades in parallel, i.e. the next upgrade should start only after the previous one has been completed. -## Bootloader +### Bootloader The upgrade transactions are processed just like priority transactions, with only the following differences: - We can have only one upgrade transaction per batch & this transaction must be the first transaction in the batch. -- The system contracts upgrade transaction is not appended to `priorityOperationsRollingHash` and doesn’t increment `numberOfPriorityTransactions`. Instead, its hash is calculated via a system L2→L1 log *before* it gets executed. Note, that it is an important property. More on it [below](#security-considerations). +- The system contracts upgrade transaction is not appended to `priorityOperationsRollingHash` and doesn’t increment `numberOfPriorityTransactions`. Instead, its hash is calculated via a system L2→L1 log _before_ it gets executed. Note, that it is an important property. More on it [below](#security-considerations). -## Commit +### Commit -After an upgrade has been initiated, it will be required that the next commit batches operation already contains the system upgrade transaction. It is [checked](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L223) by verifying the corresponding L2→L1 log. +After an upgrade has been initiated, it will be required that the next commit batches operation already contains the system upgrade transaction. It is [checked](../../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L223) by verifying the corresponding L2→L1 log. We also remember that the upgrade transaction has been processed in this batch (by amending the `l2SystemContractsUpgradeBatchNumber` variable). -## Revert +### Revert In the very rare event that the team needs to revert the batch with the upgrade on zkSync, the `l2SystemContractsUpgradeBatchNumber` is reset. Note, however, that we do not “remember” that certain batches had a version before the upgrade, i.e. if the reverted batches have to be re-executed, the upgrade transaction must still be present there, even if some of the deleted batches were committed before the upgrade and thus didn’t contain the transaction. -## Execute +### Execute -Once batch with the upgrade transaction has been executed, we [delete](../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L486) them from storage for efficiency to signify that the upgrade has been fully processed and that a new upgrade can be initiated. +Once the batch with the upgrade transaction has been executed, we [delete](../../../l1-contracts/contracts/state-transition/chain-deps/facets/Executor.sol#L486) these variables from storage for efficiency to signify that the upgrade has been fully processed and that a new upgrade can be initiated. -# Security considerations +### Security considerations Since the operator can put any data into the bootloader memory and for L1→L2 transactions the bootloader has to blindly trust it and rely on L1 contracts to validate it, it may be a very powerful tool for a malicious operator. Note, that while the governance mechanism is trusted, we try to limit our trust for the operator as much as possible, since in the future anyone would be able to become an operator. -Some time ago, we *used to* have a system where the upgrades could be done via L1→L2 transactions, i.e. 
the implementation of the `DiamondProxy` upgrade would include a priority transaction (with `from` equal to for instance `FORCE_DEPLOYER`) with all the upgrade params. +Some time ago, we _used to_ have a system where the upgrades could be done via L1→L2 transactions, i.e. the implementation of the `DiamondProxy` upgrade would include a priority transaction (with `from` equal to, for instance, `FORCE_DEPLOYER`) with all the upgrade params. In the current system though having such logic would be dangerous and would allow for the following attack: @@ -91,6 +92,7 @@ In the current system though having such logic would be dangerous and would allo - The operator puts a malicious priority operation with an upgrade into the bootloader memory. This operation was never included in the priority operations queue / and it is not an upgrade transaction. However, as already mentioned above, the bootloader has no idea what priority / upgrade transactions are correct and so this transaction will be processed. The most important caveat of this malicious upgrade is that it may change the implementation of the `Keccak256` precompile to return any values that the operator needs. + - When the `priorityOperationsRollingHash` is updated, instead of the “correct” rolling hash of the priority transactions, the one which would appear with the correct topmost priority operation is returned. The operator can’t amend the behaviour of `numberOfPriorityTransactions`, but it won’t help much, since the `priorityOperationsRollingHash` will match on L1 on the execution step. -That’s why the concept of the upgrade transaction is needed: this is the only transaction that can initiate transactions out of the kernel space and thus change bytecodes of system contracts. That’s why it must be the first one and that’s why bootloader [emits](../../system-contracts/bootloader/bootloader.yul#L603) its hash via a system L2→L1 log before actually processing it. +That’s why the concept of the upgrade transaction is needed: this is the only transaction that can initiate transactions out of the kernel space and thus change bytecodes of system contracts. That’s why it must be the first one and that’s why the bootloader [emits](../../../system-contracts/bootloader/bootloader.yul#L603) its hash via a system L2→L1 log before actually processing it. diff --git a/docs/settlement_contracts/zkchain_basics.md b/docs/settlement_contracts/zkchain_basics.md index decdd88bc..dd61d9297 100644 --- a/docs/settlement_contracts/zkchain_basics.md +++ b/docs/settlement_contracts/zkchain_basics.md @@ -1,4 +1,5 @@ # L1 smart contract of an individual chain + [back to readme](../README.md) ## Diamond (also mentioned as State Transition contract) @@ -20,7 +21,7 @@ even an upgrade system is a separate facet that can be replaced. One of the differences from the reference implementation is access freezability. Each of the facets has an associated parameter that indicates if it is possible to freeze access to the facet. Privileged actors can freeze the **diamond** -(not a specific facet!) and all facets with the marker `isFreezable` should be inaccessible until the governor or admin +(not a specific facet!) and all facets with the marker `isFreezable` should be inaccessible until the governor or admin unfreezes the diamond. Note that it is a very dangerous thing since the diamond proxy can freeze the upgrade system and then the diamond will be frozen forever. @@ -36,12 +37,13 @@ This contract must never be frozen. 
This facet is responsible for the configuration setup and upgradability, handling tasks such as: -* Privileged Address Management: Updating key roles, including the governor and validators. -* System Parameter Configuration: Adjusting critical system settings, such as the L2 bootloader bytecode hash, verifier address, verifier parameters, fee configurations. -* Freezability: Executing the freezing/unfreezing of facets within the diamond proxy to safeguard the ecosystem during upgrades or in response to detected vulnerabilities. +- Privileged Address Management: Updating key roles, including the governor and validators. +- System Parameter Configuration: Adjusting critical system settings, such as the L2 bootloader bytecode hash, verifier address, verifier parameters, fee configurations. +- Freezability: Executing the freezing/unfreezing of facets within the diamond proxy to safeguard the ecosystem during upgrades or in response to detected vulnerabilities. Control over the AdminFacet is divided between two main entities: -- CTM (Chain Type Manager, formerly known as `StateTransitionManager`) - Separate smart contract that can perform critical changes to the system as protocol upgrades. For more detailed information on its function and design, refer to the [Hyperchain section](https://github.com/code-423n4/2024-03-zksync/blob/main/docs/Smart%20contract%20Section/L1%20ecosystem%20contracts.md#st--stm). Although currently only one version of the CTM exists, the architecture allows for future versions to be introduced via subsequent upgrades. The owner of the CTM is the [decentralized governance](https://blog.zknation.io/introducing-zk-nation/), while for non-critical an Admin entity is used (see details below). + +- CTM (Chain Type Manager, formerly known as `StateTransitionManager`) - Separate smart contract that can perform critical changes to the system, such as protocol upgrades. For more detailed information on its function and design, refer to [this document](../chain_management/chain_type_manager.md). Although currently only one version of the CTM exists, the architecture allows for future versions to be introduced via subsequent upgrades. The owner of the CTM is the [decentralized governance](https://blog.zknation.io/introducing-zk-nation/), while for non-critical changes an Admin entity is used (see details below). - Chain Admin - Multisig smart contract managed by each individual chain that can perform non-critical changes to the system such as granting validator permissions. ### MailboxFacet @@ -51,9 +53,9 @@ The facet that handles L2 <-> L1 communication, an overview for which can be fou The Mailbox performs three functions: -* L1 ↔ L2 Communication: Enables data and transaction requests to be sent from L1 to L2 and vice versa, supporting the implementation of multi-layer protocols. -* Bridging Native Tokens: Allows the bridging of either ether or ERC20 tokens to L2, enabling users to use these assets within the L2 ecosystem. -* Censorship Resistance Mechanism: Currently in the research stage. +- L1 ↔ L2 Communication: Enables data and transaction requests to be sent from L1 to L2 and vice versa, supporting the implementation of multi-layer protocols. +- Bridging Native Tokens: Allows the bridging of either ether or ERC20 tokens to L2, enabling users to use these assets within the L2 ecosystem. +- Censorship Resistance Mechanism: Currently in the research stage. L1 -> L2 communication is implemented as requesting an L2 transaction on L1 and executing it on L2. 
This means a user can call the function on the L1 contract to save the data about the transaction in some queue. Later on, a validator can @@ -79,7 +81,6 @@ function applyL1ToL2Alias(address l1Address) internal pure returns (address l2Ad l2Address = address(uint160(l1Address) + offset); } } - ``` For most of the rollups the address aliasing needs to prevent cross-chain exploits that would otherwise be possible if @@ -98,11 +99,11 @@ More about L1->L2 operations can be found [here](./Handling%20L1→L2%20ops%20on L2 -> L1 communication, in contrast to L1 -> L2 communication, is based only on transferring the information, and not on the transaction execution on L1. The full description of the mechanism for sending information from L2 to L1 can be found [here](./Standard%20pubdata%20format.md). -The Mailbox facet also facilitates L1<>L3 communications for those chains that settle on top of Gateway. The user interfaces for those are identical to the L1<>L2 communication described above. To learn more about L1<>L3 communication works, check out this document (FIXME: link) +The Mailbox facet also facilitates L1<>L3 communications for those chains that settle on top of Gateway. The user interfaces for those are identical to the L1<>L2 communication described above. To learn more about how L1<>L3 communication works, check out [this document](../gateway/messaging_via_gateway.md) and [this one](../gateway/nested_l3_l1_messaging.md). ### ExecutorFacet -A contract that accepts L2 batches, enforces data availability via DA validators and checks the validity of zk-proofs. You can read more about DA validators in this docuemnt (FIXME :link). +A contract that accepts L2 batches, enforces data availability via DA validators and checks the validity of zk-proofs. You can read more about DA validators [in this document](../settlement_contracts/data_availability/custom_da.md). The state transition is divided into three stages: @@ -111,16 +112,17 @@ The state transition is divided into three stages: - `executeBatches` - finalize the state, marking L1 -> L2 communication processing, and saving Merkle tree with L2 logs. Each L2 -> L1 system log will have a key that is part of the following: + ```solidity enum SystemLogKey { - L2_TO_L1_LOGS_TREE_ROOT_KEY, - PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY, - CHAINED_PRIORITY_TXN_HASH_KEY, - NUMBER_OF_LAYER_1_TXS_KEY, - PREV_BATCH_HASH_KEY, - L2_DA_VALIDATOR_OUTPUT_HASH_KEY, - USED_L2_DA_VALIDATOR_ADDRESS_KEY, - EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY + L2_TO_L1_LOGS_TREE_ROOT_KEY, + PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY, + CHAINED_PRIORITY_TXN_HASH_KEY, + NUMBER_OF_LAYER_1_TXS_KEY, + PREV_BATCH_HASH_KEY, + L2_DA_VALIDATOR_OUTPUT_HASH_KEY, + USED_L2_DA_VALIDATOR_ADDRESS_KEY, + EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY } ``` @@ -129,16 +131,16 @@ When a batch is committed, we process L2 -> L1 system logs. Here are the invaria - In a given batch there will be either 7 or 8 system logs. The 8th log is only required for a protocol upgrade.
- There will be a single log for each key that is contained within `SystemLogKey` - Three logs from the `L2_TO_L1_MESSENGER` with keys: - - `L2_TO_L1_LOGS_TREE_ROOT_KEY` - - `L2_DA_VALIDATOR_OUTPUT_HASH_KEY` - - `USED_L2_DA_VALIDATOR_ADDRESS_KEY` +- `L2_TO_L1_LOGS_TREE_ROOT_KEY` +- `L2_DA_VALIDATOR_OUTPUT_HASH_KEY` +- `USED_L2_DA_VALIDATOR_ADDRESS_KEY` - Two logs from `L2_SYSTEM_CONTEXT_SYSTEM_CONTRACT_ADDR` with keys: - `PACKED_BATCH_AND_L2_BLOCK_TIMESTAMP_KEY` - `PREV_BATCH_HASH_KEY` - Two or three logs from `L2_BOOTLOADER_ADDRESS` with keys: - `CHAINED_PRIORITY_TXN_HASH_KEY` - `NUMBER_OF_LAYER_1_TXS_KEY` - - `EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY` + - `EXPECTED_SYSTEM_CONTRACT_UPGRADE_TX_HASH_KEY` - No logs from other addresses (may be changed in the future). ### DiamondInit @@ -166,6 +168,6 @@ When the validator calls `commitBatches`, the same calldata will be propagated t the time these batches are committed by the validator to enforce a delay between committing and execution of batches. Then, the validator can prove the already committed batches regardless of the mentioned timestamp, and again the same calldata (related to the `proveBatches` function) will be propagated to the zkSync contract. After the `delay` is elapsed, the validator -is allowed to call `executeBatches` to propagate the same calldata to zkSync contract. +is allowed to call `executeBatches` to propagate the same calldata to the zkSync contract. The owner of the ValidatorTimelock contract is the decentralized governance. Note, that all the chains share the same ValidatorTimelock for simplicity. diff --git a/docs/upgrade_history/gateway_upgrade/gateway_diff_review.md b/docs/upgrade_history/gateway_upgrade/gateway_diff_review.md index fd74cd2c4..e83ff4241 100644 --- a/docs/upgrade_history/gateway_upgrade/gateway_diff_review.md +++ b/docs/upgrade_history/gateway_upgrade/gateway_diff_review.md @@ -1,32 +1,33 @@ # Gateway upgrade changes ## Introduction & prerequisites + [back to readme](../../README.md) -This document assumes that the reader has general knowledge of how ZKsync Era works and how our ecosystem used to be like at the moment of shared bridge in general. +This document assumes that the reader has general knowledge of how ZKsync Era works and what our ecosystem looked like at the time of the shared bridge release. To read the documentation about the current system, you can read [here](../../README.md). For more info about the previous one, you can reach out to the following documentation: -[https://github.com/code-423n4/2024-03-zksync/tree/main/docs/Smart contract Section](https://github.com/code-423n4/2024-03-zksync/tree/main/docs/Smart%20contract%20Section) +[https://github.com/code-423n4/2024-03-zksync/tree/main/docs/Smart contract Section](https://github.com/code-423n4/2024-03-zksync/tree/main/docs/Smart%20contract%20Section) ## Changes from the shared bridge design -This section contains some of the important changes that happened since the shared bridge release in June. This section may not be fully complete and additional information will be provided in the sections that cover specific topics. +This section contains some of the important changes that happened since the shared bridge release in June. It may not be fully complete; additional information will be provided in the sections that cover specific topics. ### Bridgehub now has chainId → address mapping -Before, Bridgehub contained a mapping from `chainId => stateTransitionManager`.
The further resolution of the mapping should happen at the CTM level. +Before, Bridgehub contained a mapping from `chainId => stateTransitionManager`. The further resolution of the mapping should happen at the CTM level. For more intuitive management of the chains, a new mapping `chainId => hyperchainAddress` was added. This is considered more intuitive since “bridgehub is the owner of all the chains” mentality is more applicable with this new design. -The upside of the previous approach was potentially easier migration within the same CTM. However, in the end it was decided that the new approach is better. +The upside of the previous approach was potentially easier migration within the same CTM. However, in the end it was decided that the new approach is better. #### Migration -This new mapping will have to be filled up after upgrading the bridgehub. It is done by repeatedly calling the `setLegacyChainAddress` for each of the deployed chains. It is assumed that their number is relatively low. Also, this function is permissionless and so can be called by anyone after the upgrade is complete. This function will call the old CTM and ask for the implementation of the chainId. +This new mapping will have to be filled up after upgrading the bridgehub. It is done by repeatedly calling `setLegacyChainAddress` for each of the deployed chains. It is assumed that their number is relatively low. Also, this function is permissionless and so can be called by anyone after the upgrade is complete. This function will call the old CTM and ask for the implementation of the chainId. -Until the migration is done, all transactions with the old chains will not be working, but it is a short period of time. +Until the migration is done, transactions with the old chains will not work, but only for a short period of time. ### baseTokenAssetId is used as a base token for the chains @@ -34,11 +35,11 @@ In order to facilitate future support of any type of asset a base token, includi #### Migration & compatibility -Today, there are some mappings of sort `chainId => baseTokenAddress`. These will no longer be filled for new chains. Instead, only assetId will be provided in a new `chainId => baseTokenAssetId` mapping. +Today, there are some mappings of the sort `chainId => baseTokenAddress`. These will no longer be filled for new chains. Instead, only assetId will be provided in a new `chainId => baseTokenAssetId` mapping. To initialize the new `baseTokenAssetId` mapping the following function should be called for each chain: `setLegacyBaseTokenAssetId`. It will encode each token as the assetId of an L1 token of the Native Token Vault. This method is permissionless. -For the old tooling that may rely on getters of sort `getBaseTokenAddress(chainId)` working, we provide a getter method, but its exact behavior depends on the asset handler of the `setLegacyBaseTokenAssetId`, i.e. it is even possible that the method will revert for an incompatible assetId. +For the old tooling that may rely on getters of the sort `getBaseTokenAddress(chainId)` working, we provide a getter method, but its exact behavior depends on the asset handler of the `setLegacyBaseTokenAssetId`, i.e. it is even possible that the method will revert for an incompatible assetId.
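To make the `setLegacyBaseTokenAssetId` encoding above concrete, here is a minimal sketch of how an NTV-style assetId for a token could be derived, following the general `keccak256(abi.encode(originChainId, deploymentTracker, assetData))` scheme that this document describes. The constant and names below are illustrative assumptions, not the production code:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// A minimal sketch of NTV-style assetId derivation, assuming the general
/// `keccak256(abi.encode(originChainId, deploymentTracker, assetData))` scheme.
/// The vault address below is a hypothetical placeholder, not the real constant.
library LegacyBaseTokenAssetIdSketch {
    // Hypothetical stand-in for the NTV's deployment-tracker address.
    address internal constant NATIVE_TOKEN_VAULT_ADDR = address(uint160(0x10004));

    function encode(uint256 _l1ChainId, address _l1Token) internal pure returns (bytes32) {
        // The token address is widened to bytes32 so the same encoding can
        // carry non-address asset data in the future.
        return keccak256(abi.encode(_l1ChainId, NATIVE_TOKEN_VAULT_ADDR, bytes32(uint256(uint160(_l1Token)))));
    }
}
```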
### L2 Shared bridge (not L2AssetRouter) is deployed everywhere at the same address @@ -52,7 +53,7 @@ Note, that for the chains that contained the `L2SharedBridge` before the upgrade ### StateTransitionManager was renamed to ChainTypeManager -CTM was renamed to CTM (ChainTypeManager). This was done to use more intuitive naming as the chains of the same “type” share the same CTM. +StateTransitionManager (STM) was renamed to ChainTypeManager (CTM). This was done to use more intuitive naming, as the chains of the same “type” share the same CTM. ### Hyperchains were renamed to ZK chains @@ -76,7 +77,7 @@ To combat all the issues above, it was decided to move from the priority queue t ## Custom DA layers -Custom DA layer support was added. +Custom DA layer support was added. ### Major changes @@ -85,13 +86,13 @@ In order to achieve CAB, we separated the liquidity managing logic from the Shar ## L1<>L2 token bridging considerations - We have the L2SharedBridgeLegacy on chains that are live before the upgrade. This contract will keep on working, and where it exists it will also be used to: - - deploy bridged tokens. This is so that the l2TokenAddress keeps working on the L1, and so that we have a predictable address for these tokens. - - send messages to L1. On the L1 finalizeWithdrawal does not specify the l2Sender. Legacy withdrawals will use the legacy bridge as their sender, while new withdrawals would use the L2_ASSET_ROUTER_ADDR. In the future we will add the sender to the L1 finalizeWithdrawal interface. Until the current method is depracated we use the l2SharedBridgeAddress even for new withdrawals on legacy chains. + - deploy bridged tokens. This is so that the l2TokenAddress keeps working on the L1, and so that we have a predictable address for these tokens. + - send messages to L1. On the L1 finalizeWithdrawal does not specify the l2Sender. Legacy withdrawals will use the legacy bridge as their sender, while new withdrawals would use the L2_ASSET_ROUTER_ADDR. In the future we will add the sender to the L1 finalizeWithdrawal interface. Until the current method is deprecated we use the l2SharedBridgeAddress even for new withdrawals on legacy chains. This also means that on the L1 side we set the L2AR address when calling the function via the legacy interface even if it is a baseToken withdrawal. Later, when we learn whether it is the baseToken or not, we override the value. - We have the finalizeWithdrawal function on L1 AR, which uses the finalizeDeposit in the background. - L1→L2 deposits need to use the legacy encoding for SDK compatibility (see the sketch after this list). - - This means the legacy finalizeDeposit with tokenAddress which calls the new finalizeDeposit with assetId. - - On the other hand, new assets will use the new finalizeDeposit directly + - This means using the legacy finalizeDeposit with tokenAddress, which calls the new finalizeDeposit with assetId. + - On the other hand, new assets will use the new finalizeDeposit directly - The originChainId will be tracked for each assetId in the NTVs. This will be the chain the token is originally native to. This is needed to accurately track chainBalance (especially for l2 native tokens bridged to other chains via L1), and to verify the assetId is indeed an NTV asset id (i.e. has the L2_NATIVE_TOKEN_VAULT_ADDR as deployment tracker).
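The legacy-encoding bullets above can be illustrated with a small sketch: the legacy entry point keeps accepting a raw L1 token address and simply re-encodes it into an assetId before delegating to the new flow. The function shapes and helper names here are assumptions for illustration, not the exact production signatures:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// A minimal sketch of the legacy-to-new finalizeDeposit forwarding described
/// above; names and signatures are illustrative assumptions.
abstract contract FinalizeDepositForwardingSketch {
    uint256 internal immutable L1_CHAIN_ID;

    constructor(uint256 _l1ChainId) {
        L1_CHAIN_ID = _l1ChainId;
    }

    /// Legacy entry point: old SDKs keep passing the raw L1 token address.
    function finalizeDeposit(address _l1Token, bytes calldata _transferData) external {
        // Re-encode the token as an NTV assetId and route into the new flow.
        bytes32 assetId = _encodeNTVAssetId(L1_CHAIN_ID, _l1Token);
        _finalizeDeposit(assetId, _transferData);
    }

    /// New entry point: assets registered with custom handlers call this directly.
    function _finalizeDeposit(bytes32 _assetId, bytes calldata _transferData) internal virtual;

    function _encodeNTVAssetId(uint256 _chainId, address _token) internal pure virtual returns (bytes32);
}
```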
## Upgrade process in detail diff --git a/docs/upgrade_history/gateway_upgrade/upgrade_process.md b/docs/upgrade_history/gateway_upgrade/upgrade_process.md index 1f606e76d..bd29dadd3 100644 --- a/docs/upgrade_history/gateway_upgrade/upgrade_process.md +++ b/docs/upgrade_history/gateway_upgrade/upgrade_process.md @@ -1,4 +1,5 @@ # The upgrade process to the new version + [back to readme](../../README.md) The Gateway system introduces a lot of new contracts, so to provide the best experience for ZK chains a multistage upgrade will be conducted. The upgrade will require some auxiliary contracts that will exist only for the purpose of this upgrade. @@ -9,7 +10,7 @@ The previous version can be found [here](https://github.com/matter-labs/era-cont The documentation for the previous version can be found [here](https://github.com/code-423n4/2024-03-zksync). -However, deep knowledge of the previous version should not be required for understanding. But this document *does* require understanding of the new system, so it should be the last document for you to read. +However, deep knowledge of the previous version should not be required for understanding. But this document _does_ require understanding of the new system, so it should be the last document for you to read. ## Overall design motivation @@ -29,7 +30,7 @@ There are four roles that will be mentioned within this document: This stage involves everything that is done before the voting starts. At this stage, all the details of the upgrade must be fixed, including the chain id of the gateway. -More precisely, the implementations for the contracts will have to be deployed. Also, all of the new contracts will have to be deployed along with their proxies, e.g. `CTMDeploymentTracker`, `L1AssetRouter`, etc. +More precisely, the implementations for the contracts will have to be deployed. Also, all of the new contracts will have to be deployed along with their proxies, e.g. `CTMDeploymentTracker`, `L1AssetRouter`, etc. Also, at this stage the bytecodes of all L2 contracts have to be fixed; this includes bytecodes for things like `L2DAValidators`, `GatewayCTMDeployer`, etc. @@ -47,7 +48,7 @@ PS: It may be possible that for more contracts, e.g. some of the proxies we coul ### L2SharedBridge and L2WETH migration -In the current system (i.e. before the gateway upgrade), the trusted admin of the L1SharedBridge is responsible for [setting the correct L2SharedBridge address for chains](https://github.com/matter-labs/era-contracts/blob/aafee035db892689df3f7afe4b89fd6467a39313/l1-contracts/contracts/bridge/L1SharedBridge.sol#L249) (note that the links points to the old code and not the one in the scope of the contest). This is done with no additional validation. The system is generally designed to protect chains in case when a malicious admin tries to attack a chain. There are two measures to do that: +In the current system (i.e. before the gateway upgrade), the trusted admin of the L1SharedBridge is responsible for [setting the correct L2SharedBridge address for chains](https://github.com/matter-labs/era-contracts/blob/aafee035db892689df3f7afe4b89fd6467a39313/l1-contracts/contracts/bridge/L1SharedBridge.sol#L249) (note that the link points to the old code and not the one in the scope of the contest). This is done with no additional validation. The system is generally designed to protect chains in case a malicious admin tries to attack a chain.
There are two measures to do that: - The general assumption is that the L2 shared bridge is set for a chain as soon as possible. It is a realistic assumption, since without it no bridging of any funds except for the base token is possible. So if at an early stage the admin would put a malicious l2 shared bridge for a chain, it would lose its trust from the community and the chain should be discarded. - Admin can not retroactively change L2 shared bridge for any chains. So once the correct L2 shared bridge is set, there is no way a bad admin can harm the chain. @@ -89,32 +90,32 @@ This upgrade the different approach is used to ensure safe and riskless preparat ### Things to sign by the governance -The governance should sign all operations that will happen in all of the consecutive stages at this time. There will be no other voting. Unless stated otherwise, all the governance operations in this document are listed as dependencies for one another, i.e. must be executed in strictly sequential order. +The governance should sign all operations that will happen in all of the consecutive stages at this time. There will be no other voting. Unless stated otherwise, all the governance operations in this document are listed as dependencies for one another, i.e. must be executed in strictly sequential order. Note, that to support “Stage 3” it would also need to finalize all the details for Gateway, including its chain id, chain admin, etc. -## Stage 1. Publishing of the new protocol upgrade. +## Stage 1. Publishing of the new protocol upgrade ### Txs by governance (in one multicall) -1. The governance accepts ownership for all the contracts that used `TransitionaryOwner`. +1. The governance accepts ownership of all the contracts that used `TransitionaryOwner`. 2. The governance publishes the new version by calling `function setNewVersionUpgrade`. 3. The governance calls `setChainCreationParams` and sets temporary incorrect values there that use a contract that always reverts as the `genesisUpgrade`, ensuring that no new chains can be created until Stage 2. -4. The governance should call the `GovernanceUpgradeTimer.startTimer()` to ensure that the timer for the upgrade starts. +4. The governance should call `GovernanceUpgradeTimer.startTimer()` to ensure that the timer for the upgrade starts. ### Impact The chains will get the ability to upgrade to the new protocol version. They will be advised to do so before the deadline for upgrade runs out. -Also, new chains wont be deployable during this stage due to step (3). +Also, new chains won’t be deployable during this stage due to step (3). -Chains, whether upgraded or not, should work as usual as the new L2 bridging ecosystem is fully compatible with the old L1SharedBridge. +Chains, whether upgraded or not, should work as usual, as the new L2 bridging ecosystem is fully compatible with the old L1SharedBridge. Chains that upgrade need to carefully coordinate this upgrade on the server side, since the validator timelock changes and there is also a need to keep track of the number of already existing priority ops that were not included into the priority tree. ## Chain Upgrade flow -Let’s take a deeper look at how upgrading of an individual chain would look like. +Let’s take a deeper look at what upgrading an individual chain looks like. ### Actions by Chain Admins @@ -126,25 +127,25 @@ As usual, the ChainAdmin should call `upgradeChainFromVersion`.
What is unusual It is preferable that all the steps above are executed in a multicall for greater convenience, though it is not mandatory. -This upgrade a lot of new chain parameters and so these should be managed carefully. (FIXME: link) +This upgrade adds a lot of new chain parameters and so these [should be managed carefully](../../chain_management/admin_role.md). ### Upgrade flow in contracts -Usually, we would perform an upgrade by simply doing a list of force deployments: basically providing an array of the contracts to deploy for the system. This array would be constant for all chains and it would work fine. +Usually, we would perform an upgrade by simply doing a list of force deployments: basically providing an array of the contracts to deploy for the system. This array would be constant for all chains and it would work fine. -However in this upgrade we have an issue that some of the constructor parameters (e.g. the address of the `L2SharedBridgeLegacy`) are specific to each chain. Thus, besides the standard parts of the upgrades each chain also has `ZKChainSpecificForceDeploymentsData` populated. Some of the params to conduct those actions are constant and so populate the `FixedForceDeploymentsData` struct. +However, in this upgrade we have an issue that some of the constructor parameters (e.g. the address of the `L2SharedBridgeLegacy`) are specific to each chain. Thus, besides the standard parts of the upgrades each chain also has `ZKChainSpecificForceDeploymentsData` populated. Some of the params to conduct those actions are constant and so populate the `FixedForceDeploymentsData` struct. While the above could be composed on L1 to still reuse the old `(address, bytecodeHash, constructorData)` list, there are also other actions, such as upgrading the L2SharedBridge to the L2SharedBridgeLegacy implementation, that require rather complex logic. Due to the complexity of the actions above, it was decided to put all those into the [L2GatewayUpgrade](../../../system-contracts/contracts/L2GatewayUpgrade.sol) contract. It is supposed to be force-deployed with the constructor parameters containing the `ZKChainSpecificForceDeploymentsData` as well as `FixedForceDeploymentsData`. It will be force-deployed to the ComplexUpgrader’s address to get the kernel space rights. -So most of the system contracts will be deployed the old way (via force deployment), but for more complex thing the `L2GatewayUpgrade` will be temporarily put onto `ComplexUpgrader` address and initialize additional contracts inside the constructor. Then the correct will be put back there. +So most of the system contracts will be deployed the old way (via force deployment), but for the more complex things the `L2GatewayUpgrade` will be temporarily put onto the `ComplexUpgrader` address and will initialize additional contracts inside its constructor. Then the correct bytecode will be put back there. So the entire flow can be summarized as follows: 1. On L1, when `AdminFacet.upgradeChainFromVersion` is called by the Chain Admin, the contract delegatecalls to the [GatewayUpgrade](../../../l1-contracts/contracts/upgrades/GatewayUpgrade.sol) contract. 2. The `GatewayUpgrade` gathers all the needed data to compose the `ZKChainSpecificForceDeploymentsData`, while the `FixedForceDeploymentsData` part is hardcoded inside the upgrade transaction. -3.
The combined upgrade transaction consists of many forced deployments (basically tuples of `(address, bytecodeHash, constructorInput)`) and one of these that is responsible for the temporary `L2GatewayUpgrade` gets its `constructorInput` set to contain the `ZKChainSpecificForceDeploymentsData` / `FixedForceDeploymentsData`. +3. The combined upgrade transaction consists of many forced deployments (basically tuples of `(address, bytecodeHash, constructorInput)`) and the one that is responsible for the temporary `L2GatewayUpgrade` gets its `constructorInput` set to contain the `ZKChainSpecificForceDeploymentsData` / `FixedForceDeploymentsData`. 4. When the upgrade is executed on L2, it iterates over the forced deployments, deploys most of the contracts and then executes the `L2GatewayUpgrade`. 5. `L2GatewayUpgrade` will deploy the L2 Bridgehub, MessageRoot, L2NativeTokenVault, L2AssetRouters. It will also deploy l2WrappedBaseToken if missing. It will also upgrade the implementations of the L2SharedBridge as well as the UpgradeableBeacon for these tokens. @@ -157,7 +158,7 @@ So entire flow can be summarized by the following: - upgrade the old contracts to the new implementation. - set the correct new chain creation params, upgrade the old contracts to the new one -### Txs by anyone: +### Txs by anyone After the governance has finalized the upgrade above, anyone can do the following transactions to finalize the upgrade: @@ -184,7 +185,7 @@ All the chains should start returning the address of the `L1AssetRouter` as the ## Stage 3. Deploying of Gateway -### Txs by governance (sequentially, potentially different txs): +### Txs by governance (sequentially, potentially different txs) 1. Call `Bridgehub.createNewChain` with the data for gateway. 2. It will have to register it via `bridgehub.registerSettlementLayer` @@ -198,9 +199,9 @@ In case anyone will try to migrate their chain on top of gateway while CTM is no Anyone with funds on Gateway can deploy the `GatewayCTMDeployer` and it will deploy inside its constructor the CTM described above. -It can not be done as part of the governance transactions from above since it requires pre-publishing CTM-specific bytecodes. +It cannot be done as part of the governance transactions above, since it requires pre-publishing CTM-specific bytecodes.
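Since the `GatewayCTMDeployer` does all of its work inside its constructor, the deployment is atomic: either the whole CTM stack comes up, or the transaction reverts and nothing is left behind. Below is a minimal sketch of that pattern; the contract names and constructor parameters are purely illustrative, not the real deployer:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.21;

/// Illustrative stand-in for the CTM being deployed; not the real contract.
contract ChainTypeManagerStub {
    address public immutable BRIDGE_HUB;

    constructor(address _bridgehub) {
        BRIDGE_HUB = _bridgehub;
    }
}

/// A minimal sketch of the `GatewayCTMDeployer` pattern: everything is
/// deployed inside the constructor, so anyone with funds can trigger it.
contract GatewayCTMDeployerSketch {
    address public immutable DEPLOYED_CTM;

    constructor(address _bridgehub) {
        // If any step of the deployment reverts, the deployer itself is
        // never created and no partially-initialized CTM is left on chain.
        DEPLOYED_CTM = address(new ChainTypeManagerStub(_bridgehub));
    }
}
```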
-## Security notes +## Security notes ### Importance of preventing new batches being committed with the old version diff --git a/l1-contracts/.env b/l1-contracts/.env index 0cbe2dbd1..4f40c9e49 100644 --- a/l1-contracts/.env +++ b/l1-contracts/.env @@ -39,8 +39,11 @@ ETH_SENDER_SENDER_OPERATOR_BLOBS_ETH_ADDR=0x000000000000000000000000000000000000 CONTRACTS_SHARED_BRIDGE_UPGRADE_STORAGE_SWITCH=0 CONTRACTS_MAX_NUMBER_OF_ZK_CHAINS=100 L1_CONFIG=/script-config/config-deploy-l1.toml +L2_CONFIG=/script-config/config-deploy-l2-contracts.toml L1_OUTPUT=/script-out/output-deploy-l1.toml TOKENS_CONFIG=/script-config/config-deploy-erc20.toml +ZK_TOKEN_CONFIG=/script-config/config-deploy-zk.toml +ZK_TOKEN_OUTPUT=/script-out/output-deploy-zk-token.toml ZK_CHAIN_CONFIG=/script-config/register-zk-chain.toml ZK_CHAIN_OUTPUT=/script-out/output-deploy-zk-chain-era.toml FORCE_DEPLOYMENTS_CONFIG=/script-config/generate-force-deployments-data.toml diff --git a/l1-contracts/contracts/bridge/BridgeHelper.sol b/l1-contracts/contracts/bridge/BridgeHelper.sol index 24d80f324..b989084a9 100644 --- a/l1-contracts/contracts/bridge/BridgeHelper.sol +++ b/l1-contracts/contracts/bridge/BridgeHelper.sol @@ -9,7 +9,7 @@ import {DataEncoding} from "../common/libraries/DataEncoding.sol"; /** * @author Matter Labs * @custom:security-contact security@matterlabs.dev - * @notice Helper library for working with L2 contracts on L1. + * @notice Helper library for working with native tokens on both L1 and L2. */ library BridgeHelper { /// @dev Receives and parses (name, symbol, decimals) from the token contract diff --git a/l1-contracts/contracts/bridge/BridgedStandardERC20.sol b/l1-contracts/contracts/bridge/BridgedStandardERC20.sol index bbe0ed8c4..4d5a82566 100644 --- a/l1-contracts/contracts/bridge/BridgedStandardERC20.sol +++ b/l1-contracts/contracts/bridge/BridgedStandardERC20.sol @@ -95,11 +95,12 @@ contract BridgedStandardERC20 is ERC20PermitUpgradeable, IBridgedStandardToken, nativeTokenVault = msg.sender; + bytes memory nameBytes; + bytes memory symbolBytes; + bytes memory decimalsBytes; // We parse the data exactly as they were created on the L1 bridge // slither-disable-next-line unused-return - (, bytes memory nameBytes, bytes memory symbolBytes, bytes memory decimalsBytes) = DataEncoding.decodeTokenData( - _data - ); + (, nameBytes, symbolBytes, decimalsBytes) = DataEncoding.decodeTokenData(_data); ERC20Getters memory getters; string memory decodedName; diff --git a/l1-contracts/contracts/bridge/L1BridgeContractErrors.sol b/l1-contracts/contracts/bridge/L1BridgeContractErrors.sol index 4db576573..d72cf85a2 100644 --- a/l1-contracts/contracts/bridge/L1BridgeContractErrors.sol +++ b/l1-contracts/contracts/bridge/L1BridgeContractErrors.sol @@ -2,9 +2,6 @@ pragma solidity ^0.8.21; -// 0xe4efb466 -error NotNTV(); // 0x6d963f88 error EthTransferFailed(); diff --git a/l1-contracts/contracts/bridge/L1Nullifier.sol b/l1-contracts/contracts/bridge/L1Nullifier.sol index 9dc2af62f..7be97e544 100644 --- a/l1-contracts/contracts/bridge/L1Nullifier.sol +++ b/l1-contracts/contracts/bridge/L1Nullifier.sol @@ -29,7 +29,7 @@ import {IBridgehub} from "../bridgehub/IBridgehub.sol"; import {L2_BASE_TOKEN_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR} from "../common/L2ContractAddresses.sol"; import {DataEncoding} from "../common/libraries/DataEncoding.sol"; import {Unauthorized, SharedBridgeKey, DepositExists, AddressAlreadySet, InvalidProof, DepositDoesNotExist,
SharedBridgeValueNotSet, WithdrawalAlreadyFinalized, L2WithdrawalMessageWrongLength, InvalidSelector, SharedBridgeValueNotSet, ZeroAddress} from "../common/L1ContractErrors.sol"; -import {WrongL2Sender, NotNTV, NativeTokenVaultAlreadySet, EthTransferFailed, WrongMsgLength} from "./L1BridgeContractErrors.sol"; +import {WrongL2Sender, NativeTokenVaultAlreadySet, EthTransferFailed, WrongMsgLength} from "./L1BridgeContractErrors.sol"; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev @@ -126,14 +126,6 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, _; } - /// @notice Checks that the message sender is the bridgehub or ZKsync Era Diamond Proxy. - modifier onlyBridgehubOrEra(uint256 _chainId) { - if (msg.sender != address(BRIDGE_HUB) && (_chainId != ERA_CHAIN_ID || msg.sender != ERA_DIAMOND_PROXY)) { - revert Unauthorized(msg.sender); - } - _; - } - /// @notice Checks that the message sender is the legacy bridge. modifier onlyLegacyBridge() { if (msg.sender != address(legacyBridge)) { @@ -142,14 +134,6 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, _; } - /// @notice Checks that the message sender is the legacy bridge. - modifier onlyAssetRouterOrErc20Bridge() { - if (msg.sender != address(l1AssetRouter) && msg.sender != address(legacyBridge)) { - revert Unauthorized(msg.sender); - } - _; - } - /// @dev Contract is expected to be used as proxy implementation. /// @dev Initialize the implementation to prevent Parity hack. constructor(IBridgehub _bridgehub, uint256 _eraChainId, address _eraDiamondProxy) reentrancyGuardInitializer { @@ -210,10 +194,7 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, /// @dev This function is part of the upgrade process used to nullify chain balances once they are credited to NTV. /// @param _chainId The ID of the ZK chain. /// @param _token The address of the token which was previously deposited to shared bridge. - function nullifyChainBalanceByNTV(uint256 _chainId, address _token) external { - if (msg.sender != address(l1NativeTokenVault)) { - revert NotNTV(); - } + function nullifyChainBalanceByNTV(uint256 _chainId, address _token) external onlyL1NTV { __DEPRECATED_chainBalance[_chainId][_token] = 0; } @@ -290,7 +271,7 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, emit BridgehubDepositFinalized(_chainId, _txDataHash, _txHash); } - /// @dev Calls the internal `_encodeTxDataHash`. Used as a wrapped for try / catch case. + /// @dev Calls the library `encodeTxDataHash`. Used as a wrapper for the try / catch case. /// @dev Encodes the transaction data hash using either the latest encoding standard or the legacy standard. /// @param _encodingVersion EncodingVersion. /// @param _originalCaller The address of the entity that initiated the deposit. @@ -431,10 +412,9 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, } isWithdrawalFinalized[chainId][l2BatchNumber][l2MessageIndex] = true; - // Handling special case for withdrawal from ZKsync Era initiated before Shared Bridge. (bytes32 assetId, bytes memory transferData) = _verifyWithdrawal(_finalizeWithdrawalParams); - // Handling special case for withdrawal from zkSync Era initiated before Shared Bridge. + // Handling special case for withdrawal from ZKsync Era initiated before Shared Bridge. if (_isPreSharedBridgeEraEthWithdrawal(chainId, l2BatchNumber)) { // Checks that the withdrawal wasn't finalized already.
bool alreadyFinalized = IGetters(ERA_DIAMOND_PROXY).isEthWithdrawalFinalized(l2BatchNumber, l2MessageIndex); @@ -593,8 +573,8 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, address baseToken = BRIDGE_HUB.baseToken(_chainId); transferData = DataEncoding.encodeBridgeMintData({ _originalCaller: address(0), - _l2Receiver: l1Receiver, - _l1Token: baseToken, + _remoteReceiver: l1Receiver, + _originToken: baseToken, _amount: amount, _erc20Metadata: new bytes(0) }); @@ -618,8 +598,8 @@ contract L1Nullifier is IL1Nullifier, ReentrancyGuard, Ownable2StepUpgradeable, assetId = DataEncoding.encodeNTVAssetId(block.chainid, l1Token); transferData = DataEncoding.encodeBridgeMintData({ _originalCaller: address(0), - _l2Receiver: l1Receiver, - _l1Token: l1Token, + _remoteReceiver: l1Receiver, + _originToken: l1Token, _amount: amount, _erc20Metadata: new bytes(0) }); diff --git a/l1-contracts/contracts/bridge/L2SharedBridgeLegacy.sol b/l1-contracts/contracts/bridge/L2SharedBridgeLegacy.sol index 4ae901593..b7c762b71 100644 --- a/l1-contracts/contracts/bridge/L2SharedBridgeLegacy.sol +++ b/l1-contracts/contracts/bridge/L2SharedBridgeLegacy.sol @@ -7,7 +7,7 @@ import {UpgradeableBeacon} from "@openzeppelin/contracts-v4/proxy/beacon/Upgrade import {BridgedStandardERC20} from "./BridgedStandardERC20.sol"; -import {DEPLOYER_SYSTEM_CONTRACT, L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR} from "../common/L2ContractAddresses.sol"; +import {L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR} from "../common/L2ContractAddresses.sol"; import {SystemContractsCaller} from "../common/libraries/SystemContractsCaller.sol"; import {L2ContractHelper, IContractDeployer} from "../common/libraries/L2ContractHelper.sol"; import {AddressAliasHelper} from "../vendor/AddressAliasHelper.sol"; @@ -175,7 +175,7 @@ contract L2SharedBridgeLegacy is IL2SharedBridgeLegacy, Initializable { function deployBeaconProxy(bytes32 salt) external onlyNTV returns (address proxy) { (bool success, bytes memory returndata) = SystemContractsCaller.systemCallWithReturndata( uint32(gasleft()), - DEPLOYER_SYSTEM_CONTRACT, + L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, 0, abi.encodeCall( IContractDeployer.create2, diff --git a/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol b/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol index d6ca41bdf..b315a3c64 100644 --- a/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol +++ b/l1-contracts/contracts/bridge/asset-router/AssetRouterBase.sol @@ -109,7 +109,7 @@ abstract contract AssetRouterBase is IAssetRouterBase, Ownable2StepUpgradeable, IAssetHandler(assetHandler).bridgeMint(_chainId, _assetId, _transferData); } else { assetHandlerAddress[_assetId] = _nativeTokenVault; - IAssetHandler(_nativeTokenVault).bridgeMint(_chainId, _assetId, _transferData); // ToDo: Maybe it's better to receive amount and receiver here? 
transferData may have different encoding + IAssetHandler(_nativeTokenVault).bridgeMint(_chainId, _assetId, _transferData); } } diff --git a/l1-contracts/contracts/bridge/asset-router/IAssetRouterBase.sol b/l1-contracts/contracts/bridge/asset-router/IAssetRouterBase.sol index a307ba526..2f5ba8954 100644 --- a/l1-contracts/contracts/bridge/asset-router/IAssetRouterBase.sol +++ b/l1-contracts/contracts/bridge/asset-router/IAssetRouterBase.sol @@ -32,13 +32,6 @@ interface IAssetRouterBase { bytes bridgeMintCalldata ); - event BridgehubWithdrawalInitiated( - uint256 chainId, - address indexed sender, - bytes32 indexed assetId, - bytes32 assetDataHash // Todo: What's the point of emitting hash? - ); - event AssetHandlerRegisteredInitial( bytes32 indexed assetId, address indexed assetHandlerAddress, diff --git a/l1-contracts/contracts/bridge/asset-router/IL2AssetRouter.sol b/l1-contracts/contracts/bridge/asset-router/IL2AssetRouter.sol index 81b1bc995..32f93070b 100644 --- a/l1-contracts/contracts/bridge/asset-router/IL2AssetRouter.sol +++ b/l1-contracts/contracts/bridge/asset-router/IL2AssetRouter.sol @@ -16,7 +16,7 @@ interface IL2AssetRouter is IAssetRouterBase { function withdraw(bytes32 _assetId, bytes calldata _transferData) external returns (bytes32); - function l1AssetRouter() external view returns (address); + function L1_ASSET_ROUTER() external view returns (address); function withdrawLegacyBridge(address _l1Receiver, address _l2Token, uint256 _amount, address _sender) external; @@ -28,7 +28,7 @@ interface IL2AssetRouter is IAssetRouterBase { bytes calldata _data ) external; - /// @dev Used to set the assedAddress for a given assetId. + /// @dev Used to set the assetHandlerAddress for a given assetId. /// @dev Will be used by ZK Gateway function setAssetHandlerAddress(uint256 _originChainId, bytes32 _assetId, address _assetAddress) external; } diff --git a/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol b/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol index b83b8cd97..cf2ab966c 100644 --- a/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol +++ b/l1-contracts/contracts/bridge/asset-router/L1AssetRouter.sol @@ -109,7 +109,7 @@ contract L1AssetRouter is AssetRouterBase, IL1AssetRouter, ReentrancyGuard { _transferOwnership(_owner); } - /// @notice Sets the L1ERC20Bridge contract address. + /// @notice Sets the NativeTokenVault contract address. /// @dev Should be called only once by the owner. /// @param _nativeTokenVault The address of the native token vault. function setNativeTokenVault(INativeTokenVault _nativeTokenVault) external onlyOwner { @@ -144,9 +144,7 @@ contract L1AssetRouter is AssetRouterBase, IL1AssetRouter, ReentrancyGuard { bytes32 _assetRegistrationData, address _assetDeploymentTracker ) external onlyOwner { - bytes32 assetId = keccak256( - abi.encode(uint256(block.chainid), _assetDeploymentTracker, _assetRegistrationData) - ); + bytes32 assetId = keccak256(abi.encode(block.chainid, _assetDeploymentTracker, _assetRegistrationData)); assetDeploymentTracker[assetId] = _assetDeploymentTracker; emit AssetDeploymentTrackerSet(assetId, _assetDeploymentTracker, _assetRegistrationData); } @@ -160,7 +158,6 @@ contract L1AssetRouter is AssetRouterBase, IL1AssetRouter, ReentrancyGuard { } /// @notice Used to set the asset handler address for a given asset ID on a remote ZK chain - /// @dev No access control on the caller, as msg.sender is encoded in the assetId. /// @param _chainId The ZK chain ID. 
/// @param _originalCaller The `msg.sender` address from the external call that initiated current one. /// @param _assetId The encoding of asset ID. diff --git a/l1-contracts/contracts/bridge/asset-router/L2AssetRouter.sol b/l1-contracts/contracts/bridge/asset-router/L2AssetRouter.sol index 2bd0fa748..04d92e3ac 100644 --- a/l1-contracts/contracts/bridge/asset-router/L2AssetRouter.sol +++ b/l1-contracts/contracts/bridge/asset-router/L2AssetRouter.sol @@ -33,13 +33,13 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { bytes32 public immutable BASE_TOKEN_ASSET_ID; /// @dev The address of the L1 asset router counterpart. - address public override l1AssetRouter; + address public immutable override L1_ASSET_ROUTER; /// @notice Checks that the message sender is the L1 Asset Router. modifier onlyAssetRouterCounterpart(uint256 _originChainId) { if (_originChainId == L1_CHAIN_ID) { // Only the L1 Asset Router counterpart can initiate and finalize the deposit. - if (AddressAliasHelper.undoL1ToL2Alias(msg.sender) != l1AssetRouter) { + if (AddressAliasHelper.undoL1ToL2Alias(msg.sender) != L1_ASSET_ROUTER) { revert InvalidCaller(msg.sender); } } else { @@ -52,9 +52,11 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { modifier onlyAssetRouterCounterpartOrSelf(uint256 _originChainId) { if (_originChainId == L1_CHAIN_ID) { // Only the L1 Asset Router counterpart can initiate and finalize the deposit. - if ((AddressAliasHelper.undoL1ToL2Alias(msg.sender) != l1AssetRouter) && (msg.sender != address(this))) { + if ((AddressAliasHelper.undoL1ToL2Alias(msg.sender) != L1_ASSET_ROUTER) && (msg.sender != address(this))) { revert InvalidCaller(msg.sender); } + } else { + revert InvalidCaller(msg.sender); // xL2 messaging not supported for now } _; } @@ -82,7 +84,7 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { if (_l1AssetRouter == address(0)) { revert EmptyAddress(); } - l1AssetRouter = _l1AssetRouter; + L1_ASSET_ROUTER = _l1AssetRouter; assetHandlerAddress[_baseTokenAssetId] = L2_NATIVE_TOKEN_VAULT_ADDR; BASE_TOKEN_ASSET_ID = _baseTokenAssetId; _disableInitializers(); @@ -93,10 +95,10 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { function setAssetHandlerAddress( uint256 _originChainId, bytes32 _assetId, - address _assetAddress + address _assetHandlerAddress ) external override onlyAssetRouterCounterpart(_originChainId) { - assetHandlerAddress[_assetId] = _assetAddress; - emit AssetHandlerRegistered(_assetId, _assetAddress); + assetHandlerAddress[_assetId] = _assetHandlerAddress; + emit AssetHandlerRegistered(_assetId, _assetHandlerAddress); } /// @inheritdoc IAssetRouterBase @@ -128,16 +130,6 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { emit DepositFinalizedAssetRouter(L1_CHAIN_ID, _assetId, _transferData); } - /*////////////////////////////////////////////////////////////// - Internal & Helpers - //////////////////////////////////////////////////////////////*/ - - /// @inheritdoc AssetRouterBase - function _ensureTokenRegisteredWithNTV(address _token) internal override returns (bytes32 assetId) { - IL2NativeTokenVault nativeTokenVault = IL2NativeTokenVault(L2_NATIVE_TOKEN_VAULT_ADDR); - nativeTokenVault.ensureTokenIsRegistered(_token); - } - /*////////////////////////////////////////////////////////////// LEGACY FUNCTIONS //////////////////////////////////////////////////////////////*/ @@ -161,6 +153,19 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { return _withdrawSender(assetId, _assetData, msg.sender, true); } + 
/*////////////////////////////////////////////////////////////// + Internal & Helpers + //////////////////////////////////////////////////////////////*/ + + /// @notice Ensures that token is registered with native token vault. + /// @dev Only used when deposit is made with legacy data encoding format. + /// @param _token The L2 token address which should be registered with native token vault. + /// @return assetId The asset ID of the token provided. + function _ensureTokenRegisteredWithNTV(address _token) internal override returns (bytes32 assetId) { + IL2NativeTokenVault nativeTokenVault = IL2NativeTokenVault(L2_NATIVE_TOKEN_VAULT_ADDR); + nativeTokenVault.ensureTokenIsRegistered(_token); + } + /// @notice Initiates a withdrawal by burning funds on the contract and sending the message to L1 /// where tokens would be unlocked /// @param _assetId The asset id of the withdrawn asset @@ -223,6 +228,10 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { return abi.encodePacked(IL1ERC20Bridge.finalizeWithdrawal.selector, _l1Receiver, _l1Token, _amount); } + /*////////////////////////////////////////////////////////////// + LEGACY FUNCTIONS + //////////////////////////////////////////////////////////////*/ + /// @notice Legacy finalizeDeposit. /// @dev Finalizes the deposit and mint funds. /// @param _l1Sender The address of token sender on L1. @@ -338,6 +347,6 @@ contract L2AssetRouter is AssetRouterBase, IL2AssetRouter { /// @notice Returns the address of the L1 asset router. /// @dev The old name is kept for backward compatibility. function l1Bridge() external view returns (address) { - return l1AssetRouter; + return L1_ASSET_ROUTER; } } diff --git a/l1-contracts/contracts/bridge/interfaces/IAssetHandler.sol b/l1-contracts/contracts/bridge/interfaces/IAssetHandler.sol index 57f58eb59..bcb60a2ab 100644 --- a/l1-contracts/contracts/bridge/interfaces/IAssetHandler.sol +++ b/l1-contracts/contracts/bridge/interfaces/IAssetHandler.sol @@ -7,9 +7,6 @@ pragma solidity 0.8.24; /// @custom:security-contact security@matterlabs.dev /// @notice Used for any asset handler and called by the AssetRouter interface IAssetHandler { - /// @dev Emitted when a new token is initialized - event BridgeInitialize(address indexed token, string name, string symbol, uint8 decimals); - /// @dev Emitted when a token is minted event BridgeMint(uint256 indexed chainId, bytes32 indexed assetId, address receiver, uint256 amount); @@ -27,7 +24,7 @@ interface IAssetHandler { /// @param _data the actual data specified for the function function bridgeMint(uint256 _chainId, bytes32 _assetId, bytes calldata _data) external payable; - /// @notice Burns bridged tokens and returns the calldata for L2 -> L1 message. + /// @notice Burns bridged tokens and returns the calldata for L2 <-> L1 message. /// @dev In case of native token vault _data is the tuple of _depositAmount and _l2Receiver. /// @param _chainId the chainId that the message will be sent to /// @param _msgValue the msg.value of the L2 transaction. For now it is always 0. diff --git a/l1-contracts/contracts/bridge/interfaces/IL2Bridge.sol b/l1-contracts/contracts/bridge/interfaces/IL2Bridge.sol deleted file mode 100644 index 7fe7b7a97..000000000 --- a/l1-contracts/contracts/bridge/interfaces/IL2Bridge.sol +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: MIT -// We use a floating point pragma here so it can be used within other projects that interact with the ZKsync ecosystem without using our exact pragma version. 
-pragma solidity ^0.8.21; - -/// @author Matter Labs -/// @custom:security-contact security@matterlabs.dev -interface IL2Bridge { - function withdraw(bytes32 _assetId, bytes memory _assetData) external; - - function finalizeDeposit(bytes32 _assetId, bytes calldata _transferData) external; - - function l1Bridge() external view returns (address); - - function setAssetHandlerAddress(bytes32 _assetId, address _assetAddress) external; -} diff --git a/l1-contracts/contracts/bridge/ntv/L1NativeTokenVault.sol b/l1-contracts/contracts/bridge/ntv/L1NativeTokenVault.sol index ec31504c9..a36007fe7 100644 --- a/l1-contracts/contracts/bridge/ntv/L1NativeTokenVault.sol +++ b/l1-contracts/contracts/bridge/ntv/L1NativeTokenVault.sol @@ -35,9 +35,6 @@ contract L1NativeTokenVault is IL1NativeTokenVault, IL1AssetHandler, NativeToken /// @dev L1 nullifier contract that handles legacy functions & finalize withdrawal, confirm l2 tx mappings IL1Nullifier public immutable override L1_NULLIFIER; - /// @dev Era's chainID - uint256 public immutable ERA_CHAIN_ID; - /// @dev Maps token balances for each chain to prevent unauthorized spending across ZK chains. /// This serves as a security measure until hyperbridging is implemented. /// NOTE: this function may be removed in the future, don't rely on it! @@ -47,12 +44,10 @@ contract L1NativeTokenVault is IL1NativeTokenVault, IL1AssetHandler, NativeToken /// @dev Initialize the implementation to prevent Parity hack. /// @param _l1WethAddress Address of WETH on deployed chain /// @param _l1AssetRouter Address of Asset Router on L1. - /// @param _eraChainId ID of Era. /// @param _l1Nullifier Address of the nullifier contract, which handles transaction progress between L1 and ZK chains. constructor( address _l1WethAddress, address _l1AssetRouter, - uint256 _eraChainId, IL1Nullifier _l1Nullifier ) NativeTokenVault( @@ -62,7 +57,6 @@ contract L1NativeTokenVault is IL1NativeTokenVault, IL1AssetHandler, NativeToken block.chainid ) { - ERA_CHAIN_ID = _eraChainId; L1_NULLIFIER = _l1Nullifier; } @@ -237,9 +231,9 @@ contract L1NativeTokenVault is IL1NativeTokenVault, IL1AssetHandler, NativeToken // get the computed address before the contract DeployWithCreate2 deployed using Bytecode of contract DeployWithCreate2 and salt specified by the sender function calculateCreate2TokenAddress( uint256 _originChainId, - address _l1Token + address _nonNativeToken ) public view override(INativeTokenVault, NativeTokenVault) returns (address) { - bytes32 salt = _getCreate2Salt(_originChainId, _l1Token); + bytes32 salt = _getCreate2Salt(_originChainId, _nonNativeToken); return Create2.computeAddress( salt, diff --git a/l1-contracts/contracts/bridge/ntv/L2NativeTokenVault.sol b/l1-contracts/contracts/bridge/ntv/L2NativeTokenVault.sol index b896f11df..af66345dc 100644 --- a/l1-contracts/contracts/bridge/ntv/L2NativeTokenVault.sol +++ b/l1-contracts/contracts/bridge/ntv/L2NativeTokenVault.sol @@ -16,13 +16,13 @@ import {NativeTokenVault} from "./NativeTokenVault.sol"; import {IL2SharedBridgeLegacy} from "../interfaces/IL2SharedBridgeLegacy.sol"; import {BridgedStandardERC20} from "../BridgedStandardERC20.sol"; -import {DEPLOYER_SYSTEM_CONTRACT, L2_ASSET_ROUTER_ADDR} from "../../common/L2ContractAddresses.sol"; +import {L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR} from "../../common/L2ContractAddresses.sol"; import {L2ContractHelper, IContractDeployer} from "../../common/libraries/L2ContractHelper.sol"; import {SystemContractsCaller} from "../../common/libraries/SystemContractsCaller.sol"; 
import {DataEncoding} from "../../common/libraries/DataEncoding.sol"; -import {EmptyAddress, EmptyBytes32, AddressMismatch, DeployFailed, AssetIdNotSupported} from "../../common/L1ContractErrors.sol"; +import {EmptyAddress, EmptyBytes32, AddressMismatch, DeployFailed, AssetIdNotSupported, ZeroAddress} from "../../common/L1ContractErrors.sol"; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev @@ -34,7 +34,7 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { IL2SharedBridgeLegacy public immutable L2_LEGACY_SHARED_BRIDGE; /// @dev Bytecode hash of the proxy for tokens deployed by the bridge. - bytes32 internal l2TokenProxyBytecodeHash; + bytes32 internal immutable L2_TOKEN_PROXY_BYTECODE_HASH; /// @notice Initializes the bridge contract for later use. /// @dev this contract is deployed in the L2GenesisUpgrade, and is meant as direct deployment without a proxy. @@ -64,7 +64,7 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { revert EmptyAddress(); } - l2TokenProxyBytecodeHash = _l2TokenProxyBytecodeHash; + L2_TOKEN_PROXY_BYTECODE_HASH = _l2TokenProxyBytecodeHash; _transferOwnership(_aliasedOwner); if (_contractsDeployedAlready) { @@ -86,6 +86,9 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { /// @notice Sets the legacy token asset ID for the given L2 token address. function setLegacyTokenAssetId(address _l2TokenAddress) public { address l1TokenAddress = L2_LEGACY_SHARED_BRIDGE.l1TokenAddress(_l2TokenAddress); + if (l1TokenAddress == address(0)) { + revert ZeroAddress(); + } bytes32 newAssetId = DataEncoding.encodeNTVAssetId(L1_CHAIN_ID, l1TokenAddress); tokenAddress[newAssetId] = _l2TokenAddress; assetId[_l2TokenAddress] = newAssetId; @@ -145,8 +148,8 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { assetId[_expectedToken] = _assetId; } - /// @notice Deploys the beacon proxy for the L2 token, while using ContractDeployer system contract or the legacy shared bridge. - /// @dev This function uses raw call to ContractDeployer to make sure that exactly `l2TokenProxyBytecodeHash` is used + /// @notice Deploys the beacon proxy for the L2 token, while using ContractDeployer system contract. + /// @dev This function uses raw call to ContractDeployer to make sure that exactly `L2_TOKEN_PROXY_BYTECODE_HASH` is used /// for the code of the proxy. /// @param _salt The salt used for beacon proxy deployment of L2 bridged token. /// @param _tokenOriginChainId The origin chain id of the token. @@ -160,11 +163,11 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { (bool success, bytes memory returndata) = SystemContractsCaller.systemCallWithReturndata( uint32(gasleft()), - DEPLOYER_SYSTEM_CONTRACT, + L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, 0, abi.encodeCall( IContractDeployer.create2, - (_salt, l2TokenProxyBytecodeHash, abi.encode(address(bridgedTokenBeacon), "")) + (_salt, L2_TOKEN_PROXY_BYTECODE_HASH, abi.encode(address(bridgedTokenBeacon), "")) ) ); @@ -194,23 +197,23 @@ contract L2NativeTokenVault is IL2NativeTokenVault, NativeTokenVault { //////////////////////////////////////////////////////////////*/ /// @notice Calculates L2 wrapped token address given the currently stored beacon proxy bytecode hash and beacon address. - /// @param _tokenOriginChainId The chain id of the origin token. - /// @param _l1Token The address of token on L1. + /// @param _originChainId The chain id of the origin token. 
+ /// @param _nonNativeToken The address of the token on its origin chain. /// @return Address of an L2 token counterpart. function calculateCreate2TokenAddress( - uint256 _tokenOriginChainId, - address _l1Token + uint256 _originChainId, + address _nonNativeToken ) public view virtual override(INativeTokenVault, NativeTokenVault) returns (address) { - if (address(L2_LEGACY_SHARED_BRIDGE) != address(0) && _tokenOriginChainId == L1_CHAIN_ID) { - return L2_LEGACY_SHARED_BRIDGE.l2TokenAddress(_l1Token); + if (address(L2_LEGACY_SHARED_BRIDGE) != address(0)) { + return L2_LEGACY_SHARED_BRIDGE.l2TokenAddress(_nonNativeToken); } else { bytes32 constructorInputHash = keccak256(abi.encode(address(bridgedTokenBeacon), "")); - bytes32 salt = _getCreate2Salt(_tokenOriginChainId, _l1Token); + bytes32 salt = _getCreate2Salt(_originChainId, _nonNativeToken); return L2ContractHelper.computeCreate2Address( address(this), salt, - l2TokenProxyBytecodeHash, + L2_TOKEN_PROXY_BYTECODE_HASH, constructorInputHash ); } diff --git a/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol b/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol index ae146930d..1ee41fdf6 100644 --- a/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol +++ b/l1-contracts/contracts/bridge/ntv/NativeTokenVault.sol @@ -19,7 +19,7 @@ import {DataEncoding} from "../../common/libraries/DataEncoding.sol"; import {BridgedStandardERC20} from "../BridgedStandardERC20.sol"; import {BridgeHelper} from "../BridgeHelper.sol"; -import {DeployingBridgedTokenForNativeToken, EmptyDeposit, Unauthorized, TokensWithFeesNotSupported, TokenNotSupported, NonEmptyMsgValue, ValueMismatch, AddressMismatch, AssetIdMismatch, AmountMustBeGreaterThanZero, ZeroAddress} from "../../common/L1ContractErrors.sol"; +import {AssetIdAlreadyRegistered, DeployingBridgedTokenForNativeToken, EmptyDeposit, Unauthorized, TokensWithFeesNotSupported, TokenNotSupported, NonEmptyMsgValue, ValueMismatch, AddressMismatch, AssetIdMismatch, AmountMustBeGreaterThanZero, ZeroAddress} from "../../common/L1ContractErrors.sol"; import {EmptyToken} from "../L1BridgeContractErrors.sol"; /// @author Matter Labs @@ -45,8 +45,8 @@ abstract contract NativeTokenVault is INativeTokenVault, IAssetHandler, Ownable2 /// @dev For more details see https://docs.openzeppelin.com/contracts/3.x/api/proxy#UpgradeableBeacon.
IBeacon public bridgedTokenBeacon; - /// @dev A mapping assetId => tokenAddress - mapping(bytes32 assetId => uint256 chainId) public originChainId; + /// @dev A mapping assetId => originChainId + mapping(bytes32 assetId => uint256 originChainId) public originChainId; /// @dev A mapping assetId => tokenAddress mapping(bytes32 assetId => address tokenAddress) public tokenAddress; @@ -93,6 +93,9 @@ if (_nativeToken.code.length == 0) { revert EmptyToken(); } + if (assetId[_nativeToken] != bytes32(0)) { + revert AssetIdAlreadyRegistered(); + } _unsafeRegisterNativeToken(_nativeToken); } @@ -236,8 +239,8 @@ _bridgeMintData = DataEncoding.encodeBridgeMintData({ _originalCaller: _originalCaller, - _l2Receiver: _receiver, - _l1Token: originToken, + _remoteReceiver: _receiver, + _originToken: originToken, _amount: _amount, _erc20Metadata: erc20Metadata }); @@ -292,8 +295,8 @@ } _bridgeMintData = DataEncoding.encodeBridgeMintData({ _originalCaller: _originalCaller, - _l2Receiver: _receiver, - _l1Token: nativeToken, + _remoteReceiver: _receiver, + _originToken: nativeToken, _amount: amount, _erc20Metadata: erc20Metadata }); @@ -336,10 +339,10 @@ /// @param _nativeToken The address of the token to be registered. function _unsafeRegisterNativeToken(address _nativeToken) internal { bytes32 newAssetId = DataEncoding.encodeNTVAssetId(block.chainid, _nativeToken); - ASSET_ROUTER.setAssetHandlerAddressThisChain(bytes32(uint256(uint160(_nativeToken))), address(this)); tokenAddress[newAssetId] = _nativeToken; assetId[_nativeToken] = newAssetId; originChainId[newAssetId] = block.chainid; + ASSET_ROUTER.setAssetHandlerAddressThisChain(bytes32(uint256(uint160(_nativeToken))), address(this)); } function _handleChainBalanceIncrease( diff --git a/l1-contracts/contracts/bridgehub/Bridgehub.sol b/l1-contracts/contracts/bridgehub/Bridgehub.sol index 4ac6fc5fa..4ae2a4c96 100644 --- a/l1-contracts/contracts/bridgehub/Bridgehub.sol +++ b/l1-contracts/contracts/bridgehub/Bridgehub.sol @@ -26,7 +26,7 @@ import {NoCTMForAssetId, MigrationPaused, AssetIdAlreadyRegistered, CTMNotRegist /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev -/// @dev The Bridgehub contract serves as the primary entry point for L1<->L2 communication, +/// @dev The Bridgehub contract serves as the primary entry point for L1->L2 communication, /// facilitating interactions between end user and bridges. /// It also manages state transition managers, base tokens, and chain registrations. /// Bridgehub is also an IL1AssetHandler for the chains themselves, which is used to migrate the chains @@ -45,7 +45,7 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus /// This is the temporary security measure. uint256 public immutable MAX_NUMBER_OF_ZK_CHAINS; - /// @notice all the ether and ERC20 tokens are held by NativeVaultToken managed by this shared Bridge. + /// @notice all the ether and ERC20 tokens are held by the NativeTokenVault managed by the asset router. address public assetRouter; /// @notice ChainTypeManagers that are registered, and ZKchains that use these CTMs can use this bridgehub as settlement layer.
@@ -109,13 +109,6 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus _; } - modifier onlyChainCTM(uint256 _chainId) { - if (msg.sender != chainTypeManager[_chainId]) { - revert Unauthorized(msg.sender); - } - _; - } - modifier onlyL1() { if (L1_CHAIN_ID != block.chainid) { revert NotL1(L1_CHAIN_ID, block.chainid); @@ -252,12 +245,12 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus if (chainAddress == address(0)) { revert ChainNotPresentInCTM(); } - _registerNewZKChain(_chainId, chainAddress); + _registerNewZKChain(_chainId, chainAddress, false); } //// Registry - /// @notice State Transition can be any contract with the appropriate interface/functionality + /// @notice Chain Type Manager can be any contract with the appropriate interface/functionality /// @param _chainTypeManager the state transition manager address to be added function addChainTypeManager(address _chainTypeManager) external onlyOwner { if (_chainTypeManager == address(0)) { @@ -271,8 +264,8 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus emit ChainTypeManagerAdded(_chainTypeManager); } - /// @notice State Transition can be any contract with the appropriate interface/functionality - /// @notice this stops new Chains from using the STF, old chains are not affected + /// @notice Chain Type Manager can be any contract with the appropriate interface/functionality + /// @notice this stops new Chains from using the CTM, old chains are not affected /// @param _chainTypeManager the state transition manager address to be removed function removeChainTypeManager(address _chainTypeManager) external onlyOwner { if (_chainTypeManager == address(0)) { @@ -311,7 +304,7 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus /// @dev Used to set the assetAddress for a given assetInfo. /// @param _additionalData the additional data to identify the asset /// @param _assetAddress the asset handler address - function setAssetHandlerAddress(bytes32 _additionalData, address _assetAddress) external { + function setCTMAssetAddress(bytes32 _additionalData, address _assetAddress) external { // It is a simplified version of the logic used by the AssetRouter to manage asset handlers. // CTM's assetId is `keccak256(abi.encode(L1_CHAIN_ID, l1CtmDeployer, ctmAddress))`. // And the l1CtmDeployer is considered the deployment tracker for the CTM asset. 
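For reference, a sketch of the ctm asset id that `setCTMAssetAddress` derives per the comment above, with `l1CtmDeployer` in the deployment-tracker role (a sketch of the stated formula, not the Bridgehub implementation):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

/// ctmAssetId = keccak256(abi.encode(L1_CHAIN_ID, l1CtmDeployer, additionalData)),
/// where additionalData carries the CTM address widened to bytes32.
function ctmAssetIdSketch(
    uint256 l1ChainId,
    address l1CtmDeployer,
    address ctmAddress
) pure returns (bytes32) {
    bytes32 additionalData = bytes32(uint256(uint160(ctmAddress)));
    return keccak256(abi.encode(l1ChainId, l1CtmDeployer, additionalData));
}
```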
@@ -332,10 +325,10 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus revert CTMNotRegistered(); } - bytes32 assetInfo = keccak256(abi.encode(L1_CHAIN_ID, sender, _additionalData)); - ctmAssetIdToAddress[assetInfo] = _assetAddress; - ctmAssetIdFromAddress[_assetAddress] = assetInfo; - emit AssetRegistered(assetInfo, _assetAddress, _additionalData, msg.sender); + bytes32 ctmAssetId = keccak256(abi.encode(L1_CHAIN_ID, sender, _additionalData)); + ctmAssetIdToAddress[ctmAssetId] = _assetAddress; + ctmAssetIdFromAddress[_assetAddress] = ctmAssetId; + emit AssetRegistered(ctmAssetId, _assetAddress, _additionalData, msg.sender); } /*////////////////////////////////////////////////////////////// @@ -375,18 +368,26 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus _initData: _initData, _factoryDeps: _factoryDeps }); - _registerNewZKChain(_chainId, chainAddress); + _registerNewZKChain(_chainId, chainAddress, true); messageRoot.addNewChain(_chainId); emit NewChain(_chainId, _chainTypeManager, _admin); return _chainId; } - /// @dev This internal function is used to register a new zkChain in the system. - function _registerNewZKChain(uint256 _chainId, address _zkChain) internal { + /// @notice This internal function is used to register a new zkChain in the system. + /// @param _chainId The chain ID of the ZK chain + /// @param _zkChain The address of the ZK chain's DiamondProxy contract. + /// @param _checkMaxNumberOfZKChains Whether to check that the limit for the number + /// of chains has not been exceeded. + /// @dev Providing `_checkMaxNumberOfZKChains = false` may be preferable in cases + /// where we want to guarantee that a chain can be added. These include: + /// - Migration of a chain from the mapping in the old CTM + /// - Migration of a chain to a new settlement layer + function _registerNewZKChain(uint256 _chainId, address _zkChain, bool _checkMaxNumberOfZKChains) internal { // slither-disable-next-line unused-return zkChainMap.set(_chainId, _zkChain); - if (zkChainMap.length() > MAX_NUMBER_OF_ZK_CHAINS) { + if (_checkMaxNumberOfZKChains && zkChainMap.length() > MAX_NUMBER_OF_ZK_CHAINS) { revert ZKChainLimitReached(); } } @@ -447,7 +448,7 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus /// this assumes that either ether is the base token or /// the msg.sender has approved mintValue allowance for the nativeTokenVault. /// This means this is not ideal for contract calls, as the contract would have to handle token allowance of the base Token. - /// In case allowance is provided to the Shared Bridge, then it will be transferred to NTV. + /// If allowance is provided to the Asset Router, it will be transferred to the NTV.
function requestL2TransactionDirect( L2TransactionRequestDirect calldata _request ) external payable override nonReentrant whenNotPaused onlyL1 returns (bytes32 canonicalTxHash) { @@ -771,7 +772,8 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus if (zkChain == address(0)) { revert ChainIdNotRegistered(bridgehubData.chainId); } - _registerNewZKChain(bridgehubData.chainId, zkChain); + // We want to allow any chain to be migrated, so the chain limit is not checked here. + _registerNewZKChain(bridgehubData.chainId, zkChain, false); messageRoot.addNewChain(bridgehubData.chainId); } @@ -839,7 +841,7 @@ contract Bridgehub is IBridgehub, ReentrancyGuard, Ownable2StepUpgradeable, Paus baseTokenAssetId[_chainId] = chainBaseTokenAssetId; settlementLayer[_chainId] = block.chainid; - _registerNewZKChain(_chainId, _zkChain); + _registerNewZKChain(_chainId, _zkChain, true); messageRoot.addNewChain(_chainId); emit NewChain(_chainId, ctm, chainAdmin); diff --git a/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol b/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol index 09fae9f31..856fe6e8b 100644 --- a/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol +++ b/l1-contracts/contracts/bridgehub/CTMDeploymentTracker.sol @@ -3,7 +3,6 @@ pragma solidity 0.8.24; import {Ownable2StepUpgradeable} from "@openzeppelin/contracts-upgradeable-v4/access/Ownable2StepUpgradeable.sol"; -import {PausableUpgradeable} from "@openzeppelin/contracts-upgradeable-v4/security/PausableUpgradeable.sol"; import {IBridgehub, L2TransactionRequestTwoBridgesInner} from "./IBridgehub.sol"; import {ICTMDeploymentTracker} from "./ICTMDeploymentTracker.sol"; @@ -21,11 +20,11 @@ bytes1 constant CTM_DEPLOYMENT_TRACKER_ENCODING_VERSION = 0x01; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev /// @dev Contract to be deployed on L1, can link together other contracts based on AssetInfo. -contract CTMDeploymentTracker is ICTMDeploymentTracker, ReentrancyGuard, Ownable2StepUpgradeable, PausableUpgradeable { +contract CTMDeploymentTracker is ICTMDeploymentTracker, ReentrancyGuard, Ownable2StepUpgradeable { /// @dev Bridgehub smart contract that is used to operate with L2 via asynchronous L2 <-> L1 communication. IBridgehub public immutable override BRIDGE_HUB; - /// @dev Bridgehub smart contract that is used to operate with L2 via asynchronous L2 <-> L1 communication. + /// @dev L1AssetRouter smart contract that is used to bridge assets (including chains) between L1 and L2. IAssetRouterBase public immutable override L1_ASSET_ROUTER; /// @notice Checks that the message sender is the bridgehub. @@ -46,10 +45,10 @@ contract CTMDeploymentTracker is ICTMDeploymentTracker, ReentrancyGuard, Ownable /// @dev Contract is expected to be used as proxy implementation on L1. /// @dev Initialize the implementation to prevent Parity hack.
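The two call sites above show the convention behind the new boolean: chain creation passes `true` so `MAX_NUMBER_OF_ZK_CHAINS` is enforced, while migration passes `false` so a chain that already exists elsewhere can always complete its move. A stubbed sketch of that convention (illustrative only, not the Bridgehub code):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

contract ChainRegistrySketch {
    uint256 internal constant MAX_CHAINS = 100;
    uint256 internal chainCount;

    error ZKChainLimitReached();

    function _register(uint256, address, bool checkCap) internal {
        chainCount += 1;
        // The cap only guards newly created chains; migrations skip it so a chain
        // registered under the old CTM or another settlement layer is never locked out.
        if (checkCap && chainCount > MAX_CHAINS) {
            revert ZKChainLimitReached();
        }
    }

    function createChain(uint256 chainId, address zkChain) external {
        _register(chainId, zkChain, true);
    }

    function acceptMigratedChain(uint256 chainId, address zkChain) external {
        _register(chainId, zkChain, false);
    }
}
```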
- constructor(IBridgehub _bridgehub, IAssetRouterBase _sharedBridge) reentrancyGuardInitializer { + constructor(IBridgehub _bridgehub, IAssetRouterBase _l1AssetRouter) reentrancyGuardInitializer { _disableInitializers(); BRIDGE_HUB = _bridgehub; - L1_ASSET_ROUTER = _sharedBridge; + L1_ASSET_ROUTER = _l1AssetRouter; } /// @notice used to initialize the contract @@ -65,7 +64,7 @@ contract CTMDeploymentTracker is ICTMDeploymentTracker, ReentrancyGuard, Ownable revert CTMNotRegistered(); } L1_ASSET_ROUTER.setAssetHandlerAddressThisChain(bytes32(uint256(uint160(_ctmAddress))), address(BRIDGE_HUB)); - BRIDGE_HUB.setAssetHandlerAddress(bytes32(uint256(uint160(_ctmAddress))), _ctmAddress); + BRIDGE_HUB.setCTMAssetAddress(bytes32(uint256(uint160(_ctmAddress))), _ctmAddress); } /// @notice The function responsible for registering the L2 counterpart of a CTM asset on the L2 Bridgehub. @@ -135,7 +134,7 @@ contract CTMDeploymentTracker is ICTMDeploymentTracker, ReentrancyGuard, Ownable address _ctmL2Address ) internal pure returns (L2TransactionRequestTwoBridgesInner memory request) { bytes memory l2TxCalldata = abi.encodeCall( - IBridgehub.setAssetHandlerAddress, + IBridgehub.setCTMAssetAddress, (bytes32(uint256(uint160(_ctmL1Address))), _ctmL2Address) ); diff --git a/l1-contracts/contracts/bridgehub/IBridgehub.sol b/l1-contracts/contracts/bridgehub/IBridgehub.sol index 8904d77f8..75e24f413 100644 --- a/l1-contracts/contracts/bridgehub/IBridgehub.sol +++ b/l1-contracts/contracts/bridgehub/IBridgehub.sol @@ -225,7 +225,7 @@ interface IBridgehub is IAssetHandler, IL1AssetHandler { function ctmAssetIdToAddress(bytes32 _assetInfo) external view returns (address); - function setAssetHandlerAddress(bytes32 _additionalData, address _assetAddress) external; + function setCTMAssetAddress(bytes32 _additionalData, address _assetAddress) external; function L1_CHAIN_ID() external view returns (uint256); diff --git a/l1-contracts/contracts/bridgehub/IMessageRoot.sol b/l1-contracts/contracts/bridgehub/IMessageRoot.sol index 2e15e6f63..d4a3c7d7b 100644 --- a/l1-contracts/contracts/bridgehub/IMessageRoot.sol +++ b/l1-contracts/contracts/bridgehub/IMessageRoot.sol @@ -4,8 +4,11 @@ pragma solidity 0.8.24; import {IBridgehub} from "./IBridgehub.sol"; -/// @author Matter Labs -/// @custom:security-contact security@matterlabs.dev +/** + * @author Matter Labs + * @notice The MessageRoot contract is responsible for storing and aggregating the batch roots of different chains into a single shared message root. + * @custom:security-contact security@matterlabs.dev + */ interface IMessageRoot { function BRIDGE_HUB() external view returns (IBridgehub); diff --git a/l1-contracts/contracts/bridgehub/MessageRoot.sol b/l1-contracts/contracts/bridgehub/MessageRoot.sol index edb77f276..62303b8dc 100644 --- a/l1-contracts/contracts/bridgehub/MessageRoot.sol +++ b/l1-contracts/contracts/bridgehub/MessageRoot.sol @@ -19,7 +19,7 @@ bytes32 constant CHAIN_TREE_EMPTY_ENTRY_HASH = bytes32( 0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21 ); -// Chain tree consists of batch commitments as their leaves. We use hash of "new bytes(96)" as the hash of an empty leaf. +// The single shared tree consists of the roots of chain trees as its leaves. We use hash of "new bytes(96)" as the hash of an empty leaf.
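The corrected comment describes a two-level structure: each chain keeps a tree over its batch roots, and the shared tree's leaves are those per-chain roots. A toy aggregation sketch under that reading, using plain keccak pair-hashing instead of the repo's incremental trees:

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

function pairHashSketch(bytes32 a, bytes32 b) pure returns (bytes32) {
    return keccak256(abi.encode(a, b));
}

/// chainRootA commits to chain A's batch roots; the shared root then commits
/// to the chain roots themselves.
function sharedRootSketch(bytes32[2] memory batchRootsOfChainA, bytes32 chainRootB) pure returns (bytes32) {
    bytes32 chainRootA = pairHashSketch(batchRootsOfChainA[0], batchRootsOfChainA[1]);
    return pairHashSketch(chainRootA, chainRootB);
}
```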
bytes32 constant SHARED_ROOT_TREE_EMPTY_HASH = bytes32( 0x46700b4d40ac5c35af2c22dda2787a91eb567b06c924a8fb8ae9a05b20c08c21 ); diff --git a/l1-contracts/contracts/common/Config.sol b/l1-contracts/contracts/common/Config.sol index 1aa26ba4f..da2aa92fd 100644 --- a/l1-contracts/contracts/common/Config.sol +++ b/l1-contracts/contracts/common/Config.sol @@ -18,7 +18,6 @@ uint256 constant MAX_L2_TO_L1_LOGS_COMMITMENT_BYTES = 4 + L2_TO_L1_LOG_SERIALIZE /// @dev Actually equal to the `keccak256(new bytes(L2_TO_L1_LOG_SERIALIZE_SIZE))` bytes32 constant L2_L1_LOGS_TREE_DEFAULT_LEAF_HASH = 0x72abee45b59e344af8a6e520241c4744aff26ed411f4c4b00f8af09adada43ba; -// TODO: change constant to the real root hash of empty Merkle tree (SMA-184) bytes32 constant DEFAULT_L2_LOGS_TREE_ROOT_HASH = bytes32(0); /// @dev Denotes the type of the ZKsync transaction that came from L1. @@ -112,6 +111,10 @@ bytes32 constant TWO_BRIDGES_MAGIC_VALUE = bytes32(uint256(keccak256("TWO_BRIDGE address constant BRIDGEHUB_MIN_SECOND_BRIDGE_ADDRESS = address(uint160(type(uint16).max)); /// @dev the maximum number of supported chains; this is an arbitrary limit. +/// @dev Note that in case of a malicious Bridgehub admin, the total number of chains +/// can be up to 2 times higher. This can happen if the old ChainTypeManager +/// had `100` chains and these were migrated to the Bridgehub only after `MAX_NUMBER_OF_ZK_CHAINS` +/// chains were added to the bridgehub via creation of new chains. uint256 constant MAX_NUMBER_OF_ZK_CHAINS = 100; /// @dev Used as the `msg.sender` for transactions that are relayed via a settlement layer. diff --git a/l1-contracts/contracts/common/L1ContractErrors.sol b/l1-contracts/contracts/common/L1ContractErrors.sol index cb0481f68..5aa1c0a50 100644 --- a/l1-contracts/contracts/common/L1ContractErrors.sol +++ b/l1-contracts/contracts/common/L1ContractErrors.sol @@ -223,6 +223,8 @@ error ProtocolIdNotGreater(); error ProtocolVersionMinorDeltaTooBig(uint256 limit, uint256 proposed); // 0x88d7b498 error ProtocolVersionTooSmall(); +// 0x53dee67b +error PubdataCommitmentsEmpty(); // 0x959f26fb error PubdataGreaterThanLimit(uint256 limit, uint256 length); // 0x63c36549 diff --git a/l1-contracts/contracts/common/L2ContractAddresses.sol b/l1-contracts/contracts/common/L2ContractAddresses.sol index a8fba013c..2f656acd9 100644 --- a/l1-contracts/contracts/common/L2ContractAddresses.sol +++ b/l1-contracts/contracts/common/L2ContractAddresses.sol @@ -38,7 +38,7 @@ address constant L2_COMPLEX_UPGRADER_ADDR = address(0x800f); /// @dev The address used to execute the genesis upgrade address constant L2_GENESIS_UPGRADE_ADDR = address(0x10001); -/// @dev The address of the L2 bridge hub system contract, used to start L2<>L2 transactions +/// @dev The address of the L2 bridge hub system contract, used to start L1->L2 transactions address constant L2_BRIDGEHUB_ADDR = address(0x10002); /// @dev the address of the l2 asset router. @@ -69,15 +69,12 @@ interface IL2Messenger { /// if the assetId can be calculated with this address then it is in fact an NTV asset address constant L2_NATIVE_TOKEN_VAULT_ADDR = address(0x10004); -/// @dev the address of the l2 asse3t router. +/// @dev the address of the l2 asset router.
address constant L2_MESSAGE_ROOT_ADDR = address(0x10005); /// @dev the offset for the system contracts uint160 constant SYSTEM_CONTRACTS_OFFSET = 0x8000; // 2^15 -/// @dev the address of the deployer system contract -address constant DEPLOYER_SYSTEM_CONTRACT = address(SYSTEM_CONTRACTS_OFFSET + 0x06); - /// @dev the address of the l2 messenger system contract IL2Messenger constant L2_MESSENGER = IL2Messenger(address(SYSTEM_CONTRACTS_OFFSET + 0x08)); diff --git a/l1-contracts/contracts/common/libraries/DataEncoding.sol b/l1-contracts/contracts/common/libraries/DataEncoding.sol index 6d3611109..f4aaff8a0 100644 --- a/l1-contracts/contracts/common/libraries/DataEncoding.sol +++ b/l1-contracts/contracts/common/libraries/DataEncoding.sol @@ -15,27 +15,27 @@ import {UnsupportedEncodingVersion} from "../L1ContractErrors.sol"; library DataEncoding { /// @notice Abi.encodes the data required for bridgeMint on remote chain. /// @param _originalCaller The address which initiated the transfer. - /// @param _l2Receiver The address which to receive tokens on remote chain. - /// @param _l1Token The transferred token address. + /// @param _remoteReceiver The address that will receive the tokens on the remote chain. + /// @param _originToken The transferred token address. /// @param _amount The amount of token to be transferred. /// @param _erc20Metadata The transferred token metadata. /// @return The encoded bridgeMint data function encodeBridgeMintData( address _originalCaller, - address _l2Receiver, - address _l1Token, + address _remoteReceiver, + address _originToken, uint256 _amount, bytes memory _erc20Metadata ) internal pure returns (bytes memory) { // solhint-disable-next-line func-named-parameters - return abi.encode(_originalCaller, _l2Receiver, _l1Token, _amount, _erc20Metadata); + return abi.encode(_originalCaller, _remoteReceiver, _originToken, _amount, _erc20Metadata); } /// @notice Function decoding transfer data previously encoded with this library. /// @param _bridgeMintData The encoded bridgeMint data /// @return _originalCaller The address which initiated the transfer. - /// @return _l2Receiver The address which to receive tokens on remote chain. - /// @return _parsedL1Token The transferred token address. + /// @return _remoteReceiver The address that will receive the tokens on the remote chain. + /// @return _parsedOriginToken The transferred token address. /// @return _amount The amount of token to be transferred. /// @return _erc20Metadata The transferred token metadata. function decodeBridgeMintData( @@ -45,13 +45,13 @@ library DataEncoding { pure returns ( address _originalCaller, - address _l2Receiver, - address _parsedL1Token, + address _remoteReceiver, + address _parsedOriginToken, uint256 _amount, bytes memory _erc20Metadata ) { - (_originalCaller, _l2Receiver, _parsedL1Token, _amount, _erc20Metadata) = abi.decode( + (_originalCaller, _remoteReceiver, _parsedOriginToken, _amount, _erc20Metadata) = abi.decode( _bridgeMintData, (address, address, address, uint256, bytes) ); @@ -68,11 +68,11 @@ library DataEncoding { /// @notice Encodes the asset data by combining chain id, asset deployment tracker and asset data. /// @param _chainId The id of the chain token is native to. - /// @param _tokenAaddress The address of token that has to be encoded (asset data is the address itself). + /// @param _tokenAddress The address of token that has to be encoded (asset data is the address itself). /// @param _sender The asset deployment tracker address. /// @return The encoded asset data.
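Since the pair above is a plain `abi.encode`/`abi.decode` inverse over a fixed five-field tuple, a round trip preserves every field. A self-contained sketch (all values illustrative):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

function bridgeMintRoundTripSketch() pure returns (bool) {
    address originalCaller = address(0xA11CE);
    address remoteReceiver = address(0xB0B);
    address originToken = address(0x7C7);
    uint256 amount = 1 ether;
    bytes memory erc20Metadata = abi.encode("Example", "EXM", uint8(18));

    // encodeBridgeMintData
    bytes memory payload = abi.encode(originalCaller, remoteReceiver, originToken, amount, erc20Metadata);

    // decodeBridgeMintData
    (address c, address r, address t, uint256 a, bytes memory m) = abi.decode(
        payload,
        (address, address, address, uint256, bytes)
    );
    return
        c == originalCaller &&
        r == remoteReceiver &&
        t == originToken &&
        a == amount &&
        keccak256(m) == keccak256(erc20Metadata);
}
```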
- function encodeAssetId(uint256 _chainId, address _tokenAaddress, address _sender) internal pure returns (bytes32) { - return keccak256(abi.encode(_chainId, _sender, _tokenAaddress)); + function encodeAssetId(uint256 _chainId, address _tokenAddress, address _sender) internal pure returns (bytes32) { + return keccak256(abi.encode(_chainId, _sender, _tokenAddress)); } /// @notice Encodes the asset data by combining chain id, NTV as asset deployment tracker and asset data. @@ -83,7 +83,7 @@ library DataEncoding { return keccak256(abi.encode(_chainId, L2_NATIVE_TOKEN_VAULT_ADDR, _assetData)); } - /// @notice Encodes the asset data by combining chain id, NTV as asset deployment tracker and asset data. + /// @notice Encodes the asset data by combining chain id, NTV as asset deployment tracker and token address. /// @param _chainId The id of the chain token is native to. /// @param _tokenAddress The address of token that has to be encoded (asset data is the address itself). /// @return The encoded asset data. @@ -148,7 +148,8 @@ library DataEncoding { } } - /// @notice Encodes the token data by combining chain id, asset deployment tracker and asset data. + /// @notice Encodes the token data by combining the chain id and the token's metadata. + /// @dev Note that all the metadata of the token is expected to be ABI encoded. /// @param _chainId The id of the chain token is native to. /// @param _name The name of the token. /// @param _symbol The symbol of the token. diff --git a/l1-contracts/contracts/common/libraries/FullMerkle.sol b/l1-contracts/contracts/common/libraries/FullMerkle.sol index 829b2dc53..4dbab106b 100644 --- a/l1-contracts/contracts/common/libraries/FullMerkle.sol +++ b/l1-contracts/contracts/common/libraries/FullMerkle.sol @@ -19,11 +19,9 @@ library FullMerkle { } /** - * @dev Initialize a {Bytes32PushTree} using {Merkle.efficientHash} to hash internal nodes. + * @dev Initialize a {FullTree} using {Merkle.efficientHash} to hash internal nodes. * The capacity of the tree (i.e. number of leaves) is set to `2**levels`. * - * Calling this function on MerkleTree that was already setup and used will reset it to a blank state. - * * IMPORTANT: The zero value should be carefully chosen since it will be stored in the tree representing * empty leaves. It should be a value that is not expected to be part of the tree. * @param zero The zero value to be used in the tree. diff --git a/l1-contracts/contracts/common/libraries/Merkle.sol b/l1-contracts/contracts/common/libraries/Merkle.sol index 4f405b791..457d6342d 100644 --- a/l1-contracts/contracts/common/libraries/Merkle.sol +++ b/l1-contracts/contracts/common/libraries/Merkle.sol @@ -12,6 +12,7 @@ library Merkle { /// @dev Calculate Merkle root by the provided Merkle proof. /// NOTE: When using this function, check that the _path length is equal to the tree height to prevent shorter/longer paths attack + /// However, for chains settling on GW, the proof includes the GW proof, so the path length increases. See Mailbox for more details.
/// @param _path Merkle path from the leaf to the root /// @param _index Leaf index in the tree /// @param _itemHash Hash of leaf content diff --git a/l1-contracts/contracts/common/libraries/SystemContractsCaller.sol b/l1-contracts/contracts/common/libraries/SystemContractsCaller.sol index b6bf0c54a..30dbf3a81 100644 --- a/l1-contracts/contracts/common/libraries/SystemContractsCaller.sol +++ b/l1-contracts/contracts/common/libraries/SystemContractsCaller.sol @@ -48,7 +48,7 @@ library SystemContractsCaller { assembly { dataStart := add(data, 0x20) } - uint32 dataLength = uint32(Utils.safeCastToU32(data.length)); + uint32 dataLength = Utils.safeCastToU32(data.length); uint256 farCallAbi = getFarCallABI({ dataOffset: 0, diff --git a/l1-contracts/contracts/common/libraries/UnsafeBytes.sol b/l1-contracts/contracts/common/libraries/UnsafeBytes.sol index 4edf94004..e2680d9e0 100644 --- a/l1-contracts/contracts/common/libraries/UnsafeBytes.sol +++ b/l1-contracts/contracts/common/libraries/UnsafeBytes.sol @@ -30,13 +30,6 @@ library UnsafeBytes { } } - function readUint128(bytes memory _bytes, uint256 _start) internal pure returns (uint128 result, uint256 offset) { - assembly { - offset := add(_start, 16) - result := mload(add(_bytes, offset)) - } - } - function readUint256(bytes memory _bytes, uint256 _start) internal pure returns (uint256 result, uint256 offset) { assembly { offset := add(_start, 32) diff --git a/l1-contracts/contracts/dev-contracts/test/CustomUpgradeTest.sol b/l1-contracts/contracts/dev-contracts/test/CustomUpgradeTest.sol index 50fbd924a..e20d44863 100644 --- a/l1-contracts/contracts/dev-contracts/test/CustomUpgradeTest.sol +++ b/l1-contracts/contracts/dev-contracts/test/CustomUpgradeTest.sol @@ -25,7 +25,7 @@ contract CustomUpgradeTest is BaseZkSyncUpgrade { /// upgrade. function _postUpgrade(bytes calldata _customCallDataForUpgrade) internal override {} - /// @notice The main function that will be called by the upgrade proxy. + /// @notice The main function that will be delegate-called by the chain. /// @param _proposedUpgrade The upgrade to be executed. 
function upgrade(ProposedUpgrade calldata _proposedUpgrade) public override returns (bytes32) { (uint32 newMinorVersion, bool isPatchOnly) = _setNewProtocolVersion(_proposedUpgrade.newProtocolVersion); diff --git a/l1-contracts/contracts/dev-contracts/test/DummyBridgehubSetter.sol b/l1-contracts/contracts/dev-contracts/test/DummyBridgehubSetter.sol index 8ae0404e7..a84f476f1 100644 --- a/l1-contracts/contracts/dev-contracts/test/DummyBridgehubSetter.sol +++ b/l1-contracts/contracts/dev-contracts/test/DummyBridgehubSetter.sol @@ -16,7 +16,7 @@ contract DummyBridgehubSetter is Bridgehub { ) Bridgehub(_l1ChainId, _owner, _maxNumberOfZKChains) {} function setZKChain(uint256 _chainId, address _zkChain) external { - _registerNewZKChain(_chainId, _zkChain); + _registerNewZKChain(_chainId, _zkChain, true); } function setCTM(uint256 _chainId, address _ctm) external { diff --git a/l1-contracts/contracts/dev-contracts/test/DummySharedBridge.sol b/l1-contracts/contracts/dev-contracts/test/DummySharedBridge.sol index c75ec4530..0033fac3c 100644 --- a/l1-contracts/contracts/dev-contracts/test/DummySharedBridge.sol +++ b/l1-contracts/contracts/dev-contracts/test/DummySharedBridge.sol @@ -10,7 +10,6 @@ import {TWO_BRIDGES_MAGIC_VALUE, ETH_TOKEN_ADDRESS} from "../../common/Config.so import {IL1NativeTokenVault} from "../../bridge/ntv/L1NativeTokenVault.sol"; import {L2_NATIVE_TOKEN_VAULT_ADDR} from "../../common/L2ContractAddresses.sol"; import {SafeERC20} from "@openzeppelin/contracts-v4/token/ERC20/utils/SafeERC20.sol"; -import {IL2Bridge} from "../../bridge/interfaces/IL2Bridge.sol"; import {IL2SharedBridgeLegacy} from "../../bridge/interfaces/IL2SharedBridgeLegacy.sol"; import {IL2SharedBridgeLegacyFunctions} from "../../bridge/interfaces/IL2SharedBridgeLegacyFunctions.sol"; diff --git a/l1-contracts/contracts/dev-contracts/test/L2NativeTokenVaultDev.sol b/l1-contracts/contracts/dev-contracts/test/L2NativeTokenVaultDev.sol index 46960489c..896197fea 100644 --- a/l1-contracts/contracts/dev-contracts/test/L2NativeTokenVaultDev.sol +++ b/l1-contracts/contracts/dev-contracts/test/L2NativeTokenVaultDev.sol @@ -59,7 +59,7 @@ contract L2NativeTokenVaultDev is L2NativeTokenVault { tokenBeacon.transferOwnership(owner()); bridgedTokenBeacon = IBeacon(address(tokenBeacon)); - emit L2TokenBeaconUpdated(address(bridgedTokenBeacon), l2TokenProxyBytecodeHash); + emit L2TokenBeaconUpdated(address(bridgedTokenBeacon), L2_TOKEN_PROXY_BYTECODE_HASH); } function test() external pure { diff --git a/l1-contracts/contracts/governance/AccessControlRestriction.sol b/l1-contracts/contracts/governance/AccessControlRestriction.sol index bdadac713..6052d1e6a 100644 --- a/l1-contracts/contracts/governance/AccessControlRestriction.sol +++ b/l1-contracts/contracts/governance/AccessControlRestriction.sol @@ -11,14 +11,14 @@ import {Call} from "./Common.sol"; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev /// @notice The Restriction that is designed to provide the access control logic for the `ChainAdmin` contract. -/// @dev It inherits from `AccessControlDefaultAdminRules` without overriding `_setRoleAdmin` functionaity. In other +/// @dev It inherits from `AccessControlDefaultAdminRules` without overriding `_setRoleAdmin` functionality. In other /// words, the `DEFAULT_ADMIN_ROLE` is the only role that can manage roles. This is done for simplicity. /// @dev An instance of this restriction should be deployed separately for each `ChainAdmin` contract. 
/// @dev IMPORTANT: this function does not validate the ability of the invoker to use `msg.value`. Thus, /// either all callers with access to functions should be trusted to not steal ETH from the `ChainAdmin` account -/// or not ETH should be passively stored in `ChainAdmin` account. +/// or no ETH should be passively stored in `ChainAdmin` account. contract AccessControlRestriction is Restriction, IAccessControlRestriction, AccessControlDefaultAdminRules { - /// @notice Required roles to call a specific functions. + /// @notice Required roles to call a specific function. /// @dev Note, that the role 0 means the `DEFAULT_ADMIN_ROLE` from the `AccessControlDefaultAdminRules` contract. mapping(address target => mapping(bytes4 selector => bytes32 requiredRole)) public requiredRoles; diff --git a/l1-contracts/contracts/governance/ChainAdmin.sol b/l1-contracts/contracts/governance/ChainAdmin.sol index f661a72df..25a30c498 100644 --- a/l1-contracts/contracts/governance/ChainAdmin.sol +++ b/l1-contracts/contracts/governance/ChainAdmin.sol @@ -22,6 +22,13 @@ import {ReentrancyGuard} from "../common/ReentrancyGuard.sol"; contract ChainAdmin is IChainAdmin, ReentrancyGuard { using EnumerableSet for EnumerableSet.AddressSet; + /// @notice Mapping of protocol versions to their expected upgrade timestamps. + /// @dev Needed for the offchain node administration to know when to start building batches with the new protocol version. + mapping(uint256 protocolVersion => uint256 upgradeTimestamp) public protocolVersionToUpgradeTimestamp; + + /// @notice The set of active restrictions. + EnumerableSet.AddressSet internal activeRestrictions; + /// @notice Ensures that only the `ChainAdmin` contract itself can call the function. /// @dev All functions that require access-control should use `onlySelf` modifier, while the access control logic /// should be implemented in the restriction contracts. @@ -40,13 +47,6 @@ contract ChainAdmin is IChainAdmin, ReentrancyGuard { } } - /// @notice Mapping of protocol versions to their expected upgrade timestamps. - /// @dev Needed for the offchain node administration to know when to start building batches with the new protocol version. - mapping(uint256 protocolVersion => uint256 upgradeTimestamp) public protocolVersionToUpgradeTimestamp; - - /// @notice The set of active restrictions. - EnumerableSet.AddressSet internal activeRestrictions; - /// @notice Returns the list of active restrictions. function getRestrictions() public view returns (address[] memory) { return activeRestrictions.values(); @@ -107,9 +107,9 @@ contract ChainAdmin is IChainAdmin, ReentrancyGuard { /// @dev Contract might receive/hold ETH as part of the maintenance process. receive() external payable {} - /// @notice Function that ensures that the current admin can perform the call. - /// @dev Reverts in case the call can not be performed. Successfully executes otherwise. - function _validateCall(Call calldata _call) internal view { + /// @notice Function that ensures that the current admin can perform the call. + /// @dev Reverts in case the call cannot be performed; executes successfully otherwise. + function _validateCall(Call calldata _call) private view { address[] memory restrictions = getRestrictions(); unchecked { @@ -121,7 +121,7 @@ contract ChainAdmin is IChainAdmin, ReentrancyGuard { /// @notice Adds a new restriction to the active restrictions set. /// @param _restriction The address of the restriction contract to be added.
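`_validateCall` above treats each active restriction as a veto: a restriction reverts to block a call and returns normally to allow it. A minimal restriction compatible with that loop, shown below (the `Call` shape is assumed from `governance/Common.sol`):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

// Assumed shape of the Call struct from governance/Common.sol.
struct Call {
    address target;
    uint256 value;
    bytes data;
}

/// A veto-style restriction: reverting blocks the call, returning allows it.
contract DenyTargetRestrictionSketch {
    error CallBlocked(address target);

    address public immutable FORBIDDEN_TARGET;

    constructor(address _forbiddenTarget) {
        FORBIDDEN_TARGET = _forbiddenTarget;
    }

    function validateCall(Call calldata _call, address) external view {
        if (_call.target == FORBIDDEN_TARGET) {
            revert CallBlocked(_call.target);
        }
        // Falling through without reverting means this restriction allows the call.
    }
}
```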
- function _addRestriction(address _restriction) internal { + function _addRestriction(address _restriction) private { RestrictionValidator.validateRestriction(_restriction); if (!activeRestrictions.add(_restriction)) { diff --git a/l1-contracts/contracts/governance/L2AdminFactory.sol b/l1-contracts/contracts/governance/L2AdminFactory.sol index 44515c117..c8196c616 100644 --- a/l1-contracts/contracts/governance/L2AdminFactory.sol +++ b/l1-contracts/contracts/governance/L2AdminFactory.sol @@ -35,14 +35,17 @@ contract L2AdminFactory { // Even though the chain admin will likely perform similar checks, // we keep those here just in case, since it is not expensive, while allowing to fail fast. _validateRestrctions(_additionalRestrictions); - address[] memory restrictions = new address[](requiredRestrictions.length + _additionalRestrictions.length); uint256 cachedRequired = requiredRestrictions.length; - for (uint256 i = 0; i < cachedRequired; ++i) { - restrictions[i] = requiredRestrictions[i]; - } uint256 cachedAdditional = _additionalRestrictions.length; - for (uint256 i = 0; i < cachedAdditional; ++i) { - restrictions[requiredRestrictions.length + i] = _additionalRestrictions[i]; + address[] memory restrictions = new address[](cachedRequired + cachedAdditional); + + unchecked { + for (uint256 i = 0; i < cachedRequired; ++i) { + restrictions[i] = requiredRestrictions[i]; + } + for (uint256 i = 0; i < cachedAdditional; ++i) { + restrictions[cachedRequired + i] = _additionalRestrictions[i]; + } } admin = address(new ChainAdmin{salt: _salt}(restrictions)); diff --git a/l1-contracts/contracts/governance/PermanentRestriction.sol b/l1-contracts/contracts/governance/PermanentRestriction.sol index 1e8b6d948..30b586086 100644 --- a/l1-contracts/contracts/governance/PermanentRestriction.sol +++ b/l1-contracts/contracts/governance/PermanentRestriction.sol @@ -43,12 +43,13 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste mapping(bytes allowedCalldata => bool isAllowed) public allowedCalls; /// @notice The mapping of the validated selectors. - mapping(bytes4 selector => bool isValidated) public validatedSelectors; + mapping(bytes4 selector => bool isValidated) public selectorsToValidate; /// @notice The mapping of whitelisted L2 admins. mapping(address adminAddress => bool isWhitelisted) public allowedL2Admins; constructor(IBridgehub _bridgehub, address _l2AdminFactory) { + _disableInitializers(); BRIDGE_HUB = _bridgehub; L2_ADMIN_FACTORY = _l2AdminFactory; } @@ -67,7 +68,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @notice Allows a certain `ChainAdmin` implementation to be used as an admin. /// @param _implementationHash The hash of the implementation code. /// @param _isAllowed The flag that indicates if the implementation is allowed. - function allowAdminImplementation(bytes32 _implementationHash, bool _isAllowed) external onlyOwner { + function setAllowedAdminImplementation(bytes32 _implementationHash, bool _isAllowed) external onlyOwner { allowedAdminImplementations[_implementationHash] = _isAllowed; emit AdminImplementationAllowed(_implementationHash, _isAllowed); @@ -85,8 +86,8 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @notice Allows a certain selector to be validated. /// @param _selector The selector of the function. /// @param _isValidated The flag that indicates if the selector is validated. 
- function setSelectorIsValidated(bytes4 _selector, bool _isValidated) external onlyOwner { - validatedSelectors[_selector] = _isValidated; + function setSelectorShouldBeValidated(bytes4 _selector, bool _isValidated) external onlyOwner { + selectorsToValidate[_selector] = _isValidated; emit SelectorValidationChanged(_selector, _isValidated); } @@ -127,7 +128,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @param _call The call data. /// @dev Note that we do not need to validate the migration to the L1 layer as the admin /// is not changed in this case. - function _validateMigrationToL2(Call calldata _call) internal view { + function _validateMigrationToL2(Call calldata _call) private view { (address admin, bool isMigration) = _getNewAdminFromMigration(_call); if (isMigration) { if (!allowedL2Admins[admin]) { @@ -138,7 +139,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @notice Validates the call as the chain admin /// @param _call The call data. - function _validateAsChainAdmin(Call calldata _call) internal view { + function _validateAsChainAdmin(Call calldata _call) private view { if (!_isAdminOfAChain(_call.target)) { // We only validate calls related to being an admin of a chain return; @@ -157,7 +158,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste return; } - if (!validatedSelectors[selector]) { + if (!selectorsToValidate[selector]) { // The selector is not validated, any data is allowed. return; } @@ -170,7 +171,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @notice Validates the correctness of the new admin. /// @param _call The call data. /// @dev Ensures that the admin has a whitelisted implementation and does not remove this restriction. - function _validateNewAdmin(Call calldata _call) internal view { + function _validateNewAdmin(Call calldata _call) private view { address newChainAdmin = abi.decode(_call.data[4:], (address)); bytes32 implementationCodeHash = newChainAdmin.codehash; @@ -189,7 +190,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @notice Validates the removal of the restriction. /// @param _call The call data. /// @dev Ensures that this restriction is not removed. - function _validateRemoveRestriction(Call calldata _call) internal view { + function _validateRemoveRestriction(Call calldata _call) private view { if (_call.target != msg.sender) { return; } @@ -248,7 +249,7 @@ contract PermanentRestriction is Restriction, IPermanentRestriction, Ownable2Ste /// @return success Whether the `chain` is indeed an address of a ZK Chain. /// @dev Returns a tuple of the chainId and whether the call was successful. /// If the second item is `false`, the caller should ignore the first value. 
- function _getChainIdUnffallibleCall(address _chain) internal view returns (uint256 chainId, bool success) { + function _getChainIdUnffallibleCall(address _chain) private view returns (uint256 chainId, bool success) { bytes4 selector = IGetters.getChainId.selector; assembly { // We use scratch space here, so it is safe diff --git a/l1-contracts/contracts/governance/restriction/IRestriction.sol b/l1-contracts/contracts/governance/restriction/IRestriction.sol index 3d0c21594..9124d1f67 100644 --- a/l1-contracts/contracts/governance/restriction/IRestriction.sol +++ b/l1-contracts/contracts/governance/restriction/IRestriction.sol @@ -18,5 +18,5 @@ interface IRestriction { /// @notice Ensures that the invoker has the required role to call the function. /// @param _call The call data. /// @param _invoker The address of the invoker. - function validateCall(Call calldata _call, address _invoker) external view virtual; + function validateCall(Call calldata _call, address _invoker) external view; } diff --git a/l1-contracts/contracts/state-transition/ChainTypeManager.sol b/l1-contracts/contracts/state-transition/ChainTypeManager.sol index 684c7bd9a..d97f7b8f1 100644 --- a/l1-contracts/contracts/state-transition/ChainTypeManager.sol +++ b/l1-contracts/contracts/state-transition/ChainTypeManager.sol @@ -356,7 +356,6 @@ contract ChainTypeManager is IChainTypeManager, ReentrancyGuard, Ownable2StepUpg return getZKChain(_chainId); } - // check not registered Diamond.DiamondCutData memory diamondCut = abi.decode(_diamondCut, (Diamond.DiamondCutData)); { diff --git a/l1-contracts/contracts/state-transition/IChainTypeManager.sol b/l1-contracts/contracts/state-transition/IChainTypeManager.sol index 8c72c882a..ad81bd16c 100644 --- a/l1-contracts/contracts/state-transition/IChainTypeManager.sol +++ b/l1-contracts/contracts/state-transition/IChainTypeManager.sol @@ -160,8 +160,6 @@ interface IChainTypeManager { function registerSettlementLayer(uint256 _newSettlementLayerChainId, bool _isWhitelisted) external; - event BridgeInitialize(address indexed l1Token, string name, string symbol, uint8 decimals); - function forwardedBridgeBurn( uint256 _chainId, bytes calldata _data diff --git a/l1-contracts/contracts/state-transition/ValidatorTimelock.sol b/l1-contracts/contracts/state-transition/ValidatorTimelock.sol index bd9f075cd..550d39e2c 100644 --- a/l1-contracts/contracts/state-transition/ValidatorTimelock.sol +++ b/l1-contracts/contracts/state-transition/ValidatorTimelock.sol @@ -52,13 +52,9 @@ contract ValidatorTimelock is IExecutor, Ownable2Step { /// @dev The delay between committing and executing batches. uint32 public executionDelay; - /// @dev Era's chainID - uint256 internal immutable ERA_CHAIN_ID; - - constructor(address _initialOwner, uint32 _executionDelay, uint256 _eraChainId) { + constructor(address _initialOwner, uint32 _executionDelay) { _transferOwnership(_initialOwner); executionDelay = _executionDelay; - ERA_CHAIN_ID = _eraChainId; } /// @notice Checks if the caller is the admin of the chain. 
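With `ERA_CHAIN_ID` removed, every `ValidatorTimelock` deployment drops the third constructor argument, as the `GatewayCTMDeployer` hunk below already does. A hedged deployment sketch (the import path is assumed from the repo layout):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

// Import path assumed from the repo layout.
import {ValidatorTimelock} from "contracts/state-transition/ValidatorTimelock.sol";

contract TimelockDeployerSketch {
    function deploy(address initialOwner, uint32 executionDelay) external returns (address) {
        // ERA_CHAIN_ID is gone; only the initial owner and the execution delay remain.
        return address(new ValidatorTimelock(initialOwner, executionDelay));
    }
}
```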
diff --git a/l1-contracts/contracts/state-transition/chain-deps/GatewayCTMDeployer.sol b/l1-contracts/contracts/state-transition/chain-deps/GatewayCTMDeployer.sol index 09b2c7c3b..dbfdf4ba8 100644 --- a/l1-contracts/contracts/state-transition/chain-deps/GatewayCTMDeployer.sol +++ b/l1-contracts/contracts/state-transition/chain-deps/GatewayCTMDeployer.sol @@ -176,7 +176,7 @@ contract GatewayCTMDeployer { }); _deployVerifier(salt, _config.testnetVerifier, contracts); - ValidatorTimelock timelock = new ValidatorTimelock{salt: salt}(address(this), 0, eraChainId); + ValidatorTimelock timelock = new ValidatorTimelock{salt: salt}(address(this), 0); contracts.stateTransition.validatorTimelock = address(timelock); _deployCTM(salt, _config, contracts); diff --git a/l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol b/l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol index 0317f9855..7ea8c9ff9 100644 --- a/l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol +++ b/l1-contracts/contracts/state-transition/chain-deps/facets/Admin.sol @@ -394,7 +394,6 @@ contract AdminFacet is ZKChainBase, IAdmin { } /// @inheritdoc IAdmin - /// @dev Note that this function does not check that the caller is the chain admin. function forwardedBridgeRecoverFailedTransfer( uint256 /* _chainId */, bytes32 /* _assetInfo */, diff --git a/l1-contracts/contracts/state-transition/chain-deps/facets/Mailbox.sol b/l1-contracts/contracts/state-transition/chain-deps/facets/Mailbox.sol index 24c94cae8..87f3496e7 100644 --- a/l1-contracts/contracts/state-transition/chain-deps/facets/Mailbox.sol +++ b/l1-contracts/contracts/state-transition/chain-deps/facets/Mailbox.sol @@ -24,9 +24,7 @@ import {REQUIRED_L2_GAS_PRICE_PER_PUBDATA, L1_GAS_PER_PUBDATA_BYTE, L2_L1_LOGS_T import {L2_BOOTLOADER_ADDRESS, L2_TO_L1_MESSENGER_SYSTEM_CONTRACT_ADDR, L2_BRIDGEHUB_ADDR} from "../../../common/L2ContractAddresses.sol"; import {IL1AssetRouter} from "../../../bridge/asset-router/IL1AssetRouter.sol"; -import {IBridgehub} from "../../../bridgehub/IBridgehub.sol"; -import {IChainTypeManager} from "../../IChainTypeManager.sol"; import {MerklePathEmpty, OnlyEraSupported, BatchNotExecuted, HashedLogIsDefault, BaseTokenGasPriceDenominatorNotSet, TransactionNotAllowed, GasPerPubdataMismatch, TooManyFactoryDeps, MsgValueTooLow} from "../../../common/L1ContractErrors.sol"; import {NotL1, UnsupportedProofMetadataVersion, LocalRootIsZero, LocalRootMustBeZero, NotSettlementLayer, NotHyperchain} from "../../L1StateTransitionErrors.sol"; diff --git a/l1-contracts/contracts/state-transition/chain-interfaces/IAdmin.sol b/l1-contracts/contracts/state-transition/chain-interfaces/IAdmin.sol index 099c744fa..ac5cfcc2d 100644 --- a/l1-contracts/contracts/state-transition/chain-interfaces/IAdmin.sol +++ b/l1-contracts/contracts/state-transition/chain-interfaces/IAdmin.sol @@ -136,8 +136,6 @@ interface IAdmin is IZKChainBase { event NewL2DAValidator(address indexed oldL2DAValidator, address indexed newL2DAValidator); event NewL1DAValidator(address indexed oldL1DAValidator, address indexed newL1DAValidator); - event BridgeInitialize(address indexed l1Token, string name, string symbol, uint8 decimals); - event BridgeMint(address indexed _account, uint256 _amount); /// @dev Similar to IL1AssetHandler interface, used to send chains. 
diff --git a/l1-contracts/contracts/state-transition/chain-interfaces/IL1DAValidator.sol b/l1-contracts/contracts/state-transition/chain-interfaces/IL1DAValidator.sol index a4fe56b01..b5ea1b85c 100644 --- a/l1-contracts/contracts/state-transition/chain-interfaces/IL1DAValidator.sol +++ b/l1-contracts/contracts/state-transition/chain-interfaces/IL1DAValidator.sol @@ -23,7 +23,7 @@ struct L1DAValidatorOutput { interface IL1DAValidator { /// @notice The function that checks the data availability for the given batch input. /// @param _chainId The chain id of the chain that is being committed. - /// @param _chainId The batch number for which the data availability is being checked. + /// @param _batchNumber The batch number for which the data availability is being checked. /// @param _l2DAValidatorOutputHash The hash that was returned by the l2DAValidator. /// @param _operatorDAInput The DA input by the operator provided on L1. /// @param _maxBlobsSupported The maximal number of blobs supported by the chain. diff --git a/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol b/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol index f919d0d8d..0a072638f 100644 --- a/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol +++ b/l1-contracts/contracts/state-transition/data-availability/CalldataDAGateway.sol @@ -5,7 +5,7 @@ pragma solidity 0.8.24; import {CalldataDA, BLOB_COMMITMENT_SIZE, BLOB_SIZE_BYTES} from "./CalldataDA.sol"; import {PubdataTooSmall, PubdataTooLong, InvalidPubdataHash} from "../L1StateTransitionErrors.sol"; -/// @notice Contract that contains the functionality for process the calldata DA. +/// @notice Contract that contains the functionality for processing the calldata DA. /// @dev The expected l2DAValidator that should be used with it is `RollupL2DAValidator`.
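For orientation, a stub of the interface above: a validator that performs no on-chain DA check, in the spirit of a validium (the `L1DAValidatorOutput` field names are assumed from the repo; a real rollup validator must verify blob or calldata commitments instead):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

// Field names assumed from the repo's L1DAValidatorOutput struct.
struct L1DAValidatorOutput {
    bytes32 stateDiffHash;
    bytes32[] blobsLinearHashes;
    bytes32[] blobsOpenedCommitments;
}

/// A validium-style stub: accepts any operator input, returns empty commitments.
contract NoOpL1DAValidatorSketch {
    function checkDA(
        uint256, // _chainId
        uint256, // _batchNumber
        bytes32, // _l2DAValidatorOutputHash
        bytes calldata, // _operatorDAInput
        uint256 _maxBlobsSupported
    ) external pure returns (L1DAValidatorOutput memory output) {
        // Callers expect the arrays to be sized to the supported blob count.
        output.blobsLinearHashes = new bytes32[](_maxBlobsSupported);
        output.blobsOpenedCommitments = new bytes32[](_maxBlobsSupported);
    }
}
```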
abstract contract CalldataDAGateway is CalldataDA { /// @inheritdoc CalldataDA diff --git a/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol b/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol index 1f487851e..53325845a 100644 --- a/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol +++ b/l1-contracts/contracts/state-transition/libraries/PriorityTree.sol @@ -26,7 +26,7 @@ library PriorityTree { DynamicIncrementalMerkle.Bytes32PushTree tree; } - /// @notice Returns zero if and only if no operations were processed from the queue + /// @notice Returns zero if and only if no operations were processed from the tree /// @return Index of the oldest priority operation that wasn't processed yet function getFirstUnprocessedPriorityTx(Tree storage _tree) internal view returns (uint256) { return _tree.startIndex + _tree.unprocessedIndex; diff --git a/l1-contracts/contracts/transactionFilterer/GatewayTransactionFilterer.sol b/l1-contracts/contracts/transactionFilterer/GatewayTransactionFilterer.sol index ebcb37ff8..25d57ce4f 100644 --- a/l1-contracts/contracts/transactionFilterer/GatewayTransactionFilterer.sol +++ b/l1-contracts/contracts/transactionFilterer/GatewayTransactionFilterer.sol @@ -8,7 +8,6 @@ import {ReentrancyGuard} from "../common/ReentrancyGuard.sol"; import {AlreadyWhitelisted, InvalidSelector, NotWhitelisted, ZeroAddress} from "../common/L1ContractErrors.sol"; import {ITransactionFilterer} from "../state-transition/chain-interfaces/ITransactionFilterer.sol"; import {IBridgehub} from "../bridgehub/IBridgehub.sol"; -import {IL2Bridge} from "../bridge/interfaces/IL2Bridge.sol"; import {IAssetRouterBase} from "../bridge/asset-router/IAssetRouterBase.sol"; import {IL2AssetRouter} from "../bridge/asset-router/IL2AssetRouter.sol"; @@ -86,25 +85,22 @@ contract GatewayTransactionFilterer is ITransactionFilterer, ReentrancyGuard, Ow if (IL2AssetRouter.setAssetHandlerAddress.selector == l2TxSelector) { (, bytes32 decodedAssetId, ) = abi.decode(l2Calldata[4:], (uint256, bytes32, address)); - return _checkSTMAssetId(decodedAssetId); + return _checkCTMAssetId(decodedAssetId); } - if ( - IAssetRouterBase.finalizeDeposit.selector != l2TxSelector && - IL2Bridge.finalizeDeposit.selector != l2TxSelector - ) { + if (IAssetRouterBase.finalizeDeposit.selector != l2TxSelector) { revert InvalidSelector(l2TxSelector); } (, bytes32 decodedAssetId, ) = abi.decode(l2Calldata[4:], (uint256, bytes32, bytes)); - return _checkSTMAssetId(decodedAssetId); + return _checkCTMAssetId(decodedAssetId); } return whitelistedSenders[sender]; } - function _checkSTMAssetId(bytes32 assetId) internal view returns (bool) { - address stmAddress = BRIDGE_HUB.ctmAssetIdToAddress(assetId); - return stmAddress != address(0); + function _checkCTMAssetId(bytes32 assetId) internal view returns (bool) { + address ctmAddress = BRIDGE_HUB.ctmAssetIdToAddress(assetId); + return ctmAddress != address(0); } } diff --git a/l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol b/l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol index d79e26725..a619e5311 100644 --- a/l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol +++ b/l1-contracts/contracts/upgrades/BaseZkSyncUpgrade.sol @@ -61,7 +61,7 @@ abstract contract BaseZkSyncUpgrade is ZKChainBase { /// @notice Notifies about complete upgrade event UpgradeComplete(uint256 indexed newProtocolVersion, bytes32 indexed l2UpgradeTxHash, ProposedUpgrade upgrade); - /// @notice The main function that will be provided by the upgrade proxy + /// @notice 
The main function that will be delegate-called by the chain. /// @dev This is a virtual function and should be overridden by custom upgrade implementations. /// @param _proposedUpgrade The upgrade to be executed. /// @return txHash The hash of the L2 system contract upgrade transaction. diff --git a/l1-contracts/contracts/upgrades/DefaultUpgrade.sol b/l1-contracts/contracts/upgrades/DefaultUpgrade.sol index c6ebb18dc..87c6dd220 100644 --- a/l1-contracts/contracts/upgrades/DefaultUpgrade.sol +++ b/l1-contracts/contracts/upgrades/DefaultUpgrade.sol @@ -8,7 +8,7 @@ import {BaseZkSyncUpgrade, ProposedUpgrade} from "./BaseZkSyncUpgrade.sol"; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev contract DefaultUpgrade is BaseZkSyncUpgrade { - /// @notice The main function that will be called by the upgrade proxy. + /// @notice The main function that will be delegate-called by the chain. /// @param _proposedUpgrade The upgrade to be executed. function upgrade(ProposedUpgrade calldata _proposedUpgrade) public override returns (bytes32) { super.upgrade(_proposedUpgrade); diff --git a/l1-contracts/contracts/upgrades/GatewayUpgrade.sol b/l1-contracts/contracts/upgrades/GatewayUpgrade.sol index 2efa6230a..5274c44c3 100644 --- a/l1-contracts/contracts/upgrades/GatewayUpgrade.sol +++ b/l1-contracts/contracts/upgrades/GatewayUpgrade.sol @@ -33,14 +33,17 @@ contract GatewayUpgrade is BaseZkSyncUpgrade { using PriorityQueue for PriorityQueue.Queue; using PriorityTree for PriorityTree.Tree; + /// @notice The address of this contract. + /// @dev Needed as this address is delegate-called, and we then delegate-call it again. address public immutable THIS_ADDRESS; constructor() { THIS_ADDRESS = address(this); } - /// @notice The main function that will be called by the upgrade proxy. + /// @notice The main function that will be delegate-called by the chain. /// @param _proposedUpgrade The upgrade to be executed. + /// @dev Doesn't require any access-control restrictions as the contract is used in the delegate call. function upgrade(ProposedUpgrade calldata _proposedUpgrade) public override returns (bytes32) { GatewayUpgradeEncodedInput memory encodedInput = abi.decode( _proposedUpgrade.postUpgradeCalldata, @@ -72,9 +75,7 @@ contract GatewayUpgrade is BaseZkSyncUpgrade { ); // slither-disable-next-line controlled-delegatecall - (bool success, ) = THIS_ADDRESS.delegatecall( - abi.encodeWithSelector(IGatewayUpgrade.upgradeExternal.selector, proposedUpgrade) - ); + (bool success, ) = THIS_ADDRESS.delegatecall(abi.encodeCall(IGatewayUpgrade.upgradeExternal, proposedUpgrade)); if (!success) { revert GatewayUpgradeFailed(); } @@ -82,6 +83,7 @@ contract GatewayUpgrade is BaseZkSyncUpgrade { } /// @notice The function that will be called from this same contract, we need an external call to be able to modify _proposedUpgrade (memory/calldata). + /// @dev Doesn't require any access-control restrictions as the contract is used in the delegate call.
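The `THIS_ADDRESS` indirection above is a memory-to-calldata trick: the upgrade runs under delegatecall from the chain's diamond, so `address(this)` would point at the proxy, and the immutable pins the implementation address captured at construction. Delegate-calling back into the same code lets a function holding a struct in memory re-enter itself with the data as `calldata`. A generic sketch of the pattern (not the GatewayUpgrade code):

```solidity
// SPDX-License-Identifier: MIT
pragma solidity 0.8.24;

contract SelfDelegatecallSketch {
    error InnerCallFailed();

    function outer(bytes memory payload) public returns (bytes32) {
        // abi.encodeCall keeps the selector and argument types checked at compile
        // time, which is why the hunk above prefers it over abi.encodeWithSelector.
        (bool ok, bytes memory ret) = address(this).delegatecall(abi.encodeCall(this.inner, (payload)));
        if (!ok) {
            revert InnerCallFailed();
        }
        return abi.decode(ret, (bytes32));
    }

    /// Re-entered via delegatecall: `payload` arrives as calldata here, even
    /// though the caller only held it in memory.
    function inner(bytes calldata payload) external pure returns (bytes32) {
        return keccak256(payload);
    }
}
```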
function upgradeExternal(ProposedUpgrade calldata _proposedUpgrade) external { super.upgrade(_proposedUpgrade); } diff --git a/l1-contracts/contracts/upgrades/IGatewayUpgrade.sol b/l1-contracts/contracts/upgrades/IGatewayUpgrade.sol index eaa74c75b..c2c972a5e 100644 --- a/l1-contracts/contracts/upgrades/IGatewayUpgrade.sol +++ b/l1-contracts/contracts/upgrades/IGatewayUpgrade.sol @@ -4,6 +4,14 @@ pragma solidity 0.8.24; import {ProposedUpgrade} from "./BaseZkSyncUpgrade.sol"; +/** + * @author Matter Labs + * @custom:security-contact security@matterlabs.dev + * @notice Gateway upgrade interface. Used for the protocol upgrade that introduces the Gateway. + */ interface IGatewayUpgrade { + /// @notice The upgrade function called from within this same contract + /// @dev This is needed for memory -> calldata conversion of the _upgrade arg. + /// @param _upgrade The upgrade to be executed. function upgradeExternal(ProposedUpgrade calldata _upgrade) external returns (bytes32); } diff --git a/l1-contracts/contracts/upgrades/IL1GenesisUpgrade.sol b/l1-contracts/contracts/upgrades/IL1GenesisUpgrade.sol index 57dd40131..c217e3be1 100644 --- a/l1-contracts/contracts/upgrades/IL1GenesisUpgrade.sol +++ b/l1-contracts/contracts/upgrades/IL1GenesisUpgrade.sol @@ -4,8 +4,18 @@ pragma solidity 0.8.24; import {L2CanonicalTransaction} from "../common/Messaging.sol"; +/** + * @author Matter Labs + * @custom:security-contact security@matterlabs.dev + * @notice L1 genesis upgrade interface. Every chain has to process an upgrade transaction at its genesis. + * @notice This is needed to set system params like the chainId and to deploy some system contracts. + */ interface IL1GenesisUpgrade { /// @dev emitted when a chain registers and a GenesisUpgrade happens + /// @param _zkChain the address of the zk chain + /// @param _l2Transaction the l2 genesis upgrade transaction + /// @param _protocolVersion the current protocol version + /// @param _factoryDeps the factory dependencies needed for the upgrade event GenesisUpgrade( address indexed _zkChain, L2CanonicalTransaction _l2Transaction, @@ -13,6 +23,13 @@ interface IL1GenesisUpgrade { bytes[] _factoryDeps ); + /// @notice The main function that will be called by the Admin facet at genesis. + /// @param _l1GenesisUpgrade the address of the l1 genesis upgrade + /// @param _chainId the chain id + /// @param _protocolVersion the current protocol version + /// @param _l1CtmDeployerAddress the address of the l1 ctm deployer + /// @param _forceDeployments the force deployments + /// @param _factoryDeps the factory dependencies function genesisUpgrade( address _l1GenesisUpgrade, uint256 _chainId, diff --git a/l1-contracts/contracts/upgrades/L1GenesisUpgrade.sol b/l1-contracts/contracts/upgrades/L1GenesisUpgrade.sol index e9e01cfaf..107cecd03 100644 --- a/l1-contracts/contracts/upgrades/L1GenesisUpgrade.sol +++ b/l1-contracts/contracts/upgrades/L1GenesisUpgrade.sol @@ -24,7 +24,13 @@ import {L1GatewayHelper} from "./L1GatewayHelper.sol"; /// @author Matter Labs /// @custom:security-contact security@matterlabs.dev contract L1GenesisUpgrade is IL1GenesisUpgrade, BaseZkSyncUpgradeGenesis { - /// @notice The main function that will be called by the upgrade proxy. + /// @notice The main function that will be called by the Admin facet.
+ /// @param _l1GenesisUpgrade the address of the l1 genesis upgrade + /// @param _chainId the chain id + /// @param _protocolVersion the current protocol version + /// @param _l1CtmDeployerAddress the address of the l1 ctm deployer + /// @param _fixedForceDeploymentsData the force deployments data + /// @param _factoryDeps the factory dependencies function genesisUpgrade( address _l1GenesisUpgrade, uint256 _chainId, diff --git a/l1-contracts/deploy-scripts/DeployL1.s.sol b/l1-contracts/deploy-scripts/DeployL1.s.sol index 7b2ad799f..541a00eea 100644 --- a/l1-contracts/deploy-scripts/DeployL1.s.sol +++ b/l1-contracts/deploy-scripts/DeployL1.s.sol @@ -44,7 +44,6 @@ import {BridgedStandardERC20} from "contracts/bridge/BridgedStandardERC20.sol"; import {AddressHasNoCode} from "./ZkSyncScriptErrors.sol"; import {ICTMDeploymentTracker} from "contracts/bridgehub/ICTMDeploymentTracker.sol"; import {IMessageRoot} from "contracts/bridgehub/IMessageRoot.sol"; -import {IL2ContractDeployer} from "contracts/common/interfaces/IL2ContractDeployer.sol"; import {L2ContractHelper} from "contracts/common/libraries/L2ContractHelper.sol"; import {AddressAliasHelper} from "contracts/vendor/AddressAliasHelper.sol"; import {IL1Nullifier} from "contracts/bridge/L1Nullifier.sol"; @@ -430,7 +429,6 @@ contract DeployL1Script is Script, DeployUtils { abi.encode( config.tokens.tokenWethAddress, addresses.bridges.sharedBridgeProxy, - config.eraChainId, addresses.bridges.l1NullifierProxy ) ); diff --git a/l1-contracts/deploy-scripts/DeployZKAndBridgeToL1.s.sol b/l1-contracts/deploy-scripts/DeployZKAndBridgeToL1.s.sol new file mode 100644 index 000000000..5e0a87681 --- /dev/null +++ b/l1-contracts/deploy-scripts/DeployZKAndBridgeToL1.s.sol @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.24; + +// solhint-disable no-console + +import {Vm} from "forge-std/Vm.sol"; +import {Script, console2 as console} from "forge-std/Script.sol"; +import {stdToml} from "forge-std/StdToml.sol"; + +// It's required to disable lints to force the compiler to compile the contracts +// solhint-disable no-unused-import +import {TestnetERC20Token} from "contracts/dev-contracts/TestnetERC20Token.sol"; +// solhint-disable no-unused-import +import {WETH9} from "contracts/dev-contracts/WETH9.sol"; + +import {L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR} from "contracts/common/L2ContractAddresses.sol"; + +import {FinalizeL1DepositParams} from "contracts/bridge/interfaces/IL1Nullifier.sol"; +import {L1AssetRouter} from "contracts/bridge/asset-router/L1AssetRouter.sol"; +import {L2AssetRouter} from "contracts/bridge/asset-router/L2AssetRouter.sol"; +import {L1Nullifier} from "contracts/bridge/L1Nullifier.sol"; +import {L2NativeTokenVault} from "contracts/bridge/ntv/L2NativeTokenVault.sol"; +import {IL1NativeTokenVault} from "contracts/bridge/ntv/IL1NativeTokenVault.sol"; +import {Utils} from "./Utils.sol"; +import {MintFailed} from "./ZkSyncScriptErrors.sol"; + +contract DeployZKScript is Script { + using stdToml for string; + + struct Config { + TokenDescription zkToken; + address deployerAddress; + address[] additionalAddressesForMinting; + address create2FactoryAddr; + bytes32 create2FactorySalt; + uint256 chainId; + address l1SharedBridge; + address bridgehub; + address l1Nullifier; + address chainAdmin; + address governance; + address deployer; + address owner; + address anotherOwner; + address chainGovernor; + } + + struct TokenDescription { + address addr; + string name; + string symbol; + uint256 decimals; + string 
implementation; + uint256 mint; + bytes32 assetId; + } + + Config internal config; + + function run() public { + initializeConfig(); + deployZkToken(); + saveOutput(); + } + + function getTokenAddress() public view returns (address) { + return config.zkToken.addr; + } + + function initializeConfig() internal { + config.deployerAddress = msg.sender; + + string memory root = vm.projectRoot(); + + // Grab config from output of l1 deployment + string memory path = string.concat(root, vm.envString("TOKENS_CONFIG")); + string memory toml = vm.readFile(path); + + config.additionalAddressesForMinting = vm.parseTomlAddressArray(toml, "$.additional_addresses_for_minting"); + + // Parse the ZK token configuration + string memory key = "$.tokens.ZK"; + config.zkToken.name = toml.readString(string.concat(key, ".name")); + config.zkToken.symbol = toml.readString(string.concat(key, ".symbol")); + config.zkToken.decimals = toml.readUint(string.concat(key, ".decimals")); + config.zkToken.implementation = toml.readString(string.concat(key, ".implementation")); + config.zkToken.mint = toml.readUint(string.concat(key, ".mint")); + + // Grab config from custom config file + path = string.concat(root, vm.envString("ZK_CHAIN_CONFIG")); + toml = vm.readFile(path); + + config.bridgehub = toml.readAddress("$.deployed_addresses.bridgehub.bridgehub_proxy_addr"); + config.l1SharedBridge = toml.readAddress("$.deployed_addresses.bridges.shared_bridge_proxy_addr"); + config.l1Nullifier = toml.readAddress("$.deployed_addresses.bridges.l1_nullifier_proxy_addr"); + config.chainId = toml.readUint("$.chain.chain_chain_id"); + config.chainGovernor = toml.readAddress("$.owner_address"); + } + + function initializeAdditionalConfig() internal { + string memory root = vm.projectRoot(); + string memory path = string.concat(root, vm.envString("L1_OUTPUT")); + string memory toml = vm.readFile(path); + + config.owner = toml.readAddress("$.owner_address"); + } + + function deployZkToken() internal { + uint256 someBigAmount = 100000000000000000000000000000000; + TokenDescription storage token = config.zkToken; + console.log("Deploying token:", token.name); + + vm.startBroadcast(); + address zkTokenAddress = deployErc20({ + name: token.name, + symbol: token.symbol, + decimals: token.decimals, + mint: token.mint, + additionalAddressesForMinting: config.additionalAddressesForMinting + }); + console.log("Token deployed at:", zkTokenAddress); + token.addr = zkTokenAddress; + address deployer = msg.sender; + TestnetERC20Token zkToken = TestnetERC20Token(zkTokenAddress); + zkToken.mint(deployer, someBigAmount); + uint256 deployerBalance = zkToken.balanceOf(deployer); + console.log("Deployer balance:", deployerBalance); + L2AssetRouter l2AR = L2AssetRouter(L2_ASSET_ROUTER_ADDR); + L2NativeTokenVault l2NTV = L2NativeTokenVault(L2_NATIVE_TOKEN_VAULT_ADDR); + l2NTV.registerToken(zkTokenAddress); + bytes32 zkTokenAssetId = l2NTV.assetId(zkTokenAddress); + config.zkToken.assetId = zkTokenAssetId; + console.log("zkTokenAssetId:", uint256(zkTokenAssetId)); + zkToken.approve(L2_NATIVE_TOKEN_VAULT_ADDR, someBigAmount); + vm.stopBroadcast(); + + vm.broadcast(); + l2AR.withdraw(zkTokenAssetId, abi.encode(someBigAmount, deployer)); + uint256 deployerBalanceAfterWithdraw = zkToken.balanceOf(deployer); + console.log("Deployer balance after withdraw:", deployerBalanceAfterWithdraw); + } + + /// TODO(EVM-748): make this function support non-ETH based chains + function supplyEraWallet(address addr, uint256 amount) public { + initializeConfig(); + 
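+ // Funds `addr` by sending an L1->L2 priority transaction with empty calldata and `amount` attached as the transaction value.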
Utils.runL1L2Transaction( + hex"", + Utils.MAX_PRIORITY_TX_GAS, + amount, + new bytes[](0), + addr, + config.chainId, + config.bridgehub, + config.l1SharedBridge + ); + } + + function finalizeZkTokenWithdrawal( + uint256 _chainId, + uint256 _l2BatchNumber, + uint256 _l2MessageIndex, + uint16 _l2TxNumberInBatch, + bytes memory _message, + bytes32[] memory _merkleProof + ) public { + initializeConfig(); + + L1Nullifier l1Nullifier = L1Nullifier(config.l1Nullifier); + + vm.broadcast(); + l1Nullifier.finalizeDeposit( + FinalizeL1DepositParams({ + chainId: _chainId, + l2BatchNumber: _l2BatchNumber, + l2MessageIndex: _l2MessageIndex, + l2Sender: L2_ASSET_ROUTER_ADDR, + l2TxNumberInBatch: _l2TxNumberInBatch, + message: _message, + merkleProof: _merkleProof + }) + ); + } + + function saveL1Address() public { + initializeConfig(); + initializeAdditionalConfig(); + + string memory root = vm.projectRoot(); + string memory path = string.concat(root, vm.envString("ZK_TOKEN_OUTPUT")); + + string memory toml = vm.readFile(path); + + bytes32 zkTokenAssetId = toml.readBytes32("$.ZK.assetId"); + + L1AssetRouter l1AR = L1AssetRouter(config.l1SharedBridge); + console.log("L1 AR address", address(l1AR)); + IL1NativeTokenVault nativeTokenVault = IL1NativeTokenVault(address(l1AR.nativeTokenVault())); + address l1ZKAddress = nativeTokenVault.tokenAddress(zkTokenAssetId); + console.log("L1 ZK address", l1ZKAddress); + TestnetERC20Token l1ZK = TestnetERC20Token(l1ZKAddress); + + uint256 balance = l1ZK.balanceOf(config.deployerAddress); + vm.broadcast(); + l1ZK.transfer(config.owner, balance / 2); + string memory tokenInfo = vm.serializeAddress("ZK", "l1Address", l1ZKAddress); + vm.writeToml(tokenInfo, path, ".ZK.l1Address"); + } + + function fundChainGovernor() public { + initializeConfig(); + + string memory root = vm.projectRoot(); + string memory path = string.concat(root, vm.envString("ZK_TOKEN_OUTPUT")); + string memory toml = vm.readFile(path); + + address l1ZKAddress = toml.readAddress("$.ZK.l1Address.l1Address"); + console.log("L1 ZK address: ", l1ZKAddress); + console.log("Address of governor: ", config.chainGovernor); + TestnetERC20Token l1ZK = TestnetERC20Token(l1ZKAddress); + uint256 balance = l1ZK.balanceOf(config.deployerAddress); + vm.broadcast(); + l1ZK.transfer(config.chainGovernor, balance / 10); + } + + function deployErc20( + string memory name, + string memory symbol, + uint256 decimals, + uint256 mint, + address[] storage additionalAddressesForMinting + ) internal returns (address) { + address tokenAddress = address(new TestnetERC20Token(name, symbol, uint8(decimals))); // No salt for testing + + if (mint > 0) { + additionalAddressesForMinting.push(config.deployerAddress); + uint256 addressMintListLength = additionalAddressesForMinting.length; + for (uint256 i = 0; i < addressMintListLength; ++i) { + (bool success, ) = tokenAddress.call( + abi.encodeWithSignature("mint(address,uint256)", additionalAddressesForMinting[i], mint) + ); + if (!success) { + revert MintFailed(); + } + console.log("Minting to:", additionalAddressesForMinting[i]); + } + } + + return tokenAddress; + } + + function saveOutput() internal { + TokenDescription memory token = config.zkToken; + string memory section = token.symbol; + + // Serialize each attribute directly under the token's symbol (e.g., [ZK]) + vm.serializeString(section, "name", token.name); + vm.serializeString(section, "symbol", token.symbol); + vm.serializeUint(section, "decimals", token.decimals); + 
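+ // `mint` is serialized as hex below (serializeUintToHex), presumably because wei-scale amounts can exceed the TOML integer range.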
vm.serializeString(section, "implementation", token.implementation); + vm.serializeUintToHex(section, "mint", token.mint); + vm.serializeBytes32(section, "assetId", token.assetId); + vm.serializeAddress(token.symbol, "l1Address", address(0)); + + string memory tokenInfo = vm.serializeAddress(token.symbol, "address", token.addr); + string memory toml = vm.serializeString("root", "ZK", tokenInfo); + string memory root = vm.projectRoot(); + string memory path = string.concat(root, vm.envString("ZK_TOKEN_OUTPUT")); + vm.writeToml(toml, path); + } + + // add this to be excluded from coverage report + function test() internal {} +} diff --git a/l1-contracts/deploy-scripts/GatewayCTMDeployerHelper.sol b/l1-contracts/deploy-scripts/GatewayCTMDeployerHelper.sol index 50af31f88..5bb7f7585 100644 --- a/l1-contracts/deploy-scripts/GatewayCTMDeployerHelper.sol +++ b/l1-contracts/deploy-scripts/GatewayCTMDeployerHelper.sol @@ -66,7 +66,7 @@ library GatewayCTMDeployerHelper { contracts.stateTransition.validatorTimelock = _deployInternal( "ValidatorTimelock", "ValidatorTimelock.sol", - abi.encode(ctmDeployerAddress, 0, eraChainId), + abi.encode(ctmDeployerAddress, 0), innerConfig ); diff --git a/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol b/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol index 3268a4f5e..aaf51357a 100644 --- a/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol +++ b/l1-contracts/deploy-scripts/GatewayCTMFromL1.s.sol @@ -7,6 +7,10 @@ import {Script, console2 as console} from "forge-std/Script.sol"; // import {Vm} from "forge-std/Vm.sol"; import {stdToml} from "forge-std/StdToml.sol"; +// It's required to disable lints to force the compiler to compile the contracts +// solhint-disable no-unused-import +import {TestnetERC20Token} from "contracts/dev-contracts/TestnetERC20Token.sol"; + import {Ownable} from "@openzeppelin/contracts-v4/access/Ownable.sol"; import {IBridgehub} from "contracts/bridgehub/IBridgehub.sol"; import {IZKChain} from "contracts/state-transition/chain-interfaces/IZKChain.sol"; @@ -16,6 +20,8 @@ import {IZKChain} from "contracts/state-transition/chain-interfaces/IZKChain.sol import {StateTransitionDeployedAddresses, Utils, L2_BRIDGEHUB_ADDRESS, L2_CREATE2_FACTORY_ADDRESS} from "./Utils.sol"; import {AddressAliasHelper} from "contracts/vendor/AddressAliasHelper.sol"; import {L2ContractsBytecodesLib} from "./L2ContractsBytecodesLib.sol"; +import {L1AssetRouter} from "contracts/bridge/asset-router/L1AssetRouter.sol"; +import {IL1NativeTokenVault} from "contracts/bridge/ntv/IL1NativeTokenVault.sol"; import {AdminFacet} from "contracts/state-transition/chain-deps/facets/Admin.sol"; import {ExecutorFacet} from "contracts/state-transition/chain-deps/facets/Executor.sol"; @@ -58,6 +64,9 @@ contract GatewayCTMFromL1 is Script { address chainTypeManagerProxy; address sharedBridgeProxy; address governance; + address governanceAddr; + address deployerAddr; + address baseToken; uint256 chainChainId; uint256 eraChainId; uint256 l1ChainId; @@ -96,6 +105,10 @@ contract GatewayCTMFromL1 is Script { function prepareAddresses() external { initializeConfig(); + if (config.baseToken != ADDRESS_ONE) { + distributeBaseToken(); + } + deployGatewayContracts(); (DeployedContracts memory expectedGatewayContracts, bytes memory create2Calldata, ) = GatewayCTMDeployerHelper .calculateAddresses(bytes32(0), gatewayCTMDeployerConfig); @@ -246,6 +259,24 @@ contract GatewayCTMFromL1 is Script { }); } + function distributeBaseToken() internal { + deployerAddress = msg.sender; + uint256 
amountForDistribution = 100000000000000000000; + L1AssetRouter l1AR = L1AssetRouter(config.sharedBridgeProxy); + IL1NativeTokenVault nativeTokenVault = IL1NativeTokenVault(address(l1AR.nativeTokenVault())); + bytes32 baseTokenAssetID = nativeTokenVault.assetId(config.baseToken); + uint256 baseTokenOriginChainId = nativeTokenVault.originChainId(baseTokenAssetID); + TestnetERC20Token baseToken = TestnetERC20Token(config.baseToken); + + vm.startBroadcast(); + if (baseTokenOriginChainId == block.chainid) { + baseToken.mint(config.governanceAddr, amountForDistribution); + } else { + baseToken.transfer(config.governanceAddr, amountForDistribution); + } + vm.stopBroadcast(); + } + function saveOutput() internal { vm.serializeAddress( "gateway_state_transition", diff --git a/l1-contracts/deploy-scripts/GatewayPreparation.s.sol b/l1-contracts/deploy-scripts/GatewayPreparation.s.sol index 1c68dd82d..633446230 100644 --- a/l1-contracts/deploy-scripts/GatewayPreparation.s.sol +++ b/l1-contracts/deploy-scripts/GatewayPreparation.s.sol @@ -7,6 +7,10 @@ import {Script, console2 as console} from "forge-std/Script.sol"; // import {Vm} from "forge-std/Vm.sol"; import {stdToml} from "forge-std/StdToml.sol"; +// It's required to disable lints to force the compiler to compile the contracts +// solhint-disable no-unused-import +import {TestnetERC20Token} from "contracts/dev-contracts/TestnetERC20Token.sol"; + import {Ownable} from "@openzeppelin/contracts-v4/access/Ownable.sol"; import {IBridgehub, BridgehubBurnCTMAssetData} from "contracts/bridgehub/IBridgehub.sol"; import {IZKChain} from "contracts/state-transition/chain-interfaces/IZKChain.sol"; @@ -24,9 +28,13 @@ import {SET_ASSET_HANDLER_COUNTERPART_ENCODING_VERSION} from "contracts/bridge/a import {CTM_DEPLOYMENT_TRACKER_ENCODING_VERSION} from "contracts/bridgehub/CTMDeploymentTracker.sol"; import {L2AssetRouter, IL2AssetRouter} from "contracts/bridge/asset-router/L2AssetRouter.sol"; import {L1Nullifier} from "contracts/bridge/L1Nullifier.sol"; +import {L1AssetRouter} from "contracts/bridge/asset-router/L1AssetRouter.sol"; +import {IL1NativeTokenVault} from "contracts/bridge/ntv/IL1NativeTokenVault.sol"; import {BridgehubMintCTMAssetData} from "contracts/bridgehub/IBridgehub.sol"; import {IAssetRouterBase} from "contracts/bridge/asset-router/IAssetRouterBase.sol"; import {L2_ASSET_ROUTER_ADDR} from "contracts/common/L2ContractAddresses.sol"; +import {ETH_TOKEN_ADDRESS} from "contracts/common/Config.sol"; +import {DataEncoding} from "contracts/common/libraries/DataEncoding.sol"; import {IAdmin} from "contracts/state-transition/chain-interfaces/IAdmin.sol"; import {FinalizeL1DepositParams} from "contracts/bridge/interfaces/IL1Nullifier.sol"; @@ -246,6 +254,33 @@ contract GatewayPreparation is Script { function migrateChainToGateway(address chainAdmin, address accessControlRestriction, uint256 chainId) public { initializeConfig(); + IBridgehub bridgehubContract = IBridgehub(config.bridgehub); + bytes32 gatewayBaseTokenAssetId = bridgehubContract.baseTokenAssetId(config.gatewayChainId); + bytes32 ethTokenAssetId = DataEncoding.encodeNTVAssetId(block.chainid, ETH_TOKEN_ADDRESS); + + // Fund chain admin with tokens + if (gatewayBaseTokenAssetId != ethTokenAssetId) { + deployerAddress = msg.sender; + uint256 amountForDistribution = 100000000000000000000; + L1AssetRouter l1AR = L1AssetRouter(config.sharedBridgeProxy); + IL1NativeTokenVault nativeTokenVault = IL1NativeTokenVault(address(l1AR.nativeTokenVault())); + address baseTokenAddress = 
nativeTokenVault.tokenAddress(gatewayBaseTokenAssetId); + uint256 baseTokenOriginChainId = nativeTokenVault.originChainId(gatewayBaseTokenAssetId); + TestnetERC20Token baseToken = TestnetERC20Token(baseTokenAddress); + uint256 deployerBalance = baseToken.balanceOf(deployerAddress); + console.log("Base token origin id: ", baseTokenOriginChainId); + + vm.startBroadcast(); + if (baseTokenOriginChainId == block.chainid) { + baseToken.mint(chainAdmin, amountForDistribution); + } else { + baseToken.transfer(chainAdmin, amountForDistribution); + } + vm.stopBroadcast(); + } + + console.log("Chain Admin address:", chainAdmin); + // TODO(EVM-746): Use L2-based chain admin contract address l2ChainAdmin = AddressAliasHelper.applyL1ToL2Alias(chainAdmin); diff --git a/l1-contracts/deploy-scripts/RegisterZKChain.s.sol b/l1-contracts/deploy-scripts/RegisterZKChain.s.sol index dd3ed0a09..31f16f0fa 100644 --- a/l1-contracts/deploy-scripts/RegisterZKChain.s.sol +++ b/l1-contracts/deploy-scripts/RegisterZKChain.s.sol @@ -288,7 +288,13 @@ contract RegisterZKChainScript is Script { function registerAssetIdOnBridgehub() internal { IBridgehub bridgehub = IBridgehub(config.bridgehub); Ownable ownable = Ownable(config.bridgehub); - bytes32 baseTokenAssetId = DataEncoding.encodeNTVAssetId(block.chainid, config.baseToken); + INativeTokenVault ntv = INativeTokenVault(config.nativeTokenVault); + bytes32 baseTokenAssetId = ntv.assetId(config.baseToken); + uint256 baseTokenOriginChain = ntv.originChainId(baseTokenAssetId); + + if (baseTokenAssetId == bytes32(0)) { + baseTokenAssetId = DataEncoding.encodeNTVAssetId(block.chainid, config.baseToken); + } if (bridgehub.assetIdIsRegistered(baseTokenAssetId)) { console.log("Base token asset id already registered on Bridgehub"); @@ -308,13 +314,17 @@ contract RegisterZKChainScript is Script { function registerTokenOnNTV() internal { INativeTokenVault ntv = INativeTokenVault(config.nativeTokenVault); - // Ownable ownable = Ownable(config.nativeTokenVault); - bytes32 baseTokenAssetId = DataEncoding.encodeNTVAssetId(block.chainid, config.baseToken); + bytes32 baseTokenAssetId = ntv.assetId(config.baseToken); + uint256 baseTokenOriginChain = ntv.originChainId(baseTokenAssetId); + + // If it hasn't been registered already with ntv + if (baseTokenAssetId == bytes32(0)) { + baseTokenAssetId = DataEncoding.encodeNTVAssetId(block.chainid, config.baseToken); + } config.baseTokenAssetId = baseTokenAssetId; if (ntv.tokenAddress(baseTokenAssetId) != address(0) || config.baseToken == ETH_TOKEN_ADDRESS) { console.log("Token already registered on NTV"); } else { - // bytes memory data = abi.encodeCall(ntv.registerToken, (config.baseToken)); vm.broadcast(); ntv.registerToken(config.baseToken); console.log("Token registered on NTV"); diff --git a/l1-contracts/src.ts/deploy.ts b/l1-contracts/src.ts/deploy.ts index 677f30b9b..4747418c7 100644 --- a/l1-contracts/src.ts/deploy.ts +++ b/l1-contracts/src.ts/deploy.ts @@ -939,12 +939,11 @@ export class Deployer { create2Salt: string, ethTxOptions: ethers.providers.TransactionRequest ) { - const eraChainId = getNumberFromEnv("CONTRACTS_ERA_CHAIN_ID"); const tokens = getTokens(); const l1WethToken = tokens.find((token: { symbol: string }) => token.symbol == "WETH")!.address; const contractAddress = await this.deployViaCreate2( "L1NativeTokenVault", - [l1WethToken, this.addresses.Bridges.SharedBridgeProxy, eraChainId, this.addresses.Bridges.L1NullifierProxy], + [l1WethToken, this.addresses.Bridges.SharedBridgeProxy, 
this.addresses.Bridges.L1NullifierProxy], create2Salt, ethTxOptions ); @@ -1713,10 +1712,9 @@ export class Deployer { public async deployValidatorTimelock(create2Salt: string, ethTxOptions: ethers.providers.TransactionRequest) { const executionDelay = getNumberFromEnv("CONTRACTS_VALIDATOR_TIMELOCK_EXECUTION_DELAY"); - const eraChainId = getNumberFromEnv("CONTRACTS_ERA_CHAIN_ID"); const contractAddress = await this.deployViaCreate2( "ValidatorTimelock", - [this.ownerAddress, executionDelay, eraChainId], + [this.ownerAddress, executionDelay], create2Salt, ethTxOptions ); diff --git a/l1-contracts/test/foundry/l1/integration/AssetRouterTest.t.sol b/l1-contracts/test/foundry/l1/integration/AssetRouterTest.t.sol index a3060b9ec..881c38e88 100644 --- a/l1-contracts/test/foundry/l1/integration/AssetRouterTest.t.sol +++ b/l1-contracts/test/foundry/l1/integration/AssetRouterTest.t.sol @@ -89,8 +89,8 @@ contract AssetRouterTest is L1ContractDeployer, ZKChainDeployer, TokenDeployer, l2TokenAssetId = DataEncoding.encodeNTVAssetId(chainId, _tokenAddress); bytes memory transferData = DataEncoding.encodeBridgeMintData({ _originalCaller: ETH_TOKEN_ADDRESS, - _l2Receiver: address(this), - _l1Token: _tokenAddress, + _remoteReceiver: address(this), + _originToken: ETH_TOKEN_ADDRESS, _amount: 100, _erc20Metadata: BridgeHelper.getERC20Getters(_tokenAddress, chainId) }); diff --git a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/L2WethTestAbstract.t.sol b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/L2WethTestAbstract.t.sol index af52b4f74..3bc89b3ef 100644 --- a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/L2WethTestAbstract.t.sol +++ b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/L2WethTestAbstract.t.sol @@ -11,9 +11,7 @@ import {BridgedStandardERC20} from "contracts/bridge/BridgedStandardERC20.sol"; import {L2AssetRouter} from "contracts/bridge/asset-router/L2AssetRouter.sol"; import {IL2NativeTokenVault} from "contracts/bridge/ntv/IL2NativeTokenVault.sol"; -import {UpgradeableBeacon} from "@openzeppelin/contracts-v4/proxy/beacon/UpgradeableBeacon.sol"; -import {BeaconProxy} from "@openzeppelin/contracts-v4/proxy/beacon/BeaconProxy.sol"; - +import {Unauthorized, BridgeMintNotImplemented} from "contracts/common/L1ContractErrors.sol"; import {L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR, L2_BRIDGEHUB_ADDR} from "contracts/common/L2ContractAddresses.sol"; import {ETH_TOKEN_ADDRESS, SETTLEMENT_LAYER_RELAY_SENDER} from "contracts/common/Config.sol"; @@ -31,7 +29,6 @@ import {IZKChain} from "contracts/state-transition/chain-interfaces/IZKChain.sol import {SystemContractsArgs} from "./_SharedL2ContractL1DeployerUtils.sol"; import {DeployUtils} from "deploy-scripts/DeployUtils.s.sol"; -import {Unauthorized, BridgeMintNotImplemented} from "contracts/common/L1ContractErrors.sol"; abstract contract L2WethTestAbstract is Test, SharedL2ContractDeployer { function test_shouldDepositWethByCallingDeposit() public { diff --git a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractDeployer.sol b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractDeployer.sol index 4e72c7d25..b0c0ac6f7 100644 --- a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractDeployer.sol +++ b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractDeployer.sol @@ -117,7 +117,7 @@ abstract contract SharedL2ContractDeployer is Test, DeployUtils { 
vm.prank(ownerWallet); l2Bridgehub.addChainTypeManager(address(addresses.stateTransition.chainTypeManagerProxy)); vm.prank(AddressAliasHelper.applyL1ToL2Alias(l1CTMDeployer)); - l2Bridgehub.setAssetHandlerAddress( + l2Bridgehub.setCTMAssetAddress( bytes32(uint256(uint160(l1CTM))), address(addresses.stateTransition.chainTypeManagerProxy) ); diff --git a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractL1DeployerUtils.sol b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractL1DeployerUtils.sol index a363c0cd0..c5076c3c1 100644 --- a/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractL1DeployerUtils.sol +++ b/l1-contracts/test/foundry/l1/integration/l2-tests-in-l1-context/_SharedL2ContractL1DeployerUtils.sol @@ -89,7 +89,6 @@ contract SharedL2ContractL1DeployerUtils is DeployUtils { ); vm.etch(L2_ASSET_ROUTER_ADDR, assetRouter.code); - stdstore.target(address(L2_ASSET_ROUTER_ADDR)).sig("l1AssetRouter()").checked_write(_args.l1AssetRouter); stdstore .target(L2_ASSET_ROUTER_ADDR) diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Bridgehub/experimental_bridge.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Bridgehub/experimental_bridge.t.sol index 850e48eda..83ac20721 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Bridgehub/experimental_bridge.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Bridgehub/experimental_bridge.t.sol @@ -190,7 +190,7 @@ contract ExperimentalBridgeTest is Test { } function _deployNTV(address _sharedBridgeAddr) internal returns (L1NativeTokenVault addr) { - L1NativeTokenVault ntvImpl = new L1NativeTokenVault(weth, _sharedBridgeAddr, eraChainId, l1Nullifier); + L1NativeTokenVault ntvImpl = new L1NativeTokenVault(weth, _sharedBridgeAddr, l1Nullifier); TransparentUpgradeableProxy ntvProxy = new TransparentUpgradeableProxy( address(ntvImpl), address(bridgeOwner), diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1Erc20Bridge/_L1Erc20Bridge_Shared.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1Erc20Bridge/_L1Erc20Bridge_Shared.t.sol index fb0c30c58..acbc8ccec 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1Erc20Bridge/_L1Erc20Bridge_Shared.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1Erc20Bridge/_L1Erc20Bridge_Shared.t.sol @@ -47,12 +47,7 @@ contract L1Erc20BridgeTest is Test { ); address weth = makeAddr("weth"); - L1NativeTokenVault ntv = new L1NativeTokenVault( - weth, - sharedBridgeAddress, - eraChainId, - IL1Nullifier(l1NullifierAddress) - ); + L1NativeTokenVault ntv = new L1NativeTokenVault(weth, sharedBridgeAddress, IL1Nullifier(l1NullifierAddress)); vm.store(address(bridge), bytes32(uint256(212)), bytes32(0)); diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/L1SharedBridgeFails.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/L1SharedBridgeFails.t.sol index 4a7686c2f..2990e6695 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/L1SharedBridgeFails.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/L1SharedBridgeFails.t.sol @@ -24,7 +24,7 @@ import {IGetters} from "contracts/state-transition/chain-interfaces/IGetters.sol import {AddressAlreadyUsed, WithdrawFailed, Unauthorized, AssetIdNotSupported, SharedBridgeKey, SharedBridgeValueNotSet, L2WithdrawalMessageWrongLength, InsufficientChainBalance, ZeroAddress, ValueMismatch, NonEmptyMsgValue, DepositExists, ValueMismatch, 
NonEmptyMsgValue, TokenNotSupported, EmptyDeposit, InvalidProof, NoFundsTransferred, DepositDoesNotExist, WithdrawalAlreadyFinalized, InvalidSelector, TokensWithFeesNotSupported} from "contracts/common/L1ContractErrors.sol"; import {StdStorage, stdStorage} from "forge-std/Test.sol"; import {DepositNotSet} from "test/foundry/L1TestsErrors.sol"; -import {WrongCounterpart, EthTransferFailed, NotNTV, EmptyToken, NativeTokenVaultAlreadySet, ZeroAmountToTransfer, WrongAmountTransferred, ClaimFailedDepositFailed} from "contracts/bridge/L1BridgeContractErrors.sol"; +import {WrongCounterpart, EthTransferFailed, EmptyToken, NativeTokenVaultAlreadySet, ZeroAmountToTransfer, WrongAmountTransferred, ClaimFailedDepositFailed} from "contracts/bridge/L1BridgeContractErrors.sol"; /// We are testing all the specified revert and require cases. contract L1AssetRouterFailTest is L1AssetRouterTest { @@ -63,7 +63,7 @@ contract L1AssetRouterFailTest is L1AssetRouterTest { } function test_nullifyChainBalanceByNTV_wrongCaller() public { - vm.expectRevert(NotNTV.selector); + vm.expectRevert(); l1Nullifier.nullifyChainBalanceByNTV(chainId, address(token)); } diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/_L1SharedBridge_Shared.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/_L1SharedBridge_Shared.t.sol index de655e7ee..f7ab8c6f5 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/_L1SharedBridge_Shared.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Bridges/L1SharedBridge/_L1SharedBridge_Shared.t.sol @@ -153,7 +153,6 @@ contract L1AssetRouterTest is Test { nativeTokenVaultImpl = new L1NativeTokenVault({ _l1WethAddress: l1WethAddress, _l1AssetRouter: address(sharedBridge), - _eraChainId: eraChainId, _l1Nullifier: l1Nullifier }); address tokenBeacon = makeAddr("tokenBeacon"); diff --git a/l1-contracts/test/foundry/l1/unit/concrete/Governance/PermanentRestriction.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/Governance/PermanentRestriction.t.sol index 1262f1d83..54a576cb7 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/Governance/PermanentRestriction.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/Governance/PermanentRestriction.t.sol @@ -90,12 +90,12 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { ); } - function test_allowAdminImplementation(bytes32 implementationHash) public { + function test_setAllowedAdminImplementation(bytes32 implementationHash) public { vm.expectEmit(true, false, false, true); emit IPermanentRestriction.AdminImplementationAllowed(implementationHash, true); vm.prank(owner); - permRestriction.allowAdminImplementation(implementationHash, true); + permRestriction.setAllowedAdminImplementation(implementationHash, true); } function test_setAllowedData(bytes memory data) public { @@ -106,12 +106,12 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { permRestriction.setAllowedData(data, true); } - function test_setSelectorIsValidated(bytes4 selector) public { + function test_setSelectorShouldBeValidated(bytes4 selector) public { vm.expectEmit(true, false, false, true); emit IPermanentRestriction.SelectorValidationChanged(selector, true); vm.prank(owner); - permRestriction.setSelectorIsValidated(selector, true); + permRestriction.setSelectorShouldBeValidated(selector, true); } function isAddressAdmin(address chainAddr, address _potentialAdmin) internal returns (bool) { @@ -121,16 +121,16 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { return 
permRestriction.isAdminOfAChain(chainAddr); } - function test_tryCompareAdminOfAChainIsAddressZero() public { - assertFalse(isAddressAdmin(address(0), owner)); + function test_isAdminOfAChainIsAddressZero() public { + assertFalse(permRestriction.isAdminOfAChain(address(0))); } - function test_tryCompareAdminOfAChainNotAHyperchain() public { - assertFalse(isAddressAdmin(makeAddr("random"), owner)); + function test_isAdminOfAChainNotAHyperchain() public { + assertFalse(permRestriction.isAdminOfAChain(makeAddr("random"))); } - function test_tryCompareAdminOfAChainNotAnAdmin() public { - assertFalse(isAddressAdmin(hyperchain, owner)); + function test_isAdminOfAChainNotAnAdmin() public { + assertFalse(permRestriction.isAdminOfAChain(hyperchain)); } function test_tryCompareAdminOfAChain() public { @@ -161,7 +161,7 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { function test_validateCallSetPendingAdminRemovingPermanentRestriction() public { vm.prank(owner); - permRestriction.allowAdminImplementation(address(chainAdmin).codehash, true); + permRestriction.setAllowedAdminImplementation(address(chainAdmin).codehash, true); Call memory call = Call({ target: hyperchain, @@ -178,7 +178,7 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { function test_validateCallSetPendingAdmin() public { vm.prank(owner); - permRestriction.allowAdminImplementation(address(chainAdmin).codehash, true); + permRestriction.setAllowedAdminImplementation(address(chainAdmin).codehash, true); vm.prank(address(chainAdmin)); chainAdmin.addRestriction(address(permRestriction)); @@ -208,7 +208,7 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { function test_validateCallCallNotAllowed() public { vm.prank(owner); - permRestriction.setSelectorIsValidated(IAdmin.acceptAdmin.selector, true); + permRestriction.setSelectorShouldBeValidated(IAdmin.acceptAdmin.selector, true); Call memory call = Call({ target: hyperchain, value: 0, @@ -224,7 +224,7 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { function test_validateCall() public { vm.prank(owner); - permRestriction.setSelectorIsValidated(IAdmin.acceptAdmin.selector, true); + permRestriction.setSelectorShouldBeValidated(IAdmin.acceptAdmin.selector, true); Call memory call = Call({ target: hyperchain, value: 0, @@ -374,7 +374,7 @@ contract PermanentRestrictionTest is ChainTypeManagerTest { // ctm deployer address is 0 in this test vm.startPrank(address(0)); - bridgehub.setAssetHandlerAddress( + bridgehub.setCTMAssetAddress( bytes32(uint256(uint160(address(chainContractAddress)))), address(chainContractAddress) ); diff --git a/l1-contracts/test/foundry/l1/unit/concrete/ValidatorTimelock/ValidatorTimelock.t.sol b/l1-contracts/test/foundry/l1/unit/concrete/ValidatorTimelock/ValidatorTimelock.t.sol index 3725f54e2..67c769ca2 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/ValidatorTimelock/ValidatorTimelock.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/ValidatorTimelock/ValidatorTimelock.t.sol @@ -46,7 +46,7 @@ contract ValidatorTimelockTest is Test { executionDelay = 10; chainTypeManager = new DummyChainTypeManagerForValidatorTimelock(owner, zkSync); - validator = new ValidatorTimelock(owner, executionDelay, eraChainId); + validator = new ValidatorTimelock(owner, executionDelay); vm.prank(owner); validator.setChainTypeManager(IChainTypeManager(address(chainTypeManager))); vm.prank(owner); diff --git a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/chain-deps/facets/Base/_Base_Shared.t.sol 
b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/chain-deps/facets/Base/_Base_Shared.t.sol index be93c91df..6894fb1ba 100644 --- a/l1-contracts/test/foundry/l1/unit/concrete/state-transition/chain-deps/facets/Base/_Base_Shared.t.sol +++ b/l1-contracts/test/foundry/l1/unit/concrete/state-transition/chain-deps/facets/Base/_Base_Shared.t.sol @@ -21,8 +21,6 @@ contract TestBaseFacet is ZKChainBase { function functionWithOnlyAdminOrChainTypeManagerModifier() external onlyAdminOrChainTypeManager {} - function functionWithonlyValidatorOrChainTypeManagerModifier() external onlyValidatorOrChainTypeManager {} - // add this to be excluded from coverage report function test() internal virtual {} } @@ -46,7 +44,6 @@ contract ZKChainBaseTest is Test { selectors[2] = TestBaseFacet.functionWithOnlyChainTypeManagerModifier.selector; selectors[3] = TestBaseFacet.functionWithOnlyBridgehubModifier.selector; selectors[4] = TestBaseFacet.functionWithOnlyAdminOrChainTypeManagerModifier.selector; - selectors[5] = TestBaseFacet.functionWithonlyValidatorOrChainTypeManagerModifier.selector; } function setUp() public virtual { diff --git a/l1-contracts/test/foundry/l2/integration/L2Utils.sol b/l1-contracts/test/foundry/l2/integration/L2Utils.sol index d10105734..cdda458cd 100644 --- a/l1-contracts/test/foundry/l2/integration/L2Utils.sol +++ b/l1-contracts/test/foundry/l2/integration/L2Utils.sol @@ -5,9 +5,7 @@ pragma solidity ^0.8.20; import {Vm} from "forge-std/Vm.sol"; import "forge-std/console.sol"; -import {UpgradeableBeacon} from "@openzeppelin/contracts-v4/proxy/beacon/UpgradeableBeacon.sol"; -import {BeaconProxy} from "@openzeppelin/contracts-v4/proxy/beacon/BeaconProxy.sol"; -import {DEPLOYER_SYSTEM_CONTRACT, L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR, L2_BRIDGEHUB_ADDR, L2_MESSAGE_ROOT_ADDR} from "contracts/common/L2ContractAddresses.sol"; +import {L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, L2_ASSET_ROUTER_ADDR, L2_NATIVE_TOKEN_VAULT_ADDR, L2_BRIDGEHUB_ADDR, L2_MESSAGE_ROOT_ADDR} from "contracts/common/L2ContractAddresses.sol"; import {IContractDeployer, L2ContractHelper} from "contracts/common/libraries/L2ContractHelper.sol"; import {TransparentUpgradeableProxy} from "@openzeppelin/contracts-v4/proxy/transparent/TransparentUpgradeableProxy.sol"; @@ -70,7 +68,7 @@ library L2Utils { */ function initSystemContracts(SystemContractsArgs memory _args) internal { bytes memory contractDeployerBytecode = readSystemContractsBytecode("ContractDeployer"); - vm.etch(DEPLOYER_SYSTEM_CONTRACT, contractDeployerBytecode); + vm.etch(L2_DEPLOYER_SYSTEM_CONTRACT_ADDR, contractDeployerBytecode); forceDeploySystemContracts(_args); } @@ -213,7 +211,7 @@ library L2Utils { }); vm.prank(L2_FORCE_DEPLOYER_ADDR); - IContractDeployer(DEPLOYER_SYSTEM_CONTRACT).forceDeployOnAddresses(deployments); + IContractDeployer(L2_DEPLOYER_SYSTEM_CONTRACT_ADDR).forceDeployOnAddresses(deployments); } function deployViaCreat2L2( diff --git a/l1-contracts/test/foundry/l2/unit/GatewayCTMDeployer/GatewayCTMDeployer.t.sol b/l1-contracts/test/foundry/l2/unit/GatewayCTMDeployer/GatewayCTMDeployer.t.sol index e04432c9d..f016b6559 100644 --- a/l1-contracts/test/foundry/l2/unit/GatewayCTMDeployer/GatewayCTMDeployer.t.sol +++ b/l1-contracts/test/foundry/l2/unit/GatewayCTMDeployer/GatewayCTMDeployer.t.sol @@ -73,7 +73,7 @@ contract GatewayCTMDeployerTest is Test { new TestnetVerifier(); new Verifier(); - new ValidatorTimelock(address(0), 0, 0); + new ValidatorTimelock(address(0), 0); // This call will likely fail due to various checks, but we just 
need to get the bytecode published try new TransparentUpgradeableProxy(address(0), address(0), hex"") {} catch {} diff --git a/l1-contracts/test/foundry/unit/concrete/GatewayTransactionFilterer/CheckTransaction.sol b/l1-contracts/test/foundry/unit/concrete/GatewayTransactionFilterer/CheckTransaction.sol index 3231a7144..f9e22907f 100644 --- a/l1-contracts/test/foundry/unit/concrete/GatewayTransactionFilterer/CheckTransaction.sol +++ b/l1-contracts/test/foundry/unit/concrete/GatewayTransactionFilterer/CheckTransaction.sol @@ -4,13 +4,16 @@ pragma solidity 0.8.24; import {GatewayTransactionFiltererTest} from "./_GatewayTransactionFilterer_Shared.t.sol"; import {IGetters} from "contracts/state-transition/chain-interfaces/IGetters.sol"; -import {IL2Bridge} from "contracts/bridge/interfaces/IL2Bridge.sol"; import {IBridgehub} from "contracts/bridgehub/IBridgehub.sol"; +import {IAssetRouterBase} from "contracts/bridge/asset-router/IAssetRouterBase.sol"; import {AlreadyWhitelisted, InvalidSelector, NotWhitelisted} from "contracts/common/L1ContractErrors.sol"; contract CheckTransactionTest is GatewayTransactionFiltererTest { function test_TransactionAllowedOnlyFromWhitelistedSenderWhichIsNotAssetRouter() public { - bytes memory txCalladata = abi.encodeCall(IL2Bridge.finalizeDeposit, (bytes32("0x12345"), bytes("0x23456"))); + bytes memory txCalladata = abi.encodeCall( + IAssetRouterBase.finalizeDeposit, + (uint256(10), bytes32("0x12345"), bytes("0x23456")) + ); vm.startPrank(owner); vm.mockCall( bridgehub, @@ -50,7 +53,10 @@ contract CheckTransactionTest is GatewayTransactionFiltererTest { function test_TransactionAllowedFromWhitelistedSenderForChainBridging() public { address stm = address(0x6060606); - bytes memory txCalladata = abi.encodeCall(IL2Bridge.finalizeDeposit, (bytes32("0x12345"), bytes("0x23456"))); + bytes memory txCalladata = abi.encodeCall( + IAssetRouterBase.finalizeDeposit, + (uint256(10), bytes32("0x12345"), bytes("0x23456")) + ); vm.startPrank(owner); vm.mockCall( bridgehub, @@ -74,9 +80,14 @@ contract CheckTransactionTest is GatewayTransactionFiltererTest { } function test_TransactionFailsWithInvalidSelectorEvenIfTheSenderIsAR() public { - bytes memory txCalladata = abi.encodeCall(IL2Bridge.withdraw, (bytes32("0x12345"), bytes("0x23456"))); + bytes memory txCalladata = abi.encodeCall( + IAssetRouterBase.setAssetHandlerAddressThisChain, + (bytes32("0x12345"), address(0x01234567890123456789)) + ); vm.prank(owner); - vm.expectRevert(abi.encodeWithSelector(InvalidSelector.selector, IL2Bridge.withdraw.selector)); + vm.expectRevert( + abi.encodeWithSelector(InvalidSelector.selector, IAssetRouterBase.setAssetHandlerAddressThisChain.selector) + ); bool isTxAllowed = transactionFiltererProxy.isTransactionAllowed( assetRouter, address(0), diff --git a/l2-contracts/contracts/L2ContractHelper.sol b/l2-contracts/contracts/L2ContractHelper.sol index 219b1927b..f367e33ab 100644 --- a/l2-contracts/contracts/L2ContractHelper.sol +++ b/l2-contracts/contracts/L2ContractHelper.sol @@ -55,6 +55,12 @@ interface IContractDeployer { /// @param _input the calldata to be sent to the constructor of the new contract function create2(bytes32 _salt, bytes32 _bytecodeHash, bytes calldata _input) external returns (address); + /// @notice Calculates the address of a create2 contract deployment + /// @param _sender The address of the sender. + /// @param _bytecodeHash The bytecode hash of the new contract to be deployed. 
+ /// @param _salt a unique value to create the deterministic address of the new contract + /// @param _input the calldata to be sent to the constructor of the new contract + /// @return newAddress The derived address of the account. function getNewAddressCreate2( address _sender, bytes32 _bytecodeHash, @@ -82,6 +88,11 @@ interface IBaseToken { * the compression of the state diffs and bytecodes. */ interface ICompressor { + /// @notice Verifies that the compression of state diffs has been done correctly for the {_stateDiffs} param. + /// @param _numberOfStateDiffs The number of state diffs being checked. + /// @param _enumerationIndexSize Number of bytes used to represent an enumeration index for repeated writes. + /// @param _stateDiffs Encoded full state diff structs. See the first dev comment below for encoding. + /// @param _compressedStateDiffs The compressed state diffs function verifyCompressedStateDiffs( uint256 _numberOfStateDiffs, uint256 _enumerationIndexSize, diff --git a/l2-contracts/contracts/errors/L2ContractErrors.sol b/l2-contracts/contracts/errors/L2ContractErrors.sol index 332c1b8b7..89e548ea0 100644 --- a/l2-contracts/contracts/errors/L2ContractErrors.sol +++ b/l2-contracts/contracts/errors/L2ContractErrors.sol @@ -8,22 +8,14 @@ error AddressMismatch(address expected, address supplied); error AssetIdMismatch(bytes32 expected, bytes32 supplied); // 0x5e85ae73 error AmountMustBeGreaterThanZero(); -// 0xb4f54111 -error DeployFailed(); // 0x7138356f error EmptyAddress(); -// 0x1c25715b -error EmptyBytes32(); // 0x1bdfd505 error FailedToTransferTokens(address tokenContract, address to, uint256 amount); // 0x2a1b2dd8 error InsufficientAllowance(uint256 providedAllowance, uint256 requiredAmount); -// 0xcbd9d2e0 -error InvalidCaller(address); // 0xb4fa3fb3 error InvalidInput(); -// 0x0ac76f01 -error NonSequentialVersion(); // 0x8e4a23d6 error Unauthorized(address); // 0xff15b069 diff --git a/system-contracts/SystemContractsHashes.json b/system-contracts/SystemContractsHashes.json index 60a5b5c83..a6dec1ede 100644 --- a/system-contracts/SystemContractsHashes.json +++ b/system-contracts/SystemContractsHashes.json @@ -3,49 +3,49 @@ "contractName": "AccountCodeStorage", "bytecodePath": "artifacts-zk/contracts-preprocessed/AccountCodeStorage.sol/AccountCodeStorage.json", "sourceCodePath": "contracts-preprocessed/AccountCodeStorage.sol", - "bytecodeHash": "0x0100005d279e00e3781eba9b8813f8de7bc5d652c848d583d0062b36b2cae29e", + "bytecodeHash": "0x0100005df535e7d1e6f3933b26076778d9c44fd6e7faf546732f08290d8c8f94", "sourceCodeHash": "0x2e0e09d57a04bd1e722d8bf8c6423fdf3f8bca44e5e8c4f6684f987794be066e" }, { "contractName": "BootloaderUtilities", "bytecodePath": "artifacts-zk/contracts-preprocessed/BootloaderUtilities.sol/BootloaderUtilities.json", "sourceCodePath": "contracts-preprocessed/BootloaderUtilities.sol", - "bytecodeHash": "0x010007c7d96a06465d57858ea13b4bb612215046e127974b5f5b5808f9e3c788", + "bytecodeHash": "0x010007c72b7f29a0e1954ee4c65e6598d0934d33c692faedd7ac4fe30b508fa3", "sourceCodeHash": "0x0f1213c4b95acb71f4ab5d4082cc1aeb2bd5017e1cccd46afc66e53268609d85" }, { "contractName": "ComplexUpgrader", "bytecodePath": "artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json", "sourceCodePath": "contracts-preprocessed/ComplexUpgrader.sol", - "bytecodeHash": "0x010000bf784451a15f1bdcaa8ed5770cc25235984e44ce605779b79849386467", + "bytecodeHash": "0x010000bf6fcec0995b82b1c51133a507c8f63111234530b69fe7dadaae0c8172", "sourceCodeHash": 
"0xfcc74aefbc7cbde7945c29bad0e47527ac443bd6b75251a4ae520e28c714af37" }, { "contractName": "Compressor", "bytecodePath": "artifacts-zk/contracts-preprocessed/Compressor.sol/Compressor.json", "sourceCodePath": "contracts-preprocessed/Compressor.sol", - "bytecodeHash": "0x0100014b6c51a439159f8e86a891e3515eeea6b782efcba60b7b2ff0008a6869", + "bytecodeHash": "0x0100014b2cac967629cb05fb59a5c77cb5a077b74c50521ed9216a59511bf182", "sourceCodeHash": "0x7240b5fb2ea8e184522e731fb14f764ebae52b8a69d1870a55daedac9a3ed617" }, { "contractName": "ContractDeployer", "bytecodePath": "artifacts-zk/contracts-preprocessed/ContractDeployer.sol/ContractDeployer.json", "sourceCodePath": "contracts-preprocessed/ContractDeployer.sol", - "bytecodeHash": "0x010004e51f0fb3af074c21411a299285b9b6760d577a79bfd98a6d15bde5bd2c", + "bytecodeHash": "0x010004e5a266e697bb45bc90ff310dcb293725006146ff83e46bde8f3c6b44fa", "sourceCodeHash": "0x92bc09da23ed9d86ba7a84f0dbf48503c99582ae58cdbebbdcc5f14ea1fcf014" }, { "contractName": "Create2Factory", "bytecodePath": "artifacts-zk/contracts-preprocessed/Create2Factory.sol/Create2Factory.json", "sourceCodePath": "contracts-preprocessed/Create2Factory.sol", - "bytecodeHash": "0x0100004979f4353b7edd11ad71b4e0435ae74dc669248f12646e06a95ae5eeec", + "bytecodeHash": "0x010000493a391e65a70dea42442132cf7c7001dac94388b9c4218ce9b1491b57", "sourceCodeHash": "0x97392413259e6aae5187768cefd734507460ae818d6975709cc9b4e15a9af906" }, { "contractName": "DefaultAccount", "bytecodePath": "artifacts-zk/contracts-preprocessed/DefaultAccount.sol/DefaultAccount.json", "sourceCodePath": "contracts-preprocessed/DefaultAccount.sol", - "bytecodeHash": "0x0100055d7dee7cec6611cf68fa483883fedccd32592e5b91418d5e8338880fc1", + "bytecodeHash": "0x0100055d74f7387e03ecbb5209bea7e0318aea05cfaaa1c195a85df100115cea", "sourceCodeHash": "0xebffe840ebbd9329edb1ebff8ca50f6935e7dabcc67194a896fcc2e968d46dfb" }, { @@ -59,71 +59,71 @@ "contractName": "ImmutableSimulator", "bytecodePath": "artifacts-zk/contracts-preprocessed/ImmutableSimulator.sol/ImmutableSimulator.json", "sourceCodePath": "contracts-preprocessed/ImmutableSimulator.sol", - "bytecodeHash": "0x01000039bbbb1e91691c8c36672cd0b57adb505bf485b1aeea7b1e1f41d592ef", + "bytecodeHash": "0x0100003946a9e538157e73717201b8cd17af70998602a3692b0ac1eff6ad850e", "sourceCodeHash": "0x9659e69f7db09e8f60a8bb95314b1ed26afcc689851665cf27f5408122f60c98" }, { "contractName": "KnownCodesStorage", "bytecodePath": "artifacts-zk/contracts-preprocessed/KnownCodesStorage.sol/KnownCodesStorage.json", "sourceCodePath": "contracts-preprocessed/KnownCodesStorage.sol", - "bytecodeHash": "0x0100006f68de5d0154a31ff1e889f0623c2b9bfaed2109547c0bbc93df82d6c3", + "bytecodeHash": "0x0100006f1ab2c7415de3914a2b9c53942cd3ff6471f698e7383b59f51e33e4d3", "sourceCodeHash": "0xb39b5b81168653e0c5062f7b8e1d6d15a4e186df3317f192f0cb2fc3a74f5448" }, { "contractName": "L1Messenger", "bytecodePath": "artifacts-zk/contracts-preprocessed/L1Messenger.sol/L1Messenger.json", "sourceCodePath": "contracts-preprocessed/L1Messenger.sol", - "bytecodeHash": "0x010001f7efd57d106ffdacde139e11ae13590509ee55d8ba15573e4410ac092a", - "sourceCodeHash": "0x8d22a4019347a45cb0c27bed9e98f7033637a7bdcd90fafb1922caa48f2b05de" + "bytecodeHash": "0x010001f74f7e45f40e1acbae30507ef94ea2775026a6ba0d0eb38cce10e4a472", + "sourceCodeHash": "0xe97846e4ff5f1cfffd6a454f5ad278deecf6fd7a67525908dea9af877dc822a9" }, { "contractName": "L2BaseToken", "bytecodePath": "artifacts-zk/contracts-preprocessed/L2BaseToken.sol/L2BaseToken.json", "sourceCodePath": 
"contracts-preprocessed/L2BaseToken.sol", - "bytecodeHash": "0x010001033bf67c0464dce4ed878664840d0b37d25756f6c6c2fb439a253b3017", + "bytecodeHash": "0x01000103bbfa393b49b9f8a7adcfedf1273b7928750f3ea8798347dfd8ca0d6f", "sourceCodeHash": "0x8bdd2b4d0b53dba84c9f0af250bbaa2aad10b3de6747bba957f0bd3721090dfa" }, { "contractName": "L2GatewayUpgrade", "bytecodePath": "artifacts-zk/contracts-preprocessed/L2GatewayUpgrade.sol/L2GatewayUpgrade.json", "sourceCodePath": "contracts-preprocessed/L2GatewayUpgrade.sol", - "bytecodeHash": "0x0100038b37fe650a1b83b23cdcf35cb2701bb5f952f0a63d8f080718788ed4a4", + "bytecodeHash": "0x0100038b3b4065d2682996020e14177a9b4632e054b6718f68d46ff13c012b20", "sourceCodeHash": "0x9248f46f491b8853da77e8f9787cfc1a136abee90fde18a3b8f47dcb8859c63c" }, { "contractName": "L2GatewayUpgradeHelper", "bytecodePath": "artifacts-zk/contracts-preprocessed/L2GatewayUpgradeHelper.sol/L2GatewayUpgradeHelper.json", "sourceCodePath": "contracts-preprocessed/L2GatewayUpgradeHelper.sol", - "bytecodeHash": "0x01000007535b44016390e041915eadc958c713a1a8f5bc27e35ad444ef546fad", + "bytecodeHash": "0x010000071330ec1656098ed33e28b475e101394550c02907d7ee2abbae9b762e", "sourceCodeHash": "0xd1c42c4d338697b8effbfe22a0f07d8d9c5a06c8ec8f45deae77765af48a355b" }, { "contractName": "L2GenesisUpgrade", "bytecodePath": "artifacts-zk/contracts-preprocessed/L2GenesisUpgrade.sol/L2GenesisUpgrade.json", "sourceCodePath": "contracts-preprocessed/L2GenesisUpgrade.sol", - "bytecodeHash": "0x010001b3d649bf9be5b9ed5cc8bc7ae0c7b9664ce63c33e74d7c0674a369521b", - "sourceCodeHash": "0xeb8583c1b31bd66d71f253cab8f8c38d789b7a5986a0ae0a1807b69532678343" + "bytecodeHash": "0x010001b386e0ed48ce9fbaad09c7865a58c28c8350d9bc9446b3beaee4aee999", + "sourceCodeHash": "0x2aaddd8a8ef3f56b4f4e6ba52c0035572145b0ea562fbf218a2eb5fc462f988d" }, { "contractName": "MsgValueSimulator", "bytecodePath": "artifacts-zk/contracts-preprocessed/MsgValueSimulator.sol/MsgValueSimulator.json", "sourceCodePath": "contracts-preprocessed/MsgValueSimulator.sol", - "bytecodeHash": "0x0100005d23f184324cb35d3e9c76e70a898a94e347eea5ddfeea2055b372ec8a", + "bytecodeHash": "0x0100005df63cf8940e407a67346b406dcddf4788cba9792ecd6a0edb8d8b3bd8", "sourceCodeHash": "0x082f3dcbc2fe4d93706c86aae85faa683387097d1b676e7ebd00f71ee0f13b71" }, { "contractName": "NonceHolder", "bytecodePath": "artifacts-zk/contracts-preprocessed/NonceHolder.sol/NonceHolder.json", "sourceCodePath": "contracts-preprocessed/NonceHolder.sol", - "bytecodeHash": "0x010000d9fbee5cbf613421094d193a1a012eb071565a311c548080b6db5f8157", + "bytecodeHash": "0x010000d9e79c30aeda9b823f1a0161c7637ed50848e6287e2a34e37cf2e7e4e8", "sourceCodeHash": "0xcd0c0366effebf2c98c58cf96322cc242a2d1c675620ef5514b7ed1f0a869edc" }, { "contractName": "PubdataChunkPublisher", "bytecodePath": "artifacts-zk/contracts-preprocessed/PubdataChunkPublisher.sol/PubdataChunkPublisher.json", "sourceCodePath": "contracts-preprocessed/PubdataChunkPublisher.sol", - "bytecodeHash": "0x01000049fe72fd8726473cc9fd892b9a6aa02d12f3db1bc20452c5fd0b1c4cc8", - "sourceCodeHash": "0x04d3d2e4019081c87aae5c22a060d84ae2e9d631ebce59801ecce37b9c87e4c7" + "bytecodeHash": "0x01000049377ba719b2d7493420854f12ebe67b75e21338777fb22b73e58ec057", + "sourceCodeHash": "0x398b1b9325b39d4c31e672866d4cbdf1cab453fae8d29f438262d921d427f094" }, { "contractName": "SloadContract", @@ -136,7 +136,7 @@ "contractName": "SystemContext", "bytecodePath": "artifacts-zk/contracts-preprocessed/SystemContext.sol/SystemContext.json", "sourceCodePath": 
"contracts-preprocessed/SystemContext.sol", - "bytecodeHash": "0x0100017f3a6ec3b05bcfa216590e25bdfde2ac07c22c6ec2c7fb82ac54187a45", + "bytecodeHash": "0x0100017f235b172e9a808764229a777b027e179eacc88a7ea48ef81cb193630a", "sourceCodeHash": "0x22406893d61abd477ce071dce506cf2534cca7b7717d015769fc8af1f1b80e06" }, { diff --git a/system-contracts/contracts/L1Messenger.sol b/system-contracts/contracts/L1Messenger.sol index 0f9242ef1..1f0cfe8e5 100644 --- a/system-contracts/contracts/L1Messenger.sol +++ b/system-contracts/contracts/L1Messenger.sol @@ -186,6 +186,7 @@ contract L1Messenger is IL1Messenger, SystemContractBase { /// @notice Verifies that the {_operatorInput} reflects what occurred within the L1Batch and that /// the compressed statediffs are equivalent to the full state diffs. + /// @param _l2DAValidator the address of the l2 da validator /// @param _operatorInput The total pubdata and uncompressed state diffs of transactions that were /// processed in the current L1 Batch. Pubdata consists of L2 to L1 Logs, messages, deployed bytecode, and state diffs. /// @dev Function that should be called exactly once per L1 Batch by the bootloader. diff --git a/system-contracts/contracts/L2GenesisUpgrade.sol b/system-contracts/contracts/L2GenesisUpgrade.sol index 111135a4a..00a22c799 100644 --- a/system-contracts/contracts/L2GenesisUpgrade.sol +++ b/system-contracts/contracts/L2GenesisUpgrade.sol @@ -11,8 +11,14 @@ import {L2GatewayUpgradeHelper} from "./L2GatewayUpgradeHelper.sol"; /// @custom:security-contact security@matterlabs.dev /// @author Matter Labs -/// @notice The contract that can be used for deterministic contract deployment. +/// @notice The l2 component of the genesis upgrade. contract L2GenesisUpgrade is IL2GenesisUpgrade { + /// @notice The function that is delegateCalled from the complex upgrader. + /// @dev It is used to set the chainId and to deploy the force deployments. + /// @param _chainId the chain id + /// @param _ctmDeployer the address of the ctm deployer + /// @param _fixedForceDeploymentsData the force deployments data + /// @param _additionalForceDeploymentsData the additional force deployments data function genesisUpgrade( uint256 _chainId, address _ctmDeployer, diff --git a/system-contracts/contracts/PubdataChunkPublisher.sol b/system-contracts/contracts/PubdataChunkPublisher.sol index f61f0b5ac..7c2abf2e1 100644 --- a/system-contracts/contracts/PubdataChunkPublisher.sol +++ b/system-contracts/contracts/PubdataChunkPublisher.sol @@ -2,7 +2,6 @@ pragma solidity 0.8.24; import {IPubdataChunkPublisher} from "./interfaces/IPubdataChunkPublisher.sol"; -import {SystemContractBase} from "./abstract/SystemContractBase.sol"; import {BLOB_SIZE_BYTES, MAX_NUMBER_OF_BLOBS} from "./Constants.sol"; import {TooMuchPubdata} from "./SystemContractErrors.sol"; @@ -11,7 +10,7 @@ import {TooMuchPubdata} from "./SystemContractErrors.sol"; * @custom:security-contact security@matterlabs.dev * @notice Smart contract for chunking pubdata into the appropriate size for EIP-4844 blobs. */ -contract PubdataChunkPublisher is IPubdataChunkPublisher, SystemContractBase { +contract PubdataChunkPublisher is IPubdataChunkPublisher { /// @notice Chunks pubdata into pieces that can fit into blobs. /// @param _pubdata The total l2 to l1 pubdata that will be sent via L1 blobs. /// @dev Note: This is an early implementation, in the future we plan to support up to 16 blobs per l1 batch. 
diff --git a/system-contracts/contracts/interfaces/IMessageRoot.sol b/system-contracts/contracts/interfaces/IMessageRoot.sol index 854508eb1..3966caa15 100644 --- a/system-contracts/contracts/interfaces/IMessageRoot.sol +++ b/system-contracts/contracts/interfaces/IMessageRoot.sol @@ -2,6 +2,13 @@ // We use a floating point pragma here so it can be used within other projects that interact with the ZKsync ecosystem without using our exact pragma version. pragma solidity ^0.8.20; +/** + * @author Matter Labs + * @notice The MessageRoot contract is responsible for storing and aggregating the batch roots of different chains into a single aggregated root. + * @custom:security-contact security@matterlabs.dev + */ interface IMessageRoot { + /// @notice The aggregated root of the batches from different chains. + /// @return aggregatedRoot The aggregated root of the batches from different chains. function getAggregatedRoot() external view returns (bytes32 aggregatedRoot); } diff --git a/system-contracts/scripts/constants.ts b/system-contracts/scripts/constants.ts index 71425ed6a..0827b2df1 100644 --- a/system-contracts/scripts/constants.ts +++ b/system-contracts/scripts/constants.ts @@ -229,7 +229,7 @@ export const SYSTEM_CONTRACTS: ISystemContracts = { address: "0x0000000000000000000000000000000000010007", codeName: "L2WrappedBaseToken", lang: Language.Solidity, - } + }, } as const; export const EIP712_TX_ID = 113;