diff --git a/.github/tests/blockscout.yaml b/.github/tests/blockscout.yaml index 615f94c..cf25243 100644 --- a/.github/tests/blockscout.yaml +++ b/.github/tests/blockscout.yaml @@ -1,7 +1,8 @@ optimism_package: - - participants: - - el_type: op-geth - network_params: - name: op-rollup-one - additional_services: - - blockscout + chains: + - participants: + - el_type: op-geth + network_params: + name: op-rollup-one + additional_services: + - blockscout diff --git a/.github/tests/helder-op.yaml_norun b/.github/tests/helder-op.yaml_norun index aabe4c1..f1fd661 100644 --- a/.github/tests/helder-op.yaml_norun +++ b/.github/tests/helder-op.yaml_norun @@ -1,8 +1,9 @@ optimism_package: - participants: - - el_type: op-geth - additional_services: - - blockscout + chains: + - participants: + - el_type: op-geth + additional_services: + - blockscout ethereum_package: participants: - el_type: nethermind diff --git a/.github/tests/hildr.yaml b/.github/tests/hildr.yaml index 38f9d83..ad3be74 100644 --- a/.github/tests/hildr.yaml +++ b/.github/tests/hildr.yaml @@ -1,10 +1,11 @@ optimism_package: - participants: - - el_type: op-geth - cl_type: op-node - - el_type: op-geth - cl_type: hildr - - el_type: op-reth - cl_type: hildr - - el_type: op-erigon - cl_type: hildr + chains: + - participants: + - el_type: op-geth + cl_type: op-node + - el_type: op-geth + cl_type: hildr + - el_type: op-reth + cl_type: hildr + - el_type: op-erigon + cl_type: hildr diff --git a/.github/tests/multiple_l2s.yaml b/.github/tests/multiple_l2s.yaml index 7216479..a536a78 100644 --- a/.github/tests/multiple_l2s.yaml +++ b/.github/tests/multiple_l2s.yaml @@ -1,7 +1,8 @@ optimism_package: - - network_params: - name: op-rollup-one - network_id: '3151909' - - network_params: - name: op-rollup-two - network_id: '3151910' + chains: + - network_params: + name: op-rollup-one + network_id: '3151909' + - network_params: + name: op-rollup-two + network_id: '3151910' diff --git a/.github/tests/op-besu.yaml b/.github/tests/op-besu.yaml index da0d16c..cb82369 100644 --- a/.github/tests/op-besu.yaml +++ b/.github/tests/op-besu.yaml @@ -1,4 +1,5 @@ optimism_package: - participants: - - el_type: op-besu - cl_type: op-node + chains: + - participants: + - el_type: op-besu + cl_type: op-node diff --git a/.github/tests/op-erigon.yaml b/.github/tests/op-erigon.yaml index df9f573..26bd7b6 100644 --- a/.github/tests/op-erigon.yaml +++ b/.github/tests/op-erigon.yaml @@ -1,4 +1,5 @@ optimism_package: - participants: - - el_type: op-erigon - cl_type: op-node + chains: + - participants: + - el_type: op-erigon + cl_type: op-node diff --git a/.github/tests/op-geth.yaml b/.github/tests/op-geth.yaml index cb00eaf..a11c25f 100644 --- a/.github/tests/op-geth.yaml +++ b/.github/tests/op-geth.yaml @@ -1,4 +1,5 @@ optimism_package: - participants: - - el_type: op-geth - cl_type: op-node + chains: + - participants: + - el_type: op-geth + cl_type: op-node diff --git a/.github/tests/op-nethermind.yaml b/.github/tests/op-nethermind.yaml index afe9909..1093f32 100644 --- a/.github/tests/op-nethermind.yaml +++ b/.github/tests/op-nethermind.yaml @@ -1,5 +1,6 @@ optimism_package: - participants: - - el_type: op-geth - - el_type: op-nethermind - cl_type: op-node + chains: + - participants: + - el_type: op-geth + - el_type: op-nethermind + cl_type: op-node diff --git a/.github/tests/op-node.yaml b/.github/tests/op-node.yaml index 8ce3425..e9c6d2f 100644 --- a/.github/tests/op-node.yaml +++ b/.github/tests/op-node.yaml @@ -1,10 +1,11 @@ optimism_package: - participants: - - 
el_type: op-geth - cl_type: op-node - - el_type: op-reth - cl_type: op-node - - el_type: op-erigon - cl_type: op-node - - el_type: op-nethermind - cl_type: op-node + chains: + - participants: + - el_type: op-geth + cl_type: op-node + - el_type: op-reth + cl_type: op-node + - el_type: op-erigon + cl_type: op-node + - el_type: op-nethermind + cl_type: op-node diff --git a/.github/tests/op-reth.yaml b/.github/tests/op-reth.yaml index 27f52fd..5f959db 100644 --- a/.github/tests/op-reth.yaml +++ b/.github/tests/op-reth.yaml @@ -1,4 +1,5 @@ optimism_package: - participants: - - el_type: op-reth - cl_type: op-node + chains: + - participants: + - el_type: op-reth + cl_type: op-node diff --git a/.github/tests/single_l2.yaml b/.github/tests/single_l2.yaml index fd243da..3d10586 100644 --- a/.github/tests/single_l2.yaml +++ b/.github/tests/single_l2.yaml @@ -1,3 +1,4 @@ optimism_package: - participants: - - count: 2 + chains: + - participants: + - count: 2 diff --git a/README.md b/README.md index 2653faa..ad1b642 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,15 @@ ## Welcome to Optimism Package The default package for Optimism + ```yaml optimism_package: - participants: - - el_type: op-geth - cl_type: op-node - - el_type: op-reth - - el_type: op-erigon - - el_type: op-nethermind + chains: + - participants: + - el_type: op-geth + cl_type: op-node + - el_type: op-reth + - el_type: op-erigon + - el_type: op-nethermind ethereum_package: network_params: preset: minimal @@ -48,95 +50,98 @@ To configure the package behaviour, you can modify your `network_params.yaml` fi ```yaml optimism_package: - # Specification of the optimism-participants in the network - participants: - # EL(Execution Layer) Specific flags - # The type of EL client that should be started - # Valid values are: - # op-geth - # op-reth - # op-erigon - # op-nethermind - # op-besu - - el_type: geth + # An array of L2 networks to run + chains: + # Specification of the optimism-participants in the network + - participants: + # EL(Execution Layer) Specific flags + # The type of EL client that should be started + # Valid values are: + # op-geth + # op-reth + # op-erigon + # op-nethermind + # op-besu + - el_type: op-geth - # The Docker image that should be used for the EL client; leave blank to use the default for the client type - # Defaults by client: - # - op-geth: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:latest - # - op-reth: parithoshj/op-reth:latest - # - op-erigon: testinprod/op-erigon:latest - # - op-nethermind: nethermindeth/nethermind:op-c482d56 - # - op-besu: ghcr.io/optimism-java/op-besu:latest - el_image: "" - - # CL(Consensus Layer) Specific flags - # The type of CL client that should be started - # Valid values are: - # op-node - # hildr - cl_type: op-node - - # The Docker image that should be used for the CL client; leave blank to use the default for the client type - # Defaults by client: - # - op-node: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop - # - hildr: ghcr.io/optimism-java/hildr:latest - cl_image: "" - - # Count of nodes to spin up for this participant - # Default to 1 - count: 1 - - # Default configuration parameters for the network - network_params: - # Network name, used to enable syncing of alternative networks - # Defaults to "kurtosis" - network: "kurtosis" - - # The network ID of the network. 
- # Must be unique for each network (if you run multiple networks) - # Defaults to "2151908" - network_id: "2151908" - - # Seconds per slots - seconds_per_slot: 2 - - # Name of your rollup. - # Must be unique for each rollup (if you run multiple rollups) - # Defaults to "op-kurtosis" - name: "op-kurtosis" - - # Triggering future forks in the network - # Fjord fork - # Defaults to 0 (genesis activation) - decimal value - # Offset is in seconds - fjord_time_offset: 0 - - # Granite fork - # Defaults to None - not activated - decimal value - # Offset is in seconds - granite_time_offset: "" - - # Holocene fork - # Defaults to None - not activated - decimal value - # Offset is in seconds - holocene_time_offset: "" - - # Interop fork - # Defaults to None - not activated - decimal value - # Offset is in seconds - interop_time_offset: "" - - - # Additional services to run alongside the network - # Defaults to [] - # Available services: - # - blockscout - additional_services: [] - - # L2 contract deployer configuration + # The Docker image that should be used for the EL client; leave blank to use the default for the client type + # Defaults by client: + # - op-geth: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:latest + # - op-reth: parithoshj/op-reth:latest + # - op-erigon: testinprod/op-erigon:latest + # - op-nethermind: nethermindeth/nethermind:op-c482d56 + # - op-besu: ghcr.io/optimism-java/op-besu:latest + el_image: "" + + # CL(Consensus Layer) Specific flags + # The type of CL client that should be started + # Valid values are: + # op-node + # hildr + cl_type: op-node + + # The Docker image that should be used for the CL client; leave blank to use the default for the client type + # Defaults by client: + # - op-node: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:develop + # - hildr: ghcr.io/optimism-java/hildr:latest + cl_image: "" + + # Count of nodes to spin up for this participant + # Default to 1 + count: 1 + + # Default configuration parameters for the network + network_params: + # Network name, used to enable syncing of alternative networks + # Defaults to "kurtosis" + network: "kurtosis" + + # The network ID of the network. + # Must be unique for each network (if you run multiple networks) + # Defaults to "2151908" + network_id: "2151908" + + # Seconds per slots + seconds_per_slot: 2 + + # Name of your rollup. 
+ # Must be unique for each rollup (if you run multiple rollups) + # Defaults to "op-kurtosis" + name: "op-kurtosis" + + # Triggering future forks in the network + # Fjord fork + # Defaults to 0 (genesis activation) - decimal value + # Offset is in seconds + fjord_time_offset: 0 + + # Granite fork + # Defaults to None - not activated - decimal value + # Offset is in seconds + granite_time_offset: "" + + # Holocene fork + # Defaults to None - not activated - decimal value + # Offset is in seconds + holocene_time_offset: "" + + # Interop fork + # Defaults to None - not activated - decimal value + # Offset is in seconds + interop_time_offset: "" + + + # Additional services to run alongside the network + # Defaults to [] + # Available services: + # - blockscout + additional_services: [] + + # L2 contract deployer configuration - used for all L2 networks # The docker image that should be used for the L2 contract deployer op_contract_deployer_params: - image: ethpandaops/optimism-contract-deployer:develop + image: mslipper/op-deployer:latest + artifacts_url: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-4accd01f0c35c26f24d2aa71aba898dd7e5085a2ce5daadc8a84b10caf113409.tar.gz ``` ### Additional configuration recommendations @@ -145,11 +150,12 @@ It is required you to launch an L1 Ethereum node to interact with the L2 network ```yaml optimism_package: - participants: - - el_type: op-geth - cl_type: op-node - additional_services: - - blockscout + chains: + - participants: + - el_type: op-geth + cl_type: op-node + additional_services: + - blockscout ethereum_package: participants: - el_type: geth @@ -165,20 +171,21 @@ Additionally, you can spin up multiple L2 networks by providing a list of L2 con ```yaml optimism_package: - - participants: - - el_type: op-geth - network_params: - name: op-rollup-one - network_id: "3151909" - additional_services: - - blockscout - - participants: - - el_type: op-geth - network_params: - name: op-rollup-two - network_id: "3151910" - additional_services: - - blockscout + chains: + - participants: + - el_type: op-geth + network_params: + name: op-rollup-one + network_id: "3151909" + additional_services: + - blockscout + - participants: + - el_type: op-geth + network_params: + name: op-rollup-two + network_id: "3151910" + additional_services: + - blockscout ethereum_package: participants: - el_type: geth diff --git a/main.star b/main.star index 213a3e3..975c8e3 100644 --- a/main.star +++ b/main.star @@ -18,10 +18,11 @@ def run(plan, args): """ plan.print("Parsing the L1 input args") # If no args are provided, use the default values with minimal preset - ethereum_args = args.get( - "ethereum_package", {"network_params": {"preset": "minimal"}} - ) - optimism_args = args.get("optimism_package", {}) + ethereum_args = args.get("ethereum_package", input_parser.default_ethereum_config()) + + # need to do a raw get here in case only optimism_package is provided. + # .get will return None if the key is in the config with a None value. 
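+    # (illustrative) if args == {"optimism_package": None}, then even
+    # args.get("optimism_package", input_parser.default_optimism_args())
+    # still returns None, hence the `or` fallback on the next line.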
+ optimism_args = args.get("optimism_package") or input_parser.default_optimism_args() optimism_args_with_right_defaults = input_parser.input_parser(plan, optimism_args) # Deploy the L1 plan.print("Deploying a local L1") @@ -38,17 +39,32 @@ def run(plan, args): all_l1_participants, l1_network_params, l1_network_id ) - if l1_network_params.network != "kurtosis": + if l1_network_params.network == "kurtosis": + plan.print("Waiting for L1 to start up") + wait_for_sync.wait_for_startup(plan, l1_config_env_vars) + else: + plan.print("Waiting for network to sync") wait_for_sync.wait_for_sync(plan, l1_config_env_vars) - l2_contract_deployer_image = ( - optimism_args_with_right_defaults.op_contract_deployer_params.image + deployment_output = contract_deployer.deploy_contracts( + plan, + l1_priv_key, + l1_config_env_vars, + optimism_args_with_right_defaults, ) - # Deploy Create2 Factory contract (only need to do this once for multiple l2s) - contract_deployer.deploy_factory_contract( - plan, l1_priv_key, l1_config_env_vars, l2_contract_deployer_image - ) + for chain in optimism_args_with_right_defaults.chains: + l2_launcher.launch_l2( + plan, + chain.network_params.name, + chain, + deployment_output, + l1_config_env_vars, + l1_priv_key, + all_l1_participants[0].el_context, + ) + + return # Deploy L2s plan.print("Deploying a local L2") if type(optimism_args) == "dict": @@ -104,15 +120,4 @@ def get_l1_config(all_l1_participants, l1_network_params, l1_network_id): env_vars["L1_WS_URL"] = str(all_l1_participants[0].el_context.ws_url) env_vars["L1_CHAIN_ID"] = str(l1_network_id) env_vars["L1_BLOCK_TIME"] = str(l1_network_params.seconds_per_slot) - env_vars["DEPLOYMENT_OUTFILE"] = ( - "/workspace/optimism/packages/contracts-bedrock/deployments/" - + str(l1_network_id) - + "/kurtosis.json" - ) - env_vars["STATE_DUMP_PATH"] = ( - "/workspace/optimism/packages/contracts-bedrock/deployments/" - + str(l1_network_id) - + "/state-dump.json" - ) - return env_vars diff --git a/network_params.yaml b/network_params.yaml index 4423444..81b1e0f 100644 --- a/network_params.yaml +++ b/network_params.yaml @@ -1,11 +1,21 @@ optimism_package: - participants: - - el_type: op-geth - - el_type: op-reth - - el_type: op-erigon - - el_type: op-nethermind - - el_type: op-besu - additional_services: - - blockscout + chains: + - participants: + - el_type: op-geth + el_image: "" + cl_type: op-node + cl_image: "" + count: 1 + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + name: "op-kurtosis" + fjord_time_offset: 0 + granite_time_offset: "" + holocene_time_offset: "" + interop_time_offset: "" + additional_services: [] op_contract_deployer_params: - image: ethpandaops/optimism-contract-deployer:develop + image: mslipper/op-deployer:latest + artifacts_url: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-4accd01f0c35c26f24d2aa71aba898dd7e5085a2ce5daadc8a84b10caf113409.tar.gz diff --git a/src/blockscout/blockscout_launcher.star b/src/blockscout/blockscout_launcher.star index 957b85f..dd891f2 100644 --- a/src/blockscout/blockscout_launcher.star +++ b/src/blockscout/blockscout_launcher.star @@ -7,6 +7,8 @@ constants = import_module( postgres = import_module("github.com/kurtosis-tech/postgres-package/main.star") +util = import_module("../util.star") + IMAGE_NAME_BLOCKSCOUT = "blockscout/blockscout-optimism:6.6.0" IMAGE_NAME_BLOCKSCOUT_VERIF = "ghcr.io/blockscout/smart-contract-verifier:v1.7.0" @@ -48,10 +50,18 @@ def launch_blockscout( l2_services_suffix, l1_el_context, 
l2_el_context, - l2oo_address, l2_network_name, - additional_env_vars, + deployment_output, + network_id, ): + rollup_filename = "rollup-{0}".format(network_id) + portal_address = util.read_network_config_value( + plan, deployment_output, rollup_filename, ".deposit_contract_address" + ) + l1_deposit_start_block = util.read_network_config_value( + plan, deployment_output, rollup_filename, ".genesis.l1.number" + ) + postgres_output = postgres.run( plan, service_name="{0}-postgres{1}".format( @@ -75,9 +85,15 @@ def launch_blockscout( l1_el_context, l2_el_context, verif_url, - l2oo_address, l2_network_name, - additional_env_vars, + { + "INDEXER_OPTIMISM_L1_PORTAL_CONTRACT": portal_address, + "INDEXER_OPTIMISM_L1_DEPOSITS_START_BLOCK": l1_deposit_start_block, + "INDEXER_OPTIMISM_L1_WITHDRAWALS_START_BLOCK": l1_deposit_start_block, + "INDEXER_OPTIMISM_L1_BATCH_START_BLOCK": l1_deposit_start_block, + # The L2OO is no longer deployed + "INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT": "0x0000000000000000000000000000000000000000", + }, ) blockscout_service = plan.add_service( "{0}{1}".format(SERVICE_NAME_BLOCKSCOUT, l2_services_suffix), config_backend @@ -112,7 +128,6 @@ def get_config_backend( l1_el_context, l2_el_context, verif_url, - l2oo_address, l2_network_name, additional_env_vars, ): diff --git a/src/cl/hildr/hildr_launcher.star b/src/cl/hildr/hildr_launcher.star index aea3af1..6b7a587 100644 --- a/src/cl/hildr/hildr_launcher.star +++ b/src/cl/hildr/hildr_launcher.star @@ -13,11 +13,12 @@ constants = import_module( "github.com/ethpandaops/ethereum-package/src/package_io/constants.star" ) +util = import_module("../../util.star") + # ---------------------------------- Beacon client ------------------------------------- # The Docker container runs as the "hildr" user so we can't write to root BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/hildr/hildr-beacon-data" -ROLLUP_CONFIG_MOUNT_PATH_ON_CONTAINER = "/network-configs/rollup.json" # Port IDs BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery" BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery" @@ -64,11 +65,8 @@ def launch( el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, sequencer_enabled, ): - network_name = shared_utils.get_network_name(launcher.network) - # beacon_node_identity_recipe = PostHttpRequestRecipe( # endpoint="/", # content_type="application/json", @@ -83,14 +81,12 @@ def launch( config = get_beacon_config( plan, - launcher.el_cl_genesis_data, - launcher.jwt_file, + launcher, image, service_name, el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, # beacon_node_identity_recipe, sequencer_enabled, ) @@ -122,14 +118,12 @@ def launch( def get_beacon_config( plan, - el_cl_genesis_data, - jwt_file, + launcher, image, service_name, el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, # beacon_node_identity_recipe, sequencer_enabled, ): @@ -155,12 +149,23 @@ def get_beacon_config( "--rpc-addr=0.0.0.0", "--rpc-port={0}".format(BEACON_HTTP_PORT_NUM), "--sync-mode=full", - "--network=" + ROLLUP_CONFIG_MOUNT_PATH_ON_CONTAINER, + "--network=" + + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS + + "/rollup-{0}.json".format(launcher.network_params.network_id), ] + sequencer_private_key = util.read_network_config_value( + plan, + launcher.deployment_output, + "sequencer-{0}".format(launcher.network_params.network_id), + ".privateKey", + ) + if sequencer_enabled: cmd.append("--sequencer-enable") + # sequencer private key can't be used by hildr yet + if 
len(existing_cl_clients) == 1: cmd.append( "--disc-boot-nodes=" @@ -170,8 +175,8 @@ def get_beacon_config( ) files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, - constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: launcher.deployment_output, + constants.JWT_MOUNTPOINT_ON_CLIENTS: launcher.jwt_file, } ports = {} ports.update(used_ports) @@ -192,9 +197,9 @@ def get_beacon_config( ) -def new_hildr_launcher(el_cl_genesis_data, jwt_file, network_params): +def new_hildr_launcher(deployment_output, jwt_file, network_params): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, - network=network_params.network, + network_params=network_params, ) diff --git a/src/cl/op-node/op_node_launcher.star b/src/cl/op-node/op_node_launcher.star index 4d242ff..b5f6d84 100644 --- a/src/cl/op-node/op_node_launcher.star +++ b/src/cl/op-node/op_node_launcher.star @@ -13,11 +13,12 @@ constants = import_module( "github.com/ethpandaops/ethereum-package/src/package_io/constants.star" ) +util = import_module("../../util.star") + # ---------------------------------- Beacon client ------------------------------------- # The Docker container runs as the "op-node" user so we can't write to root BEACON_DATA_DIRPATH_ON_SERVICE_CONTAINER = "/data/op-node/op-node-beacon-data" -ROLLUP_CONFIG_MOUNT_PATH_ON_CONTAINER = "/network-configs/rollup.json" # Port IDs BEACON_TCP_DISCOVERY_PORT_ID = "tcp-discovery" BEACON_UDP_DISCOVERY_PORT_ID = "udp-discovery" @@ -64,11 +65,8 @@ def launch( el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, sequencer_enabled, ): - network_name = shared_utils.get_network_name(launcher.network) - beacon_node_identity_recipe = PostHttpRequestRecipe( endpoint="/", content_type="application/json", @@ -83,14 +81,11 @@ def launch( config = get_beacon_config( plan, - launcher.el_cl_genesis_data, - launcher.jwt_file, + launcher, image, - service_name, el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, beacon_node_identity_recipe, sequencer_enabled, ) @@ -125,14 +120,11 @@ def launch( def get_beacon_config( plan, - el_cl_genesis_data, - jwt_file, + launcher, image, - service_name, el_context, existing_cl_clients, l1_config_env_vars, - gs_sequencer_private_key, beacon_node_identity_recipe, sequencer_enabled, ): @@ -148,7 +140,9 @@ def get_beacon_config( "--l2={0}".format(EXECUTION_ENGINE_ENDPOINT), "--l2.jwt-secret=" + constants.JWT_MOUNT_PATH_ON_CONTAINER, "--verifier.l1-confs=4", - "--rollup.config=" + ROLLUP_CONFIG_MOUNT_PATH_ON_CONTAINER, + "--rollup.config=" + + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS + + "/rollup-{0}.json".format(launcher.network_params.network_id), "--rpc.addr=0.0.0.0", "--rpc.port={0}".format(BEACON_HTTP_PORT_NUM), "--rpc.enable-admin", @@ -164,8 +158,15 @@ def get_beacon_config( "--p2p.listen.udp={0}".format(BEACON_DISCOVERY_PORT_NUM), ] + sequencer_private_key = util.read_network_config_value( + plan, + launcher.deployment_output, + "sequencer-{0}".format(launcher.network_params.network_id), + ".privateKey", + ) + if sequencer_enabled: - cmd.append("--p2p.sequencer.key=" + gs_sequencer_private_key) + cmd.append("--p2p.sequencer.key=" + sequencer_private_key) cmd.append("--sequencer.enabled") cmd.append("--sequencer.l1-confs=5") @@ -178,8 +179,8 @@ def get_beacon_config( ) files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, - constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, 
+ constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: launcher.deployment_output, + constants.JWT_MOUNTPOINT_ON_CLIENTS: launcher.jwt_file, } ports = {} ports.update(used_ports) @@ -200,9 +201,9 @@ def get_beacon_config( ) -def new_op_node_launcher(el_cl_genesis_data, jwt_file, network_params): +def new_op_node_launcher(deployment_output, jwt_file, network_params): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, - network=network_params.network, + network_params=network_params, ) diff --git a/src/contracts/contract_deployer.star b/src/contracts/contract_deployer.star index cf84ff7..29e46fc 100644 --- a/src/contracts/contract_deployer.star +++ b/src/contracts/contract_deployer.star @@ -4,193 +4,120 @@ FACTORY_ADDRESS = "0x4e59b44847b379578588920cA78FbF26c0B4956C" # raw tx data for deploying Create2Factory contract to L1 FACTORY_DEPLOYER_CODE = "0xf8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222" -CHAINSPEC_JQ_FILEPATH = "../../static_files/chainspec_template/gen2spec.jq" +FUND_SCRIPT_FILEPATH = "../../static_files/scripts" +utils = import_module("../util.star") -def deploy_factory_contract( + +def deploy_contracts( plan, priv_key, l1_config_env_vars, - image, + optimism_args, ): - factory_deployment_result = plan.run_sh( - name="op-deploy-factory-contract", - description="Deploying L2 factory contract to L1 (takes about a minute)", - image=image, - env_vars={ - "PRIVATE_KEY": str(priv_key), - "FUND_VALUE": "10ether", - "DEPLOY_CONFIG_PATH": "/workspace/optimism/packages/contracts-bedrock/deploy-config/getting-started.json", - "DEPLOYMENT_CONTEXT": "getting-started", - } - | l1_config_env_vars, + l2_chain_ids = ",".join( + [str(chain.network_params.network_id) for chain in optimism_args.chains] + ) + + op_deployer_init = plan.run_sh( + name="op-deployer-init", + description="Initialize L2 contract deployments", + image=optimism_args.op_contract_deployer_params.image, + env_vars=l1_config_env_vars, + store=[ + StoreSpec( + src="/network-data", + name="op-deployer-configs", + ) + ], run=" && ".join( [ - "while true; do sleep 1; echo 'L1 Chain is starting up'; if [ \"$(curl -s $CL_RPC_URL/eth/v1/beacon/headers/ | jq -r '.data[0].header.message.slot')\" != \"0\" ]; then echo 'L1 Chain has started!'; break; fi; done", - "cast send {0} --value $FUND_VALUE --rpc-url $L1_RPC_URL --private-key $PRIVATE_KEY".format( - FACTORY_DEPLOYER_ADDRESS - ), - "if [ $(cast codesize {0} --rpc-url $L1_RPC_URL) -gt 0 ]; then echo 'Factory contract already deployed!'; exit 0; fi".format( - FACTORY_ADDRESS + "mkdir -p /network-data", + "op-deployer init --l1-chain-id $L1_CHAIN_ID --l2-chain-ids {0} --workdir /network-data".format( + l2_chain_ids ), - "cast publish --rpc-url $L1_RPC_URL {0}".format(FACTORY_DEPLOYER_CODE), ] ), - wait="300s", - ) - - -def deploy_l2_contracts( - plan, - priv_key, - l1_config_env_vars, - l2_config_env_vars, - l2_services_suffix, - fork_activation_env, - image, -): - chainspec_files_artifact = plan.upload_files( - src=CHAINSPEC_JQ_FILEPATH, - name="op-chainspec-config{0}".format(l2_services_suffix), ) - op_genesis = plan.run_sh( - name="op-deploy-l2-contracts", - description="Deploying L2 contracts (takes about a minute)", - image=image, - 
env_vars={ - "PRIVATE_KEY": str(priv_key), - "FUND_VALUE": "10ether", - "DEPLOY_CONFIG_PATH": "/workspace/optimism/packages/contracts-bedrock/deploy-config/getting-started.json", - "DEPLOYMENT_CONTEXT": "getting-started", - } - | l1_config_env_vars - | l2_config_env_vars - | fork_activation_env, - files={ - "/workspace/optimism/packages/contracts-bedrock/deploy-config/chainspec-generator/": chainspec_files_artifact, - }, + op_deployer_configure = plan.run_sh( + name="op-deployer-configure", + description="Configure L2 contract deployments", + image=utils.DEPLOYMENT_UTILS_IMAGE, store=[ StoreSpec( - src="/network-configs", - name="op-genesis-configs{0}".format(l2_services_suffix), - ), + src="/network-data", + name="op-deployer-configs", + ) ], + files={ + "/network-data": op_deployer_init.files_artifacts[0], + }, run=" && ".join( [ - "./packages/contracts-bedrock/scripts/getting-started/wallets.sh >> {0}".format( - ENVRC_PATH - ), - "echo 'export IMPL_SALT=$(openssl rand -hex 32)' >> {0}".format( - ENVRC_PATH + "cat /network-data/intent.toml | dasel put -r toml -t string -v '{0}' 'contractArtifactsURL' > /network-data/.intent.toml".format( + optimism_args.op_contract_deployer_params.artifacts_url ), - ". {0}".format(ENVRC_PATH), - "mkdir -p /network-configs", - "cast send $GS_ADMIN_ADDRESS --value $FUND_VALUE --private-key $PRIVATE_KEY --rpc-url $L1_RPC_URL", # Fund Admin - "cast send $GS_BATCHER_ADDRESS --value $FUND_VALUE --private-key $PRIVATE_KEY --rpc-url $L1_RPC_URL", # Fund Batcher - "cast send $GS_PROPOSER_ADDRESS --value $FUND_VALUE --private-key $PRIVATE_KEY --rpc-url $L1_RPC_URL", # Fund Proposer - "cd /workspace/optimism/packages/contracts-bedrock", - "./scripts/getting-started/config.sh", - 'jq \'. + {"fundDevAccounts": true, "useInterop": true}\' $DEPLOY_CONFIG_PATH > tmp.$$.json && mv tmp.$$.json $DEPLOY_CONFIG_PATH', - "forge script scripts/deploy/Deploy.s.sol:Deploy --private-key $GS_ADMIN_PRIVATE_KEY --broadcast --rpc-url $L1_RPC_URL", - "CONTRACT_ADDRESSES_PATH=$DEPLOYMENT_OUTFILE forge script scripts/L2Genesis.s.sol:L2Genesis --sig 'runWithStateDump()' --chain-id $L2_CHAIN_ID", - "cd /workspace/optimism/op-node/bin", - "./op-node genesis l2 \ - --l1-rpc $L1_RPC_URL \ - --deploy-config $DEPLOY_CONFIG_PATH \ - --l2-allocs $STATE_DUMP_PATH \ - --l1-deployments $DEPLOYMENT_OUTFILE \ - --outfile.l2 /network-configs/genesis.json \ - --outfile.rollup /network-configs/rollup.json", - "mv $DEPLOY_CONFIG_PATH /network-configs/getting-started.json", - "mv $DEPLOYMENT_OUTFILE /network-configs/kurtosis.json", - "mv $STATE_DUMP_PATH /network-configs/state-dump.json", - "echo -n $GS_ADMIN_PRIVATE_KEY > /network-configs/GS_ADMIN_PRIVATE_KEY", - "echo -n $GS_SEQUENCER_PRIVATE_KEY > /network-configs/GS_SEQUENCER_PRIVATE_KEY", - "echo -n $GS_BATCHER_PRIVATE_KEY > /network-configs/GS_BATCHER_PRIVATE_KEY", - "echo -n $GS_PROPOSER_PRIVATE_KEY > /network-configs/GS_PROPOSER_PRIVATE_KEY", - "cat /network-configs/genesis.json | jq --from-file /workspace/optimism/packages/contracts-bedrock/deploy-config/chainspec-generator/gen2spec.jq > /network-configs/chainspec.json", + "mv /network-data/.intent.toml /network-data/intent.toml", ] ), - wait="300s", - ) - - gs_admin_private_key = plan.run_sh( - name="read-gs-admin-private-key", - description="Getting the admin private key", - run="cat /network-configs/GS_ADMIN_PRIVATE_KEY ", - files={"/network-configs": op_genesis.files_artifacts[0]}, - ) - - gs_sequencer_private_key = plan.run_sh( - name="read-gs-sequencer-private-key", - description="Getting 
the sequencer private key", - run="cat /network-configs/GS_SEQUENCER_PRIVATE_KEY ", - files={"/network-configs": op_genesis.files_artifacts[0]}, ) - gs_batcher_private_key = plan.run_sh( - name="read-gs-batcher-private-key", - description="Getting the batcher private key", - run="cat /network-configs/GS_BATCHER_PRIVATE_KEY ", - files={"/network-configs": op_genesis.files_artifacts[0]}, - ) - - gs_proposer_private_key = plan.run_sh( - name="read-gs-proposer-private-key", - description="Getting the proposer private key", - run="cat /network-configs/GS_PROPOSER_PRIVATE_KEY ", - files={"/network-configs": op_genesis.files_artifacts[0]}, - ) - - l2oo_address = plan.run_sh( - name="read-l2oo-address", - description="Getting the L2OutputOracleProxy address", - run="jq -r .L2OutputOracleProxy /network-configs/kurtosis.json | tr -d '\n'", - files={"/network-configs": op_genesis.files_artifacts[0]}, - ) + apply_cmds = [ + "op-deployer apply --l1-rpc-url $L1_RPC_URL --private-key $PRIVATE_KEY --workdir /network-data", + ] + for chain in optimism_args.chains: + network_id = chain.network_params.network_id + apply_cmds.extend( + [ + "op-deployer inspect genesis --workdir /network-data --outfile /network-data/genesis-{0}.json {0}".format( + network_id + ), + "op-deployer inspect rollup --workdir /network-data --outfile /network-data/rollup-{0}.json {0}".format( + network_id + ), + ] + ) - l1_bridge_address = plan.run_sh( - name="read-l1-bridge-address", - description="Getting the L1StandardBridgeProxy address", - run="jq -r .L1StandardBridgeProxy /network-configs/kurtosis.json | tr -d '\n'", - files={"/network-configs": op_genesis.files_artifacts[0]}, + op_deployer_apply = plan.run_sh( + name="op-deployer-apply", + description="Apply L2 contract deployments", + image=optimism_args.op_contract_deployer_params.image, + env_vars={"PRIVATE_KEY": str(priv_key)} | l1_config_env_vars, + store=[ + StoreSpec( + src="/network-data", + name="op-deployer-configs", + ) + ], + files={ + "/network-data": op_deployer_configure.files_artifacts[0], + }, + run=" && ".join(apply_cmds), ) - l1_deposit_start_block = plan.run_sh( - name="read-l1-deposit-start-block", - description="Getting the L1StandardBridgeProxy address", - image="badouralix/curl-jq", - run="jq -r .genesis.l1.number /network-configs/rollup.json | tr -d '\n'", - files={"/network-configs": op_genesis.files_artifacts[0]}, + fund_script_artifact = plan.upload_files( + src=FUND_SCRIPT_FILEPATH, + name="op-deployer-fund-script", ) - l1_portal_contract = plan.run_sh( - name="read-l1-portal-contract", - description="Getting the L1 portal contract", - run="jq -r .OptimismPortal /network-configs/kurtosis.json | tr -d '\n'", - files={"/network-configs": op_genesis.files_artifacts[0]}, + collect_fund = plan.run_sh( + name="op-deployer-fund", + description="Collect keys, and fund addresses", + image=utils.DEPLOYMENT_UTILS_IMAGE, + env_vars={"PRIVATE_KEY": str(priv_key), "FUND_VALUE": "10ether"} + | l1_config_env_vars, + store=[ + StoreSpec( + src="/network-data", + name="op-deployer-configs", + ) + ], + files={ + "/network-data": op_deployer_apply.files_artifacts[0], + "/fund-script": fund_script_artifact, + }, + run='bash /fund-script/fund.sh "{0}"'.format(l2_chain_ids), ) - private_keys = { - "GS_ADMIN_PRIVATE_KEY": gs_admin_private_key.output, - "GS_SEQUENCER_PRIVATE_KEY": gs_sequencer_private_key.output, - "GS_BATCHER_PRIVATE_KEY": gs_batcher_private_key.output, - "GS_PROPOSER_PRIVATE_KEY": gs_proposer_private_key.output, - } - - blockscout_env_variables = { - 
"INDEXER_OPTIMISM_L1_PORTAL_CONTRACT": l1_portal_contract.output, - "INDEXER_OPTIMISM_L1_DEPOSITS_START_BLOCK": l1_deposit_start_block.output, - "INDEXER_OPTIMISM_L1_WITHDRAWALS_START_BLOCK": l1_deposit_start_block.output, - "INDEXER_OPTIMISM_L1_BATCH_START_BLOCK": l1_deposit_start_block.output, - "INDEXER_OPTIMISM_L1_OUTPUT_ORACLE_CONTRACT": l2oo_address.output, - } - - return ( - op_genesis.files_artifacts[0], - private_keys, - l2oo_address.output, - l1_bridge_address.output, - blockscout_env_variables, - ) + return collect_fund.files_artifacts[0] diff --git a/src/el/op-besu/op_besu_launcher.star b/src/el/op-besu/op_besu_launcher.star index a532b19..3026b28 100644 --- a/src/el/op-besu/op_besu_launcher.star +++ b/src/el/op-besu/op_besu_launcher.star @@ -96,7 +96,7 @@ def launch( config = get_config( plan, - launcher.el_cl_genesis_data, + launcher.deployment_output, launcher.jwt_file, launcher.network, launcher.network_id, @@ -133,7 +133,7 @@ def launch( def get_config( plan, - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, @@ -150,7 +150,7 @@ def get_config( "besu", "--genesis-file=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER - + "/genesis.json", + + "/genesis-{0}.json".format(network_id), "--network-id={0}".format(network_id), # "--logging=" + log_level, "--data-path=" + EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, @@ -199,7 +199,7 @@ def get_config( cmd_str = " ".join(cmd) files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: deployment_output, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } @@ -215,13 +215,13 @@ def get_config( def new_op_besu_launcher( - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, ): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, network=network, network_id=network_id, diff --git a/src/el/op-erigon/op_erigon_launcher.star b/src/el/op-erigon/op_erigon_launcher.star index 71ead3a..ee40fdf 100644 --- a/src/el/op-erigon/op_erigon_launcher.star +++ b/src/el/op-erigon/op_erigon_launcher.star @@ -90,7 +90,7 @@ def launch( config = get_config( plan, - launcher.el_cl_genesis_data, + launcher.deployment_output, launcher.jwt_file, launcher.network, launcher.network_id, @@ -130,7 +130,7 @@ def launch( def get_config( plan, - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, @@ -142,7 +142,8 @@ def get_config( ): init_datadir_cmd_str = "erigon init --datadir={0} {1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, - constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.json", + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/genesis-{0}.json".format(network_id), ) discovery_port = DISCOVERY_PORT_NUM @@ -199,7 +200,7 @@ def get_config( command_str = cmd_str files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: deployment_output, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } @@ -215,13 +216,13 @@ def get_config( def new_op_erigon_launcher( - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, ): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, network=network, network_id=network_id, diff --git a/src/el/op-geth/op_geth_launcher.star b/src/el/op-geth/op_geth_launcher.star index f5fed68..6f31e00 100644 --- a/src/el/op-geth/op_geth_launcher.star +++ b/src/el/op-geth/op_geth_launcher.star @@ 
-92,16 +92,13 @@ def launch( sequencer_enabled, sequencer_context, ): - network_name = shared_utils.get_network_name(launcher.network) - config = get_config( plan, - launcher.el_cl_genesis_data, + launcher.deployment_output, launcher.jwt_file, launcher.network, launcher.network_id, image, - service_name, existing_el_clients, sequencer_enabled, sequencer_context, @@ -136,19 +133,19 @@ def launch( def get_config( plan, - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, image, - service_name, existing_el_clients, sequencer_enabled, sequencer_context, ): init_datadir_cmd_str = "geth init --datadir={0} --state.scheme=hash {1}".format( EXECUTION_DATA_DIRPATH_ON_CLIENT_CONTAINER, - constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.json", + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS + + "/genesis-{0}.json".format(network_id), ) discovery_port = DISCOVERY_PORT_NUM @@ -213,7 +210,7 @@ def get_config( command_str = cmd_str files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: deployment_output, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } @@ -228,13 +225,13 @@ def get_config( def new_op_geth_launcher( - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, ): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, network=network, network_id=network_id, diff --git a/src/el/op-nethermind/op_nethermind_launcher.star b/src/el/op-nethermind/op_nethermind_launcher.star index 0d3dfd3..5c12777 100644 --- a/src/el/op-nethermind/op_nethermind_launcher.star +++ b/src/el/op-nethermind/op_nethermind_launcher.star @@ -182,7 +182,7 @@ def get_config( cmd.append( "--Init.ChainSpecPath=" + constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER - + "/chainspec.json" + + "/chainspec-{0}.json".format(network_id) ) files = { diff --git a/src/el/op-reth/op_reth_launcher.star b/src/el/op-reth/op_reth_launcher.star index 0bc41ce..08af39e 100644 --- a/src/el/op-reth/op_reth_launcher.star +++ b/src/el/op-reth/op_reth_launcher.star @@ -83,18 +83,10 @@ def launch( sequencer_enabled, sequencer_context, ): - network_name = shared_utils.get_network_name(launcher.network) - - cl_client_name = service_name.split("-")[3] - config = get_config( plan, - launcher.el_cl_genesis_data, - launcher.jwt_file, - launcher.network, - launcher.network_id, + launcher, image, - service_name, existing_el_clients, sequencer_enabled, sequencer_context, @@ -126,16 +118,15 @@ def launch( def get_config( plan, - el_cl_genesis_data, - jwt_file, - network, - network_id, + launcher, image, - service_name, existing_el_clients, sequencer_enabled, sequencer_context, ): + deployment_output = launcher.deployment_output + jwt_file = launcher.jwt_file + network = launcher.network public_ports = {} discovery_port = DISCOVERY_PORT_NUM used_ports = get_used_ports(discovery_port) @@ -146,7 +137,8 @@ def get_config( "--chain={0}".format( network if network in constants.PUBLIC_NETWORKS - else constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + "/genesis.json" + else constants.GENESIS_CONFIG_MOUNT_PATH_ON_CONTAINER + + "/genesis-{0}.json".format(launcher.network_id) ), "--http", "--http.port={0}".format(RPC_PORT_NUM), @@ -187,7 +179,7 @@ def get_config( ) files = { - constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: el_cl_genesis_data, + constants.GENESIS_DATA_MOUNTPOINT_ON_CLIENTS: deployment_output, constants.JWT_MOUNTPOINT_ON_CLIENTS: jwt_file, } @@ -202,13 +194,13 @@ def get_config( def 
new_op_reth_launcher( - el_cl_genesis_data, + deployment_output, jwt_file, network, network_id, ): return struct( - el_cl_genesis_data=el_cl_genesis_data, + deployment_output=deployment_output, jwt_file=jwt_file, network=network, network_id=network_id, diff --git a/src/el_cl_launcher.star b/src/el_cl_launcher.star index 88ede09..36ca1a3 100644 --- a/src/el_cl_launcher.star +++ b/src/el_cl_launcher.star @@ -19,17 +19,16 @@ def launch( plan, jwt_file, network_params, - el_cl_data, + deployment_output, participants, num_participants, l1_config_env_vars, - gs_sequencer_private_key, l2_services_suffix, ): el_launchers = { "op-geth": { "launcher": op_geth.new_op_geth_launcher( - el_cl_data, + deployment_output, jwt_file, network_params.network, network_params.network_id, @@ -38,7 +37,7 @@ def launch( }, "op-reth": { "launcher": op_reth.new_op_reth_launcher( - el_cl_data, + deployment_output, jwt_file, network_params.network, network_params.network_id, @@ -47,7 +46,7 @@ def launch( }, "op-erigon": { "launcher": op_erigon.new_op_erigon_launcher( - el_cl_data, + deployment_output, jwt_file, network_params.network, network_params.network_id, @@ -56,7 +55,7 @@ def launch( }, "op-nethermind": { "launcher": op_nethermind.new_nethermind_launcher( - el_cl_data, + deployment_output, jwt_file, network_params.network, network_params.network_id, @@ -65,7 +64,7 @@ def launch( }, "op-besu": { "launcher": op_besu.new_op_besu_launcher( - el_cl_data, + deployment_output, jwt_file, network_params.network, network_params.network_id, @@ -77,12 +76,14 @@ def launch( cl_launchers = { "op-node": { "launcher": op_node.new_op_node_launcher( - el_cl_data, jwt_file, network_params + deployment_output, jwt_file, network_params ), "launch_method": op_node.launch, }, "hildr": { - "launcher": hildr.new_hildr_launcher(el_cl_data, jwt_file, network_params), + "launcher": hildr.new_hildr_launcher( + deployment_output, jwt_file, network_params + ), "launch_method": hildr.launch, }, } @@ -120,13 +121,14 @@ def launch( # Zero-pad the index using the calculated zfill value index_str = shared_utils.zfill_custom(index + 1, len(str(len(participants)))) - el_service_name = "op-el-{0}-{1}-{2}{3}".format( + el_service_name = "op-el-{0}-{1}-{2}-{3}".format( index_str, el_type, cl_type, l2_services_suffix ) - cl_service_name = "op-cl-{0}-{1}-{2}{3}".format( + cl_service_name = "op-cl-{0}-{1}-{2}-{3}".format( index_str, cl_type, el_type, l2_services_suffix ) + sequencer_context = all_cl_contexts[0] if len(all_cl_contexts) > 0 else None el_context = el_launch_method( plan, el_launcher, @@ -134,9 +136,7 @@ def launch( participant.el_image, all_el_contexts, sequencer_enabled, - all_cl_contexts[0] - if len(all_cl_contexts) > 0 - else None, # sequencer context + sequencer_context, ) cl_context = cl_launch_method( @@ -147,7 +147,6 @@ def launch( el_context, all_cl_contexts, l1_config_env_vars, - gs_sequencer_private_key, sequencer_enabled, ) diff --git a/src/l2.star b/src/l2.star index f9bca16..4678a74 100644 --- a/src/l2.star +++ b/src/l2.star @@ -5,40 +5,23 @@ input_parser = import_module("./package_io/input_parser.star") static_files = import_module( "github.com/ethpandaops/ethereum-package/src/static_files/static_files.star" ) +util = import_module("./util.star") def launch_l2( plan, l2_services_suffix, l2_args, + deployment_output, l1_config, l1_priv_key, l1_bootnode_context, ): - plan.print("Parsing the L2 input args") - args_with_right_defaults = input_parser.input_parser(plan, l2_args) - network_params = 
args_with_right_defaults.network_params + network_params = l2_args.network_params l2_config_env_vars = {} l2_config_env_vars["L2_CHAIN_ID"] = str(network_params.network_id) l2_config_env_vars["L2_BLOCK_TIME"] = str(network_params.seconds_per_slot) - fork_activation_env = get_network_fork_activation(network_params) - plan.print(fork_activation_env) - ( - el_cl_data, - gs_private_keys, - l2oo_address, - l1_bridge_address, - blockscout_env_variables, - ) = contract_deployer.deploy_l2_contracts( - plan, - l1_priv_key, # get private key of contract deployer for this l2 - l1_config, - l2_config_env_vars, - l2_services_suffix, - fork_activation_env, - args_with_right_defaults.op_contract_deployer_params.image, - ) plan.print("Deploying L2 with name {0}".format(network_params.name)) jwt_file = plan.upload_files( @@ -48,13 +31,11 @@ def launch_l2( all_l2_participants = participant_network.launch_participant_network( plan, - args_with_right_defaults.participants, + l2_args.participants, jwt_file, network_params, - el_cl_data, - gs_private_keys, + deployment_output, l1_config, - l2oo_address, l2_services_suffix, ) @@ -64,17 +45,27 @@ def launch_l2( all_el_contexts.append(participant.el_context) all_cl_contexts.append(participant.cl_context) - for additional_service in args_with_right_defaults.additional_services: + network_id_as_hex = util.to_hex_chain_id(network_params.network_id) + l1_bridge_address = util.read_network_config_value( + plan, + deployment_output, + "state", + '.opChainDeployments[] | select(.id=="{0}") | .l1StandardBridgeProxyAddress'.format( + network_id_as_hex + ), + ) + + for additional_service in l2_args.additional_services: if additional_service == "blockscout": plan.print("Launching op-blockscout") - blockscout_launcher = blockscout.launch_blockscout( + blockscout.launch_blockscout( plan, l2_services_suffix, l1_bootnode_context, # first l1 EL url all_el_contexts[0], # first l2 EL url - l2oo_address, network_params.name, - blockscout_env_variables, + deployment_output, + network_params.network_id, ) plan.print("Successfully launched op-blockscout") @@ -84,21 +75,3 @@ def launch_l2( l1_bridge_address ) ) - - -def get_network_fork_activation(network_params): - env_vars = {} - env_vars["FJORD_TIME_OFFSET"] = "0x" + "%x" % int(network_params.fjord_time_offset) - if network_params.granite_time_offset != None: - env_vars["GRANITE_TIME_OFFSET"] = "0x" + "%x" % int( - network_params.granite_time_offset - ) - if network_params.holocene_time_offset != None: - env_vars["HOLOCENE_TIME_OFFSET"] = "0x" + "%x" % int( - network_params.holocene_time_offset - ) - if network_params.interop_time_offset != None: - env_vars["INTEROP_TIME_OFFSET"] = "0x" + "%x" % int( - network_params.interop_time_offset - ) - return env_vars diff --git a/src/package_io/input_parser.star b/src/package_io/input_parser.star index d4ae8ff..efa53aa 100644 --- a/src/package_io/input_parser.star +++ b/src/package_io/input_parser.star @@ -30,112 +30,133 @@ ATTR_TO_BE_SKIPPED_AT_ROOT = ( "participants", ) - DEFAULT_ADDITIONAL_SERVICES = [] def input_parser(plan, input_args): sanity_check.sanity_check(plan, input_args) - result = parse_network_params(plan, input_args) + results = parse_network_params(plan, input_args) return struct( - participants=[ + chains=[ struct( - el_type=participant["el_type"], - el_image=participant["el_image"], - cl_type=participant["cl_type"], - cl_image=participant["cl_image"], - count=participant["count"], + participants=[ + struct( + el_type=participant["el_type"], + 
el_image=participant["el_image"], + cl_type=participant["cl_type"], + cl_image=participant["cl_image"], + count=participant["count"], + ) + for participant in result["participants"] + ], + network_params=struct( + network=result["network_params"]["network"], + network_id=result["network_params"]["network_id"], + seconds_per_slot=result["network_params"]["seconds_per_slot"], + name=result["network_params"]["name"], + fjord_time_offset=result["network_params"]["fjord_time_offset"], + granite_time_offset=result["network_params"]["granite_time_offset"], + holocene_time_offset=result["network_params"][ + "holocene_time_offset" + ], + interop_time_offset=result["network_params"]["interop_time_offset"], + ), + additional_services=result["additional_services"], ) - for participant in result["participants"] + for result in results["chains"] ], - network_params=struct( - network=result["network_params"]["network"], - network_id=result["network_params"]["network_id"], - seconds_per_slot=result["network_params"]["seconds_per_slot"], - name=result["network_params"]["name"], - fjord_time_offset=result["network_params"]["fjord_time_offset"], - granite_time_offset=result["network_params"]["granite_time_offset"], - holocene_time_offset=result["network_params"]["holocene_time_offset"], - interop_time_offset=result["network_params"]["interop_time_offset"], - ), - additional_services=result.get( - "additional_services", DEFAULT_ADDITIONAL_SERVICES - ), op_contract_deployer_params=struct( - image=result["op_contract_deployer_params"]["image"], + image=results["op_contract_deployer_params"]["image"], + artifacts_url=results["op_contract_deployer_params"]["artifacts_url"], ), ) def parse_network_params(plan, input_args): - result = default_input_args(input_args) - - for attr in input_args: - value = input_args[attr] - # if its insterted we use the value inserted - if attr not in ATTR_TO_BE_SKIPPED_AT_ROOT and attr in input_args: - result[attr] = value - elif attr == "network_params": - for sub_attr in input_args["network_params"]: - sub_value = input_args["network_params"][sub_attr] - result["network_params"][sub_attr] = sub_value - elif attr == "participants": - participants = [] - for participant in input_args["participants"]: - new_participant = default_participant() - for sub_attr, sub_value in participant.items(): - # if the value is set in input we set it in participant - new_participant[sub_attr] = sub_value - for _ in range(0, new_participant["count"]): - participant_copy = ( - ethereum_package_input_parser.deep_copy_participant( - new_participant + results = {} + chains = [] + + seen_names = {} + seen_network_ids = {} + for chain in input_args.get("chains", default_chains()): + network_params = default_network_params() + network_params.update(chain.get("network_params", {})) + + network_name = network_params["name"] + network_id = network_params["network_id"] + + if network_name in seen_names: + fail("Network name {0} is duplicated".format(network_name)) + + if network_id in seen_network_ids: + fail("Network id {0} is duplicated".format(network_id)) + + participants = [] + for i, p in enumerate(chain["participants"]): + participant = default_participant() + participant.update(p) + + el_type = participant["el_type"] + cl_type = participant["cl_type"] + el_image = participant["el_image"] + if el_image == "": + default_image = DEFAULT_EL_IMAGES.get(el_type, "") + if default_image == "": + fail( + "{0} received an empty image name and we don't have a default for it".format( + el_type ) ) - 
participants.append(participant_copy) - result["participants"] = participants - - for index, participant in enumerate(result["participants"]): - el_type = participant["el_type"] - cl_type = participant["cl_type"] - el_image = participant["el_image"] - if el_image == "": - default_image = DEFAULT_EL_IMAGES.get(el_type, "") - if default_image == "": - fail( - "{0} received an empty image name and we don't have a default for it".format( - el_type - ) - ) - participant["el_image"] = default_image + participant["el_image"] = default_image - cl_image = participant["cl_image"] - if cl_image == "": + cl_image = participant["cl_image"] if cl_image == "": default_image = DEFAULT_CL_IMAGES.get(cl_type, "") - if default_image == "": - fail( - "{0} received an empty image name and we don't have a default for it".format( - cl_type + if default_image == "": + fail( + "{0} received an empty image name and we don't have a default for it".format( + cl_type + ) ) - ) - participant["cl_image"] = default_image - - return result + participant["cl_image"] = default_image + + participants.append(participant) + + result = { + "participants": participants, + "network_params": network_params, + "additional_services": chain.get( + "additional_services", DEFAULT_ADDITIONAL_SERVICES + ), + } + chains.append(result) + + results["chains"] = chains + results["op_contract_deployer_params"] = default_op_contract_deployer_params() + results["op_contract_deployer_params"].update( + input_args.get("op_contract_deployer_params", {}) + ) + return results -def default_input_args(input_args): - network_params = default_network_params() - participants = [default_participant()] - op_contract_deployer_params = default_op_contract_deployer_params() +def default_optimism_args(): return { - "participants": participants, - "network_params": network_params, - "op_contract_deployer_params": op_contract_deployer_params, + "chains": default_chains(), + "op_contract_deployer_params": default_op_contract_deployer_params(), } +def default_chains(): + return [ + { + "participants": [default_participant()], + "network_params": default_network_params(), + "additional_services": DEFAULT_ADDITIONAL_SERVICES, + } + ] + + def default_network_params(): return { "network": "kurtosis", @@ -156,10 +177,33 @@ def default_participant(): "cl_type": "op-node", "cl_image": "", "count": 1, + "sequencer": False, } def default_op_contract_deployer_params(): return { - "image": "ethpandaops/optimism-contract-deployer:develop", + "image": "mslipper/op-deployer:latest", + "artifacts_url": "https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-4accd01f0c35c26f24d2aa71aba898dd7e5085a2ce5daadc8a84b10caf113409.tar.gz", + } + + +def default_ethereum_config(): + return { + "network_params": { + "preset": "minimal", + "genesis_delay": 5, + # Preload the Arachnid CREATE2 deployer + "additional_preloaded_contracts": json.encode( + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": 0, + "secretKey": "0x", + } + } + ), + } } diff --git a/src/package_io/sanity_check.star b/src/package_io/sanity_check.star index 00e48ca..9fdda40 100644 --- a/src/package_io/sanity_check.star +++ b/src/package_io/sanity_check.star @@ -5,6 +5,7 @@ PARTICIPANT_CATEGORIES = { "cl_type", "cl_image", "count", + "sequencer", ], } @@ -19,9 +20,13 @@ SUBCATEGORY_PARAMS = { 
"holocene_time_offset", "interop_time_offset", ], - "op_contract_deployer_params": ["image"], } +OP_CONTRACT_DEPLOYER_PARAMS = [ + "image", + "artifacts_url", +] + ADDITIONAL_SERVICES_PARAMS = [ "blockscout", ] @@ -50,41 +55,56 @@ def validate_params(plan, input_args, category, allowed_params): ) -def sanity_check(plan, input_args): - if type(input_args) == "list": - return "Cant bother with your input, you shall pass" +def sanity_check(plan, optimism_config): + chains = optimism_config.get("chains", []) + + if type(chains) != "list": + fail("Invalid input_args type, expected list") + + for input_args in chains: + # Checks participants + deep_validate_params( + plan, input_args, "participants", PARTICIPANT_CATEGORIES["participants"] + ) + + # Checks additional_services + if "additional_services" in input_args: + for additional_services in input_args["additional_services"]: + if additional_services not in ADDITIONAL_SERVICES_PARAMS: + fail( + "Invalid additional_services {0}, allowed fields: {1}".format( + additional_services, ADDITIONAL_SERVICES_PARAMS + ) + ) - # Checks participants - deep_validate_params( - plan, input_args, "participants", PARTICIPANT_CATEGORIES["participants"] - ) + # Checks subcategories + for subcategories in SUBCATEGORY_PARAMS.keys(): + validate_params( + plan, input_args, subcategories, SUBCATEGORY_PARAMS[subcategories] + ) + # Checks everything else + for param in input_args.keys(): + combined_root_params = ( + PARTICIPANT_CATEGORIES.keys() + SUBCATEGORY_PARAMS.keys() + ) + combined_root_params.append("additional_services") + combined_root_params.append("op_contract_deployer_params") - # Checks additional_services - if "additional_services" in input_args: - for additional_services in input_args["additional_services"]: - if additional_services not in ADDITIONAL_SERVICES_PARAMS: + if param not in combined_root_params: fail( - "Invalid additional_services {0}, allowed fields: {1}".format( - additional_services, ADDITIONAL_SERVICES_PARAMS + "Invalid parameter {0}, allowed fields {1}".format( + param, combined_root_params ) ) - # Checks subcategories - for subcategories in SUBCATEGORY_PARAMS.keys(): + # If everything passes, print a message + + if "op_contract_deployer_params" in optimism_config: validate_params( - plan, input_args, subcategories, SUBCATEGORY_PARAMS[subcategories] + plan, + optimism_config, + "op_contract_deployer_params", + OP_CONTRACT_DEPLOYER_PARAMS, ) - # Checks everything else - for param in input_args.keys(): - combined_root_params = PARTICIPANT_CATEGORIES.keys() + SUBCATEGORY_PARAMS.keys() - combined_root_params.append("additional_services") - - if param not in combined_root_params: - fail( - "Invalid parameter {0}, allowed fields {1}".format( - param, combined_root_params - ) - ) - # If everything passes, print a message plan.print("Sanity check for OP package passed") diff --git a/src/participant_network.star b/src/participant_network.star index 66120be..03ac102 100644 --- a/src/participant_network.star +++ b/src/participant_network.star @@ -3,6 +3,7 @@ participant_module = import_module("./participant.star") input_parser = import_module("./package_io/input_parser.star") op_batcher_launcher = import_module("./batcher/op-batcher/op_batcher_launcher.star") op_proposer_launcher = import_module("./proposer/op-proposer/op_proposer_launcher.star") +util = import_module("./util.star") def launch_participant_network( @@ -10,24 +11,20 @@ def launch_participant_network( participants, jwt_file, network_params, - el_cl_data, - gs_private_keys, + 
+    deployment_output,
     l1_config_env_vars,
-    l2oo_address,
     l2_services_suffix,
 ):
     num_participants = len(participants)
-    sequencer_enabled = True
     # First EL and sequencer CL
     all_el_contexts, all_cl_contexts = el_cl_client_launcher.launch(
         plan,
         jwt_file,
         network_params,
-        el_cl_data,
+        deployment_output,
         participants,
         num_participants,
         l1_config_env_vars,
-        gs_private_keys["GS_SEQUENCER_PRIVATE_KEY"],
         l2_services_suffix,
     )
@@ -48,24 +45,38 @@ def launch_participant_network(
         all_participants.append(participant_entry)

+    proposer_key = util.read_network_config_value(
+        plan,
+        deployment_output,
+        "proposer-{0}".format(network_params.network_id),
+        ".privateKey",
+    )
+    batcher_key = util.read_network_config_value(
+        plan,
+        deployment_output,
+        "batcher-{0}".format(network_params.network_id),
+        ".privateKey",
+    )
+
     op_batcher_launcher.launch(
         plan,
-        "op-batcher{0}".format(l2_services_suffix),
+        "op-batcher-{0}".format(l2_services_suffix),
         input_parser.DEFAULT_BATCHER_IMAGES["op-batcher"],
         all_el_contexts[0],
         all_cl_contexts[0],
         l1_config_env_vars,
-        gs_private_keys["GS_BATCHER_PRIVATE_KEY"],
+        batcher_key,
     )

-    op_proposer_launcher.launch(
-        plan,
-        "op-proposer{0}".format(l2_services_suffix),
-        input_parser.DEFAULT_PROPOSER_IMAGES["op-proposer"],
-        all_cl_contexts[0],
-        l1_config_env_vars,
-        gs_private_keys["GS_PROPOSER_PRIVATE_KEY"],
-        l2oo_address,
-    )
+    # The OP Stack doesn't run the proposer anymore; it has been replaced by the challenger
+    # op_proposer_launcher.launch(
+    #     plan,
+    #     "op-proposer{0}".format(l2_services_suffix),
+    #     input_parser.DEFAULT_PROPOSER_IMAGES["op-proposer"],
+    #     all_cl_contexts[0],
+    #     l1_config_env_vars,
+    #     gs_private_keys["GS_PROPOSER_PRIVATE_KEY"],
+    #     l2oo_address,
+    # )

     return all_participants
diff --git a/src/util.star b/src/util.star
new file mode 100644
index 0000000..3599f54
--- /dev/null
+++ b/src/util.star
@@ -0,0 +1,24 @@
+DEPLOYMENT_UTILS_IMAGE = "mslipper/deployment-utils:latest"
+
+
+def read_network_config_value(plan, network_config_file, json_file, json_path):
+    mounts = {"/network-data": network_config_file}
+    return read_json_value(
+        plan, "/network-data/{0}.json".format(json_file), json_path, mounts
+    )
+
+
+def read_json_value(plan, json_file, json_path, mounts=None):
+    run = plan.run_sh(
+        description="Read JSON value",
+        image=DEPLOYMENT_UTILS_IMAGE,
+        files=mounts,
+        run="cat {0} | jq -j '{1}'".format(json_file, json_path),
+    )
+    return run.output
+
+
+def to_hex_chain_id(chain_id):
+    out = "%x" % int(chain_id)
+    pad = 64 - len(out)
+    return "0x" + "0" * pad + out
diff --git a/src/wait/wait_for_sync.star b/src/wait/wait_for_sync.star
index e599710..8448afc 100644
--- a/src/wait/wait_for_sync.star
+++ b/src/wait/wait_for_sync.star
@@ -1,3 +1,6 @@
+utils = import_module("../util.star")
+
+
 def wait_for_sync(plan, l1_config_env_vars):
     plan.run_sh(
         name="wait-for-l1-sync",
@@ -19,3 +22,14 @@
         if [ "$is_optimistic" == "false" ]; then echo \'Node is synced!\'; break; fi; done',
         wait="72h",
     )
+
+
+def wait_for_startup(plan, l1_config_env_vars):
+    plan.run_sh(
+        name="wait-for-l1-startup",
+        description="Wait for L1 to start up - can take up to 2 minutes",
+        image=utils.DEPLOYMENT_UTILS_IMAGE,
+        env_vars=l1_config_env_vars,
+        run="while true; do sleep 5; echo 'L1 Chain is starting up'; if [ \"$(curl -s $CL_RPC_URL/eth/v1/beacon/headers/ | jq -r '.data[0].header.message.slot')\" != \"0\" ]; then echo 'L1 Chain has started!'; break; fi; done",
+        wait="300s",
+    )
diff --git a/static_files/scripts/fund.sh b/static_files/scripts/fund.sh
new file mode 100644
index 0000000..37e8343
--- /dev/null
+++ b/static_files/scripts/fund.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+set -euxo pipefail
+
+export ETH_RPC_URL="$L1_RPC_URL"
+
+addr=$(cast wallet address "$PRIVATE_KEY")
+nonce=$(cast nonce "$addr")
+mnemonic="test test test test test test test test test test test junk"
+
+IFS=',';read -r -a chain_ids <<< "$1"
+
+write_keyfile() {
+  echo "{\"address\":\"$1\",\"privateKey\":\"$2\"}" > "/network-data/$3.json"
+}
+
+send() {
+  cast send $1 --value "$FUND_VALUE" --private-key "$PRIVATE_KEY" --nonce "$nonce" --async
+  nonce=$((nonce+1))
+}
+
+for chain_id in "${chain_ids[@]}"; do
+  proposer_priv=$(cast wallet private-key "$mnemonic" "m/44'/60'/2'/$chain_id/1")
+  proposer_addr=$(cast wallet address "$proposer_priv")
+  write_keyfile "$proposer_addr" "$proposer_priv" "proposer-$chain_id"
+  batcher_priv=$(cast wallet private-key "$mnemonic" "m/44'/60'/2'/$chain_id/2")
+  batcher_addr=$(cast wallet address "$batcher_priv")
+  write_keyfile "$batcher_addr" "$batcher_priv" "batcher-$chain_id"
+  sequencer_priv=$(cast wallet private-key "$mnemonic" "m/44'/60'/2'/$chain_id/3")
+  sequencer_addr=$(cast wallet address "$sequencer_priv")
+  write_keyfile "$sequencer_addr" "$sequencer_priv" "sequencer-$chain_id"
+  challenger_priv=$(cast wallet private-key "$mnemonic" "m/44'/60'/2'/$chain_id/4")
+  challenger_addr=$(cast wallet address "$challenger_priv")
+  write_keyfile "$challenger_addr" "$challenger_priv" "challenger-$chain_id"
+  send "$proposer_addr"
+  send "$batcher_addr"
+  send "$challenger_addr"
+
+  cat "/network-data/genesis-$chain_id.json" | jq --from-file /fund-script/gen2spec.jq > "/network-data/chainspec-$chain_id.json"
+done
\ No newline at end of file
diff --git a/static_files/chainspec_template/gen2spec.jq b/static_files/scripts/gen2spec.jq
similarity index 99%
rename from static_files/chainspec_template/gen2spec.jq
rename to static_files/scripts/gen2spec.jq
index b11cff1..bf898e2 100644
--- a/static_files/chainspec_template/gen2spec.jq
+++ b/static_files/scripts/gen2spec.jq
@@ -141,7 +141,7 @@ def optimism:
   "eip1153TransitionTimestamp": .config.cancunTime|to_hex,
   "eip5656TransitionTimestamp": .config.cancunTime|to_hex,
   "eip6780TransitionTimestamp": .config.cancunTime|to_hex,
-
+
   #Prague
   "rip7212TransitionTimestamp": .config.pragueTime|to_hex,

@@ -170,4 +170,4 @@ def optimism:
   "parentBeaconBlockRoot": .parentBeaconBlockRoot,
 },
 "accounts": ((.alloc|with_entries(.key|=(if startswith("0x") then . else "0x" + . end)))),
-}|remove_empty
\ No newline at end of file
+}|remove_empty