diff --git a/Cargo.lock b/Cargo.lock index fa420d7c48d..0f284a79d2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8512,7 +8512,7 @@ dependencies = [ [[package]] name = "zksync_concurrency" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "once_cell", @@ -8539,7 +8539,7 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "async-trait", @@ -8560,7 +8560,7 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "blst", @@ -8578,10 +8578,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", - "prost", "rand 0.8.5", "tracing", "vise", @@ -8593,14 +8592,12 @@ dependencies = [ "zksync_consensus_storage", "zksync_consensus_sync_blocks", "zksync_consensus_utils", - "zksync_protobuf", - "zksync_protobuf_build", ] [[package]] name = "zksync_consensus_network" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "async-trait", @@ -8624,7 +8621,7 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "bit-vec", @@ -8644,7 +8641,7 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "async-trait", @@ -8652,6 +8649,7 @@ dependencies = [ "rand 0.8.5", "thiserror", "tracing", + "vise", "zksync_concurrency", "zksync_consensus_roles", "zksync_protobuf", @@ -8661,7 +8659,7 
@@ dependencies = [ [[package]] name = "zksync_consensus_sync_blocks" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "thiserror", @@ -8676,7 +8674,7 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "thiserror", "zksync_concurrency", @@ -8771,9 +8769,11 @@ dependencies = [ "zksync_concurrency", "zksync_config", "zksync_consensus_bft", + "zksync_consensus_crypto", "zksync_consensus_executor", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -8784,7 +8784,6 @@ dependencies = [ "zksync_mini_merkle_tree", "zksync_object_store", "zksync_protobuf", - "zksync_protobuf_build", "zksync_queued_job_processor", "zksync_state", "zksync_storage", @@ -9008,7 +9007,7 @@ dependencies = [ [[package]] name = "zksync_protobuf" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "bit-vec", @@ -9026,7 +9025,7 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" version = "0.1.0" -source = "git+https://github.com/matter-labs/era-consensus.git?rev=84cdd9e45fd84bc1fac0b394c899ae33aef91afa#84cdd9e45fd84bc1fac0b394c899ae33aef91afa" +source = "git+https://github.com/matter-labs/era-consensus.git?rev=5727a3e0b22470bb90092388f9125bcb366df613#5727a3e0b22470bb90092388f9125bcb366df613" dependencies = [ "anyhow", "heck 0.4.1", diff --git a/core/lib/dal/.sqlx/query-1e54aebf94d27244638f04d1d35a5a088ceebfef0228701fcbed8255b74b1050.json b/core/lib/dal/.sqlx/query-1e54aebf94d27244638f04d1d35a5a088ceebfef0228701fcbed8255b74b1050.json deleted file mode 100644 index 7e970780d8e..00000000000 --- a/core/lib/dal/.sqlx/query-1e54aebf94d27244638f04d1d35a5a088ceebfef0228701fcbed8255b74b1050.json +++ /dev/null @@ -1,232 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n *\n FROM\n transactions\n WHERE\n miniblock_number = $1\n ORDER BY\n index_in_block\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "hash", - "type_info": "Bytea" - }, - { - "ordinal": 1, - "name": "is_priority", - "type_info": "Bool" - }, - { - "ordinal": 2, - "name": "full_fee", - "type_info": "Numeric" - }, - { - "ordinal": 3, - "name": "layer_2_tip_fee", - "type_info": "Numeric" - }, - { - "ordinal": 4, - "name": "initiator_address", - "type_info": "Bytea" - }, - { - "ordinal": 5, - "name": "nonce", - "type_info": "Int8" - }, - { - "ordinal": 6, - "name": "signature", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "input", - "type_info": "Bytea" - }, - { - "ordinal": 8, - "name": "data", - "type_info": "Jsonb" - }, - { - "ordinal": 9, - "name": "received_at", - "type_info": "Timestamp" - }, - { - 
"ordinal": 10, - "name": "priority_op_id", - "type_info": "Int8" - }, - { - "ordinal": 11, - "name": "l1_batch_number", - "type_info": "Int8" - }, - { - "ordinal": 12, - "name": "index_in_block", - "type_info": "Int4" - }, - { - "ordinal": 13, - "name": "error", - "type_info": "Varchar" - }, - { - "ordinal": 14, - "name": "gas_limit", - "type_info": "Numeric" - }, - { - "ordinal": 15, - "name": "gas_per_storage_limit", - "type_info": "Numeric" - }, - { - "ordinal": 16, - "name": "gas_per_pubdata_limit", - "type_info": "Numeric" - }, - { - "ordinal": 17, - "name": "tx_format", - "type_info": "Int4" - }, - { - "ordinal": 18, - "name": "created_at", - "type_info": "Timestamp" - }, - { - "ordinal": 19, - "name": "updated_at", - "type_info": "Timestamp" - }, - { - "ordinal": 20, - "name": "execution_info", - "type_info": "Jsonb" - }, - { - "ordinal": 21, - "name": "contract_address", - "type_info": "Bytea" - }, - { - "ordinal": 22, - "name": "in_mempool", - "type_info": "Bool" - }, - { - "ordinal": 23, - "name": "l1_block_number", - "type_info": "Int4" - }, - { - "ordinal": 24, - "name": "value", - "type_info": "Numeric" - }, - { - "ordinal": 25, - "name": "paymaster", - "type_info": "Bytea" - }, - { - "ordinal": 26, - "name": "paymaster_input", - "type_info": "Bytea" - }, - { - "ordinal": 27, - "name": "max_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 28, - "name": "max_priority_fee_per_gas", - "type_info": "Numeric" - }, - { - "ordinal": 29, - "name": "effective_gas_price", - "type_info": "Numeric" - }, - { - "ordinal": 30, - "name": "miniblock_number", - "type_info": "Int8" - }, - { - "ordinal": 31, - "name": "l1_batch_tx_index", - "type_info": "Int4" - }, - { - "ordinal": 32, - "name": "refunded_gas", - "type_info": "Int8" - }, - { - "ordinal": 33, - "name": "l1_tx_mint", - "type_info": "Numeric" - }, - { - "ordinal": 34, - "name": "l1_tx_refund_recipient", - "type_info": "Bytea" - }, - { - "ordinal": 35, - "name": "upgrade_id", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false, - false, - true, - true, - false, - true, - true, - true, - false, - false, - true, - true, - true, - true, - true, - true, - true, - true, - false, - false, - false, - true, - false, - true, - false, - false, - false, - true, - true, - true, - true, - true, - false, - true, - true, - true - ] - }, - "hash": "1e54aebf94d27244638f04d1d35a5a088ceebfef0228701fcbed8255b74b1050" -} diff --git a/core/lib/dal/.sqlx/query-2a2469109033ba08591db3647b73595fe783b7b894748d07fed9735c58fb28fb.json b/core/lib/dal/.sqlx/query-2a2469109033ba08591db3647b73595fe783b7b894748d07fed9735c58fb28fb.json deleted file mode 100644 index 8bcd8098bae..00000000000 --- a/core/lib/dal/.sqlx/query-2a2469109033ba08591db3647b73595fe783b7b894748d07fed9735c58fb28fb.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n number\n FROM\n miniblocks\n WHERE\n consensus IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "2a2469109033ba08591db3647b73595fe783b7b894748d07fed9735c58fb28fb" -} diff --git a/core/lib/dal/.sqlx/query-367ca58514762ffc26fd906c4c441a21691357494c2f9919bfcbcbb0e42315c2.json b/core/lib/dal/.sqlx/query-367ca58514762ffc26fd906c4c441a21691357494c2f9919bfcbcbb0e42315c2.json deleted file mode 100644 index 0eadba8f7f5..00000000000 --- 
a/core/lib/dal/.sqlx/query-367ca58514762ffc26fd906c4c441a21691357494c2f9919bfcbcbb0e42315c2.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n miniblocks\n WHERE\n number = $1\n AND consensus IS NOT NULL\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "count!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "367ca58514762ffc26fd906c4c441a21691357494c2f9919bfcbcbb0e42315c2" -} diff --git a/core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json b/core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json new file mode 100644 index 00000000000..6e7bffec485 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "certificate", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "3b013b93ea4a6766162c9f0c60517a7ffc993cf436ad3aeeae82ed3e330b07bd" +} diff --git a/core/lib/dal/.sqlx/query-3c60ca71b8a3b544f5fe9d7f2fbb249026665c9fb17b6f53a2154473547cbbfd.json b/core/lib/dal/.sqlx/query-3c60ca71b8a3b544f5fe9d7f2fbb249026665c9fb17b6f53a2154473547cbbfd.json new file mode 100644 index 00000000000..8797c84ce88 --- /dev/null +++ b/core/lib/dal/.sqlx/query-3c60ca71b8a3b544f5fe9d7f2fbb249026665c9fb17b6f53a2154473547cbbfd.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "certificate", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "3c60ca71b8a3b544f5fe9d7f2fbb249026665c9fb17b6f53a2154473547cbbfd" +} diff --git a/core/lib/dal/.sqlx/query-15893d68429ba09662ee27935653c17c7a7939195dd2d9aa42512b1479d2ed20.json b/core/lib/dal/.sqlx/query-4c7df374959728085b12ef71c8c34ea99bfdcb422a5066529dde25308f961f37.json similarity index 80% rename from core/lib/dal/.sqlx/query-15893d68429ba09662ee27935653c17c7a7939195dd2d9aa42512b1479d2ed20.json rename to core/lib/dal/.sqlx/query-4c7df374959728085b12ef71c8c34ea99bfdcb422a5066529dde25308f961f37.json index 8e4c16cca41..b6cf07da520 100644 --- a/core/lib/dal/.sqlx/query-15893d68429ba09662ee27935653c17c7a7939195dd2d9aa42512b1479d2ed20.json +++ b/core/lib/dal/.sqlx/query-4c7df374959728085b12ef71c8c34ea99bfdcb422a5066529dde25308f961f37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.consensus,\n miniblocks.protocol_version AS \"protocol_version!\",\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON 
miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n (\n SELECT\n MAX(m2.number)\n FROM\n miniblocks m2\n WHERE\n miniblocks.l1_batch_number = m2.l1_batch_number\n ) AS \"last_batch_miniblock?\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n l1_batches.fee_account_address AS \"fee_account_address?\"\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -55,16 +55,11 @@ }, { "ordinal": 10, - "name": "consensus", - "type_info": "Jsonb" - }, - { - "ordinal": 11, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 11, "name": "fee_account_address?", "type_info": "Bytea" } @@ -86,9 +81,8 @@ false, false, true, - true, false ] }, - "hash": "15893d68429ba09662ee27935653c17c7a7939195dd2d9aa42512b1479d2ed20" + "hash": "4c7df374959728085b12ef71c8c34ea99bfdcb422a5066529dde25308f961f37" } diff --git a/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json b/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json new file mode 100644 index 00000000000..bdabc52d137 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks_consensus (number, certificate)\n VALUES\n ($1, $2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "c2fe6a5476e69c9588eec73baba9d0e2d571533d4d5f683919987b6f8cbb00e0" +} diff --git a/core/lib/dal/.sqlx/query-d90ed4c0f67c1826f9be90bb5566aba34bfab67494fee578613b03ef7255324d.json b/core/lib/dal/.sqlx/query-d90ed4c0f67c1826f9be90bb5566aba34bfab67494fee578613b03ef7255324d.json deleted file mode 100644 index 22f1d4d31bc..00000000000 --- a/core/lib/dal/.sqlx/query-d90ed4c0f67c1826f9be90bb5566aba34bfab67494fee578613b03ef7255324d.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE miniblocks\n SET\n consensus = $2\n WHERE\n number = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "d90ed4c0f67c1826f9be90bb5566aba34bfab67494fee578613b03ef7255324d" -} diff --git a/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json new file mode 100644 index 00000000000..c34d38ac2d0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "certificate", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" +} diff --git 
a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 2442714f3cf..6af26113360 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -18,9 +18,9 @@ zksync_system_constants = { path = "../constants" } zksync_contracts = { path = "../contracts" } zksync_types = { path = "../types" } zksync_health_check = { path = "../health_check" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } itertools = "0.10.1" thiserror = "1.0" @@ -55,4 +55,4 @@ tracing = "0.1" assert_matches = "1.5.0" [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } diff --git a/core/lib/dal/build.rs b/core/lib/dal/build.rs index 66896b3e02f..f9986b59603 100644 --- a/core/lib/dal/build.rs +++ b/core/lib/dal/build.rs @@ -3,7 +3,7 @@ fn main() { zksync_protobuf_build::Config { input_root: "src/models/proto".into(), proto_root: "zksync/dal".into(), - dependencies: vec!["::zksync_consensus_roles::proto".parse().unwrap()], + dependencies: vec![], protobuf_crate: "::zksync_protobuf".parse().unwrap(), is_public: true, } diff --git a/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.down.sql b/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.down.sql new file mode 100644 index 00000000000..ce7a2de360b --- /dev/null +++ b/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.down.sql @@ -0,0 +1,2 @@ +DROP TABLE miniblocks_consensus; +ALTER TABLE miniblocks ADD COLUMN consensus JSONB NULL; diff --git a/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.up.sql b/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.up.sql new file mode 100644 index 00000000000..8f5538ce631 --- /dev/null +++ b/core/lib/dal/migrations/20240103123456_move_consensus_fields_to_new_table.up.sql @@ -0,0 +1,11 @@ +ALTER TABLE miniblocks DROP COLUMN consensus; + +CREATE TABLE miniblocks_consensus ( + number BIGINT NOT NULL, + certificate JSONB NOT NULL, + PRIMARY KEY(number), + CHECK((certificate->'message'->'proposal'->'number')::jsonb::numeric = number), + CONSTRAINT miniblocks_fk FOREIGN KEY(number) + REFERENCES miniblocks(number) + ON DELETE CASCADE +); diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 255533ba1cf..9d581adc763 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -15,7 +15,6 @@ use zksync_types::{ 
MAX_GAS_PER_PUBDATA_BYTE, U256, }; -pub use crate::models::storage_sync::ConsensusBlockFields; use crate::{ instrument::InstrumentExt, models::storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageMiniblockHeader}, @@ -645,84 +644,6 @@ impl BlocksDal<'_, '_> { Ok(()) } - /// Fetches the number of the last miniblock with consensus fields set. - /// Miniblocks with Consensus fields set constitute a prefix of sealed miniblocks, - /// so it is enough to traverse the miniblocks in descending order to find the last - /// with consensus fields. - /// - /// If better efficiency is needed we can add an index on "miniblocks without consensus fields". - pub async fn get_last_miniblock_number_with_consensus_fields( - &mut self, - ) -> anyhow::Result> { - let Some(row) = sqlx::query!( - r#" - SELECT - number - FROM - miniblocks - WHERE - consensus IS NOT NULL - ORDER BY - number DESC - LIMIT - 1 - "# - ) - .fetch_optional(self.storage.conn()) - .await? - else { - return Ok(None); - }; - Ok(Some(MiniblockNumber(row.number.try_into()?))) - } - - /// Checks whether the specified miniblock has consensus field set. - pub async fn has_consensus_fields(&mut self, number: MiniblockNumber) -> sqlx::Result { - Ok(sqlx::query!( - r#" - SELECT - COUNT(*) AS "count!" - FROM - miniblocks - WHERE - number = $1 - AND consensus IS NOT NULL - "#, - number.0 as i64 - ) - .fetch_one(self.storage.conn()) - .await? - .count - > 0) - } - - /// Sets consensus-related fields for the specified miniblock. - pub async fn set_miniblock_consensus_fields( - &mut self, - miniblock_number: MiniblockNumber, - consensus: &ConsensusBlockFields, - ) -> anyhow::Result<()> { - let result = sqlx::query!( - r#" - UPDATE miniblocks - SET - consensus = $2 - WHERE - number = $1 - "#, - miniblock_number.0 as i64, - zksync_protobuf::serde::serialize(consensus, serde_json::value::Serializer).unwrap(), - ) - .execute(self.storage.conn()) - .await?; - - anyhow::ensure!( - result.rows_affected() == 1, - "Miniblock #{miniblock_number} is not present in Postgres" - ); - Ok(()) - } - pub async fn get_last_sealed_miniblock_header( &mut self, ) -> sqlx::Result> { diff --git a/core/lib/dal/src/connection/mod.rs b/core/lib/dal/src/connection/mod.rs index 0e833625bf3..ef24195b764 100644 --- a/core/lib/dal/src/connection/mod.rs +++ b/core/lib/dal/src/connection/mod.rs @@ -10,14 +10,6 @@ use crate::{metrics::CONNECTION_METRICS, StorageProcessor}; pub mod holder; -/// Obtains the test database URL from the environment variable. -fn get_test_database_url() -> anyhow::Result { - env::var("TEST_DATABASE_URL").context( - "TEST_DATABASE_URL must be set. Normally, this is done by the 'zk' tool. \ - Make sure that you are running the tests with 'zk test rust' command or equivalent.", - ) -} - /// Builder for [`ConnectionPool`]s. pub struct ConnectionPoolBuilder { database_url: String, @@ -68,66 +60,24 @@ impl ConnectionPoolBuilder { statement_timeout = self.statement_timeout ); Ok(ConnectionPool { + database_url: self.database_url.clone(), inner: pool, max_size: self.max_size, }) } } -/// Constructs a new temporary database (with a randomized name) -/// by cloning the database template pointed by TEST_DATABASE_URL env var. -/// The template is expected to have all migrations from dal/migrations applied. -/// For efficiency, the Postgres container of TEST_DATABASE_URL should be -/// configured with option "fsync=off" - it disables waiting for disk synchronization -/// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance. 
-/// The database is not cleaned up automatically, but rather the whole Postgres -/// container is recreated whenever you call "zk test rust". -pub(super) async fn create_test_db() -> anyhow::Result<url::Url> { - use rand::Rng as _; - use sqlx::{Connection as _, Executor as _}; - - const PREFIX: &str = "test-"; - - let db_url = get_test_database_url().unwrap(); - let mut db_url = url::Url::parse(&db_url) - .with_context(|| format!("{} is not a valid database address", db_url))?; - let db_name = db_url - .path() - .strip_prefix('/') - .with_context(|| format!("{} is not a valid database address", db_url.as_ref()))? - .to_string(); - let db_copy_name = format!("{PREFIX}{}", rand::thread_rng().gen::()); - db_url.set_path(""); - let mut attempts = 10; - let mut conn = loop { - match sqlx::PgConnection::connect(db_url.as_ref()).await { - Ok(conn) => break conn, - Err(err) => { - attempts -= 1; - if attempts == 0 { - return Err(err).context("sqlx::PgConnection::connect()"); - } - } - } - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - }; - conn.execute( - format!("CREATE DATABASE \"{db_copy_name}\" WITH TEMPLATE \"{db_name}\"").as_str(), - ) - .await - .context("failed to create a temporary database")?; - db_url.set_path(&db_copy_name); - Ok(db_url) -} - #[derive(Clone)] pub struct ConnectionPool { pub(crate) inner: PgPool, + database_url: String, max_size: u32, } impl fmt::Debug for ConnectionPool { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + // We don't print the `database_url`, as it may contain + // sensitive information (e.g. database password). formatter .debug_struct("ConnectionPool") .field("max_size", &self.max_size) @@ -135,18 +85,92 @@ } } -impl ConnectionPool { - pub async fn test_pool() -> ConnectionPool { - let db_url = create_test_db() +pub struct TestTemplate(url::Url); + +impl TestTemplate { + fn db_name(&self) -> &str { + self.0.path().strip_prefix('/').unwrap() + } + + fn url(&self, db_name: &str) -> url::Url { + let mut url = self.0.clone(); + url.set_path(db_name); + url + } + + async fn connect_to(db_url: &url::Url) -> sqlx::Result<sqlx::PgConnection> { + use sqlx::Connection as _; + let mut attempts = 10; + loop { + match sqlx::PgConnection::connect(db_url.as_ref()).await { + Ok(conn) => return Ok(conn), + Err(err) => { + attempts -= 1; + if attempts == 0 { + return Err(err); + } + } + } + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + } + } + + /// Obtains the test database URL from the environment variable. + pub fn empty() -> anyhow::Result<Self> { + let db_url = env::var("TEST_DATABASE_URL").context( + "TEST_DATABASE_URL must be set. Normally, this is done by the 'zk' tool. \ + Make sure that you are running the tests with 'zk test rust' command or equivalent.", + )?; + Ok(Self(db_url.parse()?)) + } + + /// Closes the connection pool, disallows connecting to the underlying db, + /// so that the db can be used as a template. + pub async fn freeze(pool: ConnectionPool) -> anyhow::Result<Self> { + use sqlx::Executor as _; + let mut conn = pool.acquire_connection_retried().await?; + conn.execute( + "UPDATE pg_database SET datallowconn = false WHERE datname = current_database()", + ) + .await + .context("SET datallowconn = false")?; + drop(conn); + pool.inner.close().await; + Ok(Self(pool.database_url.parse()?)) + } + + /// Constructs a new temporary database (with a randomized name) + /// by cloning the database template pointed by TEST_DATABASE_URL env var.
+ /// The template is expected to have all migrations from dal/migrations applied. + /// For efficiency, the Postgres container of TEST_DATABASE_URL should be + /// configured with option "fsync=off" - it disables waiting for disk synchronization + /// whenever you write to the DBs, therefore making it as fast as an in-memory Postgres instance. + /// The database is not cleaned up automatically, but rather the whole Postgres + /// container is recreated whenever you call "zk test rust". + pub async fn create_db(&self) -> anyhow::Result { + use rand::Rng as _; + use sqlx::Executor as _; + + let mut conn = Self::connect_to(&self.url("")) + .await + .context("connect_to()")?; + let db_old = self.db_name(); + let db_new = format!("test-{}", rand::thread_rng().gen::()); + conn.execute(format!("CREATE DATABASE \"{db_new}\" WITH TEMPLATE \"{db_old}\"").as_str()) .await - .expect("Unable to prepare test database") - .to_string(); + .context("CREATE DATABASE")?; const TEST_MAX_CONNECTIONS: u32 = 50; // Expected to be enough for any unit test. - Self::builder(&db_url, TEST_MAX_CONNECTIONS) + ConnectionPool::builder(self.url(&db_new).as_ref(), TEST_MAX_CONNECTIONS) .build() .await - .unwrap() + .context("ConnectionPool::builder()") + } +} + +impl ConnectionPool { + pub async fn test_pool() -> ConnectionPool { + TestTemplate::empty().unwrap().create_db().await.unwrap() } /// Initializes a builder for connection pools. @@ -261,10 +285,12 @@ mod tests { #[tokio::test] async fn setting_statement_timeout() { - let db_url = create_test_db() + let db_url = TestTemplate::empty() + .unwrap() + .create_db() .await - .expect("Unable to prepare test database") - .to_string(); + .unwrap() + .database_url; let pool = ConnectionPool::singleton(&db_url) .set_statement_timeout(Some(Duration::from_secs(1))) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 152191757ba..c53541166d3 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,13 +1,19 @@ +use anyhow::Context as _; +use zksync_consensus_roles::validator; use zksync_consensus_storage::ReplicaState; +use zksync_types::{Address, MiniblockNumber}; +pub use crate::models::storage_sync::Payload; use crate::StorageProcessor; +/// Storage access methods for `zksync_core::consensus` module. #[derive(Debug)] pub struct ConsensusDal<'a, 'c> { pub storage: &'a mut StorageProcessor<'c>, } impl ConsensusDal<'_, '_> { + /// Fetches the current BFT replica state. pub async fn replica_state(&mut self) -> anyhow::Result> { let Some(row) = sqlx::query!( r#" @@ -27,7 +33,8 @@ impl ConsensusDal<'_, '_> { Ok(Some(zksync_protobuf::serde::deserialize(row.state)?)) } - pub async fn put_replica_state(&mut self, state: &ReplicaState) -> sqlx::Result<()> { + /// Sets the current BFT replica state. + pub async fn set_replica_state(&mut self, state: &ReplicaState) -> sqlx::Result<()> { let state = zksync_protobuf::serde::serialize(state, serde_json::value::Serializer).unwrap(); sqlx::query!( @@ -47,6 +54,158 @@ impl ConsensusDal<'_, '_> { .await?; Ok(()) } + + /// Fetches the first consensus certificate. + /// Note that we didn't backfill the certificates for the past miniblocks + /// when enabling consensus certificate generation, so it might NOT be the certificate + /// for the genesis miniblock. 
+ pub async fn first_certificate(&mut self) -> anyhow::Result> { + let Some(row) = sqlx::query!( + r#" + SELECT + certificate + FROM + miniblocks_consensus + ORDER BY + number ASC + LIMIT + 1 + "# + ) + .fetch_optional(self.storage.conn()) + .await? + else { + return Ok(None); + }; + Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + } + + /// Fetches the last consensus certificate. + /// Currently certificates are NOT generated synchronously with miniblocks, + /// so it might NOT be the certificate for the last miniblock. + pub async fn last_certificate(&mut self) -> anyhow::Result> { + let Some(row) = sqlx::query!( + r#" + SELECT + certificate + FROM + miniblocks_consensus + ORDER BY + number DESC + LIMIT + 1 + "# + ) + .fetch_optional(self.storage.conn()) + .await? + else { + return Ok(None); + }; + Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + } + + /// Fetches the consensus certificate for the miniblock with the given `block_number`. + pub async fn certificate( + &mut self, + block_number: validator::BlockNumber, + ) -> anyhow::Result> { + let Some(row) = sqlx::query!( + r#" + SELECT + certificate + FROM + miniblocks_consensus + WHERE + number = $1 + "#, + i64::try_from(block_number.0)? + ) + .fetch_optional(self.storage.conn()) + .await? + else { + return Ok(None); + }; + Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + } + + /// Converts the miniblock `block_number` into consensus payload. `Payload` is an + /// opaque format for the miniblock that consensus understands and generates a + /// certificate for it. + pub async fn block_payload( + &mut self, + block_number: validator::BlockNumber, + operator_address: Address, + ) -> anyhow::Result> { + let block_number = MiniblockNumber(block_number.0.try_into()?); + let Some(block) = self + .storage + .sync_dal() + .sync_block_inner(block_number) + .await? + else { + return Ok(None); + }; + let transactions = self + .storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(block_number) + .await?; + Ok(Some(block.into_payload(operator_address, transactions))) + } + + /// Inserts a certificate for the miniblock `cert.header().number`. + /// It verifies that + /// * the certified payload matches the miniblock in storage + /// * the `cert.header().parent` matches the parent miniblock. + /// * the parent block already has a certificate. + /// NOTE: This is an extra secure way of storing a certificate, + /// which will help us to detect bugs in the consensus implementation + /// while it is "fresh". If it turns out to take too long, + /// we can remove the verification checks later. + pub async fn insert_certificate( + &mut self, + cert: &validator::CommitQC, + operator_address: Address, + ) -> anyhow::Result<()> { + let header = &cert.message.proposal; + let mut txn = self.storage.start_transaction().await?; + if let Some(last) = txn.consensus_dal().last_certificate().await? { + let last = &last.message.proposal; + anyhow::ensure!( + last.number.next() == header.number, + "expected certificate for a block after the current head block" + ); + anyhow::ensure!(last.hash() == header.parent, "parent block mismatch"); + } else { + anyhow::ensure!( + header.parent == validator::BlockHeaderHash::genesis_parent(), + "inserting first block with non-zero parent hash" + ); + } + let want_payload = txn + .consensus_dal() + .block_payload(cert.message.proposal.number, operator_address) + .await? 
+ .context("corresponding miniblock is missing")?; + anyhow::ensure!( + header.payload == want_payload.encode().hash(), + "consensus block payload doesn't match the miniblock" + ); + sqlx::query!( + r#" + INSERT INTO + miniblocks_consensus (number, certificate) + VALUES + ($1, $2) + "#, + header.number.0 as i64, + zksync_protobuf::serde::serialize(cert, serde_json::value::Serializer).unwrap(), + ) + .execute(txn.conn()) + .await?; + txn.commit().await?; + Ok(()) + } } #[cfg(test)] @@ -69,7 +228,7 @@ mod tests { let rng = &mut rand::thread_rng(); for _ in 0..10 { let want: ReplicaState = rng.gen(); - conn.consensus_dal().put_replica_state(&want).await.unwrap(); + conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( Some(want), conn.consensus_dal().replica_state().await.unwrap() diff --git a/core/lib/dal/src/models/proto/mod.proto b/core/lib/dal/src/models/proto/mod.proto index b817d35e032..66f3ba411a0 100644 --- a/core/lib/dal/src/models/proto/mod.proto +++ b/core/lib/dal/src/models/proto/mod.proto @@ -2,9 +2,23 @@ syntax = "proto3"; package zksync.dal; -import "zksync/roles/validator.proto"; +message Transaction { + // Default derive(serde::Serialize) encoding of the zksync_types::Transaction. + // TODO(BFT-407): it is neither efficient, unique, nor suitable for version control. + // replace with a more robust encoding. + optional string json = 1; // required +} -message ConsensusBlockFields { - optional roles.validator.BlockHeaderHash parent = 1; - optional roles.validator.CommitQC justification = 2; +message Payload { + // zksync-era ProtocolVersionId + optional uint32 protocol_version = 9; // required; u16 + optional bytes hash = 1; // required; H256 + optional uint32 l1_batch_number = 2; // required + optional uint64 timestamp = 3; // required; seconds since UNIX epoch + optional uint64 l1_gas_price = 4; // required; gwei + optional uint64 l2_fair_gas_price = 5; // required; gwei + optional uint32 virtual_blocks = 6; // required + optional bytes operator_address = 7; // required; H160 + repeated Transaction transactions = 8; + optional bool last_in_batch = 10; // required } diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs index e816e5a72ab..7ef804c2d80 100644 --- a/core/lib/dal/src/models/storage_sync.rs +++ b/core/lib/dal/src/models/storage_sync.rs @@ -1,8 +1,10 @@ use anyhow::Context as _; use zksync_consensus_roles::validator; use zksync_contracts::BaseSystemContractsHashes; -use zksync_protobuf::{read_required, ProtoFmt}; -use zksync_types::{api::en, Address, L1BatchNumber, MiniblockNumber, Transaction, H160, H256}; +use zksync_protobuf::{required, ProtoFmt}; +use zksync_types::{ + api::en, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction, H160, H256, +}; #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct StorageSyncBlock { @@ -20,7 +22,6 @@ pub(crate) struct StorageSyncBlock { pub protocol_version: i32, pub virtual_blocks: i64, pub hash: Vec, - pub consensus: Option, } fn parse_h256(bytes: &[u8]) -> anyhow::Result { @@ -31,117 +32,190 @@ fn parse_h160(bytes: &[u8]) -> anyhow::Result { Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) } -impl StorageSyncBlock { - pub(crate) fn into_sync_block( - self, - current_operator_address: Address, - transactions: Option>, - ) -> anyhow::Result { - Ok(en::SyncBlock { - number: MiniblockNumber(self.number.try_into().context("number")?), +pub(crate) struct SyncBlock { + pub number: MiniblockNumber, + pub l1_batch_number: L1BatchNumber, 
+ pub last_in_batch: bool, + pub timestamp: u64, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub base_system_contracts_hashes: BaseSystemContractsHashes, + pub fee_account_address: Option<Address>
, + pub virtual_blocks: u32, + pub hash: H256, + pub protocol_version: ProtocolVersionId, +} + +impl TryFrom for SyncBlock { + type Error = anyhow::Error; + fn try_from(block: StorageSyncBlock) -> anyhow::Result { + Ok(Self { + number: MiniblockNumber(block.number.try_into().context("number")?), l1_batch_number: L1BatchNumber( - self.l1_batch_number.try_into().context("l1_batch_number")?, + block + .l1_batch_number + .try_into() + .context("l1_batch_number")?, ), - last_in_batch: self - .last_batch_miniblock - .map(|n| n == self.number) - .unwrap_or(false), - timestamp: self.timestamp.try_into().context("timestamp")?, - l1_gas_price: self.l1_gas_price.try_into().context("l1_gas_price")?, - l2_fair_gas_price: self + last_in_batch: block.last_batch_miniblock == Some(block.number), + timestamp: block.timestamp.try_into().context("timestamp")?, + l1_gas_price: block.l1_gas_price.try_into().context("l1_gas_price")?, + l2_fair_gas_price: block .l2_fair_gas_price .try_into() .context("l2_fair_gas_price")?, // TODO (SMA-1635): Make these fields non optional in database base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: parse_h256( - &self + &block .bootloader_code_hash .context("bootloader_code_hash should not be none")?, ) .context("bootloader_code_hash")?, default_aa: parse_h256( - &self + &block .default_aa_code_hash .context("default_aa_code_hash should not be none")?, ) .context("default_aa_code_hash")?, }, - operator_address: match self.fee_account_address { - Some(addr) => parse_h160(&addr).context("fee_account_address")?, - None => current_operator_address, - }, - transactions, - virtual_blocks: Some(self.virtual_blocks.try_into().context("virtual_blocks")?), - hash: Some(parse_h256(&self.hash).context("hash")?), - protocol_version: u16::try_from(self.protocol_version) + fee_account_address: block + .fee_account_address + .map(|a| parse_h160(&a)) + .transpose() + .context("fee_account_address")?, + virtual_blocks: block.virtual_blocks.try_into().context("virtual_blocks")?, + hash: parse_h256(&block.hash).context("hash")?, + protocol_version: u16::try_from(block.protocol_version) .context("protocol_version")? .try_into() .context("protocol_version")?, - consensus: match self.consensus { - None => None, - Some(v) => { - let v: ConsensusBlockFields = - zksync_protobuf::serde::deserialize(v).context("consensus")?; - Some(v.encode()) - } - }, }) } } -/// Consensus-related L2 block (= miniblock) fields. -#[derive(Debug, Clone, PartialEq)] -pub struct ConsensusBlockFields { - /// Hash of the previous consensus block. - pub parent: validator::BlockHeaderHash, - /// Quorum certificate for the block. 
- pub justification: validator::CommitQC, -} - -impl ConsensusBlockFields { - pub fn decode(src: &en::ConsensusBlockFields) -> anyhow::Result { - zksync_protobuf::decode(&src.0 .0) +impl SyncBlock { + pub(crate) fn into_api( + self, + current_operator_address: Address, + transactions: Option>, + ) -> en::SyncBlock { + en::SyncBlock { + number: self.number, + l1_batch_number: self.l1_batch_number, + last_in_batch: self.last_in_batch, + timestamp: self.timestamp, + l1_gas_price: self.l1_gas_price, + l2_fair_gas_price: self.l2_fair_gas_price, + base_system_contracts_hashes: self.base_system_contracts_hashes, + operator_address: self.fee_account_address.unwrap_or(current_operator_address), + transactions, + virtual_blocks: Some(self.virtual_blocks), + hash: Some(self.hash), + protocol_version: self.protocol_version, + } } - pub fn encode(&self) -> en::ConsensusBlockFields { - en::ConsensusBlockFields(zksync_protobuf::encode(self).into()) + pub(crate) fn into_payload( + self, + current_operator_address: Address, + transactions: Vec, + ) -> Payload { + Payload { + protocol_version: self.protocol_version, + hash: self.hash, + l1_batch_number: self.l1_batch_number, + timestamp: self.timestamp, + l1_gas_price: self.l1_gas_price, + l2_fair_gas_price: self.l2_fair_gas_price, + virtual_blocks: self.virtual_blocks, + operator_address: self.fee_account_address.unwrap_or(current_operator_address), + transactions, + last_in_batch: self.last_in_batch, + } } } -impl ProtoFmt for ConsensusBlockFields { - type Proto = crate::models::proto::ConsensusBlockFields; +/// L2 block (= miniblock) payload. +#[derive(Debug, PartialEq)] +pub struct Payload { + pub protocol_version: ProtocolVersionId, + pub hash: H256, + pub l1_batch_number: L1BatchNumber, + pub timestamp: u64, + pub l1_gas_price: u64, + pub l2_fair_gas_price: u64, + pub virtual_blocks: u32, + pub operator_address: Address, + pub transactions: Vec, + pub last_in_batch: bool, +} + +impl ProtoFmt for Payload { + type Proto = super::proto::Payload; + + fn read(message: &Self::Proto) -> anyhow::Result { + let mut transactions = Vec::with_capacity(message.transactions.len()); + for (i, tx) in message.transactions.iter().enumerate() { + transactions.push( + required(&tx.json) + .and_then(|json_str| Ok(serde_json::from_str(json_str)?)) + .with_context(|| format!("transaction[{i}]"))?, + ); + } - fn read(r: &Self::Proto) -> anyhow::Result { Ok(Self { - parent: read_required(&r.parent).context("parent")?, - justification: read_required(&r.justification).context("justification")?, + protocol_version: required(&message.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?, + hash: required(&message.hash) + .and_then(|h| parse_h256(h)) + .context("hash")?, + l1_batch_number: L1BatchNumber( + *required(&message.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&message.timestamp).context("timestamp")?, + l1_gas_price: *required(&message.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&message.l2_fair_gas_price) + .context("l2_fair_gas_price")?, + virtual_blocks: *required(&message.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&message.operator_address) + .and_then(|a| parse_h160(a)) + .context("operator_address")?, + transactions, + last_in_batch: *required(&message.last_in_batch).context("last_in_batch")?, }) } - fn build(&self) -> Self::Proto { Self::Proto { - parent: Some(self.parent.build()), - justification: 
Some(self.justification.build()), + protocol_version: Some((self.protocol_version as u16).into()), + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. + transactions: self + .transactions + .iter() + .map(|t| super::proto::Transaction { + // TODO: There is no guarantee that json encoding here will be deterministic. + json: Some(serde_json::to_string(t).unwrap()), + }) + .collect(), + last_in_batch: Some(self.last_in_batch), } } } -#[cfg(test)] -mod tests { - use rand::Rng; - use zksync_consensus_roles::validator; - - use super::ConsensusBlockFields; +impl Payload { + pub fn decode(payload: &validator::Payload) -> anyhow::Result { + zksync_protobuf::decode(&payload.0) + } - #[tokio::test] - async fn encode_decode() { - let rng = &mut rand::thread_rng(); - let block = rng.gen::(); - let want = ConsensusBlockFields { - parent: block.header.parent, - justification: block.justification, - }; - assert_eq!(want, ConsensusBlockFields::decode(&want.encode()).unwrap()); + pub fn encode(&self) -> validator::Payload { + validator::Payload(zksync_protobuf::encode(self)) } } diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index a80e810525e..9ece7dad484 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -1,9 +1,9 @@ -use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber, Transaction}; +use zksync_types::{api::en, Address, MiniblockNumber}; use crate::{ instrument::InstrumentExt, metrics::MethodLatency, - models::{storage_sync::StorageSyncBlock, storage_transaction::StorageTransaction}, + models::storage_sync::{StorageSyncBlock, SyncBlock}, StorageProcessor, }; @@ -14,14 +14,11 @@ pub struct SyncDal<'a, 'c> { } impl SyncDal<'_, '_> { - pub async fn sync_block( + pub(super) async fn sync_block_inner( &mut self, block_number: MiniblockNumber, - current_operator_address: Address, - include_transactions: bool, ) -> anyhow::Result> { - let latency = MethodLatency::new("sync_dal_sync_block"); - let storage_block_details = sqlx::query_as!( + let Some(block) = sqlx::query_as!( StorageSyncBlock, r#" SELECT @@ -50,7 +47,6 @@ impl SyncDal<'_, '_> { miniblocks.default_aa_code_hash, miniblocks.virtual_blocks, miniblocks.hash, - miniblocks.consensus, miniblocks.protocol_version AS "protocol_version!", l1_batches.fee_account_address AS "fee_account_address?" FROM @@ -64,40 +60,34 @@ impl SyncDal<'_, '_> { .instrument("sync_dal_sync_block.block") .with_arg("block_number", &block_number) .fetch_optional(self.storage.conn()) - .await?; + .await? + else { + return Ok(None); + }; + Ok(Some(block.try_into()?)) + } - let Some(storage_block_details) = storage_block_details else { + pub async fn sync_block( + &mut self, + block_number: MiniblockNumber, + current_operator_address: Address, + include_transactions: bool, + ) -> anyhow::Result> { + let _latency = MethodLatency::new("sync_dal_sync_block"); + let Some(block) = self.sync_block_inner(block_number).await? 
else { return Ok(None); }; let transactions = if include_transactions { - let transactions = sqlx::query_as!( - StorageTransaction, - r#" - SELECT - * - FROM - transactions - WHERE - miniblock_number = $1 - ORDER BY - index_in_block - "#, - block_number.0 as i64 + Some( + self.storage + .transactions_web3_dal() + .get_raw_miniblock_transactions(block_number) + .await?, ) - .instrument("sync_dal_sync_block.transactions") - .with_arg("block_number", &block_number) - .fetch_all(self.storage.conn()) - .await?; - - Some(transactions.into_iter().map(Transaction::from).collect()) } else { None }; - - let block = - storage_block_details.into_sync_block(current_operator_address, transactions)?; - drop(latency); - Ok(Some(block)) + Ok(Some(block.into_api(current_operator_address, transactions))) } } @@ -106,7 +96,7 @@ mod tests { use zksync_types::{ block::{BlockGasCount, L1BatchHeader}, fee::TransactionExecutionMetrics, - L1BatchNumber, ProtocolVersion, ProtocolVersionId, + L1BatchNumber, ProtocolVersion, ProtocolVersionId, Transaction, }; use super::*; diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index e41d6cd2704..033a053cad2 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -387,7 +387,7 @@ impl TransactionsWeb3Dal<'_, '_> { pub async fn get_raw_miniblock_transactions( &mut self, miniblock: MiniblockNumber, - ) -> Result, SqlxError> { + ) -> sqlx::Result> { let rows = sqlx::query_as!( StorageTransaction, r#" diff --git a/core/lib/object_store/Cargo.toml b/core/lib/object_store/Cargo.toml index f1b27c76d02..ec42f47c6bf 100644 --- a/core/lib/object_store/Cargo.toml +++ b/core/lib/object_store/Cargo.toml @@ -13,7 +13,7 @@ categories = ["cryptography"] vise = { git = "https://github.com/matter-labs/vise.git", version = "0.1.0", rev = "1c9cc500e92cf9ea052b230e114a6f9cce4fb2c1" } zksync_config = { path = "../config" } zksync_types = { path = "../types" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } anyhow = "1.0" async-trait = "0.1" diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 92c04a41376..d04f9f8ec09 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -21,8 +21,8 @@ codegen = { git = "https://github.com/matter-labs/solidity_plonk_verifier.git", zkevm_test_harness = { git = "https://github.com/matter-labs/era-zkevm_test_harness.git", branch = "v1.3.3" } zk_evm_1_4_0 = { git = "https://github.com/matter-labs/era-zk_evm.git", branch = "v1.4.0", package = "zk_evm" } zk_evm = { git = "https://github.com/matter-labs/era-zk_evm.git", tag = "v1.3.3-rc2" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } anyhow = "1.0.75" chrono = { version = "0.4", 
features = ["serde"] } @@ -53,4 +53,4 @@ tokio = { version = "1", features = ["rt", "macros"] } serde_with = { version = "1", features = ["hex"] } [build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index d64e28981e7..5b8e1abf8dd 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -7,12 +7,6 @@ use zksync_contracts::BaseSystemContractsHashes; use crate::ProtocolVersionId; -/// Protobuf-encoded consensus-related L2 block (= miniblock) fields. -/// See `zksync_dal::models::storage_sync::ConsensusBlockFields`. -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -#[serde(transparent)] -pub struct ConsensusBlockFields(pub zksync_basic_types::Bytes); - /// Representation of the L2 block, as needed for the EN synchronization. /// This structure has several fields that describe *L1 batch* rather than /// *L2 block*, thus they are the same for all the L2 blocks in the batch. @@ -48,7 +42,4 @@ pub struct SyncBlock { pub hash: Option, /// Version of the protocol used for this block. pub protocol_version: ProtocolVersionId, - /// Consensus-related information about the block. Not present if consensus is not enabled - /// for the environment. - pub consensus: Option, } diff --git a/core/lib/zksync_core/Cargo.toml b/core/lib/zksync_core/Cargo.toml index 67a26f67e5b..af62e4afff5 100644 --- a/core/lib/zksync_core/Cargo.toml +++ b/core/lib/zksync_core/Cargo.toml @@ -38,12 +38,14 @@ vlog = { path = "../vlog" } multivm = { path = "../multivm" } # Consensus dependenices -zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } -zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } +zksync_concurrency = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_crypto = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_roles = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_storage = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_executor = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = 
"5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_bft = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_consensus_utils = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } +zksync_protobuf = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "5727a3e0b22470bb90092388f9125bcb366df613" } prost = "0.12.1" serde = { version = "1.0", features = ["derive"] } @@ -90,6 +92,3 @@ assert_matches = "1.5" jsonrpsee = "0.21.0" tempfile = "3.0.2" test-casing = "0.1.2" - -[build-dependencies] -zksync_protobuf_build = { version = "0.1.0", git = "https://github.com/matter-labs/era-consensus.git", rev = "84cdd9e45fd84bc1fac0b394c899ae33aef91afa" } diff --git a/core/lib/zksync_core/build.rs b/core/lib/zksync_core/build.rs deleted file mode 100644 index 7e8cc45bb8c..00000000000 --- a/core/lib/zksync_core/build.rs +++ /dev/null @@ -1,12 +0,0 @@ -//! Generates rust code from protobufs. -fn main() { - zksync_protobuf_build::Config { - input_root: "src/consensus/proto".into(), - proto_root: "zksync/core/consensus".into(), - dependencies: vec![], - protobuf_crate: "::zksync_protobuf".parse().unwrap(), - is_public: false, - } - .generate() - .expect("generate()"); -} diff --git a/core/lib/zksync_core/src/consensus/mod.rs b/core/lib/zksync_core/src/consensus/mod.rs index 34c73274fb5..423ef75b7ce 100644 --- a/core/lib/zksync_core/src/consensus/mod.rs +++ b/core/lib/zksync_core/src/consensus/mod.rs @@ -1,61 +1,210 @@ //! Consensus-related functionality. -use std::sync::Arc; +#![allow(clippy::redundant_locals)] +use std::collections::{HashMap, HashSet}; use anyhow::Context as _; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_executor::{ConsensusConfig, Executor, ExecutorConfig}; +use serde::de::Error; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_consensus_crypto::{Text, TextFmt}; +use zksync_consensus_executor as executor; use zksync_consensus_roles::{node, validator}; +use zksync_consensus_storage::BlockStore; use zksync_dal::ConnectionPool; use zksync_types::Address; -mod payload; -mod proto; -mod storage; +use self::storage::Store; +use crate::sync_layer::sync_action::ActionQueueSender; +mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; -pub(crate) use self::{payload::Payload, storage::sync_block_to_consensus_block}; +#[derive(PartialEq, Eq, Hash)] +pub struct SerdeText(pub T); + +impl<'de, T: TextFmt> serde::Deserialize<'de> for SerdeText { + fn deserialize>(d: D) -> Result { + Ok(Self( + T::decode(Text::new(<&str>::deserialize(d)?)).map_err(Error::custom)?, + )) + } +} + +/// Config (shared between main node and external node) which implements `serde` encoding +/// and therefore can be flattened into env vars. +#[derive(serde::Deserialize)] +pub struct SerdeConfig { + /// Local socket address to listen for the incoming connections. + pub server_addr: std::net::SocketAddr, + /// Public address of this node (should forward to `server_addr`) + /// that will be advertised to peers, so that they can connect to this + /// node. + pub public_addr: std::net::SocketAddr, + + /// Validator private key. Should be set only for the validator node. + pub validator_key: Option>, + + /// Validators participating in consensus. + pub validator_set: Vec>, + + /// Key of this node. It uniquely identifies the node. 
+ pub node_key: SerdeText, + /// Limit on the number of inbound connections outside + /// of the `static_inbound` set. + pub gossip_dynamic_inbound_limit: u64, + /// Inbound gossip connections that should be unconditionally accepted. + pub gossip_static_inbound: HashSet>, + /// Outbound gossip connections that the node should actively try to + /// establish and maintain. + pub gossip_static_outbound: HashMap, std::net::SocketAddr>, + + pub operator_address: Option
, +} + +impl SerdeConfig { + /// Extracts consensus executor config from the `SerdeConfig`. + fn executor(&self) -> anyhow::Result { + Ok(executor::Config { + server_addr: self.server_addr, + validators: validator::ValidatorSet::new( + self.validator_set.iter().map(|k| k.0.clone()), + ) + .context("validator_set")?, + node_key: self.node_key.0.clone(), + gossip_dynamic_inbound_limit: self.gossip_dynamic_inbound_limit, + gossip_static_inbound: self + .gossip_static_inbound + .iter() + .map(|k| k.0.clone()) + .collect(), + gossip_static_outbound: self + .gossip_static_outbound + .iter() + .map(|(k, v)| (k.0.clone(), *v)) + .collect(), + }) + } + + /// Extracts a validator config from the `SerdeConfig`. + pub(crate) fn validator(&self) -> anyhow::Result { + let key = self + .validator_key + .as_ref() + .context("validator_key is required")?; + Ok(executor::ValidatorConfig { + key: key.0.clone(), + public_addr: self.public_addr, + }) + } +} + +impl TryFrom for MainNodeConfig { + type Error = anyhow::Error; + fn try_from(cfg: SerdeConfig) -> anyhow::Result { + Ok(Self { + executor: cfg.executor()?, + validator: cfg.validator()?, + operator_address: cfg + .operator_address + .context("operator_address is required")?, + }) + } +} -#[derive(Debug)] -pub struct Config { - pub executor: ExecutorConfig, - pub consensus: ConsensusConfig, - pub node_key: node::SecretKey, - pub validator_key: validator::SecretKey, +/// Main node consensus config. +#[derive(Debug, Clone)] +pub struct MainNodeConfig { + pub executor: executor::Config, + pub validator: executor::ValidatorConfig, pub operator_address: Address, } -impl Config { - #[allow(dead_code)] +impl MainNodeConfig { + /// Task generating consensus certificates for the miniblocks generated by `StateKeeper`. + /// Broadcasts the blocks with certificates to gossip network peers. pub async fn run(self, ctx: &ctx::Ctx, pool: ConnectionPool) -> anyhow::Result<()> { anyhow::ensure!( self.executor.validators - == validator::ValidatorSet::new(vec![self.validator_key.public()]).unwrap(), + == validator::ValidatorSet::new(vec![self.validator.key.public()]).unwrap(), "currently only consensus with just 1 validator is supported" ); - let store = Arc::new( - storage::SignedBlockStore::new( - ctx, - pool, - &self.executor.genesis_block, - self.operator_address, - ) - .await?, - ); - let mut executor = Executor::new(ctx, self.executor, self.node_key, store.clone()).await?; - executor - .set_validator( - self.consensus, - self.validator_key, - store.clone(), - store.clone(), - ) - .context("executor.set_validator()")?; scope::run!(&ctx, |ctx, s| async { - s.spawn_bg(store.run_background_tasks(ctx)); + let store = Store::new(pool, self.operator_address); + let mut block_store = store.clone().into_block_store(); + block_store + .try_init_genesis(ctx, &self.validator.key) + .await + .wrap("block_store.try_init_genesis()")?; + let (block_store, runner) = BlockStore::new(ctx, Box::new(block_store)) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let executor = executor::Executor { + config: self.executor, + block_store, + validator: Some(executor::Validator { + config: self.validator, + replica_store: Box::new(store.clone()), + payload_manager: Box::new(store.clone()), + }), + }; + executor.run(ctx).await + }) + .await + } +} + +/// External node consensus config. 
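To show how the pieces above are intended to fit together, here is a hypothetical wiring sketch for the main node, modelled on the test code later in this diff; `ctx`, `pool` and `serde_cfg` are assumed to be supplied by the node's startup code, and this exact function does not exist in the diff:

use zksync_concurrency::{ctx, scope};
use zksync_dal::ConnectionPool;

async fn run_main_node_consensus(
    ctx: &ctx::Ctx,
    pool: ConnectionPool,
    serde_cfg: SerdeConfig,
) -> anyhow::Result<()> {
    // Validate the serde-facing config up front: `validator_key` and
    // `operator_address` are required for the main node.
    let cfg = MainNodeConfig::try_from(serde_cfg)?;
    scope::run!(ctx, |ctx, s| async {
        // The consensus task signs blocks produced by the state keeper and
        // gossips them; it runs until the context is cancelled.
        s.spawn_bg(cfg.run(ctx, pool.clone()));
        Ok(())
    })
    .await
}

The `try_from` conversion fails early if a required field is missing, which is the point of keeping `SerdeConfig` separate from the role-specific configs.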
+#[derive(Debug, Clone)] +pub struct FetcherConfig { + executor: executor::Config, + operator_address: Address, +} + +impl TryFrom for FetcherConfig { + type Error = anyhow::Error; + fn try_from(cfg: SerdeConfig) -> anyhow::Result { + Ok(Self { + executor: cfg.executor()?, + operator_address: cfg + .operator_address + .context("operator_address is required")?, + }) + } +} + +impl FetcherConfig { + /// Task fetching L2 blocks using peer-to-peer gossip network. + pub async fn run( + self, + ctx: &ctx::Ctx, + pool: ConnectionPool, + actions: ActionQueueSender, + ) -> anyhow::Result<()> { + tracing::info!( + "Starting gossip fetcher with {:?} and node key {:?}", + self.executor, + self.executor.node_key.public(), + ); + + scope::run!(ctx, |ctx, s| async { + let store = Store::new(pool, self.operator_address); + let mut block_store = store.clone().into_block_store(); + block_store + .set_actions_queue(ctx, actions) + .await + .wrap("block_store.set_actions_queue()")?; + let (block_store, runner) = BlockStore::new(ctx, Box::new(block_store)) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let executor = executor::Executor { + config: self.executor, + block_store, + validator: None, + }; executor.run(ctx).await }) .await diff --git a/core/lib/zksync_core/src/consensus/payload.rs b/core/lib/zksync_core/src/consensus/payload.rs deleted file mode 100644 index be7a839c8f8..00000000000 --- a/core/lib/zksync_core/src/consensus/payload.rs +++ /dev/null @@ -1,105 +0,0 @@ -use anyhow::Context as _; -use zksync_consensus_roles::validator; -use zksync_protobuf::{required, ProtoFmt}; -use zksync_types::{ - api::en::SyncBlock, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256, -}; - -/// L2 block (= miniblock) payload. -#[derive(Debug, PartialEq)] -pub(crate) struct Payload { - pub protocol_version: ProtocolVersionId, - pub hash: H256, - pub l1_batch_number: L1BatchNumber, - pub timestamp: u64, - pub l1_gas_price: u64, - pub l2_fair_gas_price: u64, - pub virtual_blocks: u32, - pub operator_address: Address, - pub transactions: Vec, -} - -impl ProtoFmt for Payload { - type Proto = super::proto::Payload; - - fn read(message: &Self::Proto) -> anyhow::Result { - let mut transactions = Vec::with_capacity(message.transactions.len()); - for (i, tx) in message.transactions.iter().enumerate() { - transactions.push( - required(&tx.json) - .and_then(|json_str| Ok(serde_json::from_str(json_str)?)) - .with_context(|| format!("transaction[{i}]"))?, - ); - } - - Ok(Self { - protocol_version: required(&message.protocol_version) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("protocol_version")?, - hash: required(&message.hash) - .and_then(|bytes| Ok(<[u8; 32]>::try_from(bytes.as_slice())?.into())) - .context("hash")?, - l1_batch_number: L1BatchNumber( - *required(&message.l1_batch_number).context("l1_batch_number")?, - ), - timestamp: *required(&message.timestamp).context("timestamp")?, - l1_gas_price: *required(&message.l1_gas_price).context("l1_gas_price")?, - l2_fair_gas_price: *required(&message.l2_fair_gas_price) - .context("l2_fair_gas_price")?, - virtual_blocks: *required(&message.virtual_blocks).context("virtual_blocks")?, - operator_address: required(&message.operator_address) - .and_then(|bytes| Ok(<[u8; 20]>::try_from(bytes.as_slice())?.into())) - .context("operator_address")?, - transactions, - }) - } - fn build(&self) -> Self::Proto { - Self::Proto { - protocol_version: Some((self.protocol_version as u16).into()), - hash: 
Some(self.hash.as_bytes().into()), - l1_batch_number: Some(self.l1_batch_number.0), - timestamp: Some(self.timestamp), - l1_gas_price: Some(self.l1_gas_price), - l2_fair_gas_price: Some(self.l2_fair_gas_price), - virtual_blocks: Some(self.virtual_blocks), - operator_address: Some(self.operator_address.as_bytes().into()), - // Transactions are stored in execution order, therefore order is deterministic. - transactions: self - .transactions - .iter() - .map(|t| super::proto::Transaction { - // TODO: There is no guarantee that json encoding here will be deterministic. - json: Some(serde_json::to_string(t).unwrap()), - }) - .collect(), - } - } -} - -impl TryFrom for Payload { - type Error = anyhow::Error; - - fn try_from(block: SyncBlock) -> anyhow::Result { - Ok(Self { - protocol_version: block.protocol_version, - hash: block.hash.unwrap_or_default(), - l1_batch_number: block.l1_batch_number, - timestamp: block.timestamp, - l1_gas_price: block.l1_gas_price, - l2_fair_gas_price: block.l2_fair_gas_price, - virtual_blocks: block.virtual_blocks.unwrap_or(0), - operator_address: block.operator_address, - transactions: block.transactions.context("Transactions are required")?, - }) - } -} - -impl Payload { - pub fn decode(payload: &validator::Payload) -> anyhow::Result { - zksync_protobuf::decode(&payload.0) - } - - pub fn encode(&self) -> validator::Payload { - validator::Payload(zksync_protobuf::encode(self)) - } -} diff --git a/core/lib/zksync_core/src/consensus/proto/mod.proto b/core/lib/zksync_core/src/consensus/proto/mod.proto deleted file mode 100644 index c5f99582875..00000000000 --- a/core/lib/zksync_core/src/consensus/proto/mod.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; - -package zksync.core.consensus; - -message Transaction { - // Default derive(serde::Serialize) encoding of the zksync_types::Transaction. - // TODO(gprusak): it is neither efficient, unique, nor suitable for version control. - // replace with a more robust encoding. - optional string json = 1; // required -} - -message Payload { - // zksync-era ProtocolVersionId - optional uint32 protocol_version = 9; // required; u16 - optional bytes hash = 1; // required; H256 - optional uint32 l1_batch_number = 2; // required - optional uint64 timestamp = 3; // required; seconds since UNIX epoch - optional uint64 l1_gas_price = 4; // required; gwei - optional uint64 l2_fair_gas_price = 5; // required; gwei - optional uint32 virtual_blocks = 6; // required - optional bytes operator_address = 7; // required; H160 - repeated Transaction transactions = 8; -} diff --git a/core/lib/zksync_core/src/consensus/proto/mod.rs b/core/lib/zksync_core/src/consensus/proto/mod.rs deleted file mode 100644 index e6ac37696c2..00000000000 --- a/core/lib/zksync_core/src/consensus/proto/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -#![allow(warnings)] -include!(concat!(env!("OUT_DIR"), "/src/consensus/proto/gen.rs")); diff --git a/core/lib/zksync_core/src/consensus/storage.rs b/core/lib/zksync_core/src/consensus/storage.rs deleted file mode 100644 index f47fc75b648..00000000000 --- a/core/lib/zksync_core/src/consensus/storage.rs +++ /dev/null @@ -1,419 +0,0 @@ -//! Storage implementation based on DAL. 
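The TODO in the removed proto file above (JSON-encoded transactions are "neither efficient, unique, nor suitable for version control") is easy to demonstrate: `serde_json` output is not canonical, so byte-wise different encodings can describe the same value, and any hash computed over the bytes will differ. A minimal, self-contained illustration (assuming only the `serde_json` crate):

fn main() {
    // Same logical object, different field order and whitespace.
    let a = r#"{"to":"0x01","value":1}"#;
    let b = r#"{ "value": 1, "to": "0x01" }"#;
    let va: serde_json::Value = serde_json::from_str(a).unwrap();
    let vb: serde_json::Value = serde_json::from_str(b).unwrap();
    assert_eq!(va, vb); // equal as values...
    assert_ne!(a.as_bytes(), b.as_bytes()); // ...but not as bytes, so payload hashes differ.
}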
-use std::ops; - -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; -use zksync_consensus_bft::PayloadSource; -use zksync_consensus_roles::validator; -use zksync_consensus_storage::{BlockStore, ReplicaState, ReplicaStateStore, WriteBlockStore}; -use zksync_dal::{blocks_dal::ConsensusBlockFields, ConnectionPool}; -use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber}; - -use crate::consensus; - -pub(crate) fn sync_block_to_consensus_block( - block: SyncBlock, -) -> anyhow::Result { - let number = validator::BlockNumber(block.number.0.into()); - let consensus = ConsensusBlockFields::decode( - block - .consensus - .as_ref() - .context("Missing consensus fields")?, - ) - .context("ConsensusBlockFields::decode()")?; - - let payload: consensus::Payload = block.try_into()?; - let payload = payload.encode(); - anyhow::ensure!(payload.hash() == consensus.justification.message.proposal.payload); - Ok(validator::FinalBlock { - header: validator::BlockHeader { - parent: consensus.parent, - number, - payload: payload.hash(), - }, - payload, - justification: consensus.justification, - }) -} - -/// Context-aware `zksync_dal::StorageProcessor` wrapper. -pub(super) struct StorageProcessor<'a>(zksync_dal::StorageProcessor<'a>); - -pub(super) async fn storage<'a>( - ctx: &ctx::Ctx, - pool: &'a ConnectionPool, -) -> ctx::Result> { - Ok(StorageProcessor( - ctx.wait(pool.access_storage_tagged("sync_layer")).await??, - )) -} - -impl<'a> StorageProcessor<'a> { - pub async fn start_transaction<'b, 'c: 'b>( - &'c mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(StorageProcessor( - ctx.wait(self.0.start_transaction()) - .await? - .context("sqlx")?, - )) - } - - pub async fn commit(self, ctx: &ctx::Ctx) -> ctx::Result<()> { - Ok(ctx.wait(self.0.commit()).await?.context("sqlx")?) - } - - async fn fetch_sync_block( - &mut self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - operator_address: Address, - ) -> ctx::Result> { - let number = MiniblockNumber(number.0.try_into().context("MiniblockNumber")?); - Ok(ctx - .wait(self.0.sync_dal().sync_block(number, operator_address, true)) - .await? - .context("sync_block()")?) - } - - pub async fn fetch_block( - &mut self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - operator_address: Address, - ) -> ctx::Result> { - let Some(block) = self.fetch_sync_block(ctx, number, operator_address).await? else { - return Ok(None); - }; - if block.consensus.is_none() { - return Ok(None); - } - Ok(Some( - sync_block_to_consensus_block(block).context("sync_block_to_consensus_block()")?, - )) - } - - pub async fn fetch_payload( - &mut self, - ctx: &ctx::Ctx, - block_number: validator::BlockNumber, - operator_address: Address, - ) -> ctx::Result> { - let Some(sync_block) = self - .fetch_sync_block(ctx, block_number, operator_address) - .await? - else { - return Ok(None); - }; - Ok(Some(sync_block.try_into()?)) - } - - pub async fn put_block( - &mut self, - ctx: &ctx::Ctx, - block: &validator::FinalBlock, - operator_address: Address, - ) -> ctx::Result<()> { - let n = MiniblockNumber( - block - .header - .number - .0 - .try_into() - .context("MiniblockNumber")?, - ); - let mut txn = self - .start_transaction(ctx) - .await - .wrap("start_transaction()")?; - - // We require the block to be already stored in Postgres when we set the consensus field. - let sync_block = txn - .fetch_sync_block(ctx, block.header.number, operator_address) - .await? 
- .context("unknown block")?; - let want = &ConsensusBlockFields { - parent: block.header.parent, - justification: block.justification.clone(), - }; - - // If consensus field is already set, just validate the stored value but don't override it. - if sync_block.consensus.is_some() { - sync_block_to_consensus_block(sync_block) - .context("an invalid block found in storage")?; - return Ok(()); - } - - // Verify that the payload matches the storage. - let want_payload: consensus::Payload = sync_block.try_into()?; - if want_payload.encode() != block.payload { - let got_payload = consensus::Payload::decode(&block.payload)?; - return Err(anyhow::anyhow!( - "payload mismatch: got {got_payload:?}, want {want_payload:?}" - ) - .into()); - } - - ctx.wait(txn.0.blocks_dal().set_miniblock_consensus_fields(n, want)) - .await? - .context("set_miniblock_consensus_fields()")?; - txn.commit(ctx).await.wrap("commit()")?; - Ok(()) - } - - pub async fn find_head_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result { - let head = ctx - .wait( - self.0 - .blocks_dal() - .get_last_miniblock_number_with_consensus_fields(), - ) - .await? - .context("get_last_miniblock_number_with_consensus_fields()")? - .context("head not found")?; - Ok(validator::BlockNumber(head.0.into())) - } - - pub async fn find_head_forward( - &mut self, - ctx: &ctx::Ctx, - start_at: validator::BlockNumber, - ) -> ctx::Result { - let mut head = MiniblockNumber(start_at.0.try_into().context("MiniblockNumber")?); - while ctx - .wait(self.0.blocks_dal().has_consensus_fields(head + 1)) - .await? - .context("has_consensus_fields()")? - { - head += 1; - } - Ok(validator::BlockNumber(head.0.into())) - } -} - -/// Postgres-based [`BlockStore`] implementation, which -/// considers blocks as stored <=> they have consensus field set. -#[derive(Debug)] -pub(super) struct SignedBlockStore { - genesis: validator::BlockNumber, - head: sync::watch::Sender, - pool: ConnectionPool, - operator_address: Address, -} - -impl SignedBlockStore { - /// Creates a new storage handle. `pool` should have multiple connections to work efficiently. - pub async fn new( - ctx: &ctx::Ctx, - pool: ConnectionPool, - genesis: &validator::FinalBlock, - operator_address: Address, - ) -> anyhow::Result { - // Ensure that genesis block has consensus field set in Postgres. - let head = { - let mut storage = storage(ctx, &pool).await.wrap("storage()")?; - storage - .put_block(ctx, genesis, operator_address) - .await - .wrap("put_block()")?; - - // Find the last miniblock with consensus field set (aka head). - // We assume here that all blocks in range (genesis,head) also have consensus field set. - // WARNING: genesis should NEVER be moved to an earlier block. - storage - .find_head_number(ctx) - .await - .wrap("find_head_number()")? - }; - Ok(Self { - genesis: genesis.header.number, - head: sync::watch::channel(head).0, - pool, - operator_address, - }) - } -} - -#[async_trait::async_trait] -impl WriteBlockStore for SignedBlockStore { - /// Verify that `payload` is a correct proposal for the block `block_number`. - async fn verify_payload( - &self, - ctx: &ctx::Ctx, - block_number: validator::BlockNumber, - payload: &validator::Payload, - ) -> ctx::Result<()> { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - let want = storage - .fetch_payload(ctx, block_number, self.operator_address) - .await - .wrap("fetch_payload()")? 
- .context("unknown block")?; - let got = consensus::Payload::decode(payload).context("consensus::Payload::decode()")?; - if got != want { - return Err(anyhow::anyhow!("unexpected payload: got {got:?} want {want:?}").into()); - } - Ok(()) - } - - /// Puts a block into this storage. - async fn put_block(&self, ctx: &ctx::Ctx, block: &validator::FinalBlock) -> ctx::Result<()> { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - - // Currently main node is the only validator, so it should be the only one creating new - // blocks. To ensure that no gaps in the blocks are created we check here that we always - // insert the next block after the current head block. - let head = *self.head.borrow(); - let head = storage - .find_head_forward(ctx, head) - .await - .wrap("find_head_forward()")?; - if block.header.number != head.next() { - return Err(anyhow::anyhow!( - "expected block with number {}, got {}", - head.next(), - block.header.number - ) - .into()); - } - - storage - .put_block(ctx, block, self.operator_address) - .await - .wrap("put_block()") - } -} - -#[async_trait::async_trait] -impl BlockStore for SignedBlockStore { - async fn head_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - let head = self.last_contiguous_block_number(ctx).await?; - Ok(self.block(ctx, head).await?.context("head block missing")?) - } - - async fn first_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(self - .block(ctx, self.genesis) - .await? - .context("Genesis miniblock not present in Postgres")?) - } - - async fn last_contiguous_block_number( - &self, - ctx: &ctx::Ctx, - ) -> ctx::Result { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - let head = *self.head.borrow(); - storage - .find_head_forward(ctx, head) - .await - .wrap("find_head_forward()") - } - - async fn block( - &self, - ctx: &ctx::Ctx, - number: validator::BlockNumber, - ) -> ctx::Result> { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - storage - .fetch_block(ctx, number, self.operator_address) - .await - .wrap("fetch_block()") - } - - async fn missing_block_numbers( - &self, - ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { - let last = self.last_contiguous_block_number(ctx).await?; - let mut output = vec![]; - output.extend((range.start.0..self.genesis.0).map(validator::BlockNumber)); - output.extend((last.next().0..range.end.0).map(validator::BlockNumber)); - Ok(output) - } - - fn subscribe_to_block_writes(&self) -> sync::watch::Receiver { - self.head.subscribe() - } -} - -#[async_trait::async_trait] -impl ReplicaStateStore for SignedBlockStore { - async fn replica_state(&self, ctx: &ctx::Ctx) -> ctx::Result> { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage")?; - Ok(ctx - .wait(storage.0.consensus_dal().replica_state()) - .await? - .context("replica_state()")?) - } - - async fn put_replica_state( - &self, - ctx: &ctx::Ctx, - replica_state: &ReplicaState, - ) -> ctx::Result<()> { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage")?; - Ok(ctx - .wait(storage.0.consensus_dal().put_replica_state(replica_state)) - .await? - .context("put_replica_state()")?) 
- } -} - -#[async_trait::async_trait] -impl PayloadSource for SignedBlockStore { - async fn propose( - &self, - ctx: &ctx::Ctx, - block_number: validator::BlockNumber, - ) -> ctx::Result { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - if let Some(payload) = storage - .fetch_payload(ctx, block_number, self.operator_address) - .await - .wrap("fetch_payload()")? - { - return Ok(payload.encode()); - } - ctx.sleep(POLL_INTERVAL).await?; - } - } -} - -impl SignedBlockStore { - pub async fn run_background_tasks(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); - let mut head = *self.head.borrow(); - let res: ctx::Result<()> = async { - loop { - let storage = &mut storage(ctx, &self.pool).await.wrap("storage()")?; - head = storage - .find_head_forward(ctx, head) - .await - .wrap("find_head_forward()")?; - self.head.send_if_modified(|x| { - if *x >= head { - return false; - } - *x = head; - true - }); - ctx.sleep(POLL_INTERVAL).await?; - } - } - .await; - match res.err().unwrap() { - ctx::Error::Canceled(_) => Ok(()), - ctx::Error::Internal(err) => Err(err), - } - } -} diff --git a/core/lib/zksync_core/src/consensus/storage/mod.rs b/core/lib/zksync_core/src/consensus/storage/mod.rs new file mode 100644 index 00000000000..e2e0f28c697 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/storage/mod.rs @@ -0,0 +1,432 @@ +//! Storage implementation based on DAL. +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, sync, time}; +use zksync_consensus_bft::PayloadManager; +use zksync_consensus_roles::validator; +use zksync_consensus_storage::{BlockStoreState, PersistentBlockStore, ReplicaState, ReplicaStore}; +use zksync_dal::{consensus_dal::Payload, ConnectionPool}; +use zksync_types::{Address, MiniblockNumber}; + +#[cfg(test)] +mod testonly; + +use crate::sync_layer::{ + fetcher::{FetchedBlock, FetcherCursor}, + sync_action::ActionQueueSender, +}; + +/// Context-aware `zksync_dal::StorageProcessor` wrapper. +pub(super) struct CtxStorage<'a>(zksync_dal::StorageProcessor<'a>); + +impl<'a> CtxStorage<'a> { + /// Wrapper for `access_storage_tagged()`. + pub async fn access(ctx: &ctx::Ctx, pool: &'a ConnectionPool) -> ctx::Result> { + Ok(Self( + ctx.wait(pool.access_storage_tagged("consensus")).await??, + )) + } + + /// Wrapper for `start_transaction()`. + pub async fn start_transaction<'b, 'c: 'b>( + &'c mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(CtxStorage( + ctx.wait(self.0.start_transaction()) + .await? + .context("sqlx")?, + )) + } + + /// Wrapper for `blocks_dal().get_sealed_miniblock_number()`. + pub async fn last_miniblock_number( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result { + let number = ctx + .wait(self.0.blocks_dal().get_sealed_miniblock_number()) + .await? + .context("sqlx")?; + Ok(validator::BlockNumber(number.0.into())) + } + + /// Wrapper for `commit()`. + pub async fn commit(self, ctx: &ctx::Ctx) -> ctx::Result<()> { + Ok(ctx.wait(self.0.commit()).await?.context("sqlx")?) + } + + /// Wrapper for `consensus_dal().block_payload()`. + pub async fn payload( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + operator_address: Address, + ) -> ctx::Result> { + Ok(ctx + .wait( + self.0 + .consensus_dal() + .block_payload(number, operator_address), + ) + .await??) + } + + /// Wrapper for `consensus_dal().first_certificate()`. 
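The `CtxStorage` wrapper above routes every DAL call through `ctx.wait(..)`, so a cancelled consensus context aborts the wait instead of hanging shutdown, and each step is annotated with `wrap()` to leave a call-site breadcrumb in the error. A hypothetical helper showing the intended call pattern (it would have to live in this module, since `CtxStorage` is `pub(super)`; the helper itself is not part of the diff):

use zksync_concurrency::{ctx, error::Wrap as _};
use zksync_dal::{consensus_dal::Payload, ConnectionPool};
use zksync_types::Address;

async fn fetch_last_payload(
    ctx: &ctx::Ctx,
    pool: &ConnectionPool,
    operator_address: Address,
) -> ctx::Result<Option<Payload>> {
    let mut storage = CtxStorage::access(ctx, pool).await.wrap("access()")?;
    let number = storage
        .last_miniblock_number(ctx)
        .await
        .wrap("last_miniblock_number()")?;
    // `payload()` returns `None` if the miniblock is not (or no longer) in storage.
    storage
        .payload(ctx, number, operator_address)
        .await
        .wrap("payload()")
}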
+ pub async fn first_certificate( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().first_certificate()) + .await??) + } + + /// Wrapper for `consensus_dal().last_certificate()`. + pub async fn last_certificate( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().last_certificate()) + .await??) + } + + /// Wrapper for `consensus_dal().certificate()`. + pub async fn certificate( + &mut self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result> { + Ok(ctx + .wait(self.0.consensus_dal().certificate(number)) + .await??) + } + + /// Wrapper for `consensus_dal().insert_certificate()`. + pub async fn insert_certificate( + &mut self, + ctx: &ctx::Ctx, + cert: &validator::CommitQC, + operator_address: Address, + ) -> ctx::Result<()> { + Ok(ctx + .wait( + self.0 + .consensus_dal() + .insert_certificate(cert, operator_address), + ) + .await??) + } + + /// Wrapper for `consensus_dal().replica_state()`. + pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().replica_state()).await??) + } + + /// Wrapper for `consensus_dal().set_replica_state()`. + pub async fn set_replica_state( + &mut self, + ctx: &ctx::Ctx, + state: &ReplicaState, + ) -> ctx::Result<()> { + Ok(ctx + .wait(self.0.consensus_dal().set_replica_state(state)) + .await? + .context("sqlx")?) + } + + /// Wrapper for `FetcherCursor::new()`. + pub async fn new_fetcher_cursor(&mut self, ctx: &ctx::Ctx) -> ctx::Result { + Ok(ctx.wait(FetcherCursor::new(&mut self.0)).await??) + } +} + +#[derive(Debug)] +struct Cursor { + inner: FetcherCursor, + actions: ActionQueueSender, +} + +impl Cursor { + /// Advances the cursor by converting the block into actions and pushing them + /// to the actions queue. + /// Does nothing and returns Ok() if the block has been already processed. + /// Returns an error if a block with an earlier block number was expected. + async fn advance(&mut self, block: &validator::FinalBlock) -> anyhow::Result<()> { + let number = MiniblockNumber( + u32::try_from(block.header().number.0) + .context("Integer overflow converting block number")?, + ); + let payload = + Payload::decode(&block.payload).context("Failed deserializing block payload")?; + let want = self.inner.next_miniblock; + // Some blocks are missing. + if number > want { + return Err(anyhow::anyhow!("expected {want:?}, got {number:?}")); + } + // Block already processed. + if number < want { + return Ok(()); + } + let block = FetchedBlock { + number, + l1_batch_number: payload.l1_batch_number, + last_in_batch: payload.last_in_batch, + protocol_version: payload.protocol_version, + timestamp: payload.timestamp, + reference_hash: Some(payload.hash), + l1_gas_price: payload.l1_gas_price, + l2_fair_gas_price: payload.l2_fair_gas_price, + virtual_blocks: payload.virtual_blocks, + operator_address: payload.operator_address, + transactions: payload.transactions, + }; + self.actions.push_actions(self.inner.advance(block)).await; + Ok(()) + } +} + +/// Wrapper of `ConnectionPool` implementing `ReplicaStore` and `PayloadManager`. +#[derive(Clone, Debug)] +pub(super) struct Store { + pool: ConnectionPool, + operator_address: Address, +} + +/// Wrapper of `ConnectionPool` implementing `PersistentBlockStore`. +#[derive(Debug)] +pub(super) struct BlockStore { + inner: Store, + /// Mutex preventing concurrent execution of `store_next_block` calls. + store_next_block_mutex: sync::Mutex>, +} + +impl Store { + /// Creates a `Store`. 
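The contract of `Cursor::advance` above is worth spelling out: gossip may re-deliver blocks, so numbers below the cursor are ignored, the expected number advances the cursor, and anything above it means a gap in the sequence and is an error. A standalone restatement with plain integers in place of `validator::BlockNumber` (illustrative only):

use anyhow::ensure;

/// Returns `true` if the block was newly applied, `false` if it was already processed.
fn advance(next_expected: &mut u64, got: u64) -> anyhow::Result<bool> {
    ensure!(got <= *next_expected, "gap: expected {next_expected}, got {got}");
    if got < *next_expected {
        return Ok(false); // replayed block, nothing to do
    }
    *next_expected += 1;
    Ok(true)
}

fn main() -> anyhow::Result<()> {
    let mut next = 5;
    assert!(!advance(&mut next, 4)?); // replayed block: skipped
    assert!(advance(&mut next, 5)?); // expected block: applied
    assert!(advance(&mut next, 7).is_err()); // gap: block 6 is missing
    Ok(())
}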
`pool` should have multiple connections to work efficiently. + pub fn new(pool: ConnectionPool, operator_address: Address) -> Self { + Self { + pool, + operator_address, + } + } + + /// Converts `Store` into a `BlockStore`. + pub fn into_block_store(self) -> BlockStore { + BlockStore { + inner: self, + store_next_block_mutex: sync::Mutex::new(None), + } + } +} + +impl BlockStore { + /// Generates and stores the genesis cert (signed by `validator_key`) for the last sealed miniblock. + /// No-op if db already contains a genesis cert. + pub async fn try_init_genesis( + &mut self, + ctx: &ctx::Ctx, + validator_key: &validator::SecretKey, + ) -> ctx::Result<()> { + let mut storage = CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + // Fetch last miniblock number outside of the transaction to avoid taking a lock. + let number = storage + .last_miniblock_number(ctx) + .await + .wrap("last_miniblock_number()")?; + + let mut txn = storage + .start_transaction(ctx) + .await + .wrap("start_transaction()")?; + if txn + .first_certificate(ctx) + .await + .wrap("first_certificate()")? + .is_some() + { + return Ok(()); + } + let payload = txn + .payload(ctx, number, self.inner.operator_address) + .await + .wrap("payload()")? + .context("miniblock disappeared")?; + let (genesis, _) = zksync_consensus_bft::testonly::make_genesis( + &[validator_key.clone()], + payload.encode(), + number, + ); + txn.insert_certificate(ctx, &genesis.justification, self.inner.operator_address) + .await + .wrap("insert_certificate()")?; + txn.commit(ctx).await.wrap("commit()") + } + + /// Sets an `ActionQueueSender` in the `BlockStore`. See `store_next_block()` for details. + pub async fn set_actions_queue( + &mut self, + ctx: &ctx::Ctx, + actions: ActionQueueSender, + ) -> ctx::Result<()> { + let mut storage = CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + let inner = storage + .new_fetcher_cursor(ctx) + .await + .wrap("new_fetcher_cursor()")?; + *sync::lock(ctx, &self.store_next_block_mutex).await? = Some(Cursor { inner, actions }); + Ok(()) + } +} + +#[async_trait::async_trait] +impl PersistentBlockStore for BlockStore { + async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result { + let mut storage = CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + let first = storage + .first_certificate(ctx) + .await + .wrap("first_certificate()")? + .context("store is empty")?; + let last = storage + .last_certificate(ctx) + .await + .wrap("last_certificate()")? + .context("store is empty")?; + Ok(BlockStoreState { first, last }) + } + + async fn block( + &self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result { + let storage = &mut CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + let justification = storage + .certificate(ctx, number) + .await + .wrap("certificate()")? + .context("not found")?; + let payload = storage + .payload(ctx, number, self.inner.operator_address) + .await + .wrap("payload()")? + .context("miniblock disappeared from storage")?; + Ok(validator::FinalBlock { + payload: payload.encode(), + justification, + }) + } + + /// If actions queue is set (and the block has not been stored yet), + /// the block will be translated into a sequence of actions. + /// The received actions should be fed + /// to `ExternalIO`, so that `StateKeeper` will store the corresponding miniblock in the db. + /// + /// `store_next_block()` call will wait synchronously for the miniblock. 
+ /// Once miniblock is observed in storage, `store_next_block()` will store a cert for this + /// miniblock. + async fn store_next_block( + &self, + ctx: &ctx::Ctx, + block: &validator::FinalBlock, + ) -> ctx::Result<()> { + // This mutex prevents concurrent store_next_block calls. + let mut guard = ctx.wait(self.store_next_block_mutex.lock()).await?; + if let Some(cursor) = &mut *guard { + cursor.advance(block).await.context("cursor.advance()")?; + } + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); + loop { + let mut storage = CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + let number = storage + .last_miniblock_number(ctx) + .await + .wrap("last_miniblock_number()")?; + if number >= block.header().number { + storage + .insert_certificate(ctx, &block.justification, self.inner.operator_address) + .await + .wrap("insert_certificate()")?; + return Ok(()); + } + drop(storage); + ctx.sleep(POLL_INTERVAL).await?; + } + } +} + +#[async_trait::async_trait] +impl ReplicaStore for Store { + async fn state(&self, ctx: &ctx::Ctx) -> ctx::Result> { + let storage = &mut CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; + storage.replica_state(ctx).await.wrap("replica_state()") + } + + async fn set_state(&self, ctx: &ctx::Ctx, state: &ReplicaState) -> ctx::Result<()> { + let storage = &mut CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; + storage + .set_replica_state(ctx, state) + .await + .wrap("set_replica_state()") + } +} + +#[async_trait::async_trait] +impl PayloadManager for Store { + /// Currently (for the main node) proposing is implemented as just converting a miniblock from db (without a cert) into a + /// payload. + async fn propose( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + ) -> ctx::Result { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); + let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; + storage + .certificate(ctx, block_number.prev()) + .await + .wrap("certificate()")? + .with_context(|| format!("parent of {block_number:?} is missing"))?; + drop(storage); + loop { + let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; + if let Some(payload) = storage + .payload(ctx, block_number, self.operator_address) + .await + .wrap("payload()")? + { + return Ok(payload.encode()); + } + drop(storage); + ctx.sleep(POLL_INTERVAL).await?; + } + } + + /// Verify that `payload` is a correct proposal for the block `block_number`. + /// Currently (for the main node) it is implemented as checking whether the received payload + /// matches the miniblock in the db. + async fn verify( + &self, + ctx: &ctx::Ctx, + block_number: validator::BlockNumber, + payload: &validator::Payload, + ) -> ctx::Result<()> { + let want = self.propose(ctx, block_number).await?; + let want = Payload::decode(&want).context("Payload::decode(want)")?; + let got = Payload::decode(payload).context("Payload::decode(got)")?; + if got != want { + return Err(anyhow::anyhow!("unexpected payload: got {got:?} want {want:?}").into()); + } + Ok(()) + } +} diff --git a/core/lib/zksync_core/src/consensus/storage/testonly.rs b/core/lib/zksync_core/src/consensus/storage/testonly.rs new file mode 100644 index 00000000000..a0c32c57a69 --- /dev/null +++ b/core/lib/zksync_core/src/consensus/storage/testonly.rs @@ -0,0 +1,47 @@ +//! Storage test helpers. 
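`store_next_block` and `propose` above, and the test helpers below, all rely on the same poll-until-present idiom: query Postgres, and if the row is not there yet, `ctx.sleep` and retry, letting context cancellation terminate the loop. Its shape, extracted as a generic sketch (this helper does not exist in the diff; it only names the pattern):

use std::future::Future;
use zksync_concurrency::{ctx, time};

async fn poll_until<T, F, Fut>(ctx: &ctx::Ctx, mut fetch: F) -> ctx::Result<T>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = ctx::Result<Option<T>>>,
{
    const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50);
    loop {
        if let Some(value) = fetch().await? {
            return Ok(value);
        }
        // Returns an error once `ctx` is cancelled, which ends the loop.
        ctx.sleep(POLL_INTERVAL).await?;
    }
}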
+use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, time}; +use zksync_consensus_roles::validator; +use zksync_consensus_storage as storage; + +use super::{BlockStore, CtxStorage}; + +impl BlockStore { + /// Waits for the `number` miniblock to have a certificate. + pub async fn wait_for_certificate( + &self, + ctx: &ctx::Ctx, + number: validator::BlockNumber, + ) -> ctx::Result<()> { + const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); + loop { + let mut storage = CtxStorage::access(ctx, &self.inner.pool) + .await + .wrap("access()")?; + if storage.certificate(ctx, number).await?.is_some() { + return Ok(()); + } + ctx.sleep(POLL_INTERVAL).await?; + } + } + + /// Waits for `want_last` block to have certificate, then fetches all miniblocks with certificates + /// and verifies them. + pub async fn wait_for_blocks_and_verify( + &self, + ctx: &ctx::Ctx, + validators: &validator::ValidatorSet, + want_last: validator::BlockNumber, + ) -> ctx::Result> { + self.wait_for_certificate(ctx, want_last).await?; + let blocks = storage::testonly::dump(ctx, self).await; + let got_last = blocks.last().context("empty store")?.header().number; + assert_eq!(got_last, want_last); + for block in &blocks { + block + .validate(validators, 1) + .context(block.header().number)?; + } + Ok(blocks) + } +} diff --git a/core/lib/zksync_core/src/consensus/testonly.rs b/core/lib/zksync_core/src/consensus/testonly.rs index b32e495eef7..18908d91b0e 100644 --- a/core/lib/zksync_core/src/consensus/testonly.rs +++ b/core/lib/zksync_core/src/consensus/testonly.rs @@ -1,6 +1,7 @@ +//! Utilities for testing the consensus module. use anyhow::Context as _; use rand::Rng; -use zksync_concurrency::{ctx, scope, sync, time}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time}; use zksync_consensus_roles::validator; use zksync_contracts::{BaseSystemContractsHashes, SystemContractCode}; use zksync_dal::ConnectionPool; @@ -10,6 +11,10 @@ use zksync_types::{ }; use crate::{ + consensus::{ + storage::{BlockStore, CtxStorage}, + Store, + }, genesis::{ensure_genesis_state, GenesisParams}, state_keeper::{ seal_criteria::NoopSealer, tests::MockBatchExecutorBuilder, MiniblockSealer, @@ -75,7 +80,6 @@ impl MockMainNodeClient { virtual_blocks: Some(!is_fictive as u32), hash: Some(miniblock_hash), protocol_version: ProtocolVersionId::latest(), - consensus: None, } }); @@ -134,73 +138,113 @@ impl MainNodeClient for MockMainNodeClient { } } -pub(crate) struct StateKeeperHandle { - next_batch: L1BatchNumber, - next_block: MiniblockNumber, - next_timestamp: u64, +/// Fake StateKeeper for tests. +pub(super) struct StateKeeper { + // Batch of the last_block. + last_batch: L1BatchNumber, + last_block: MiniblockNumber, + // timestamp of the last block. + last_timestamp: u64, batch_sealed: bool, fee_per_gas: u64, gas_per_pubdata: u32, operator_address: Address, - actions_sender: ActionQueueSender, + pub(super) actions_sender: ActionQueueSender, + pub(super) pool: ConnectionPool, } -pub(crate) struct StateKeeperRunner { +/// Fake StateKeeper task to be executed in the background. +pub(super) struct StateKeeperRunner { actions_queue: ActionQueue, operator_address: Address, + pool: ConnectionPool, } -impl StateKeeperHandle { - pub fn new(operator_address: Address) -> (Self, StateKeeperRunner) { +impl StateKeeper { + /// Constructs and initializes a new `StateKeeper`. + /// Caller has to run `StateKeeperRunner.run()` task in the background. 
+ pub async fn new( + pool: ConnectionPool, + operator_address: Address, + ) -> anyhow::Result<(Self, StateKeeperRunner)> { + // ensure genesis + let mut storage = pool.access_storage().await.context("access_storage()")?; + if storage + .blocks_dal() + .is_genesis_needed() + .await + .context("is_genesis_needed()")? + { + let mut params = GenesisParams::mock(); + params.first_validator = operator_address; + ensure_genesis_state(&mut storage, L2ChainId::default(), ¶ms) + .await + .context("ensure_genesis_state()")?; + } + let last_batch = storage + .blocks_dal() + .get_newest_l1_batch_header() + .await + .context("get_newest_l1_batch_header()")?; + let pending_batch = storage + .blocks_dal() + .pending_batch_exists() + .await + .context("pending_batch_exists()")?; let (actions_sender, actions_queue) = ActionQueue::new(); - ( + Ok(( Self { - next_batch: L1BatchNumber(1), - next_block: MiniblockNumber(1), - next_timestamp: 124356, - batch_sealed: true, + last_batch: last_batch.number + if pending_batch { 1 } else { 0 }, + last_block: storage + .blocks_dal() + .get_sealed_miniblock_number() + .await + .context("get_sealed_miniblock_number()")?, + last_timestamp: last_batch.timestamp, + batch_sealed: !pending_batch, fee_per_gas: 10, gas_per_pubdata: 100, operator_address, actions_sender, + pool: pool.clone(), }, StateKeeperRunner { operator_address, actions_queue, + pool: pool.clone(), }, - ) + )) } fn open_block(&mut self) -> SyncAction { if self.batch_sealed { - let action = SyncAction::OpenBatch { - number: self.next_batch, - timestamp: self.next_timestamp, + self.last_batch += 1; + self.last_block += 1; + self.last_timestamp += 5; + self.batch_sealed = false; + SyncAction::OpenBatch { + number: self.last_batch, + timestamp: self.last_timestamp, l1_gas_price: 2, l2_fair_gas_price: 3, operator_address: self.operator_address, protocol_version: ProtocolVersionId::latest(), - first_miniblock_info: (self.next_block, 1), - }; - self.next_batch += 1; - self.next_block += 1; - self.next_timestamp += 5; - self.batch_sealed = false; - action + first_miniblock_info: (self.last_block, 1), + } } else { - let action = SyncAction::Miniblock { - number: self.next_block, - timestamp: self.next_timestamp, + self.last_block += 1; + self.last_timestamp += 2; + SyncAction::Miniblock { + number: self.last_block, + timestamp: self.last_timestamp, virtual_blocks: 0, - }; - self.next_block += 1; - self.next_timestamp += 2; - action + } } } + /// Pushes a new miniblock with `transactions` transactions to the `StateKeeper`. pub async fn push_block(&mut self, transactions: usize) { assert!(transactions > 0); let mut actions = vec![self.open_block()]; @@ -208,21 +252,20 @@ impl StateKeeperHandle { let tx = create_l2_transaction(self.fee_per_gas, self.gas_per_pubdata); actions.push(SyncAction::Tx(Box::new(tx.into()))); } - actions.push(SyncAction::SealMiniblock(None)); + actions.push(SyncAction::SealMiniblock); self.actions_sender.push_actions(actions).await; } + /// Pushes `SealBatch` command to the `StateKeeper`. pub async fn seal_batch(&mut self) { // Each batch ends with an empty block (aka fictive block). let mut actions = vec![self.open_block()]; - actions.push(SyncAction::SealBatch { - virtual_blocks: 0, - consensus: None, - }); + actions.push(SyncAction::SealBatch { virtual_blocks: 0 }); self.actions_sender.push_actions(actions).await; self.batch_sealed = true; } + /// Pushes `count` random miniblocks to the StateKeeper. 
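One behavioural change in the test `StateKeeper` above: instead of always starting at batch 1 / block 1, it now resumes from whatever the (possibly pre-seeded) test database contains, which is what makes the `TestTemplate` snapshots in the tests below work. The batch-counter part of that rule, mirrored standalone for clarity (`newest_batch` stands for `get_newest_l1_batch_header().number` and `pending_batch` for `pending_batch_exists()`):

fn resume_batch_counters(newest_batch: u32, pending_batch: bool) -> (u32, bool) {
    // If an unsealed batch is pending, the state keeper is already "inside" the next
    // batch, so `open_block` must emit a Miniblock action rather than an OpenBatch one.
    let last_batch = newest_batch + if pending_batch { 1 } else { 0 };
    let batch_sealed = !pending_batch;
    (last_batch, batch_sealed)
}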
pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) { for _ in 0..count { // 20% chance to seal an L1 batch. @@ -235,72 +278,47 @@ impl StateKeeperHandle { } } - // Wait for all pushed miniblocks to be produced. - pub async fn sync(&self, ctx: &ctx::Ctx, pool: &ConnectionPool) -> anyhow::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); - loop { - let mut storage = pool.access_storage().await.context("access_storage()")?; - let head = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .context("get_sealed_miniblock_number()")?; - if head.0 >= self.next_block.0 - 1 { - return Ok(()); - } - ctx.sleep(POLL_INTERVAL).await?; - } + /// Last block that has been pushed to the `StateKeeper` via `ActionQueue`. + /// It might NOT be present in storage yet. + pub fn last_block(&self) -> validator::BlockNumber { + validator::BlockNumber(self.last_block.0 as u64) } - // Wait for all pushed miniblocks to have consensus certificate. - pub async fn sync_consensus( - &self, - ctx: &ctx::Ctx, - pool: &ConnectionPool, - ) -> anyhow::Result<()> { + /// Creates a new `BlockStore` for the underlying `ConnectionPool`. + pub fn store(&self) -> BlockStore { + Store::new(self.pool.clone(), self.operator_address).into_block_store() + } + + // Wait for all pushed miniblocks to be produced. + pub async fn wait_for_miniblocks(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); loop { - let mut storage = pool.access_storage().await.context("access_storage()")?; - if let Some(head) = storage - .blocks_dal() - .get_last_miniblock_number_with_consensus_fields() + let mut storage = CtxStorage::access(ctx, &self.pool).await.wrap("access()")?; + if storage + .payload(ctx, self.last_block(), self.operator_address) .await - .context("get_last_miniblock_number_with_consensus_fields()")? + .wrap("storage.payload()")? + .is_some() { - if head.0 >= self.next_block.0 - 1 { - return Ok(()); - } + return Ok(()); } ctx.sleep(POLL_INTERVAL).await?; } } - - /// Validate consensus certificates for all expected miniblocks. - pub async fn validate_consensus( - &self, - ctx: &ctx::Ctx, - pool: &ConnectionPool, - genesis: validator::BlockNumber, - validators: &validator::ValidatorSet, - ) -> anyhow::Result<()> { - let mut storage = super::storage::storage(ctx, pool) - .await - .context("storage()")?; - for i in genesis.0..self.next_block.0 as u64 { - let block = storage - .fetch_block(ctx, validator::BlockNumber(i), self.operator_address) - .await? - .with_context(|| format!("missing block {i}"))?; - block.validate(validators, 1).unwrap(); - } - Ok(()) - } } -// Waits for L1 batches to be sealed and then populates them with mock metadata. -async fn run_mock_metadata_calculator(ctx: &ctx::Ctx, pool: ConnectionPool) -> anyhow::Result<()> { +/// Waits for L1 batches to be sealed and then populates them with mock metadata. +async fn run_mock_metadata_calculator(ctx: &ctx::Ctx, pool: &ConnectionPool) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); - let mut n = L1BatchNumber(1); + let mut n = { + let mut storage = pool.access_storage().await.context("access_storage()")?; + storage + .blocks_dal() + .get_last_l1_batch_number_with_metadata() + .await + .context("get_last_l1_batch_number_with_metadata()")? + .context("no L1 batches in Postgres")? 
+ }; while let Ok(()) = ctx.sleep(POLL_INTERVAL).await { let mut storage = pool.access_storage().await.context("access_storage()")?; let last = storage @@ -310,41 +328,29 @@ async fn run_mock_metadata_calculator(ctx: &ctx::Ctx, pool: ConnectionPool) -> a .context("get_sealed_l1_batch_number()")? .context("no L1 batches in Postgres")?; - while n <= last { + while n < last { + n += 1; let metadata = create_l1_batch_metadata(n.0); storage .blocks_dal() .save_l1_batch_metadata(n, &metadata, H256::zero(), false) .await .context("save_l1_batch_metadata()")?; - n += 1; } } Ok(()) } impl StateKeeperRunner { - pub async fn run(self, ctx: &ctx::Ctx, pool: &ConnectionPool) -> anyhow::Result<()> { + /// Executes the StateKeeper task. + pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { scope::run!(ctx, |ctx, s| async { - let mut storage = pool.access_storage().await.context("access_storage()")?; - // ensure genesis - if storage - .blocks_dal() - .is_genesis_needed() - .await - .context("is_genesis_needed()")? - { - let mut params = GenesisParams::mock(); - params.first_validator = self.operator_address; - ensure_genesis_state(&mut storage, L2ChainId::default(), ¶ms) - .await - .context("ensure_genesis_state()")?; - } let (stop_sender, stop_receiver) = sync::watch::channel(false); - let (miniblock_sealer, miniblock_sealer_handle) = MiniblockSealer::new(pool.clone(), 5); + let (miniblock_sealer, miniblock_sealer_handle) = + MiniblockSealer::new(self.pool.clone(), 5); let io = ExternalIO::new( miniblock_sealer_handle, - pool.clone(), + self.pool.clone(), self.actions_queue, SyncState::new(), Box::::default(), @@ -354,7 +360,7 @@ impl StateKeeperRunner { ) .await; s.spawn_bg(miniblock_sealer.run()); - s.spawn_bg(run_mock_metadata_calculator(ctx, pool.clone())); + s.spawn_bg(run_mock_metadata_calculator(ctx, &self.pool)); s.spawn_bg( ZkSyncStateKeeper::new( stop_receiver, diff --git a/core/lib/zksync_core/src/consensus/tests.rs b/core/lib/zksync_core/src/consensus/tests.rs index 25ac048ae62..d2f423e1d50 100644 --- a/core/lib/zksync_core/src/consensus/tests.rs +++ b/core/lib/zksync_core/src/consensus/tests.rs @@ -1,79 +1,324 @@ +use std::ops::Range; + +use tracing::Instrument as _; use zksync_concurrency::{ctx, scope}; -use zksync_consensus_executor::testonly::FullValidatorConfig; -use zksync_consensus_roles::validator; -use zksync_dal::ConnectionPool; +use zksync_consensus_executor::testonly::{connect_full_node, ValidatorNode}; +use zksync_consensus_storage as storage; +use zksync_consensus_storage::PersistentBlockStore as _; +use zksync_consensus_utils::no_copy::NoCopy; +use zksync_dal::{connection::TestTemplate, ConnectionPool}; use zksync_types::Address; use super::*; +use crate::consensus::storage::CtxStorage; -// In the current implementation, consensus certificates are created asynchronously -// for the miniblocks constructed by the StateKeeper. This means that consensus actor -// is effectively just back filling the consensus certificates for the miniblocks in storage. 
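The tests below share a common scaffold: abort-on-panic, an accelerated test clock (`AffineClock::new(10.)`, which appears to speed up ctx timers by that factor so the 50–100 ms poll loops above stay cheap), a throwaway Postgres pool, the fake `StateKeeper` from `testonly`, and background tasks spawned on a scope. Reduced to a skeleton, assuming it lives next to the tests in this module (a sketch of the shared shape, not an additional test in this diff):

#[tokio::test(flavor = "multi_thread")]
async fn consensus_test_skeleton() {
    zksync_concurrency::testonly::abort_on_panic();
    let ctx = &ctx::test_root(&ctx::AffineClock::new(10.));
    let rng = &mut ctx.rng();
    scope::run!(ctx, |ctx, s| async {
        use anyhow::Context as _;
        let pool = ConnectionPool::test_pool().await;
        let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?;
        s.spawn_bg(runner.run(ctx));
        sk.push_random_blocks(rng, 3).await;
        // Block until the fake state keeper has persisted everything we pushed.
        sk.wait_for_miniblocks(ctx).await.context("wait_for_miniblocks()")
    })
    .await
    .unwrap();
}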
-#[tokio::test(flavor = "multi_thread")] -async fn test_backfill() { - const OPERATOR_ADDRESS: Address = Address::repeat_byte(17); - const GENESIS_BLOCK: validator::BlockNumber = validator::BlockNumber(5); +const OPERATOR_ADDRESS: Address = Address::repeat_byte(17); + +async fn make_blocks( + ctx: &ctx::Ctx, + pool: &ConnectionPool, + mut range: Range, +) -> ctx::Result> { + let rng = &mut ctx.rng(); + let mut storage = CtxStorage::access(ctx, pool).await.wrap("access()")?; + let mut blocks: Vec = vec![]; + while !range.is_empty() { + let payload = storage + .payload(ctx, range.start, OPERATOR_ADDRESS) + .await + .wrap(range.start)? + .context("payload not found")? + .encode(); + let header = match blocks.last().as_ref() { + Some(parent) => validator::BlockHeader::new(parent.header(), payload.hash()), + None => validator::BlockHeader::genesis(payload.hash(), range.start), + }; + blocks.push(validator::FinalBlock { + payload, + justification: validator::testonly::make_justification( + rng, + &header, + validator::ProtocolVersion::EARLIEST, + ), + }); + range.start = range.start.next(); + } + Ok(blocks) +} +#[tokio::test(flavor = "multi_thread")] +async fn test_validator_block_store() { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test_pool().await; + // Fill storage with unsigned miniblocks. + // Fetch a suffix of blocks that we will generate (fake) certs for. + let want = scope::run!(ctx, |ctx, s| async { + // Start state keeper. + let (mut sk, runner) = testonly::StateKeeper::new(pool.clone(), OPERATOR_ADDRESS).await?; + s.spawn_bg(runner.run(ctx)); + sk.push_random_blocks(rng, 10).await; + sk.wait_for_miniblocks(ctx).await?; + let range = Range { + start: validator::BlockNumber(4), + end: sk.last_block(), + }; + make_blocks(ctx, &sk.pool, range) + .await + .context("make_blocks") + }) + .await + .unwrap(); + + // Insert blocks one by one and check the storage state. + for (i, block) in want.iter().enumerate() { + let store = Store::new(pool.clone(), OPERATOR_ADDRESS).into_block_store(); + store.store_next_block(ctx, block).await.unwrap(); + assert_eq!(want[..i + 1], storage::testonly::dump(ctx, &store).await); + } +} + +// In the current implementation, consensus certificates are created asynchronously +// for the miniblocks constructed by the StateKeeper. This means that consensus actor +// is effectively just backfilling the consensus certificates for the miniblocks in storage. +#[tokio::test(flavor = "multi_thread")] +async fn test_validator() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + scope::run!(ctx, |ctx, s| async { // Start state keeper. - let (mut sk, sk_runner) = testonly::StateKeeperHandle::new(OPERATOR_ADDRESS); - s.spawn_bg(sk_runner.run(ctx, &pool)); + let pool = ConnectionPool::test_pool().await; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + s.spawn_bg(runner.run(ctx)); // Populate storage with a bunch of blocks. - sk.push_random_blocks(rng, 10).await; - sk.sync(ctx, &pool).await.context("sk.sync(<1st phase>)")?; - - // Prepare genesis block for consensus. - let genesis_payload = { - let mut storage = storage::storage(ctx, &pool).await.context("storage()")?; - storage - .fetch_payload(ctx, GENESIS_BLOCK, OPERATOR_ADDRESS) - .await - .context("fetch_payload()")? - .context("genesis block missing")? 
- }; - let cfg = FullValidatorConfig::for_single_validator( - &mut ctx.rng(), - genesis_payload.encode(), - GENESIS_BLOCK, - ); - let validators = cfg.node_config.validators.clone(); - - // Start consensus actor and wait for it to catch up. - let cfg = Config { - executor: cfg.node_config, - consensus: cfg.consensus_config, - node_key: cfg.node_key, - validator_key: cfg.validator_key, - operator_address: OPERATOR_ADDRESS, - }; - s.spawn_bg(cfg.run(ctx, pool.clone())); - sk.sync_consensus(ctx, &pool) + sk.push_random_blocks(rng, 5).await; + sk.wait_for_miniblocks(ctx) .await - .context("sk.sync_consensus(<1st phase>)")?; + .context("sk.wait_for_miniblocks(<1st phase>)")?; + + let cfg = ValidatorNode::for_single_validator(&mut ctx.rng()); + let validators = cfg.node.validators.clone(); + + // Restart consensus actor a couple times, making it process a bunch of blocks each time. + for iteration in 0..3 { + scope::run!(ctx, |ctx, s| async { + // Start consensus actor (in the first iteration it will select a genesis block and + // store a cert for it). + let cfg = MainNodeConfig { + executor: cfg.node.clone(), + validator: cfg.validator.clone(), + operator_address: OPERATOR_ADDRESS, + }; + s.spawn_bg(cfg.run(ctx, sk.pool.clone())); + sk.store() + .wait_for_certificate(ctx, sk.last_block()) + .await + .context("wait_for_certificate(<1st phase>)")?; + + // Generate couple more blocks and wait for consensus to catch up. + sk.push_random_blocks(rng, 3).await; + sk.store() + .wait_for_certificate(ctx, sk.last_block()) + .await + .context("wait_for_certificate(<2nd phase>)")?; - // Generate couple more blocks and wait for consensus to catch up. - sk.push_random_blocks(rng, 7).await; - sk.sync_consensus(ctx, &pool) + // Synchronously produce blocks one by one, and wait for consensus. + for _ in 0..2 { + sk.push_random_blocks(rng, 1).await; + sk.store() + .wait_for_certificate(ctx, sk.last_block()) + .await + .context("wait_for_certificate(<3rd phase>)")?; + } + + sk.store() + .wait_for_blocks_and_verify(ctx, &validators, sk.last_block()) + .await + .context("wait_for_blocks_and_verify()")?; + Ok(()) + }) .await - .context("sk.sync_consensus(<2nd phase>)")?; + .context(iteration)?; + } + Ok(()) + }) + .await + .unwrap(); +} + +// Test running a validator node and a couple of full nodes (aka fetchers). +// Validator is producing signed blocks and fetchers are expected to fetch +// them directly or indirectly. +#[tokio::test(flavor = "multi_thread")] +async fn test_fetcher() { + const FETCHERS: usize = 2; + + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + + // topology: + // validator <-> fetcher <-> fetcher <-> ... + let cfg = ValidatorNode::for_single_validator(rng); + let validators = cfg.node.validators.clone(); + let mut cfg = MainNodeConfig { + executor: cfg.node, + validator: cfg.validator, + operator_address: OPERATOR_ADDRESS, + }; + let mut fetcher_cfgs = vec![connect_full_node(rng, &mut cfg.executor)]; + while fetcher_cfgs.len() < FETCHERS { + let cfg = connect_full_node(rng, fetcher_cfgs.last_mut().unwrap()); + fetcher_cfgs.push(cfg); + } + let fetcher_cfgs: Vec<_> = fetcher_cfgs + .into_iter() + .map(|executor| FetcherConfig { + executor, + operator_address: OPERATOR_ADDRESS, + }) + .collect(); - // Synchronously produce blocks one by one, and wait for consensus. 
- for _ in 0..5 { - sk.push_random_blocks(rng, 1).await; - sk.sync_consensus(ctx, &pool) + // Create an initial database snapshot, which contains a cert for genesis block. + let pool = scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test_pool().await; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(cfg.clone().run(ctx, sk.pool.clone())); + sk.push_random_blocks(rng, 5).await; + sk.store() + .wait_for_certificate(ctx, sk.last_block()) + .await?; + Ok(sk.pool) + }) + .await + .unwrap(); + let template = TestTemplate::freeze(pool).await.unwrap(); + + // Run validator and fetchers in parallel. + scope::run!(ctx, |ctx, s| async { + // Run validator. + let pool = template.create_db().await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("validator")) .await - .context("sk.sync_consensus(<3rd phase>)")?; + .context("validator") + }); + s.spawn_bg(cfg.run(ctx, validator.pool.clone())); + + // Run fetchers. + let mut fetchers = vec![]; + for (i, cfg) in fetcher_cfgs.into_iter().enumerate() { + let i = NoCopy::from(i); + let pool = template.create_db().await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + fetchers.push(fetcher.store()); + s.spawn_bg(async { + let i = i; + runner + .run(ctx) + .instrument(tracing::info_span!("fetcher", i = *i)) + .await + .with_context(|| format!("fetcher{}", *i)) + }); + s.spawn_bg(cfg.run(ctx, fetcher.pool, fetcher.actions_sender)); } - sk.validate_consensus(ctx, &pool, GENESIS_BLOCK, &validators) - .await - .context("sk.validate_consensus()")?; + // Make validator produce blocks and wait for fetchers to get them. + validator.push_random_blocks(rng, 5).await; + let want_last = validator.last_block(); + let want = validator + .store() + .wait_for_blocks_and_verify(ctx, &validators, want_last) + .await?; + for fetcher in &fetchers { + assert_eq!( + want, + fetcher + .wait_for_blocks_and_verify(ctx, &validators, want_last) + .await? + ); + } + Ok(()) + }) + .await + .unwrap(); +} + +// Test fetcher backfilling missing certs. +#[tokio::test(flavor = "multi_thread")] +async fn test_fetcher_backfill_certs() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + + let cfg = ValidatorNode::for_single_validator(rng); + let mut cfg = MainNodeConfig { + executor: cfg.node, + validator: cfg.validator, + operator_address: OPERATOR_ADDRESS, + }; + let fetcher_cfg = FetcherConfig { + executor: connect_full_node(rng, &mut cfg.executor), + operator_address: OPERATOR_ADDRESS, + }; + + // Create an initial database snapshot, which contains some blocks: some with certs, some + // without. + let pool = scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test_pool().await; + let (mut sk, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + s.spawn_bg(runner.run(ctx)); + + // Some blocks with certs. + scope::run!(ctx, |ctx, s| async { + s.spawn_bg(cfg.clone().run(ctx, sk.pool.clone())); + sk.push_random_blocks(rng, 5).await; + sk.store() + .wait_for_certificate(ctx, sk.last_block()) + .await?; + Ok(()) + }) + .await?; + + // Some blocks without certs. 
+ sk.push_random_blocks(rng, 5).await; + sk.wait_for_miniblocks(ctx).await?; + Ok(sk.pool) + }) + .await + .unwrap(); + let template = TestTemplate::freeze(pool).await.unwrap(); + + // Run validator and fetchers in parallel. + scope::run!(ctx, |ctx, s| async { + // Run validator. + let pool = template.create_db().await?; + let (mut validator, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(cfg.run(ctx, validator.pool.clone())); + + // Run fetcher. + let pool = template.create_db().await?; + let (fetcher, runner) = testonly::StateKeeper::new(pool, OPERATOR_ADDRESS).await?; + let fetcher_store = fetcher.store(); + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(fetcher_cfg.run(ctx, fetcher.pool, fetcher.actions_sender)); + + // Make validator produce new blocks and + // wait for the fetcher to get both the missing certs and the new blocks. + validator.push_random_blocks(rng, 5).await; + fetcher_store + .wait_for_certificate(ctx, validator.last_block()) + .await?; Ok(()) }) .await diff --git a/core/lib/zksync_core/src/lib.rs b/core/lib/zksync_core/src/lib.rs index 7b5a9f933ee..2c2e93705f7 100644 --- a/core/lib/zksync_core/src/lib.rs +++ b/core/lib/zksync_core/src/lib.rs @@ -77,7 +77,7 @@ use crate::{ pub mod api_server; pub mod basic_witness_input_producer; pub mod block_reverter; -mod consensus; +pub mod consensus; pub mod consistency_checker; pub mod eth_sender; pub mod eth_watch; diff --git a/core/lib/zksync_core/src/state_keeper/io/mempool.rs b/core/lib/zksync_core/src/state_keeper/io/mempool.rs index eab6c5d41c5..d9644577a08 100644 --- a/core/lib/zksync_core/src/state_keeper/io/mempool.rs +++ b/core/lib/zksync_core/src/state_keeper/io/mempool.rs @@ -266,7 +266,6 @@ impl StateKeeperIO for MempoolIO { self.current_l1_batch_number, self.current_miniblock_number, self.l2_erc20_bridge_addr, - None, false, ); self.miniblock_sealer_handle.submit(command).await; @@ -317,7 +316,6 @@ impl StateKeeperIO for MempoolIO { l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, - None, ) .await; self.current_miniblock_number += 1; // Due to fictive miniblock being sealed. diff --git a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs index c0fc6b96a0f..6720ab3fa3e 100644 --- a/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs +++ b/core/lib/zksync_core/src/state_keeper/io/seal_logic.rs @@ -11,7 +11,7 @@ use multivm::{ interface::{FinishedL1Batch, L1BatchEnv}, utils::get_batch_base_fee, }; -use zksync_dal::{blocks_dal::ConsensusBlockFields, StorageProcessor}; +use zksync_dal::StorageProcessor; use zksync_system_constants::ACCOUNT_CODE_STORAGE_ADDRESS; use zksync_types::{ block::{unpack_block_info, L1BatchHeader, MiniblockHeader}, @@ -55,7 +55,6 @@ impl UpdatesManager { l1_batch_env: &L1BatchEnv, finished_batch: FinishedL1Batch, l2_erc20_bridge_addr: Address, - consensus: Option, ) { let started_at = Instant::now(); let progress = L1_BATCH_METRICS.start(L1BatchSealStage::VmFinalization); @@ -77,7 +76,6 @@ impl UpdatesManager { l1_batch_env.number, current_miniblock_number, l2_erc20_bridge_addr, - consensus, false, // fictive miniblocks don't have txs, so it's fine to pass `false` here. 
); miniblock_command.seal_inner(&mut transaction, true).await; @@ -452,18 +450,6 @@ impl MiniblockSealCommand { .await; progress.observe(user_l2_to_l1_log_count); - let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::InsertConsensus, is_fictive); - // We want to add miniblock consensus fields atomically with the miniblock data so that we - // don't need to deal with corner cases (e.g., a miniblock w/o consensus fields). - if let Some(consensus) = &self.consensus { - transaction - .blocks_dal() - .set_miniblock_consensus_fields(self.miniblock_number, consensus) - .await - .unwrap(); - } - progress.observe(None); - let progress = MINIBLOCK_METRICS.start(MiniblockSealStage::CommitMiniblock, is_fictive); let current_l2_virtual_block_info = transaction .storage_dal() diff --git a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs index 13dd8261152..52a5f26dcfd 100644 --- a/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/io/tests/mod.rs @@ -239,7 +239,6 @@ async fn processing_storage_logs_when_sealing_miniblock() { base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), - consensus: None, pre_insert_txs: false, }; let mut conn = connection_pool @@ -316,7 +315,6 @@ async fn processing_events_when_sealing_miniblock() { base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), l2_erc20_bridge_addr: Address::default(), - consensus: None, pre_insert_txs: false, }; let mut conn = pool.access_storage_tagged("state_keeper").await.unwrap(); @@ -431,7 +429,6 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(1), MiniblockNumber(1), Address::default(), - None, false, ); sealer_handle.submit(seal_command).await; @@ -441,7 +438,6 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(1), MiniblockNumber(2), Address::default(), - None, false, ); { @@ -471,7 +467,6 @@ async fn miniblock_sealer_handle_blocking() { L1BatchNumber(2), MiniblockNumber(3), Address::default(), - None, false, ); sealer_handle.submit(seal_command).await; @@ -492,7 +487,6 @@ async fn miniblock_sealer_handle_parallel_processing() { L1BatchNumber(1), MiniblockNumber(i), Address::default(), - None, false, ); sealer_handle.submit(seal_command).await; diff --git a/core/lib/zksync_core/src/state_keeper/metrics.rs b/core/lib/zksync_core/src/state_keeper/metrics.rs index 8daccb5a3aa..8f1b3319df5 100644 --- a/core/lib/zksync_core/src/state_keeper/metrics.rs +++ b/core/lib/zksync_core/src/state_keeper/metrics.rs @@ -248,7 +248,6 @@ pub(super) enum MiniblockSealStage { InsertEvents, ExtractL2ToL1Logs, InsertL2ToL1Logs, - InsertConsensus, CommitMiniblock, } diff --git a/core/lib/zksync_core/src/state_keeper/updates/mod.rs b/core/lib/zksync_core/src/state_keeper/updates/mod.rs index a3fd3fef0ab..c17eebab114 100644 --- a/core/lib/zksync_core/src/state_keeper/updates/mod.rs +++ b/core/lib/zksync_core/src/state_keeper/updates/mod.rs @@ -3,7 +3,6 @@ use multivm::{ utils::get_batch_base_fee, }; use zksync_contracts::BaseSystemContractsHashes; -use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_types::{ block::BlockGasCount, storage_writes_deduplicator::StorageWritesDeduplicator, tx::tx_execution_info::ExecutionMetrics, vm_trace::Call, Address, L1BatchNumber, @@ -82,7 +81,6 @@ impl UpdatesManager { l1_batch_number: L1BatchNumber, 
miniblock_number: MiniblockNumber, l2_erc20_bridge_addr: Address, - consensus: Option, pre_insert_txs: bool, ) -> MiniblockSealCommand { MiniblockSealCommand { @@ -96,7 +94,6 @@ impl UpdatesManager { base_system_contracts_hashes: self.base_system_contract_hashes, protocol_version: Some(self.protocol_version), l2_erc20_bridge_addr, - consensus, pre_insert_txs, } } @@ -183,7 +180,6 @@ pub(crate) struct MiniblockSealCommand { pub base_system_contracts_hashes: BaseSystemContractsHashes, pub protocol_version: Option, pub l2_erc20_bridge_addr: Address, - pub consensus: Option, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into miniblocks. diff --git a/core/lib/zksync_core/src/sync_layer/external_io.rs b/core/lib/zksync_core/src/sync_layer/external_io.rs index 0c5440a38ed..b082d4cbc79 100644 --- a/core/lib/zksync_core/src/sync_layer/external_io.rs +++ b/core/lib/zksync_core/src/sync_layer/external_io.rs @@ -225,10 +225,7 @@ impl IoSealCriteria for ExternalIO { } fn should_seal_miniblock(&mut self, _manager: &UpdatesManager) -> bool { - matches!( - self.actions.peek_action(), - Some(SyncAction::SealMiniblock(_)) - ) + matches!(self.actions.peek_action(), Some(SyncAction::SealMiniblock)) } } @@ -452,7 +449,7 @@ impl StateKeeperIO for ExternalIO { async fn seal_miniblock(&mut self, updates_manager: &UpdatesManager) { let action = self.actions.pop_action(); - let Some(SyncAction::SealMiniblock(consensus)) = action else { + let Some(SyncAction::SealMiniblock) = action else { panic!("State keeper requested to seal miniblock, but the next action is {action:?}"); }; @@ -461,7 +458,6 @@ impl StateKeeperIO for ExternalIO { self.current_l1_batch_number, self.current_miniblock_number, self.l2_erc20_bridge_addr, - consensus, true, ); self.miniblock_sealer_handle.submit(command).await; @@ -481,7 +477,7 @@ impl StateKeeperIO for ExternalIO { finished_batch: FinishedL1Batch, ) -> anyhow::Result<()> { let action = self.actions.pop_action(); - let Some(SyncAction::SealBatch { consensus, .. }) = action else { + let Some(SyncAction::SealBatch { .. }) = action else { anyhow::bail!( "State keeper requested to seal the batch, but the next action is {action:?}" ); @@ -499,7 +495,6 @@ impl StateKeeperIO for ExternalIO { l1_batch_env, finished_batch, self.l2_erc20_bridge_addr, - consensus, ) .await; transaction.commit().await.unwrap(); diff --git a/core/lib/zksync_core/src/sync_layer/fetcher.rs b/core/lib/zksync_core/src/sync_layer/fetcher.rs index 1c52864b0d1..ca7a483ce83 100644 --- a/core/lib/zksync_core/src/sync_layer/fetcher.rs +++ b/core/lib/zksync_core/src/sync_layer/fetcher.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{blocks_dal::ConsensusBlockFields, StorageProcessor}; +use zksync_dal::StorageProcessor; use zksync_types::{ api::en::SyncBlock, block::MiniblockHasher, Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, H256, @@ -22,7 +22,7 @@ const RETRY_DELAY_INTERVAL: Duration = Duration::from_secs(5); /// Common denominator for blocks fetched by an external node. 
#[derive(Debug)] -pub(super) struct FetchedBlock { +pub(crate) struct FetchedBlock { pub number: MiniblockNumber, pub l1_batch_number: L1BatchNumber, pub last_in_batch: bool, @@ -34,7 +34,6 @@ pub(super) struct FetchedBlock { pub virtual_blocks: u32, pub operator_address: Address, pub transactions: Vec, - pub consensus: Option, } impl FetchedBlock { @@ -65,12 +64,6 @@ impl TryFrom for FetchedBlock { transactions: block .transactions .context("Transactions are always requested")?, - consensus: block - .consensus - .as_ref() - .map(ConsensusBlockFields::decode) - .transpose() - .context("ConsensusBlockFields::decode()")?, }) } } @@ -79,7 +72,7 @@ impl TryFrom for FetchedBlock { #[derive(Debug)] pub struct FetcherCursor { // Fields are public for testing purposes. - pub(super) next_miniblock: MiniblockNumber, + pub(crate) next_miniblock: MiniblockNumber, pub(super) prev_miniblock_hash: H256, pub(super) l1_batch: L1BatchNumber, } @@ -126,7 +119,7 @@ impl FetcherCursor { }) } - pub(super) fn advance(&mut self, block: FetchedBlock) -> Vec { + pub(crate) fn advance(&mut self, block: FetchedBlock) -> Vec { assert_eq!(block.number, self.next_miniblock); let local_block_hash = block.compute_hash(self.prev_miniblock_hash); if let Some(reference_hash) = block.reference_hash { @@ -189,10 +182,9 @@ impl FetcherCursor { new_actions.push(SyncAction::SealBatch { // `block.virtual_blocks` can be `None` only for old VM versions where it's not used, so it's fine to provide any number. virtual_blocks: block.virtual_blocks, - consensus: block.consensus, }); } else { - new_actions.push(SyncAction::SealMiniblock(block.consensus)); + new_actions.push(SyncAction::SealMiniblock); } self.next_miniblock += 1; self.prev_miniblock_hash = local_block_hash; diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs deleted file mode 100644 index 4c6a1a73f03..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/buffered/mod.rs +++ /dev/null @@ -1,351 +0,0 @@ -//! Buffered [`BlockStore`] implementation. - -use std::{collections::BTreeMap, ops, time::Instant}; - -use async_trait::async_trait; -#[cfg(test)] -use zksync_concurrency::ctx::channel; -use zksync_concurrency::{ - ctx, scope, - sync::{self, watch, Mutex}, -}; -use zksync_consensus_roles::validator::{BlockNumber, FinalBlock, Payload}; -use zksync_consensus_storage::{BlockStore, WriteBlockStore}; - -use super::{ - metrics::{BlockResponseKind, METRICS}, - utils::MissingBlockNumbers, -}; - -#[cfg(test)] -mod tests; - -/// [`BlockStore`] variation that upholds additional invariants as to how blocks are processed. -/// -/// The invariants are as follows: -/// -/// - Stored blocks always have contiguous numbers; there are no gaps. -/// - Blocks can be scheduled to be added using [`Self::schedule_next_block()`] only. New blocks do not -/// appear in the store otherwise. -#[async_trait] -pub(super) trait ContiguousBlockStore: BlockStore { - /// Schedules a block to be added to the store. Unlike [`WriteBlockStore::put_block()`], - /// there is no expectation that the block is added to the store *immediately*. It's - /// expected that it will be added to the store eventually, which will be signaled via - /// a subscriber returned from [`BlockStore::subscribe_to_block_writes()`]. 
- /// - /// [`Buffered`] guarantees that this method will only ever be called: - /// - /// - with the next block (i.e., one immediately after [`BlockStore::head_block()`]) - /// - sequentially (i.e., multiple blocks cannot be scheduled at once) - async fn schedule_next_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()>; -} - -/// In-memory buffer or [`FinalBlock`]s received from peers, but not executed and persisted locally yet. -/// -/// Unlike with executed / persisted blocks, there may be gaps between blocks in the buffer. -/// These blocks are shared with peers using the gossip network, but are not persisted and lost -/// on the node restart. -#[derive(Debug)] -struct BlockBuffer { - store_block_number: BlockNumber, - blocks: BTreeMap, -} - -impl BlockBuffer { - fn new(store_block_number: BlockNumber) -> Self { - Self { - store_block_number, - blocks: BTreeMap::new(), - } - } - - fn head_block(&self) -> Option { - self.blocks.values().next_back().cloned() - } - - #[tracing::instrument(level = "trace", skip(self))] - fn set_store_block(&mut self, store_block_number: BlockNumber) { - assert!( - store_block_number > self.store_block_number, - "`ContiguousBlockStore` invariant broken: unexpected new head block number" - ); - - self.store_block_number = store_block_number; - let old_len = self.blocks.len(); - self.blocks = self.blocks.split_off(&store_block_number.next()); - // ^ Removes all entries up to and including `store_block_number` - tracing::debug!("Removed {} blocks from buffer", old_len - self.blocks.len()); - METRICS.buffer_size.set(self.blocks.len()); - } - - fn last_contiguous_block_number(&self) -> BlockNumber { - // By design, blocks in the underlying store are always contiguous. - let mut last_number = self.store_block_number; - for &number in self.blocks.keys() { - if number > last_number.next() { - return last_number; - } - last_number = number; - } - last_number - } - - fn missing_block_numbers(&self, mut range: ops::Range) -> Vec { - // Clamp the range start so we don't produce extra missing blocks. - range.start = range.start.max(self.store_block_number.next()); - if range.is_empty() { - return vec![]; // Return early to not trigger panic in `BTreeMap::range()` - } - - let keys = self.blocks.range(range.clone()).map(|(&num, _)| num); - MissingBlockNumbers::new(range, keys).collect() - } - - fn put_block(&mut self, block: FinalBlock) { - let block_number = block.header.number; - assert!(block_number > self.store_block_number); - // ^ Must be checked previously - self.blocks.insert(block_number, block); - tracing::debug!(%block_number, "Inserted block in buffer"); - METRICS.buffer_size.set(self.blocks.len()); - } -} - -/// Events emitted by [`Buffered`] storage. -#[cfg(test)] -#[derive(Debug)] -pub(super) enum BufferedStorageEvent { - /// Update was received from the underlying storage. - UpdateReceived(BlockNumber), -} - -/// [`BlockStore`] with an in-memory buffer for pending blocks. -/// -/// # Data flow -/// -/// The store is plugged into the `SyncBlocks` actor, so that it can receive new blocks -/// from peers over the gossip network and to share blocks with peers. Received blocks are stored -/// in a [`BlockBuffer`]. The `SyncBlocks` actor doesn't guarantee that blocks are received in order, -/// so we have a background task that waits for successive blocks and feeds them to -/// the underlying storage ([`ContiguousBlockStore`]). 
The underlying storage executes and persists -/// blocks using the state keeper; see [`PostgresBlockStorage`](super::PostgresBlockStorage) for more details. -/// This logic is largely shared with the old syncing logic using JSON-RPC; the only differing part -/// is producing block data. -/// -/// Once a block is processed and persisted by the state keeper, it can be removed from the [`BlockBuffer`]; -/// we do this in another background task. Removing blocks from the buffer ensures that it doesn't -/// grow infinitely; it also allows to track syncing progress via metrics. -#[derive(Debug)] -pub(super) struct Buffered { - inner: T, - inner_subscriber: watch::Receiver, - block_writes_sender: watch::Sender, - buffer: Mutex, - #[cfg(test)] - events_sender: channel::UnboundedSender, -} - -impl Buffered { - /// Creates a new buffered storage. The buffer is initially empty. - pub fn new(store: T) -> Self { - let inner_subscriber = store.subscribe_to_block_writes(); - let store_block_number = *inner_subscriber.borrow(); - tracing::debug!( - store_block_number = store_block_number.0, - "Initialized buffer storage" - ); - Self { - inner: store, - inner_subscriber, - block_writes_sender: watch::channel(store_block_number).0, - buffer: Mutex::new(BlockBuffer::new(store_block_number)), - #[cfg(test)] - events_sender: channel::unbounded().0, - } - } - - #[cfg(test)] - fn set_events_sender(&mut self, sender: channel::UnboundedSender) { - self.events_sender = sender; - } - - pub(super) fn inner(&self) -> &T { - &self.inner - } - - #[cfg(test)] - async fn buffer_len(&self) -> usize { - self.buffer.lock().await.blocks.len() - } - - /// Listens to the updates in the underlying storage. - #[tracing::instrument(level = "trace", skip_all)] - async fn listen_to_updates(&self, ctx: &ctx::Ctx) { - let mut subscriber = self.inner_subscriber.clone(); - loop { - let store_block_number = { - let Ok(number) = sync::changed(ctx, &mut subscriber).await else { - return; // Do not propagate cancellation errors - }; - *number - }; - tracing::debug!( - store_block_number = store_block_number.0, - "Underlying block number updated" - ); - - let Ok(mut buffer) = sync::lock(ctx, &self.buffer).await else { - return; // Do not propagate cancellation errors - }; - buffer.set_store_block(store_block_number); - #[cfg(test)] - self.events_sender - .send(BufferedStorageEvent::UpdateReceived(store_block_number)); - } - } - - /// Schedules blocks in the underlying store as they are pushed to this store. - #[tracing::instrument(level = "trace", skip_all, err)] - async fn schedule_blocks(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { - let mut blocks_subscriber = self.block_writes_sender.subscribe(); - - let mut next_scheduled_block_number = { - let Ok(buffer) = sync::lock(ctx, &self.buffer).await else { - return Ok(()); // Do not propagate cancellation errors - }; - buffer.store_block_number.next() - }; - loop { - loop { - let block = match self.buffered_block(ctx, next_scheduled_block_number).await { - Err(ctx::Canceled) => return Ok(()), // Do not propagate cancellation errors - Ok(None) => break, - Ok(Some(block)) => block, - }; - self.inner.schedule_next_block(ctx, &block).await?; - next_scheduled_block_number = next_scheduled_block_number.next(); - } - // Wait until some more blocks are pushed into the buffer. 
- let Ok(number) = sync::changed(ctx, &mut blocks_subscriber).await else { - return Ok(()); // Do not propagate cancellation errors - }; - tracing::debug!(block_number = number.0, "Received new block"); - } - } - - async fn buffered_block( - &self, - ctx: &ctx::Ctx, - number: BlockNumber, - ) -> ctx::OrCanceled> { - Ok(sync::lock(ctx, &self.buffer) - .await? - .blocks - .get(&number) - .cloned()) - } - - /// Runs background tasks for this store. This method **must** be spawned as a background task - /// which should be running as long at the [`Buffered`] is in use; otherwise, it will function incorrectly. - pub async fn run_background_tasks(&self, ctx: &ctx::Ctx) -> ctx::Result<()> { - scope::run!(ctx, |ctx, s| { - s.spawn(async { - self.listen_to_updates(ctx).await; - Ok(()) - }); - self.schedule_blocks(ctx) - }) - .await - } -} - -#[async_trait] -impl BlockStore for Buffered { - async fn head_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - let buffered_head_block = sync::lock(ctx, &self.buffer).await?.head_block(); - if let Some(block) = buffered_head_block { - return Ok(block); - } - self.inner.head_block(ctx).await - } - - async fn first_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - // First block is always situated in the underlying store - self.inner.first_block(ctx).await - } - - async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> ctx::Result { - Ok(sync::lock(ctx, &self.buffer) - .await? - .last_contiguous_block_number()) - } - - async fn block(&self, ctx: &ctx::Ctx, number: BlockNumber) -> ctx::Result> { - let started_at = Instant::now(); - { - let buffer = sync::lock(ctx, &self.buffer).await?; - if number > buffer.store_block_number { - let block = buffer.blocks.get(&number).cloned(); - METRICS.get_block_latency[&BlockResponseKind::InMemory] - .observe(started_at.elapsed()); - return Ok(block); - } - } - let block = self.inner.block(ctx, number).await?; - METRICS.get_block_latency[&BlockResponseKind::Persisted].observe(started_at.elapsed()); - Ok(block) - } - - async fn missing_block_numbers( - &self, - ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { - // By design, the underlying store has no missing blocks. - Ok(sync::lock(ctx, &self.buffer) - .await? - .missing_block_numbers(range)) - } - - fn subscribe_to_block_writes(&self) -> watch::Receiver { - self.block_writes_sender.subscribe() - } -} - -#[async_trait] -impl WriteBlockStore for Buffered { - /// Verify that `payload` is a correct proposal for the block `block_number`. - async fn verify_payload( - &self, - _ctx: &ctx::Ctx, - _block_number: BlockNumber, - _payload: &Payload, - ) -> ctx::Result<()> { - // This is storage for non-validator nodes (aka full nodes), - // so `verify_payload()` won't be called. - // Still, it probably would be better to either - // * move `verify_payload()` to `BlockStore`, so that Buffered can just forward the call - // * create another separate trait for `verify_payload`. - // It will be clear what needs to be done when we implement multi-validator consensus for - // zksync-era. 
- unimplemented!() - } - - async fn put_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { - let buffer_block_latency = METRICS.buffer_block_latency.start(); - { - let mut buffer = sync::lock(ctx, &self.buffer).await?; - let block_number = block.header.number; - if block_number <= buffer.store_block_number { - return Err(anyhow::anyhow!( - "Cannot replace a block #{block_number} since it is already present in the underlying storage", - ).into()); - } - buffer.put_block(block.clone()); - } - self.block_writes_sender.send_replace(block.header.number); - buffer_block_latency.observe(); - Ok(()) - } -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs deleted file mode 100644 index acf15416ab2..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/buffered/tests.rs +++ /dev/null @@ -1,284 +0,0 @@ -//! Tests for buffered storage. - -use std::{iter, ops}; - -use assert_matches::assert_matches; -use async_trait::async_trait; -use rand::{rngs::StdRng, seq::SliceRandom, Rng}; -use test_casing::test_casing; -use zksync_concurrency::{ - ctx::{self, channel}, - scope, - sync::{self, watch}, - testonly::abort_on_panic, - time, -}; -use zksync_consensus_roles::validator::{BlockHeader, BlockNumber, FinalBlock, Payload}; -use zksync_consensus_storage::{BlockStore, InMemoryStorage, WriteBlockStore}; - -use super::*; - -fn init_store(rng: &mut impl Rng) -> (FinalBlock, InMemoryStorage) { - let payload = Payload(vec![]); - let genesis_block = FinalBlock { - header: BlockHeader::genesis(payload.hash(), BlockNumber(0)), - payload, - justification: rng.gen(), - }; - let block_store = InMemoryStorage::new(genesis_block.clone()); - (genesis_block, block_store) -} - -fn gen_blocks(rng: &mut impl Rng, genesis_block: FinalBlock, count: usize) -> Vec { - let blocks = iter::successors(Some(genesis_block), |parent| { - let payload = Payload(vec![]); - let header = BlockHeader { - parent: parent.header.hash(), - number: parent.header.number.next(), - payload: payload.hash(), - }; - Some(FinalBlock { - header, - payload, - justification: rng.gen(), - }) - }); - blocks.skip(1).take(count).collect() -} - -#[derive(Debug)] -struct MockContiguousStore { - inner: InMemoryStorage, - block_sender: channel::UnboundedSender, -} - -impl MockContiguousStore { - fn new(inner: InMemoryStorage) -> (Self, channel::UnboundedReceiver) { - let (block_sender, block_receiver) = channel::unbounded(); - let this = Self { - inner, - block_sender, - }; - (this, block_receiver) - } - - async fn run_updates( - &self, - ctx: &ctx::Ctx, - mut block_receiver: channel::UnboundedReceiver, - ) -> ctx::Result<()> { - let rng = &mut ctx.rng(); - while let Ok(block) = block_receiver.recv(ctx).await { - let head_block_number = self.head_block(ctx).await?.header.number; - assert_eq!(block.header.number, head_block_number.next()); - - let sleep_duration = time::Duration::milliseconds(rng.gen_range(0..5)); - ctx.sleep(sleep_duration).await?; - self.inner.put_block(ctx, &block).await?; - } - Ok(()) - } -} - -#[async_trait] -impl BlockStore for MockContiguousStore { - async fn head_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - self.inner.head_block(ctx).await - } - - async fn first_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - self.inner.first_block(ctx).await - } - - async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> ctx::Result { - self.inner.last_contiguous_block_number(ctx).await - } - - async fn block(&self, ctx: &ctx::Ctx, 
number: BlockNumber) -> ctx::Result> { - self.inner.block(ctx, number).await - } - - async fn missing_block_numbers( - &self, - ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { - self.inner.missing_block_numbers(ctx, range).await - } - - fn subscribe_to_block_writes(&self) -> watch::Receiver { - self.inner.subscribe_to_block_writes() - } -} - -#[async_trait] -impl ContiguousBlockStore for MockContiguousStore { - async fn schedule_next_block(&self, _ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { - tracing::trace!(block_number = block.header.number.0, "Scheduled next block"); - self.block_sender.send(block.clone()); - Ok(()) - } -} - -#[tracing::instrument(level = "trace", skip(shuffle_blocks))] -async fn test_buffered_storage( - initial_block_count: usize, - block_count: usize, - block_interval: time::Duration, - shuffle_blocks: impl FnOnce(&mut StdRng, &mut [FinalBlock]), -) { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - - let (genesis_block, block_store) = init_store(rng); - let mut initial_blocks = gen_blocks(rng, genesis_block.clone(), initial_block_count); - for block in &initial_blocks { - block_store.put_block(ctx, block).await.unwrap(); - } - initial_blocks.insert(0, genesis_block.clone()); - - let (block_store, block_receiver) = MockContiguousStore::new(block_store); - let mut buffered_store = Buffered::new(block_store); - let (events_sender, mut events_receiver) = channel::unbounded(); - buffered_store.set_events_sender(events_sender); - - // Check initial values returned by the store. - let last_initial_block = initial_blocks.last().unwrap().clone(); - assert_eq!( - buffered_store.head_block(ctx).await.unwrap(), - last_initial_block - ); - for block in &initial_blocks { - let block_result = buffered_store.block(ctx, block.header.number).await; - assert_eq!(block_result.unwrap().as_ref(), Some(block)); - } - let mut subscriber = buffered_store.subscribe_to_block_writes(); - assert_eq!( - *subscriber.borrow(), - BlockNumber(initial_block_count as u64) - ); - - let mut blocks = gen_blocks(rng, last_initial_block, block_count); - shuffle_blocks(rng, &mut blocks); - let last_block_number = BlockNumber((block_count + initial_block_count) as u64); - - scope::run!(ctx, |ctx, s| async { - s.spawn_bg(buffered_store.inner().run_updates(ctx, block_receiver)); - s.spawn_bg(buffered_store.run_background_tasks(ctx)); - - for (idx, block) in blocks.iter().enumerate() { - buffered_store.put_block(ctx, block).await?; - let new_block_number = *sync::changed(ctx, &mut subscriber).await?; - assert_eq!(new_block_number, block.header.number); - - // Check that all written blocks are immediately accessible. - for existing_block in initial_blocks.iter().chain(&blocks[0..=idx]) { - let number = existing_block.header.number; - assert_eq!( - buffered_store.block(ctx, number).await?.as_ref(), - Some(existing_block) - ); - } - assert_eq!(buffered_store.first_block(ctx).await?, genesis_block); - - let expected_head_block = blocks[0..=idx] - .iter() - .max_by_key(|block| block.header.number) - .unwrap(); - assert_eq!(buffered_store.head_block(ctx).await?, *expected_head_block); - - let expected_last_contiguous_block = blocks[(idx + 1)..] 
- .iter() - .map(|block| block.header.number) - .min() - .map_or(last_block_number, BlockNumber::prev); - assert_eq!( - buffered_store.last_contiguous_block_number(ctx).await?, - expected_last_contiguous_block - ); - - ctx.sleep(block_interval).await?; - } - - let mut inner_subscriber = buffered_store.inner().subscribe_to_block_writes(); - while buffered_store - .inner() - .last_contiguous_block_number(ctx) - .await? - < last_block_number - { - sync::changed(ctx, &mut inner_subscriber).await?; - } - - // Check events emitted by the buffered storage. This also ensures that all underlying storage - // updates are processed before proceeding to the following checks. - let expected_numbers = (initial_block_count as u64 + 1)..=last_block_number.0; - for expected_number in expected_numbers.map(BlockNumber) { - assert_matches!( - events_receiver.recv(ctx).await?, - BufferedStorageEvent::UpdateReceived(number) if number == expected_number - ); - } - - assert_eq!(buffered_store.buffer_len().await, 0); - Ok(()) - }) - .await - .unwrap(); -} - -// Choose intervals so that they are both smaller and larger than the sleep duration in -// `MockContiguousStore::run_updates()`. -const BLOCK_INTERVALS: [time::Duration; 4] = [ - time::Duration::ZERO, - time::Duration::milliseconds(3), - time::Duration::milliseconds(5), - time::Duration::milliseconds(10), -]; - -#[test_casing(4, BLOCK_INTERVALS)] -#[tokio::test] -async fn buffered_storage_with_sequential_blocks(block_interval: time::Duration) { - test_buffered_storage(0, 30, block_interval, |_, _| { - // Do not perform shuffling - }) - .await; -} - -#[test_casing(4, BLOCK_INTERVALS)] -#[tokio::test] -async fn buffered_storage_with_random_blocks(block_interval: time::Duration) { - test_buffered_storage(0, 30, block_interval, |rng, blocks| blocks.shuffle(rng)).await; -} - -#[test_casing(4, BLOCK_INTERVALS)] -#[tokio::test] -async fn buffered_storage_with_slightly_shuffled_blocks(block_interval: time::Duration) { - test_buffered_storage(0, 30, block_interval, |rng, blocks| { - for chunk in blocks.chunks_mut(4) { - chunk.shuffle(rng); - } - }) - .await; -} - -#[test_casing(4, BLOCK_INTERVALS)] -#[tokio::test] -async fn buffered_storage_with_initial_blocks(block_interval: time::Duration) { - test_buffered_storage(10, 20, block_interval, |_, _| { - // Do not perform shuffling - }) - .await; -} - -#[test_casing(4, BLOCK_INTERVALS)] -#[tokio::test] -async fn buffered_storage_with_initial_blocks_and_slight_shuffling(block_interval: time::Duration) { - test_buffered_storage(10, 20, block_interval, |rng, blocks| { - for chunk in blocks.chunks_mut(5) { - chunk.shuffle(rng); - } - }) - .await; -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs b/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs deleted file mode 100644 index d991fbb4924..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/conversions.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! Conversion logic between server and consensus types. 
-use anyhow::Context as _; -use zksync_consensus_roles::validator::FinalBlock; -use zksync_dal::blocks_dal::ConsensusBlockFields; -use zksync_types::MiniblockNumber; - -use crate::{consensus, sync_layer::fetcher::FetchedBlock}; - -impl FetchedBlock { - pub(super) fn from_gossip_block( - block: &FinalBlock, - last_in_batch: bool, - ) -> anyhow::Result { - let number = u32::try_from(block.header.number.0) - .context("Integer overflow converting block number")?; - let payload = consensus::Payload::decode(&block.payload) - .context("Failed deserializing block payload")?; - - Ok(Self { - number: MiniblockNumber(number), - l1_batch_number: payload.l1_batch_number, - last_in_batch, - protocol_version: payload.protocol_version, - timestamp: payload.timestamp, - reference_hash: Some(payload.hash), - l1_gas_price: payload.l1_gas_price, - l2_fair_gas_price: payload.l2_fair_gas_price, - virtual_blocks: payload.virtual_blocks, - operator_address: payload.operator_address, - transactions: payload.transactions, - consensus: Some(ConsensusBlockFields { - parent: block.header.parent, - justification: block.justification.clone(), - }), - }) - } -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs b/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs deleted file mode 100644 index 73caf510269..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/metrics.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! Metrics for gossip-powered syncing. - -use std::time::Duration; - -use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "kind", rename_all = "snake_case")] -pub(super) enum BlockResponseKind { - Persisted, - InMemory, -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "external_node_gossip_fetcher")] -pub(super) struct GossipFetcherMetrics { - /// Number of currently buffered unexecuted blocks. - pub buffer_size: Gauge, - /// Latency of a `get_block` call. - #[metrics(unit = Unit::Seconds, buckets = Buckets::LATENCIES)] - pub get_block_latency: Family>, - /// Latency of putting a block into the buffered storage. This may include the time to queue - /// block actions, but does not include block execution. - #[metrics(unit = Unit::Seconds, buckets = Buckets::LATENCIES)] - pub buffer_block_latency: Histogram, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/mod.rs deleted file mode 100644 index 9360a169bbe..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/mod.rs +++ /dev/null @@ -1,108 +0,0 @@ -//! Consensus adapter for EN synchronization logic. - -use std::sync::Arc; - -use anyhow::Context as _; -use tokio::sync::watch; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; -use zksync_consensus_executor::{Executor, ExecutorConfig}; -use zksync_consensus_roles::node; -use zksync_dal::ConnectionPool; -use zksync_types::Address; - -use self::{buffered::Buffered, storage::PostgresBlockStorage}; -use super::{fetcher::FetcherCursor, sync_action::ActionQueueSender}; - -mod buffered; -mod conversions; -mod metrics; -mod storage; -#[cfg(test)] -mod tests; -mod utils; - -/// Starts fetching L2 blocks using peer-to-peer gossip network. 
-pub async fn run_gossip_fetcher( - pool: ConnectionPool, - actions: ActionQueueSender, - executor_config: ExecutorConfig, - node_key: node::SecretKey, - mut stop_receiver: watch::Receiver, - operator_address: Address, -) -> anyhow::Result<()> { - scope::run!(&ctx::root(), |ctx, s| async { - s.spawn_bg(run_gossip_fetcher_inner( - ctx, - pool, - actions, - executor_config, - node_key, - operator_address, - )); - if stop_receiver.changed().await.is_err() { - tracing::warn!( - "Stop signal sender for gossip fetcher was dropped without sending a signal" - ); - } - tracing::info!("Stop signal received, gossip fetcher is shutting down"); - Ok(()) - }) - .await -} - -async fn run_gossip_fetcher_inner( - ctx: &ctx::Ctx, - pool: ConnectionPool, - actions: ActionQueueSender, - executor_config: ExecutorConfig, - node_key: node::SecretKey, - operator_address: Address, -) -> anyhow::Result<()> { - tracing::info!( - "Starting gossip fetcher with {executor_config:?} and node key {:?}", - node_key.public() - ); - - let mut storage = pool - .access_storage_tagged("sync_layer") - .await - .context("Failed acquiring Postgres connection for cursor")?; - let cursor = FetcherCursor::new(&mut storage) - .await - .context("FetcherCursor::new()")?; - drop(storage); - - let store = PostgresBlockStorage::new( - ctx, - pool, - actions, - cursor, - &executor_config.genesis_block, - operator_address, - ) - .await - .wrap("PostgresBlockStorage::new()")?; - let buffered = Arc::new(Buffered::new(store)); - let store = buffered.inner(); - - scope::run!(ctx, |ctx, s| async { - let executor = Executor::new(ctx, executor_config, node_key, buffered.clone()) - .await - .context("Node executor misconfiguration")?; - s.spawn_bg(async { - store - .run_background_tasks(ctx) - .await - .context("`PostgresBlockStorage` background tasks failed") - }); - s.spawn_bg(async { - buffered - .run_background_tasks(ctx) - .await - .context("`Buffered` storage background tasks failed") - }); - - executor.run(ctx).await.context("Node executor terminated") - }) - .await -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs deleted file mode 100644 index cae4ea4f06c..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/mod.rs +++ /dev/null @@ -1,358 +0,0 @@ -//! Storage implementation based on DAL. 
- -use std::ops; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_concurrency::{ - ctx, - error::Wrap as _, - sync::{self, watch, Mutex}, - time, -}; -use zksync_consensus_roles::validator::{BlockNumber, FinalBlock}; -use zksync_consensus_storage::BlockStore; -use zksync_dal::{blocks_dal::ConsensusBlockFields, ConnectionPool, StorageProcessor}; -use zksync_types::{api::en::SyncBlock, Address, MiniblockNumber}; - -#[cfg(test)] -mod tests; - -use super::buffered::ContiguousBlockStore; -use crate::{ - consensus, - consensus::sync_block_to_consensus_block, - sync_layer::{ - fetcher::{FetchedBlock, FetcherCursor}, - sync_action::{ActionQueueSender, SyncAction}, - }, -}; - -#[derive(Debug)] -struct CursorWithCachedBlock { - inner: FetcherCursor, - maybe_last_block_in_batch: Option, -} - -impl From for CursorWithCachedBlock { - fn from(inner: FetcherCursor) -> Self { - Self { - inner, - maybe_last_block_in_batch: None, - } - } -} - -impl CursorWithCachedBlock { - fn advance(&mut self, block: FetchedBlock) -> Vec> { - let mut actions = Vec::with_capacity(2); - if let Some(mut prev_block) = self.maybe_last_block_in_batch.take() { - prev_block.last_in_batch = prev_block.l1_batch_number != block.l1_batch_number; - actions.push(self.inner.advance(prev_block)); - } - - // We take advantage of the fact that the last block in a batch is a *fictive* block that - // does not contain transactions. Thus, any block with transactions cannot be last in an L1 batch. - let can_be_last_in_batch = block.transactions.is_empty(); - if can_be_last_in_batch { - self.maybe_last_block_in_batch = Some(block); - // We cannot convert the block into actions yet, since we don't know whether it seals an L1 batch. - } else { - actions.push(self.inner.advance(block)); - } - actions - } -} - -/// Postgres-based [`BlockStore`] implementation. New blocks are scheduled to be written via -/// [`ContiguousBlockStore`] trait, which internally uses an [`ActionQueueSender`] to queue -/// block data (miniblock and L1 batch parameters, transactions) for the state keeper. Block data processing -/// is shared with JSON-RPC-based syncing. -#[derive(Debug)] -pub(super) struct PostgresBlockStorage { - pool: ConnectionPool, - first_block_number: MiniblockNumber, - actions: ActionQueueSender, - block_sender: watch::Sender, - cursor: Mutex, - operator_address: Address, -} - -impl PostgresBlockStorage { - /// Creates a new storage handle. `pool` should have multiple connections to work efficiently. 
- pub async fn new( - ctx: &ctx::Ctx, - pool: ConnectionPool, - actions: ActionQueueSender, - cursor: FetcherCursor, - genesis_block: &FinalBlock, - operator_address: Address, - ) -> ctx::Result { - let mut storage = ctx.wait(pool.access_storage_tagged("sync_layer")).await??; - Self::ensure_genesis_block(ctx, &mut storage, genesis_block, operator_address) - .await - .wrap("ensure_genesis_block()")?; - drop(storage); - - let first_block_number = u32::try_from(genesis_block.header.number.0) - .context("Block number overflow for genesis block")?; - let first_block_number = MiniblockNumber(first_block_number); - - Ok(Self::new_unchecked( - pool, - first_block_number, - actions, - cursor, - operator_address, - )) - } - - fn new_unchecked( - pool: ConnectionPool, - first_block_number: MiniblockNumber, - actions: ActionQueueSender, - cursor: FetcherCursor, - operator_address: Address, - ) -> Self { - let current_block_number = cursor.next_miniblock.0.saturating_sub(1).into(); - Self { - pool, - first_block_number, - actions, - block_sender: watch::channel(BlockNumber(current_block_number)).0, - cursor: Mutex::new(cursor.into()), - operator_address, - } - } - - async fn ensure_genesis_block( - ctx: &ctx::Ctx, - storage: &mut StorageProcessor<'_>, - genesis_block: &FinalBlock, - operator_address: Address, - ) -> ctx::Result<()> { - let block_number = u32::try_from(genesis_block.header.number.0) - .context("Block number overflow for genesis block")?; - let block = Self::sync_block( - ctx, - storage, - MiniblockNumber(block_number), - operator_address, - ) - .await - .wrap("sync_block();")?; - let block = block - .with_context(|| { - format!("Genesis block #{block_number} (first block with consensus data) is not present in Postgres") - })?; - let actual_consensus_fields = block.consensus.clone(); - - // Some of the following checks are duplicated in `Executor` initialization, but it's necessary - // to run them if the genesis consensus block is not present locally. - let expected_payload = consensus::Payload::decode(&genesis_block.payload) - .context("Cannot decode genesis block payload")?; - let actual_payload: consensus::Payload = block.try_into()?; - if actual_payload != expected_payload { - return Err(anyhow::anyhow!( - "Genesis block payload from Postgres {actual_payload:?} does not match the configured one \ - {expected_payload:?}" - ).into()); - } - - let expected_consensus_fields = ConsensusBlockFields { - parent: genesis_block.header.parent, - justification: genesis_block.justification.clone(), - }; - if let Some(actual_consensus_fields) = &actual_consensus_fields { - let actual_consensus_fields = ConsensusBlockFields::decode(actual_consensus_fields) - .context("ConsensusBlockFields::decode()")?; - // While justifications may differ among nodes for an arbitrary block, we assume that - // the genesis block has a hard coded justification. - if actual_consensus_fields != expected_consensus_fields { - return Err(anyhow::anyhow!( - "Genesis block consensus fields in Postgres {actual_consensus_fields:?} do not match \ - the configured ones {expected_consensus_fields:?}" - ).into()); - } - } else { - tracing::info!( - "Postgres doesn't have consensus fields for genesis block; saving {expected_consensus_fields:?}" - ); - ctx.wait(storage.blocks_dal().set_miniblock_consensus_fields( - MiniblockNumber(block_number), - &expected_consensus_fields, - )) - .await? - .context("Failed saving consensus fields for genesis block")?; - } - Ok(()) - } - - /// Runs background tasks for this store. 
This method **must** be spawned as a background task - /// which should be running as long at the [`PostgresBlockStorage`] is in use; otherwise, - /// it will function incorrectly. - pub async fn run_background_tasks(&self, ctx: &ctx::Ctx) -> anyhow::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50); - loop { - let sealed_miniblock_number = match self.sealed_miniblock_number(ctx).await { - Ok(number) => number, - Err(ctx::Error::Internal(err)) => return Err(err), - Err(ctx::Error::Canceled(_)) => return Ok(()), // Do not propagate cancellation errors - }; - self.block_sender.send_if_modified(|number| { - if *number != sealed_miniblock_number { - *number = sealed_miniblock_number; - true - } else { - false - } - }); - if let Err(ctx::Canceled) = ctx.sleep(POLL_INTERVAL).await { - return Ok(()); // Do not propagate cancellation errors - } - } - } - - async fn storage(&self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.pool.access_storage_tagged("sync_layer")) - .await? - .context("Failed to connect to Postgres")?) - } - - async fn sync_block( - ctx: &ctx::Ctx, - storage: &mut StorageProcessor<'_>, - number: MiniblockNumber, - operator_address: Address, - ) -> ctx::Result> { - Ok(ctx - .wait( - storage - .sync_dal() - .sync_block(number, operator_address, true), - ) - .await? - .with_context(|| format!("Failed getting miniblock #{number} from Postgres"))?) - } - - async fn block( - ctx: &ctx::Ctx, - storage: &mut StorageProcessor<'_>, - number: MiniblockNumber, - operator_address: Address, - ) -> ctx::Result> { - let Some(block) = Self::sync_block(ctx, storage, number, operator_address) - .await - .wrap("Self::sync_block()")? - else { - return Ok(None); - }; - let block = - sync_block_to_consensus_block(block).context("sync_block_to_consensus_block()")?; - Ok(Some(block)) - } - - async fn sealed_miniblock_number(&self, ctx: &ctx::Ctx) -> ctx::Result { - let mut storage = self.storage(ctx).await.wrap("storage()")?; - let number = ctx - .wait(storage.blocks_dal().get_sealed_miniblock_number()) - .await? - .context("Failed getting sealed miniblock number")?; - Ok(BlockNumber(number.0.into())) - } -} - -#[async_trait] -impl BlockStore for PostgresBlockStorage { - async fn head_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - let mut storage = self.storage(ctx).await.wrap("storage()")?; - let miniblock_number = ctx - .wait(storage.blocks_dal().get_sealed_miniblock_number()) - .await? - .context("Failed getting sealed miniblock number")?; - // ^ The number can get stale, but it's OK for our purposes - Ok( - Self::block(ctx, &mut storage, miniblock_number, self.operator_address) - .await - .wrap("Self::block()")? - .with_context(|| { - format!("Miniblock #{miniblock_number} disappeared from Postgres") - })?, - ) - } - - async fn first_block(&self, ctx: &ctx::Ctx) -> ctx::Result { - let mut storage = self.storage(ctx).await.wrap("storage()")?; - Ok(Self::block( - ctx, - &mut storage, - self.first_block_number, - self.operator_address, - ) - .await - .wrap("Self::block()")? - .context("Genesis miniblock not present in Postgres")?) 
- } - - async fn last_contiguous_block_number(&self, ctx: &ctx::Ctx) -> ctx::Result { - self.sealed_miniblock_number(ctx) - .await - .wrap("sealed_miniblock_number()") - } - - async fn block(&self, ctx: &ctx::Ctx, number: BlockNumber) -> ctx::Result> { - let Ok(number) = u32::try_from(number.0) else { - return Ok(None); - }; - let number = MiniblockNumber(number); - if number < self.first_block_number { - return Ok(None); - } - let mut storage = self.storage(ctx).await.wrap("storage()")?; - Self::block(ctx, &mut storage, number, self.operator_address) - .await - .wrap("Self::block()") - } - - async fn missing_block_numbers( - &self, - ctx: &ctx::Ctx, - range: ops::Range, - ) -> ctx::Result> { - let mut output = vec![]; - let first_block_number = u64::from(self.first_block_number.0); - let numbers_before_first_block = (range.start.0..first_block_number).map(BlockNumber); - output.extend(numbers_before_first_block); - - let last_block_number = self - .sealed_miniblock_number(ctx) - .await - .wrap("sealed_miniblock_number()")?; - let numbers_after_last_block = (last_block_number.next().0..range.end.0).map(BlockNumber); - output.extend(numbers_after_last_block); - - // By design, no blocks are missing in the `first_block_number..=last_block_number` range. - Ok(output) - } - - fn subscribe_to_block_writes(&self) -> watch::Receiver { - self.block_sender.subscribe() - } -} - -#[async_trait] -impl ContiguousBlockStore for PostgresBlockStorage { - async fn schedule_next_block(&self, ctx: &ctx::Ctx, block: &FinalBlock) -> ctx::Result<()> { - // `last_in_batch` is always set to `false` by this call; it is properly set by `CursorWithCachedBlock`. - let fetched_block = - FetchedBlock::from_gossip_block(block, false).context("from_gossip_block()")?; - let actions = sync::lock(ctx, &self.cursor).await?.advance(fetched_block); - for actions_chunk in actions { - // We don't wrap this in `ctx.wait()` because `PostgresBlockStorage` will get broken - // if it gets reused after context cancellation. - self.actions.push_actions(actions_chunk).await; - } - Ok(()) - } -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs deleted file mode 100644 index d8b1eff0e24..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/storage/tests.rs +++ /dev/null @@ -1,376 +0,0 @@ -//! Tests for Postgres storage implementation. 
- -use rand::{thread_rng, Rng}; -use zksync_concurrency::{scope, testonly::abort_on_panic}; -use zksync_consensus_roles::validator; -use zksync_types::L2ChainId; - -use super::*; -use crate::{ - genesis::{ensure_genesis_state, GenesisParams}, - sync_layer::{ - gossip::tests::{ - add_consensus_fields, assert_first_block_actions, assert_second_block_actions, - block_payload, create_genesis_block, load_final_block, - }, - tests::{run_state_keeper_with_multiple_miniblocks, OPERATOR_ADDRESS}, - ActionQueue, - }, -}; - -const TEST_TIMEOUT: time::Duration = time::Duration::seconds(10); - -#[tokio::test] -async fn block_store_basics_for_postgres() { - abort_on_panic(); - let pool = ConnectionPool::test_pool().await; - run_state_keeper_with_multiple_miniblocks(pool.clone()).await; - - let mut storage = pool.access_storage().await.unwrap(); - add_consensus_fields(&mut storage, &thread_rng().gen(), 0..3).await; - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - drop(storage); - let (actions_sender, _) = ActionQueue::new(); - let storage = PostgresBlockStorage::new_unchecked( - pool.clone(), - MiniblockNumber(0), - actions_sender, - cursor, - OPERATOR_ADDRESS, - ); - - let ctx = &ctx::test_root(&ctx::RealClock); - let genesis_block = BlockStore::first_block(&storage, ctx).await.unwrap(); - assert_eq!(genesis_block.header.number, BlockNumber(0)); - let head_block = BlockStore::head_block(&storage, ctx).await.unwrap(); - assert_eq!(head_block.header.number, BlockNumber(2)); - let last_contiguous_block_number = storage.last_contiguous_block_number(ctx).await.unwrap(); - assert_eq!(last_contiguous_block_number, BlockNumber(2)); - - let block = storage - .block(ctx, BlockNumber(1)) - .await - .unwrap() - .expect("no block #1"); - assert_eq!(block.header.number, BlockNumber(1)); - let missing_block = storage.block(ctx, BlockNumber(3)).await.unwrap(); - assert!(missing_block.is_none(), "{missing_block:?}"); -} - -#[tokio::test] -async fn subscribing_to_block_updates_for_postgres() { - abort_on_panic(); - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - if storage.blocks_dal().is_genesis_needed().await.unwrap() { - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - } - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - // ^ This is logically incorrect (the storage should not be updated other than using - // `ContiguousBlockStore`), but for testing subscriptions this is fine. - drop(storage); - let (actions_sender, _) = ActionQueue::new(); - let storage = PostgresBlockStorage::new_unchecked( - pool.clone(), - MiniblockNumber(0), - actions_sender, - cursor, - OPERATOR_ADDRESS, - ); - let mut subscriber = storage.subscribe_to_block_writes(); - - let ctx = &ctx::test_root(&ctx::RealClock); - scope::run!(&ctx.with_timeout(TEST_TIMEOUT), |ctx, s| async { - s.spawn_bg(storage.run_background_tasks(ctx)); - s.spawn(async { - run_state_keeper_with_multiple_miniblocks(pool.clone()).await; - Ok(()) - }); - - loop { - let block = *sync::changed(ctx, &mut subscriber).await?; - if block == BlockNumber(2) { - // We should receive at least the last update. 
- break; - } - } - Ok(()) - }) - .await - .unwrap(); -} - -#[tokio::test] -async fn processing_new_blocks() { - abort_on_panic(); - let pool = ConnectionPool::test_pool().await; - run_state_keeper_with_multiple_miniblocks(pool.clone()).await; - - let mut storage = pool.access_storage().await.unwrap(); - add_consensus_fields(&mut storage, &thread_rng().gen(), 0..3).await; - let first_block = load_final_block(&mut storage, 1).await; - let second_block = load_final_block(&mut storage, 2).await; - storage - .transactions_dal() - .reset_transactions_state(MiniblockNumber(0)) - .await; - storage - .blocks_dal() - .delete_miniblocks(MiniblockNumber(0)) - .await - .unwrap(); - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - drop(storage); - - let (actions_sender, mut actions) = ActionQueue::new(); - let storage = PostgresBlockStorage::new_unchecked( - pool.clone(), - MiniblockNumber(0), - actions_sender, - cursor, - OPERATOR_ADDRESS, - ); - let ctx = &ctx::test_root(&ctx::RealClock); - let ctx = &ctx.with_timeout(TEST_TIMEOUT); - storage - .schedule_next_block(ctx, &first_block) - .await - .unwrap(); - assert_first_block_actions(&mut actions).await; - - storage - .schedule_next_block(ctx, &second_block) - .await - .unwrap(); - assert_second_block_actions(&mut actions).await; -} - -#[tokio::test] -async fn ensuring_consensus_fields_for_genesis_block() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - if storage.blocks_dal().is_genesis_needed().await.unwrap() { - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - } - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - let block_payload = block_payload(&mut storage, 0).await.encode(); - drop(storage); - - let validator_key = validator::SecretKey::generate(&mut ctx.rng()); - let genesis_block = create_genesis_block(&validator_key, 0, block_payload.clone()); - - let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool.clone(), - actions_sender, - cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap(); - - // Check that the consensus fields are persisted for the genesis block. - let mut storage = pool.access_storage().await.unwrap(); - let sync_block = storage - .sync_dal() - .sync_block(MiniblockNumber(0), Address::default(), false) - .await - .unwrap() - .expect("No genesis block"); - assert!(sync_block.consensus.is_some()); - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - let other_cursor = FetcherCursor::new(&mut storage).await.unwrap(); - drop(storage); - - // Check that the storage can be initialized again. - let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool.clone(), - actions_sender, - cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap(); - - // Create a genesis block with another validator. - let validator_key = validator::SecretKey::generate(&mut ctx.rng()); - let other_genesis_block = create_genesis_block(&validator_key, 0, block_payload); - - // Storage should not be able to initialize with other genesis block. 
- let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool, - actions_sender, - other_cursor, - &other_genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap_err(); -} - -#[tokio::test] -async fn genesis_block_payload_mismatch() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - if storage.blocks_dal().is_genesis_needed().await.unwrap() { - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - } - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - let other_cursor = FetcherCursor::new(&mut storage).await.unwrap(); - - let bogus_block_payload = validator::Payload(vec![]); - let validator_key = validator::SecretKey::generate(&mut ctx.rng()); - let genesis_block = create_genesis_block(&validator_key, 0, bogus_block_payload); - - let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool.clone(), - actions_sender, - cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap_err(); - - let mut bogus_block_payload = block_payload(&mut storage, 0).await; - bogus_block_payload.timestamp += 1; - let genesis_block = create_genesis_block(&validator_key, 0, bogus_block_payload.encode()); - - let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool.clone(), - actions_sender, - other_cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap_err(); -} - -#[tokio::test] -async fn missing_genesis_block() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let pool = ConnectionPool::test_pool().await; - let mut storage = pool.access_storage().await.unwrap(); - if storage.blocks_dal().is_genesis_needed().await.unwrap() { - ensure_genesis_state(&mut storage, L2ChainId::default(), &GenesisParams::mock()) - .await - .unwrap(); - } - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - let block_payload = block_payload(&mut storage, 0).await.encode(); - drop(storage); - - // Create a genesis block for the (non-existing) block #2. 
- let validator_key = validator::SecretKey::generate(&mut ctx.rng()); - let genesis_block = create_genesis_block(&validator_key, 2, block_payload.clone()); - - let (actions_sender, _) = ActionQueue::new(); - PostgresBlockStorage::new( - ctx, - pool, - actions_sender, - cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap_err(); -} - -#[tokio::test] -async fn using_non_zero_genesis_block() { - abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let pool = ConnectionPool::test_pool().await; - run_state_keeper_with_multiple_miniblocks(pool.clone()).await; - - let mut storage = pool.access_storage().await.unwrap(); - let cursor = FetcherCursor::new(&mut storage).await.unwrap(); - let block_payload = block_payload(&mut storage, 2).await.encode(); - drop(storage); - - let validator_key = validator::SecretKey::generate(&mut ctx.rng()); - let genesis_block = create_genesis_block(&validator_key, 2, block_payload.clone()); - - let (actions_sender, _) = ActionQueue::new(); - let store = PostgresBlockStorage::new( - ctx, - pool, - actions_sender, - cursor, - &genesis_block, - OPERATOR_ADDRESS, - ) - .await - .unwrap(); - - let head_block = store.head_block(ctx).await.unwrap(); - assert_eq!(head_block.header.number, BlockNumber(2)); - assert_eq!( - head_block.header.parent, - validator::BlockHeaderHash::from_bytes([0; 32]) - ); - let first_block = store.first_block(ctx).await.unwrap(); - assert_eq!(first_block.header.number, BlockNumber(2)); - let last_contiguous_block_number = store.last_contiguous_block_number(ctx).await.unwrap(); - assert_eq!(last_contiguous_block_number, BlockNumber(2)); - - let block = store.block(ctx, BlockNumber(2)).await.unwrap(); - assert_eq!(block, Some(head_block)); - for number in [0, 1, 3] { - let missing_block = store.block(ctx, BlockNumber(number)).await.unwrap(); - assert!(missing_block.is_none()); - } - - let missing_blocks = store - .missing_block_numbers(ctx, BlockNumber(0)..BlockNumber(5)) - .await - .unwrap(); - assert_eq!( - missing_blocks, - [ - BlockNumber(0), - BlockNumber(1), - BlockNumber(3), - BlockNumber(4) - ] - ); - let missing_blocks = store - .missing_block_numbers(ctx, BlockNumber(0)..BlockNumber(2)) - .await - .unwrap(); - assert_eq!(missing_blocks, [BlockNumber(0), BlockNumber(1)]); - let missing_blocks = store - .missing_block_numbers(ctx, BlockNumber(2)..BlockNumber(5)) - .await - .unwrap(); - assert_eq!(missing_blocks, [BlockNumber(3), BlockNumber(4)]); - let missing_blocks = store - .missing_block_numbers(ctx, BlockNumber(2)..BlockNumber(3)) - .await - .unwrap(); - assert_eq!(missing_blocks, []); -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs b/core/lib/zksync_core/src/sync_layer/gossip/tests.rs deleted file mode 100644 index ad9b00e9c15..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/tests.rs +++ /dev/null @@ -1,534 +0,0 @@ -//! Tests for consensus adapters for EN synchronization logic. 
- -use std::ops; - -use assert_matches::assert_matches; -use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope, testonly::abort_on_panic, time}; -use zksync_consensus_executor::testonly::FullValidatorConfig; -use zksync_consensus_roles::validator::{self, FinalBlock}; -use zksync_consensus_storage::{InMemoryStorage, WriteBlockStore}; -use zksync_dal::{blocks_dal::ConsensusBlockFields, ConnectionPool, StorageProcessor}; -use zksync_types::{api::en::SyncBlock, L1BatchNumber, MiniblockNumber, H256}; - -use super::*; -use crate::{ - consensus, - sync_layer::{ - sync_action::SyncAction, - tests::{ - mock_l1_batch_hash_computation, run_state_keeper_with_multiple_l1_batches, - run_state_keeper_with_multiple_miniblocks, StateKeeperHandles, OPERATOR_ADDRESS, - }, - ActionQueue, - }, -}; - -const CLOCK_SPEEDUP: i64 = 20; -const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50 * CLOCK_SPEEDUP); - -async fn load_sync_block(storage: &mut StorageProcessor<'_>, number: u32) -> SyncBlock { - storage - .sync_dal() - .sync_block(MiniblockNumber(number), OPERATOR_ADDRESS, true) - .await - .unwrap() - .unwrap_or_else(|| panic!("no sync block #{number}")) -} - -/// Loads a block from the storage and converts it to a `FinalBlock`. -pub(super) async fn load_final_block( - storage: &mut StorageProcessor<'_>, - number: u32, -) -> FinalBlock { - let sync_block = load_sync_block(storage, number).await; - consensus::sync_block_to_consensus_block(sync_block).unwrap() -} - -fn convert_sync_blocks(sync_blocks: Vec<SyncBlock>) -> Vec<FinalBlock> { - sync_blocks - .into_iter() - .map(|sync_block| consensus::sync_block_to_consensus_block(sync_block).unwrap()) - .collect() -} - -pub(super) async fn block_payload( - storage: &mut StorageProcessor<'_>, - number: u32, -) -> consensus::Payload { - let sync_block = load_sync_block(storage, number).await; - consensus::Payload::try_from(sync_block).unwrap() -} - -/// Adds consensus information for the specified `count` of miniblocks, starting from the genesis. -pub(super) async fn add_consensus_fields( - storage: &mut StorageProcessor<'_>, - validator_key: &validator::SecretKey, - block_numbers: ops::Range<u32>, -) { - let mut prev_block_hash = validator::BlockHeaderHash::from_bytes([0; 32]); - let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); - for number in block_numbers { - let payload = block_payload(storage, number).await.encode(); - let block_header = validator::BlockHeader { - parent: prev_block_hash, - number: validator::BlockNumber(number.into()), - payload: payload.hash(), - }; - let replica_commit = validator::ReplicaCommit { - protocol_version: validator::ProtocolVersion::EARLIEST, - view: validator::ViewNumber(number.into()), - proposal: block_header, - }; - let replica_commit = validator_key.sign_msg(replica_commit); - let justification = validator::CommitQC::from(&[replica_commit], &validator_set) - .expect("Failed creating QC"); - - let consensus = ConsensusBlockFields { - parent: prev_block_hash, - justification, - }; - storage - .blocks_dal() - .set_miniblock_consensus_fields(MiniblockNumber(number), &consensus) - .await - .unwrap(); - prev_block_hash = block_header.hash(); - } -} - -/// Creates a genesis block for the consensus with the specified number / payload authored by a single validator.
-pub(super) fn create_genesis_block( - validator_key: &validator::SecretKey, - number: u64, - payload: validator::Payload, -) -> FinalBlock { - let block_header = validator::BlockHeader { - parent: validator::BlockHeaderHash::from_bytes([0; 32]), - number: validator::BlockNumber(number), - payload: payload.hash(), - }; - let validator_set = validator::ValidatorSet::new([validator_key.public()]).unwrap(); - let replica_commit = validator::ReplicaCommit { - protocol_version: validator::ProtocolVersion::EARLIEST, - view: validator::ViewNumber(number), - proposal: block_header, - }; - let replica_commit = validator_key.sign_msg(replica_commit); - let justification = - validator::CommitQC::from(&[replica_commit], &validator_set).expect("Failed creating QC"); - FinalBlock { - header: block_header, - payload, - justification, - } -} - -pub(super) async fn assert_first_block_actions(actions: &mut ActionQueue) -> Vec<SyncAction> { - let mut received_actions = vec![]; - while !matches!(received_actions.last(), Some(SyncAction::SealMiniblock(_))) { - received_actions.push(actions.recv_action().await); - } - assert_matches!( - received_actions.as_slice(), - [ - SyncAction::OpenBatch { - number: L1BatchNumber(1), - timestamp: 1, - first_miniblock_info: (MiniblockNumber(1), 1), - .. - }, - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::SealMiniblock(_), - ] - ); - received_actions -} - -pub(super) async fn assert_second_block_actions(actions: &mut ActionQueue) -> Vec<SyncAction> { - let mut received_actions = vec![]; - while !matches!(received_actions.last(), Some(SyncAction::SealMiniblock(_))) { - received_actions.push(actions.recv_action().await); - } - assert_matches!( - received_actions.as_slice(), - [ - SyncAction::Miniblock { - number: MiniblockNumber(2), - timestamp: 2, - virtual_blocks: 1, - }, - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::Tx(_), - SyncAction::SealMiniblock(_), - ] - ); - received_actions -} - -#[test_casing(4, Product(([false, true], [false, true])))] -#[tokio::test] -async fn syncing_via_gossip_fetcher(delay_first_block: bool, delay_second_block: bool) { - abort_on_panic(); - let pool = ConnectionPool::test_pool().await; - let tx_hashes = run_state_keeper_with_multiple_miniblocks(pool.clone()).await; - - let mut storage = pool.access_storage().await.unwrap(); - let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); - let rng = &mut ctx.rng(); - let mut validator = FullValidatorConfig::for_single_validator( - rng, - genesis_block_payload, - validator::BlockNumber(0), - ); - let validator_set = validator.node_config.validators.clone(); - let external_node = validator.connect_full_node(rng); - - let genesis_block = validator.node_config.genesis_block.clone(); - add_consensus_fields(&mut storage, &validator.validator_key, 0..3).await; - let blocks = convert_sync_blocks(reset_storage(storage).await); - let [first_block, second_block] = blocks.as_slice() else { - unreachable!("Unexpected blocks in storage: {blocks:?}"); - }; - tracing::trace!("Node storage reset"); - - let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); - if !delay_first_block { - validator_storage.put_block(ctx, first_block).await.unwrap(); - if !delay_second_block { - validator_storage - .put_block(ctx, second_block) - .await - .unwrap(); - } - } - - let (actions_sender, mut actions) = ActionQueue::new(); - let (keeper_actions_sender, 
keeper_actions) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool.clone(), keeper_actions, &[&tx_hashes]).await; - scope::run!(ctx, |ctx, s| async { - let validator = Executor::new( - ctx, - validator.node_config, - validator.node_key, - validator_storage.clone(), - ) - .await?; - // ^ We intentionally do not run consensus on the validator node, since it'll produce blocks - // with payloads that cannot be parsed by the external node. - - s.spawn_bg(validator.run(ctx)); - s.spawn_bg(run_gossip_fetcher_inner( - ctx, - pool.clone(), - actions_sender, - external_node.node_config, - external_node.node_key, - OPERATOR_ADDRESS, - )); - - if delay_first_block { - ctx.sleep(POLL_INTERVAL).await?; - validator_storage.put_block(ctx, first_block).await.unwrap(); - if !delay_second_block { - validator_storage - .put_block(ctx, second_block) - .await - .unwrap(); - } - } - - let received_actions = assert_first_block_actions(&mut actions).await; - // Manually replicate actions to the state keeper. - keeper_actions_sender.push_actions(received_actions).await; - - if delay_second_block { - validator_storage - .put_block(ctx, second_block) - .await - .unwrap(); - } - - let received_actions = assert_second_block_actions(&mut actions).await; - keeper_actions_sender.push_actions(received_actions).await; - state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(2)) - .await; - Ok(()) - }) - .await - .unwrap(); - - // Check that received blocks have consensus fields persisted. - let mut storage = pool.access_storage().await.unwrap(); - for number in [1, 2] { - let block = load_final_block(&mut storage, number).await; - block.justification.verify(&validator_set, 1).unwrap(); - } -} - -/// Returns the removed blocks. -async fn reset_storage(mut storage: StorageProcessor<'_>) -> Vec<SyncBlock> { - let sealed_miniblock_number = storage - .blocks_dal() - .get_sealed_miniblock_number() - .await - .unwrap(); - let mut blocks = vec![]; - for number in 1..=sealed_miniblock_number.0 { - blocks.push(load_sync_block(&mut storage, number).await); - } - - storage - .transactions_dal() - .reset_transactions_state(MiniblockNumber(0)) - .await; - storage - .blocks_dal() - .delete_miniblocks(MiniblockNumber(0)) - .await - .unwrap(); - storage - .blocks_dal() - .delete_l1_batches(L1BatchNumber(0)) - .await - .unwrap(); - blocks -} - -#[test_casing(4, [3, 2, 1, 0])] -#[tokio::test] -async fn syncing_via_gossip_fetcher_with_multiple_l1_batches(initial_block_count: usize) { - assert!(initial_block_count <= 3); - abort_on_panic(); - - let pool = ConnectionPool::test_pool().await; - let tx_hashes = run_state_keeper_with_multiple_l1_batches(pool.clone()).await; - let tx_hashes: Vec<_> = tx_hashes.iter().map(Vec::as_slice).collect(); - - let mut storage = pool.access_storage().await.unwrap(); - let genesis_block_payload = block_payload(&mut storage, 0).await.encode(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); - let rng = &mut ctx.rng(); - let mut validator = FullValidatorConfig::for_single_validator( - rng, - genesis_block_payload, - validator::BlockNumber(0), - ); - let validator_set = validator.node_config.validators.clone(); - let external_node = validator.connect_full_node(rng); - - let genesis_block = validator.node_config.genesis_block.clone(); - add_consensus_fields(&mut storage, &validator.validator_key, 0..4).await; - let blocks = convert_sync_blocks(reset_storage(storage).await); - assert_eq!(blocks.len(), 3); // 2 real + 1 fictive blocks - tracing::trace!("Node 
storage reset"); - let (initial_blocks, delayed_blocks) = blocks.split_at(initial_block_count); - - let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); - for block in initial_blocks { - validator_storage.put_block(ctx, block).await.unwrap(); - } - - let (actions_sender, actions) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool.clone(), actions, &tx_hashes).await; - scope::run!(ctx, |ctx, s| async { - let validator = Executor::new( - ctx, - validator.node_config, - validator.node_key, - validator_storage.clone(), - ) - .await?; - - s.spawn_bg(validator.run(ctx)); - s.spawn_bg(async { - for block in delayed_blocks { - ctx.sleep(POLL_INTERVAL).await?; - validator_storage.put_block(ctx, block).await?; - } - Ok(()) - }); - - s.spawn_bg(async { - mock_l1_batch_hash_computation(pool.clone(), 1).await; - Ok(()) - }); - s.spawn_bg(run_gossip_fetcher_inner( - ctx, - pool.clone(), - actions_sender, - external_node.node_config, - external_node.node_key, - OPERATOR_ADDRESS, - )); - - state_keeper - .wait(|state| state.get_local_block() == MiniblockNumber(3)) - .await; - Ok(()) - }) - .await - .unwrap(); - - // Check that received blocks have consensus fields persisted. - let mut storage = pool.access_storage().await.unwrap(); - for number in [1, 2, 3] { - let block = load_final_block(&mut storage, number).await; - block.validate(&validator_set, 1).unwrap(); - } -} - -#[test_casing(2, [1, 2])] -#[tokio::test(flavor = "multi_thread")] -async fn syncing_from_non_zero_block(first_block_number: u32) { - abort_on_panic(); - let pool = ConnectionPool::test_pool().await; - let tx_hashes = run_state_keeper_with_multiple_l1_batches(pool.clone()).await; - let tx_hashes: Vec<_> = tx_hashes.iter().map(Vec::as_slice).collect(); - - let mut storage = pool.access_storage().await.unwrap(); - let genesis_block_payload = block_payload(&mut storage, first_block_number) - .await - .encode(); - let ctx = &ctx::test_root(&ctx::AffineClock::new(CLOCK_SPEEDUP as f64)); - let rng = &mut ctx.rng(); - let mut validator = FullValidatorConfig::for_single_validator( - rng, - genesis_block_payload.clone(), - validator::BlockNumber(0), - ); - // Override the genesis block since it has an incorrect block number. - let genesis_block = create_genesis_block( - &validator.validator_key, - first_block_number.into(), - genesis_block_payload, - ); - validator.node_config.genesis_block = genesis_block.clone(); - let validator_set = validator.node_config.validators.clone(); - let external_node = validator.connect_full_node(rng); - - add_consensus_fields( - &mut storage, - &validator.validator_key, - first_block_number..4, - ) - .await; - let mut initial_blocks = reset_storage(storage).await; - let delayed_blocks = initial_blocks.split_off(first_block_number as usize); - assert!(!initial_blocks.is_empty()); - assert!(!delayed_blocks.is_empty()); - let delayed_blocks = convert_sync_blocks(delayed_blocks); - - // Re-insert initial blocks to the storage. This allows to more precisely emulate node syncing - // (e.g., missing L1 batch relation for the latest blocks). - insert_sync_blocks(pool.clone(), initial_blocks, &tx_hashes).await; - tracing::trace!("Re-inserted blocks to node storage"); - - let validator_storage = Arc::new(InMemoryStorage::new(genesis_block)); - let tx_hashes = if first_block_number >= 2 { - &tx_hashes[1..] 
// Skip transactions in L1 batch #1, since they won't be executed - } else { - &tx_hashes - }; - let (actions_sender, actions) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; - scope::run!(ctx, |ctx, s| async { - let validator = Executor::new( - ctx, - validator.node_config, - validator.node_key, - validator_storage.clone(), - ) - .await?; - - s.spawn_bg(async { validator.run(ctx).await.context("validator.run()") }); - - s.spawn_bg(async { - for block in &delayed_blocks { - ctx.sleep(POLL_INTERVAL).await?; - validator_storage - .put_block(ctx, block) - .await - .wrap("validator_stroage.put_block()")?; - } - Ok(()) - }); - - if first_block_number < 2 { - // L1 batch #1 will be sealed during the state keeper operation; we need to emulate - // computing metadata for it. - s.spawn_bg(async { - ctx.wait(mock_l1_batch_hash_computation(pool.clone(), 1)) - .await?; - Ok(()) - }); - } - - s.spawn_bg(async { - run_gossip_fetcher_inner( - ctx, - pool.clone(), - actions_sender, - external_node.node_config, - external_node.node_key, - OPERATOR_ADDRESS, - ) - .await - .context("run_gossip_fetcher_inner()") - }); - - ctx.wait(state_keeper.wait(|state| state.get_local_block() == MiniblockNumber(3))) - .await?; - Ok(()) - }) - .await - .unwrap(); - - // Check that received blocks have consensus fields persisted. - let mut storage = pool.access_storage().await.unwrap(); - for number in first_block_number..4 { - let block = load_final_block(&mut storage, number).await; - block.justification.verify(&validator_set, 1).unwrap(); - } -} - -async fn insert_sync_blocks(pool: ConnectionPool, blocks: Vec<SyncBlock>, tx_hashes: &[&[H256]]) { - let expected_block_number = blocks.last().expect("blocks cannot be empty").number; - let sealed_l1_batches = blocks - .iter() - .filter_map(|block| block.last_in_batch.then_some(block.l1_batch_number)); - let sealed_l1_batches: Vec<_> = sealed_l1_batches.collect(); - - let mut fetcher = FetcherCursor::new(&mut pool.access_storage().await.unwrap()) - .await - .unwrap(); - let (actions_sender, actions) = ActionQueue::new(); - let state_keeper = StateKeeperHandles::new(pool.clone(), actions, tx_hashes).await; - for block in blocks { - let block_actions = fetcher.advance(block.try_into().unwrap()); - actions_sender.push_actions(block_actions).await; - } - - let hash_tasks: Vec<_> = sealed_l1_batches - .into_iter() - .map(|l1_batch_number| { - tokio::spawn(mock_l1_batch_hash_computation( - pool.clone(), - l1_batch_number.0, - )) - }) - .collect(); - state_keeper - .wait(|state| state.get_local_block() == expected_block_number) - .await; - for hash_task in hash_tasks { - hash_task.await.unwrap(); - } -} diff --git a/core/lib/zksync_core/src/sync_layer/gossip/utils.rs b/core/lib/zksync_core/src/sync_layer/gossip/utils.rs deleted file mode 100644 index 8407821a2ec..00000000000 --- a/core/lib/zksync_core/src/sync_layer/gossip/utils.rs +++ /dev/null @@ -1,48 +0,0 @@ -use std::{iter, ops}; - -use zksync_consensus_roles::validator::BlockNumber; - -/// Iterator over missing block numbers. -pub(crate) struct MissingBlockNumbers<I: Iterator> { - range: ops::Range<BlockNumber>, - existing_numbers: iter::Peekable<I>, -} - -impl<I> MissingBlockNumbers<I> -where - I: Iterator<Item = BlockNumber>, -{ - /// Creates a new iterator based on the provided params. 
- pub(crate) fn new(range: ops::Range<BlockNumber>, existing_numbers: I) -> Self { - Self { - range, - existing_numbers: existing_numbers.peekable(), - } - } -} - -impl<I> Iterator for MissingBlockNumbers<I> -where - I: Iterator<Item = BlockNumber>, -{ - type Item = BlockNumber; - - fn next(&mut self) -> Option<Self::Item> { - // Loop while existing numbers match the starting numbers from the range. The check - // that the range is non-empty is redundant given how `existing_numbers` are constructed - // (they are guaranteed to be lesser than the upper range bound); we add it just to be safe. - while !self.range.is_empty() - && matches!(self.existing_numbers.peek(), Some(&num) if num == self.range.start) - { - self.range.start = self.range.start.next(); - self.existing_numbers.next(); // Advance to the next number - } - - if self.range.is_empty() { - return None; - } - let next_number = self.range.start; - self.range.start = self.range.start.next(); - Some(next_number) - } -} diff --git a/core/lib/zksync_core/src/sync_layer/mod.rs b/core/lib/zksync_core/src/sync_layer/mod.rs index df059947e3e..e216ef4f8c5 100644 --- a/core/lib/zksync_core/src/sync_layer/mod.rs +++ b/core/lib/zksync_core/src/sync_layer/mod.rs @@ -3,7 +3,6 @@ mod client; pub mod external_io; pub mod fetcher; pub mod genesis; -mod gossip; mod metrics; pub(crate) mod sync_action; mod sync_state; @@ -11,6 +10,6 @@ mod sync_state; mod tests; pub use self::{ - client::MainNodeClient, external_io::ExternalIO, gossip::run_gossip_fetcher, - sync_action::ActionQueue, sync_state::SyncState, + client::MainNodeClient, external_io::ExternalIO, sync_action::ActionQueue, + sync_state::SyncState, }; diff --git a/core/lib/zksync_core/src/sync_layer/sync_action.rs b/core/lib/zksync_core/src/sync_layer/sync_action.rs index ecbccdd237d..52914577373 100644 --- a/core/lib/zksync_core/src/sync_layer/sync_action.rs +++ b/core/lib/zksync_core/src/sync_layer/sync_action.rs @@ -1,5 +1,4 @@ use tokio::sync::mpsc; -use zksync_dal::blocks_dal::ConsensusBlockFields; use zksync_types::{Address, L1BatchNumber, MiniblockNumber, ProtocolVersionId, Transaction}; use super::metrics::QUEUE_METRICS; @@ -52,7 +51,7 @@ impl ActionQueueSender { return Err(format!("Unexpected Tx: {:?}", actions)); } } - SyncAction::SealMiniblock(_) | SyncAction::SealBatch { .. } => { + SyncAction::SealMiniblock | SyncAction::SealBatch { .. } => { if !opened || miniblock_sealed { return Err(format!("Unexpected SealMiniblock/SealBatch: {:?}", actions)); } @@ -145,13 +144,11 @@ pub(crate) enum SyncAction { /// that they are sealed, but at the same time the next miniblock may not exist yet. /// By having a dedicated action for that we prevent a situation where the miniblock is kept open on the EN until /// the next one is sealed on the main node. - SealMiniblock(Option<ConsensusBlockFields>), + SealMiniblock, /// Similarly to `SealMiniblock` we must be able to seal the batch even if there is no next miniblock yet. SealBatch { /// Virtual blocks count for the fictive miniblock. virtual_blocks: u32, - /// Consensus-related fields for the fictive miniblock. 
- consensus: Option<ConsensusBlockFields>, }, } @@ -204,14 +201,11 @@ mod tests { } fn seal_miniblock() -> SyncAction { - SyncAction::SealMiniblock(None) + SyncAction::SealMiniblock } fn seal_batch() -> SyncAction { - SyncAction::SealBatch { - virtual_blocks: 1, - consensus: None, - } + SyncAction::SealBatch { virtual_blocks: 1 } } #[test] diff --git a/core/lib/zksync_core/src/sync_layer/tests.rs b/core/lib/zksync_core/src/sync_layer/tests.rs index 011f8b608ff..35de5e597df 100644 --- a/core/lib/zksync_core/src/sync_layer/tests.rs +++ b/core/lib/zksync_core/src/sync_layer/tests.rs @@ -145,7 +145,7 @@ async fn external_io_basics() { let tx = create_l2_transaction(10, 100); let tx_hash = tx.hash(); let tx = SyncAction::Tx(Box::new(tx.into())); - let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock(None)]; + let actions = vec![open_l1_batch, tx, SyncAction::SealMiniblock]; let (actions_sender, action_queue) = ActionQueue::new(); let state_keeper = @@ -188,7 +188,7 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo }); let first_miniblock_actions: Vec<_> = iter::once(open_l1_batch) .chain(txs) - .chain([SyncAction::SealMiniblock(None)]) + .chain([SyncAction::SealMiniblock]) .collect(); let open_miniblock = SyncAction::Miniblock { @@ -202,7 +202,7 @@ pub(super) async fn run_state_keeper_with_multiple_miniblocks(pool: ConnectionPo }); let second_miniblock_actions: Vec<_> = iter::once(open_miniblock) .chain(more_txs) - .chain([SyncAction::SealMiniblock(None)]) + .chain([SyncAction::SealMiniblock]) .collect(); let tx_hashes = extract_tx_hashes( @@ -276,7 +276,7 @@ async fn test_external_io_recovery(pool: ConnectionPool, mut tx_hashes: Vec<H256 - SyncAction::SealMiniblock(_) => { + SyncAction::SealMiniblock => { assert_eq!(tx_count_in_miniblock, 1); } } @@ -545,7 +542,7 @@ async fn fetcher_with_real_server() { assert_eq!(tx.hash(), tx_hashes.pop_front().unwrap()); tx_count_in_miniblock += 1; } - SyncAction::SealMiniblock(_) => { + SyncAction::SealMiniblock => { assert_eq!( tx_count_in_miniblock, miniblock_number_to_tx_count[&current_miniblock_number] diff --git a/spellcheck/era.dic b/spellcheck/era.dic index 4fa6a82866d..dfe4cabdbf4 100644 --- a/spellcheck/era.dic +++ b/spellcheck/era.dic @@ -306,6 +306,7 @@ tokenomics validator validator's validator +Validators CHAINID PREVRANDAO ECDSA @@ -619,6 +620,7 @@ gluk emilluta // Programming related words +backfill bytecode bytecodes impl @@ -794,6 +796,7 @@ HealthCheck readonly upgrader startup +BFT PingCAP witgen ok