From 5d041331c091686edebe994363a710ad15f8ae1d Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Oct 2021 12:31:47 +0200 Subject: [PATCH 1/8] Remove light client, change tries and CHTs --- Cargo.lock | 20 - Cargo.toml | 1 - bin/node-template/node/src/chain_spec.rs | 1 - bin/node-template/node/src/service.rs | 3 - bin/node/cli/src/chain_spec.rs | 1 - bin/node/cli/src/service.rs | 7 +- bin/node/executor/tests/basic.rs | 72 +- bin/node/executor/tests/common.rs | 9 +- bin/node/executor/tests/fees.rs | 14 +- bin/node/executor/tests/submit_transaction.rs | 12 +- bin/node/primitives/src/lib.rs | 2 +- bin/node/rpc/src/lib.rs | 34 - bin/node/testing/src/bench.rs | 1 - bin/node/testing/src/client.rs | 6 +- bin/node/testing/src/genesis.rs | 11 +- client/api/src/backend.rs | 108 +- client/api/src/cht.rs | 474 ---- client/api/src/in_mem.rs | 121 +- client/api/src/lib.rs | 3 - client/api/src/light.rs | 372 --- client/api/src/proof_provider.rs | 25 +- .../basic-authorship/src/basic_authorship.rs | 12 +- client/block-builder/src/lib.rs | 15 +- client/consensus/aura/src/import_queue.rs | 12 +- client/consensus/aura/src/lib.rs | 71 +- client/consensus/babe/src/lib.rs | 30 +- client/consensus/babe/src/tests.rs | 22 +- client/consensus/babe/src/verification.rs | 13 +- client/consensus/common/src/block_import.rs | 7 +- client/consensus/manual-seal/src/consensus.rs | 4 +- .../manual-seal/src/consensus/babe.rs | 13 +- client/consensus/pow/src/lib.rs | 12 +- client/consensus/slots/src/lib.rs | 6 +- client/db/src/cache/list_cache.rs | 2351 ----------------- client/db/src/cache/list_entry.rs | 187 -- client/db/src/cache/list_storage.rs | 441 ---- client/db/src/cache/mod.rs | 413 --- client/db/src/changes_tries_storage.rs | 1168 -------- client/db/src/lib.rs | 258 +- client/db/src/light.rs | 1329 ---------- client/db/src/parity_db.rs | 6 +- client/db/src/utils.rs | 79 +- client/executor/src/integration_tests/mod.rs | 2 +- client/finality-grandpa/src/import.rs | 7 +- client/finality-grandpa/src/lib.rs | 5 +- client/finality-grandpa/src/tests.rs | 51 +- client/light/Cargo.toml | 27 - client/light/README.md | 3 - client/light/src/backend.rs | 578 ---- client/light/src/blockchain.rs | 219 -- client/light/src/call_executor.rs | 206 -- client/light/src/lib.rs | 41 - client/network/src/behaviour.rs | 41 +- client/network/src/config.rs | 6 - client/network/src/lib.rs | 1 - client/network/src/light_client_requests.rs | 267 -- .../src/light_client_requests/handler.rs | 116 +- .../src/light_client_requests/sender.rs | 1294 --------- client/network/src/on_demand_layer.rs | 241 -- client/network/src/service.rs | 37 - client/network/src/service/tests.rs | 1 - client/network/test/src/lib.rs | 183 +- client/network/test/src/sync.rs | 105 +- client/rpc/src/chain/chain_light.rs | 114 - client/rpc/src/chain/mod.rs | 25 - client/rpc/src/state/state_full.rs | 115 +- client/rpc/src/state/tests.rs | 27 +- client/service/src/builder.rs | 24 +- client/service/src/client/call_executor.rs | 15 +- client/service/src/client/client.rs | 399 +-- client/service/src/lib.rs | 2 +- client/service/test/Cargo.toml | 1 - client/service/test/src/client/mod.rs | 263 +- client/transaction-pool/src/api.rs | 129 +- client/transaction-pool/src/lib.rs | 32 +- frame/aura/src/lib.rs | 4 +- frame/babe/src/lib.rs | 2 +- frame/beefy-mmr/src/tests.rs | 2 +- frame/beefy/src/lib.rs | 4 +- frame/beefy/src/tests.rs | 3 +- frame/executive/src/lib.rs | 7 +- frame/grandpa/src/lib.rs | 2 +- frame/grandpa/src/mock.rs | 2 +- frame/support/src/storage/mod.rs | 15 +- 
frame/system/benchmarking/src/lib.rs | 21 +- frame/system/src/lib.rs | 64 +- .../api/proc-macro/src/impl_runtime_apis.rs | 5 - .../proc-macro/src/mock_impl_runtime_apis.rs | 4 - primitives/api/src/lib.rs | 5 +- primitives/api/test/tests/runtime_calls.rs | 2 +- primitives/blockchain/src/backend.rs | 31 - primitives/consensus/aura/src/digests.rs | 4 +- primitives/consensus/babe/src/digests.rs | 7 +- primitives/consensus/common/src/lib.rs | 7 +- primitives/core/src/changes_trie.rs | 321 --- primitives/core/src/lib.rs | 2 - primitives/externalities/src/lib.rs | 7 - primitives/io/src/lib.rs | 14 +- primitives/runtime-interface/test/src/lib.rs | 2 +- primitives/runtime/src/generic/digest.rs | 163 +- primitives/runtime/src/generic/header.rs | 10 +- primitives/runtime/src/generic/mod.rs | 2 +- primitives/runtime/src/generic/tests.rs | 11 +- primitives/runtime/src/testing.rs | 4 +- primitives/runtime/src/traits.rs | 16 +- primitives/state-machine/src/backend.rs | 26 - primitives/state-machine/src/basic.rs | 4 - .../state-machine/src/changes_trie/build.rs | 1083 -------- .../src/changes_trie/build_cache.rs | 278 -- .../src/changes_trie/build_iterator.rs | 487 ---- .../src/changes_trie/changes_iterator.rs | 748 ------ .../state-machine/src/changes_trie/input.rs | 207 -- .../state-machine/src/changes_trie/mod.rs | 428 --- .../state-machine/src/changes_trie/prune.rs | 204 -- .../state-machine/src/changes_trie/storage.rs | 214 -- .../src/changes_trie/surface_iterator.rs | 326 --- primitives/state-machine/src/ext.rs | 179 +- primitives/state-machine/src/lib.rs | 99 +- .../src/overlayed_changes/changeset.rs | 1 - .../src/overlayed_changes/mod.rs | 134 +- primitives/state-machine/src/read_only.rs | 4 - primitives/state-machine/src/testing.rs | 68 +- primitives/storage/src/lib.rs | 3 - primitives/tasks/src/async_externalities.rs | 4 - primitives/test-primitives/src/lib.rs | 8 +- test-utils/client/Cargo.toml | 1 - test-utils/client/src/lib.rs | 6 +- test-utils/runtime/client/Cargo.toml | 1 - .../runtime/client/src/block_builder_ext.rs | 13 - test-utils/runtime/client/src/lib.rs | 168 +- test-utils/runtime/src/genesismap.rs | 7 - test-utils/runtime/src/lib.rs | 13 +- test-utils/runtime/src/system.rs | 32 +- test-utils/test-runner/src/client.rs | 3 - test-utils/test-runner/src/node.rs | 6 +- utils/frame/benchmarking-cli/src/command.rs | 14 +- utils/frame/rpc/system/src/lib.rs | 89 +- .../cli/src/commands/follow_chain.rs | 3 +- utils/frame/try-runtime/cli/src/lib.rs | 3 +- 139 files changed, 450 insertions(+), 17480 deletions(-) delete mode 100644 client/api/src/cht.rs delete mode 100644 client/api/src/light.rs delete mode 100644 client/db/src/cache/list_cache.rs delete mode 100644 client/db/src/cache/list_entry.rs delete mode 100644 client/db/src/cache/list_storage.rs delete mode 100644 client/db/src/cache/mod.rs delete mode 100644 client/db/src/changes_tries_storage.rs delete mode 100644 client/db/src/light.rs delete mode 100644 client/light/Cargo.toml delete mode 100644 client/light/README.md delete mode 100644 client/light/src/backend.rs delete mode 100644 client/light/src/blockchain.rs delete mode 100644 client/light/src/call_executor.rs delete mode 100644 client/light/src/lib.rs delete mode 100644 client/network/src/light_client_requests/sender.rs delete mode 100644 client/network/src/on_demand_layer.rs delete mode 100644 client/rpc/src/chain/chain_light.rs delete mode 100644 primitives/core/src/changes_trie.rs delete mode 100644 primitives/state-machine/src/changes_trie/build.rs delete mode 100644 
primitives/state-machine/src/changes_trie/build_cache.rs delete mode 100644 primitives/state-machine/src/changes_trie/build_iterator.rs delete mode 100644 primitives/state-machine/src/changes_trie/changes_iterator.rs delete mode 100644 primitives/state-machine/src/changes_trie/input.rs delete mode 100644 primitives/state-machine/src/changes_trie/mod.rs delete mode 100644 primitives/state-machine/src/changes_trie/prune.rs delete mode 100644 primitives/state-machine/src/changes_trie/storage.rs delete mode 100644 primitives/state-machine/src/changes_trie/surface_iterator.rs diff --git a/Cargo.lock b/Cargo.lock index 84ad0c0a563c0..720ec241309d0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8051,23 +8051,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "sc-light" -version = "4.0.0-dev" -dependencies = [ - "hash-db", - "parity-scale-codec", - "parking_lot 0.11.1", - "sc-client-api", - "sc-executor", - "sp-api", - "sp-blockchain", - "sp-core", - "sp-externalities", - "sp-runtime", - "sp-state-machine", -] - [[package]] name = "sc-network" version = "0.10.0-dev" @@ -8398,7 +8381,6 @@ dependencies = [ "sc-client-db", "sc-consensus", "sc-executor", - "sc-light", "sc-network", "sc-service", "sc-transaction-pool-api", @@ -10003,7 +9985,6 @@ dependencies = [ "sc-client-db", "sc-consensus", "sc-executor", - "sc-light", "sc-offchain", "sc-service", "serde", @@ -10072,7 +10053,6 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-consensus", - "sc-light", "sp-api", "sp-blockchain", "sp-consensus", diff --git a/Cargo.toml b/Cargo.toml index 4a228203159eb..337d171bd15bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,7 +43,6 @@ members = [ "client/finality-grandpa", "client/informant", "client/keystore", - "client/light", "client/network", "client/network-gossip", "client/network/test", diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 7009b3be5c279..9eb9ed7db008b 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -134,7 +134,6 @@ fn testnet_genesis( system: SystemConfig { // Add Wasm runtime to storage. code: wasm_binary.to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { // Configure endowed accounts with initial balance of 1 << 60. 
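(Aside, not part of the patch: every genesis config in the tree loses its `changes_trie_config` field, as in the `testnet_genesis` hunk above. A minimal sketch of what the system section of a genesis config reduces to after this change, reusing the `wasm_binary` the chain spec already loads:

    system: SystemConfig {
        // The runtime code is the only remaining genesis system parameter;
        // there is no changes-trie configuration left to set.
        code: wasm_binary.to_vec(),
    },
)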
diff --git a/bin/node-template/node/src/service.rs index d673a54a94882..a12cc03360475 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -194,7 +194,6 @@ pub fn new_full(mut config: Configuration) -> Result transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: Some(warp_sync), })?; @@ -234,8 +233,6 @@ pub fn new_full(mut config: Configuration) -> Result task_manager: &mut task_manager, transaction_pool: transaction_pool.clone(), rpc_extensions_builder, - on_demand: None, - remote_blockchain: None, backend, system_rpc_tx, config, diff --git a/bin/node/cli/src/chain_spec.rs index 7b1ed90017c36..0e1e0aa2e677d 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -297,7 +297,6 @@ pub fn testnet_genesis( GenesisConfig { system: SystemConfig { code: wasm_binary_unwrap().to_vec(), - changes_trie_config: Default::default(), }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), diff --git a/bin/node/cli/src/service.rs index 2220614ebaf2a..159f5b8d57c48 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -336,7 +336,6 @@ pub fn new_full_base( transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: Some(warp_sync), })?; @@ -367,8 +366,6 @@ pub fn new_full_base( rpc_extensions_builder: Box::new(rpc_extensions_builder), transaction_pool: transaction_pool.clone(), task_manager: &mut task_manager, - on_demand: None, - remote_blockchain: None, system_rpc_tx, telemetry: telemetry.as_mut(), })?; @@ -532,7 +529,7 @@ mod tests { use sc_service_test::TestNetNode; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; use sp_consensus::{BlockOrigin, Environment, Proposer}; - use sp_core::{crypto::Pair as CryptoPair, Public, H256}; + use sp_core::{crypto::Pair as CryptoPair, Public}; use sp_inherents::InherentDataProvider; use sp_keyring::AccountKeyring; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; @@ -611,7 +608,7 @@ mod tests { None, ); - let mut digest = Digest::<H256>::default(); + let mut digest = Digest::default(); // even though there's only one authority some slots might be empty, // so we must keep trying the next slots until we can claim one. 
diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index bbb9339189b06..b7df7bf0bd41a 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -29,7 +29,7 @@ use sp_runtime::{ use node_primitives::{Balance, Hash}; use node_runtime::{ constants::{currency::*, time::SLOT_DURATION}, - Balances, Block, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, + Balances, Call, CheckedExtrinsic, Event, Header, Runtime, System, TransactionPayment, UncheckedExtrinsic, }; use node_testing::keyring::*; @@ -78,7 +78,7 @@ fn set_heap_pages(ext: &mut E, heap_pages: u64) { fn changes_trie_block() -> (Vec, Hash) { let time = 42 * 1000; construct_block( - &mut new_test_ext(compact_code_unwrap(), true), + &mut new_test_ext(compact_code_unwrap()), 1, GENESIS_HASH.into(), vec![ @@ -102,7 +102,7 @@ fn changes_trie_block() -> (Vec, Hash) { /// are not guaranteed to be deterministic) and to ensure that the correct state is propagated /// from block1's execution to block2 to derive the correct storage_root. fn blocks() -> ((Vec, Hash), (Vec, Hash)) { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let time1 = 42 * 1000; let block1 = construct_block( &mut t, @@ -160,7 +160,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { construct_block( - &mut new_test_ext(compact_code_unwrap(), false), + &mut new_test_ext(compact_code_unwrap()), 1, GENESIS_HASH.into(), vec![ @@ -179,7 +179,7 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { #[test] fn panic_execution_with_foreign_code_gives_error() { - let mut t = new_test_ext(bloaty_code_unwrap(), false); + let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), (69u128, 0u32, 0u128, 0u128, 0u128).encode(), @@ -211,7 +211,7 @@ fn panic_execution_with_foreign_code_gives_error() { #[test] fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), (0u32, 0u32, 0u32, 69u128, 0u128, 0u128, 0u128).encode(), @@ -243,7 +243,7 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { #[test] fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { @@ -296,7 +296,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { #[test] fn successful_execution_with_foreign_code_gives_ok() { - let mut t = new_test_ext(bloaty_code_unwrap(), false); + let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { @@ -349,7 +349,7 @@ fn successful_execution_with_foreign_code_gives_ok() { #[test] fn full_native_block_import_works() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (block1, block2) = blocks(); @@ -529,7 +529,7 @@ fn full_native_block_import_works() { #[test] fn full_wasm_block_import_works() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (block1, block2) = blocks(); @@ -679,7 +679,7 @@ fn deploying_wasm_contract_should_work() { let time = 42 * 1000; let b = construct_block( - &mut 
new_test_ext(compact_code_unwrap(), false), + &mut new_test_ext(compact_code_unwrap()), 1, GENESIS_HASH.into(), vec![ @@ -712,7 +712,7 @@ fn deploying_wasm_contract_should_work() { (time / SLOT_DURATION).into(), ); - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) .0 @@ -727,7 +727,7 @@ fn deploying_wasm_contract_should_work() { #[test] fn wasm_big_block_import_fails() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); set_heap_pages(&mut t.ext(), 4); @@ -744,7 +744,7 @@ fn wasm_big_block_import_fails() { #[test] fn native_big_block_import_succeeds() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); executor_call:: _>( &mut t, @@ -759,7 +759,7 @@ fn native_big_block_import_succeeds() { #[test] fn native_big_block_import_fails_on_fallback() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); // We set the heap pages to 8 because we know that should give an OOM in WASM with the given // block. @@ -778,7 +778,7 @@ fn native_big_block_import_fails_on_fallback() { #[test] fn panic_execution_gives_error() { - let mut t = new_test_ext(bloaty_code_unwrap(), false); + let mut t = new_test_ext(bloaty_code_unwrap()); t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { @@ -815,7 +815,7 @@ fn panic_execution_gives_error() { #[test] fn successful_execution_gives_ok() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); t.insert( >::hashed_key_for(alice()), AccountInfo::<::Index, _> { @@ -874,44 +874,6 @@ fn successful_execution_gives_ok() { }); } -#[test] -fn full_native_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - let block_data = block1.0; - let block = Block::decode(&mut &block_data[..]).unwrap(); - - let mut t = new_test_ext(compact_code_unwrap(), true); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block.encode(), - true, - None, - ) - .0 - .unwrap(); - - assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); -} - -#[test] -fn full_wasm_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - - let mut t = new_test_ext(compact_code_unwrap(), true); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - false, - None, - ) - .0 - .unwrap(); - - assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); -} - #[test] fn should_import_block_with_test_client() { use node_testing::client::{ diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index d1c24c83c836d..18da5af1b0fac 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -81,7 +81,7 @@ pub const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version; pub const TRANSACTION_VERSION: u32 = node_runtime::VERSION.transaction_version; -pub type TestExternalities = CoreTestExternalities; +pub type TestExternalities = CoreTestExternalities; pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { node_testing::keyring::sign(xt, SPEC_VERSION, TRANSACTION_VERSION, GENESIS_HASH) @@ -123,14 +123,13 @@ pub fn executor_call< executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) } -pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { - let mut ext = 
TestExternalities::new_with_code( +pub fn new_test_ext(code: &[u8]) -> TestExternalities<BlakeTwo256> { + let ext = TestExternalities::new_with_code( code, - node_testing::genesis::config(support_changes_trie, Some(code)) + node_testing::genesis::config(Some(code)) .build_storage() .unwrap(), ); - ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default()); ext } diff --git a/bin/node/executor/tests/fees.rs index 379cdda5b76a3..d93d68f6e75ed 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -36,7 +36,7 @@ use self::common::{sign, *}; #[test] fn fee_multiplier_increases_and_decreases_on_big_weight() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); // initial fee multiplier must be one. let mut prev_multiplier = Multiplier::one(); @@ -45,7 +45,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier); }); - let mut tt = new_test_ext(compact_code_unwrap(), false); + let mut tt = new_test_ext(compact_code_unwrap()); let time1 = 42 * 1000; // big one in terms of weight. @@ -151,7 +151,7 @@ fn transaction_fee_is_correct() { // - 1 MILLICENTS in substrate node. // - 1 milli-dot based on current polkadot runtime. // (this is based on assigning 0.1 CENT to the cheapest tx with `weight = 100`) - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); t.insert(<frame_system::Account<Runtime>>::hashed_key_for(alice()), new_account_info(100)); t.insert(<frame_system::Account<Runtime>>::hashed_key_for(bob()), new_account_info(10)); t.insert( @@ -226,9 +226,9 @@ fn block_weight_capacity_report() { use node_primitives::Index; // execution ext. - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); // setup ext. - let mut tt = new_test_ext(compact_code_unwrap(), false); + let mut tt = new_test_ext(compact_code_unwrap()); let factor = 50; let mut time = 10; @@ -300,9 +300,9 @@ fn block_length_capacity_report() { use node_primitives::Index; // execution ext. - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); // setup ext. 
- let mut tt = new_test_ext(compact_code_unwrap(), false); + let mut tt = new_test_ext(compact_code_unwrap()); let factor = 256 * 1024; let mut time = 10; diff --git a/bin/node/executor/tests/submit_transaction.rs index 19ca8e5677c43..f047c6a44a667 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -28,7 +28,7 @@ use self::common::*; #[test] fn should_submit_unsigned_transaction() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); @@ -56,7 +56,7 @@ const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put c #[test] fn should_submit_signed_transaction() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); @@ -99,7 +99,7 @@ fn should_submit_signed_transaction() { #[test] fn should_submit_signed_twice_from_the_same_account() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); @@ -156,7 +156,7 @@ fn should_submit_signed_twice_from_the_same_account() { #[test] fn should_submit_signed_twice_from_all_accounts() { - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); @@ -220,7 +220,7 @@ fn submitted_transaction_should_be_valid() { transaction_validity::{TransactionSource, TransactionTag}, }; - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); let (pool, state) = TestTransactionPoolExt::new(); t.register_extension(TransactionPoolExt::new(pool)); @@ -249,7 +249,7 @@ fn submitted_transaction_should_be_valid() { // check that transaction is valid, but reset environment storage, // since CreateTransaction increments the nonce let tx0 = state.read().transactions[0].clone(); - let mut t = new_test_ext(compact_code_unwrap(), false); + let mut t = new_test_ext(compact_code_unwrap()); t.execute_with(|| { let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); diff --git a/bin/node/primitives/src/lib.rs index dade598c704d2..fc3bd5f5114e3 100644 --- a/bin/node/primitives/src/lib.rs +++ b/bin/node/primitives/src/lib.rs @@ -57,7 +57,7 @@ pub type Hash = sp_core::H256; pub type Timestamp = u64; /// Digest item type. -pub type DigestItem = generic::DigestItem<Hash>; +pub type DigestItem = generic::DigestItem; /// Header type. pub type Header = generic::Header<BlockNumber, BlakeTwo256>; /// Block type. diff --git a/bin/node/rpc/src/lib.rs index 2f7862d3d2644..c0c79fbe4f5fa 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -51,18 +51,6 @@ use sp_consensus::SelectChain; use sp_consensus_babe::BabeApi; use sp_keystore::SyncCryptoStorePtr; -/// Light client extra dependencies. -pub struct LightDeps<C, F, P> { - /// The client instance to use. - pub client: Arc<C>, - /// Transaction pool instance. - pub pool: Arc<P>
, - /// Remote access to the blockchain (async). - pub remote_blockchain: Arc<dyn RemoteBlockchain<Block>>, - /// Fetcher instance. - pub fetcher: Arc<F>, -} - /// Extra dependencies for BABE. pub struct BabeDeps { /// BABE protocol config. @@ -184,25 +172,3 @@ where Ok(io) } -/// Instantiate all Light RPC extensions. -pub fn create_light<C, F, P, M>(deps: LightDeps<C, F, P>) -> jsonrpc_core::IoHandler<M> -where - C: sp_blockchain::HeaderBackend<Block>, - C: Send + Sync + 'static, - F: sc_client_api::light::Fetcher<Block> + 'static, - P: TransactionPool + 'static, - M: jsonrpc_core::Metadata + Default, -{ - use substrate_frame_rpc_system::{LightSystem, SystemApi}; - - let LightDeps { client, pool, remote_blockchain, fetcher } = deps; - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with(SystemApi::<Hash, AccountId, Index>::to_delegate(LightSystem::new( - client, - remote_blockchain, - fetcher, - pool, - ))); - - io -} diff --git a/bin/node/testing/src/bench.rs index cf0a463cc3e99..9a6f328ad8d07 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -581,7 +581,6 @@ impl BenchKeyring { /// Generate genesis with accounts from this keyring endowed with some balance. pub fn generate_genesis(&self) -> node_runtime::GenesisConfig { crate::genesis::config_endowed( - false, Some(node_runtime::wasm_binary_unwrap()), self.collect_account_ids(), ) } diff --git a/bin/node/testing/src/client.rs index 8bd75834c5496..4852e33de6070 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -42,13 +42,11 @@ pub type Transaction = sc_client_api::backend::TransactionFor Storage { - crate::genesis::config(self.support_changes_trie, None).build_storage().unwrap() + crate::genesis::config(None).build_storage().unwrap() } } diff --git a/bin/node/testing/src/genesis.rs index 80399a6670e86..2afefd9bd3f20 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -24,19 +24,17 @@ use node_runtime::{ GenesisConfig, GrandpaConfig, IndicesConfig, SessionConfig, SocietyConfig, StakerStatus, StakingConfig, SystemConfig, BABE_GENESIS_EPOCH_CONFIG, }; -use sp_core::ChangesTrieConfiguration; use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. -pub fn config(support_changes_trie: bool, code: Option<&[u8]>) -> GenesisConfig { - config_endowed(support_changes_trie, code, Default::default()) +pub fn config(code: Option<&[u8]>) -> GenesisConfig { + config_endowed(code, Default::default()) } /// Create genesis runtime configuration for tests with some extra /// endowed accounts. 
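/// (Aside, not part of the patch: `config_endowed` below drops its leading
/// `support_changes_trie: bool` parameter; the call sites shown above in
/// `bench.rs` and `client.rs` drop their corresponding arguments to match.)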
pub fn config_endowed( - support_changes_trie: bool, code: Option<&[u8]>, extra_endowed: Vec, ) -> GenesisConfig { @@ -53,11 +51,6 @@ pub fn config_endowed( GenesisConfig { system: SystemConfig { - changes_trie_config: if support_changes_trie { - Some(ChangesTrieConfiguration { digest_interval: 2, digest_levels: 2 }) - } else { - None - }, code: code.map(|x| x.to_vec()).unwrap_or_else(|| wasm_binary_unwrap().to_vec()), }, indices: IndicesConfig { indices: vec![] }, diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 8b5bd50ffa614..ee5ede6fdeed6 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -20,27 +20,22 @@ use crate::{ blockchain::{well_known_cache_keys, Backend as BlockchainBackend}, - light::RemoteBlockchain, UsageInfo, }; use parking_lot::RwLock; use sp_blockchain; use sp_consensus::BlockOrigin; -use sp_core::{offchain::OffchainStorage, ChangesTrieConfigurationRange}; +use sp_core::offchain::OffchainStorage; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, Justification, Justifications, Storage, }; use sp_state_machine::{ - ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, ChildStorageCollection, IndexOperation, OffchainChangesCollection, StorageCollection, }; -use sp_storage::{ChildInfo, PrefixedStorageKey, StorageData, StorageKey}; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; +use sp_storage::{ChildInfo, StorageData, StorageKey}; +use std::collections::{HashMap, HashSet}; pub use sp_state_machine::Backend as StateBackend; use std::marker::PhantomData; @@ -191,12 +186,6 @@ pub trait BlockImportOperation { Ok(()) } - /// Inject changes trie data into the database. - fn update_changes_trie( - &mut self, - update: ChangesTrieTransaction, NumberFor>, - ) -> sp_blockchain::Result<()>; - /// Insert auxiliary keys. /// /// Values are `None` if should be deleted. @@ -418,28 +407,6 @@ pub trait StorageProvider> { child_info: &ChildInfo, key: &StorageKey, ) -> sp_blockchain::Result>; - - /// Get longest range within [first; last] that is possible to use in `key_changes` - /// and `key_changes_proof` calls. - /// Range could be shortened from the beginning if some changes tries have been pruned. - /// Returns Ok(None) if changes tries are not supported. - fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> sp_blockchain::Result, BlockId)>>; - - /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. - /// Works only for runtimes that are supporting changes tries. - /// - /// Changes are returned in descending order (i.e. last block comes first). - fn key_changes( - &self, - first: NumberFor, - last: BlockId, - storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result, u32)>>; } /// Client backend. @@ -504,9 +471,6 @@ pub trait Backend: AuxStore + Send + Sync { /// Returns current usage statistics. fn usage_info(&self) -> Option; - /// Returns reference to changes trie storage. - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>; - /// Returns a handle to offchain storage. fn offchain_storage(&self) -> Option; @@ -561,72 +525,6 @@ pub trait Backend: AuxStore + Send + Sync { fn get_import_lock(&self) -> &RwLock<()>; } -/// Changes trie storage that supports pruning. -pub trait PrunableStateChangesTrieStorage: - StateChangesTrieStorage, NumberFor> -{ - /// Get reference to StateChangesTrieStorage. 
- fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; - /// Get configuration at given block. - fn configuration_at( - &self, - at: &BlockId, - ) -> sp_blockchain::Result, Block::Hash>>; - /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. - /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if - /// created). - fn oldest_pruned_digest_range_end(&self) -> NumberFor; -} - /// Mark for all Backend implementations, that are making use of state data, stored locally. pub trait LocalBackend: Backend {} -/// Mark for all Backend implementations, that are fetching required state data from remote nodes. -pub trait RemoteBackend: Backend { - /// Returns true if the state for given block is available locally. - fn is_local_state_available(&self, block: &BlockId) -> bool; - - /// Returns reference to blockchain backend. - /// - /// Returned backend either resolves blockchain data - /// locally, or prepares request to fetch that data from remote node. - fn remote_blockchain(&self) -> Arc>; -} - -/// Return changes tries state at given block. -pub fn changes_tries_state_at_block<'a, Block: BlockT>( - block: &BlockId, - maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, -) -> sp_blockchain::Result, NumberFor>>> { - let storage = match maybe_storage { - Some(storage) => storage, - None => return Ok(None), - }; - - let config_range = storage.configuration_at(block)?; - match config_range.config { - Some(config) => - Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), - None => Ok(None), - } -} - -/// Provide CHT roots. These are stored on a light client and generated dynamically on a full -/// client. -pub trait ProvideChtRoots { - /// Get headers CHT root for given block. Returns None if the block is not a part of any CHT. - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result>; - - /// Get changes trie CHT root for given block. Returns None if the block is not a part of any - /// CHT. - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result>; -} diff --git a/client/api/src/cht.rs b/client/api/src/cht.rs deleted file mode 100644 index ee7854b5d8297..0000000000000 --- a/client/api/src/cht.rs +++ /dev/null @@ -1,474 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Canonical hash trie definitions and helper functions. -//! -//! Each CHT is a trie mapping block numbers to canonical hash. -//! One is generated for every `SIZE` blocks, allowing us to discard those blocks in -//! favor of the trie root. 
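-//! (Aside, not part of the patch — a worked example of the numbering this file
-//! implements: with `SIZE = 2048`, block 1 falls into CHT 0, which covers
-//! blocks 1..=2048, and block 2049 opens CHT 1; in general
-//! `block_to_cht_number(b) = (b - 1) / SIZE` and CHT `N` covers blocks
-//! `N * SIZE + 1 ..= (N + 1) * SIZE`, see `start_number`/`end_number`.)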
When the "ancient" blocks need to be accessed, we simply -//! request an inclusion proof of a specific block number against the trie with the -//! root hash. A correct proof implies that the claimed block is identical to the one -//! we discarded. - -use codec::Encode; -use hash_db; -use sp_trie; - -use sp_core::{convert_hash, H256}; -use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; -use sp_state_machine::{ - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, - Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, -}; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -/// The size of each CHT. This value is passed to every CHT-related function from -/// production code. Other values are passed from tests. -const SIZE: u32 = 2048; - -/// Gets default CHT size. -pub fn size>() -> N { - SIZE.into() -} - -/// Returns Some(cht_number) if CHT is need to be built when the block with given number is -/// canonized. -pub fn is_build_required(cht_size: N, block_num: N) -> Option -where - N: Clone + AtLeast32Bit, -{ - let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; - let two = N::one() + N::one(); - if block_cht_num < two { - return None - } - let cht_start = start_number(cht_size, block_cht_num.clone()); - if cht_start != block_num { - return None - } - - Some(block_cht_num - two) -} - -/// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number. -pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option -where - N: Clone + AtLeast32Bit, -{ - let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; - let two = N::one() + N::one(); - if max_cht_number < two { - return None - } - Some(max_cht_number - two) -} - -/// Compute a CHT root from an iterator of block hashes. Fails if shorter than -/// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`. -/// Discards the trie's nodes. -pub fn compute_root( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I, -) -> ClientResult -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, -{ - use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root(build_pairs::( - cht_size, cht_num, hashes, - )?)) -} - -/// Build CHT-based header proof. -pub fn build_proof( - cht_size: Header::Number, - cht_num: Header::Number, - blocks: BlocksI, - hashes: HashesI, -) -> ClientResult -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, -{ - let transaction = build_pairs::(cht_size, cht_num, hashes)? - .into_iter() - .map(|(k, v)| (k, Some(v))) - .collect::>(); - let storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage - .as_trie_backend() - .expect("InMemoryState::as_trie_backend always returns Some; qed"); - prove_read_on_trie_backend( - trie_storage, - blocks.into_iter().map(|number| encode_cht_key(number)), - ) - .map_err(ClientError::from_state) -} - -/// Check CHT-based header proof. 
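-/// (Aside, not part of the patch, on the flow deleted here: the checker reads
-/// the key `encode_cht_key(local_number)` from the proof against the trusted
-/// `local_root`, decodes the stored hash via `decode_cht_value`, and accepts
-/// only if it equals `remote_hash`; see `do_check_proof` below.)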
-pub fn check_proof<Header, Hasher>( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - remote_proof: StorageProof, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, -{ - do_check_proof::<Header, Hasher, _>( - local_root, - local_number, - remote_hash, - move |local_root, local_cht_key| { - read_proof_check::<Hasher, _>( - local_root, - remote_proof, - ::std::iter::once(local_cht_key), - ) - .map(|mut map| map.remove(local_cht_key).expect("checked proof of local_cht_key; qed")) - .map_err(ClientError::from_state) - }, - ) -} - -/// Check CHT-based header proof on pre-created proving backend. -pub fn check_proof_on_proving_backend<Header, Hasher>( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - proving_backend: &TrieBackend<MemoryDB<Hasher>, Hasher>, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, -{ - do_check_proof::<Header, Hasher, _>( - local_root, - local_number, - remote_hash, - |_, local_cht_key| { - read_proof_check_on_proving_backend::<Hasher>(proving_backend, local_cht_key) - .map_err(ClientError::from_state) - }, - ) -} - -/// Check CHT-based header proof using passed checker function. -fn do_check_proof<Header, Hasher, F>( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - checker: F, -) -> ClientResult<()> -where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult<Option<Vec<u8>>>, -{ - let root: Hasher::Out = convert_hash(&local_root); - let local_cht_key = encode_cht_key(local_number); - let local_cht_value = checker(root, &local_cht_key)?; - let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; - let local_hash = - decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; - match &local_hash[..] == remote_hash.as_ref() { - true => Ok(()), - false => Err(ClientError::InvalidCHTProof.into()), - } -} - -/// Group ordered blocks by CHT number and call functor with blocks of each group. -pub fn for_each_cht_group<Header, I, F, P>( - cht_size: Header::Number, - blocks: I, - mut functor: F, - mut functor_param: P, -) -> ClientResult<()> -where - Header: HeaderT, - I: IntoIterator<Item = Header::Number>, - F: FnMut(P, Header::Number, Vec<Header::Number>) -> ClientResult<P>
, -{ - let mut current_cht_num = None; - let mut current_cht_blocks = Vec::new(); - for block in blocks { - let new_cht_num = block_to_cht_number(cht_size, block).ok_or_else(|| { - ClientError::Backend(format!("Cannot compute CHT root for the block #{}", block)) - })?; - - let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); - if advance_to_next_cht { - let current_cht_num = current_cht_num.expect( - "advance_to_next_cht is true; - it is true only when current_cht_num is Some; qed", - ); - assert!( - new_cht_num > current_cht_num, - "for_each_cht_group only supports ordered iterators" - ); - - functor_param = - functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; - } - - current_cht_blocks.push(block); - current_cht_num = Some(new_cht_num); - } - - if let Some(current_cht_num) = current_cht_num { - functor(functor_param, current_cht_num, std::mem::take(&mut current_cht_blocks))?; - } - - Ok(()) -} - -/// Build pairs for computing CHT. -fn build_pairs( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I, -) -> ClientResult, Vec)>> -where - Header: HeaderT, - I: IntoIterator>>, -{ - let start_num = start_number(cht_size, cht_num); - let mut pairs = Vec::new(); - let mut hash_index = Header::Number::zero(); - for hash in hashes.into_iter() { - let hash = - hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?; - pairs.push((encode_cht_key(start_num + hash_index).to_vec(), encode_cht_value(hash))); - hash_index += Header::Number::one(); - if hash_index == cht_size { - break - } - } - - if hash_index == cht_size { - Ok(pairs) - } else { - Err(ClientError::MissingHashRequiredForCHT) - } -} - -/// Get the starting block of a given CHT. -/// CHT 0 includes block 1...SIZE, -/// CHT 1 includes block SIZE + 1 ... 2*SIZE -/// More generally: CHT N includes block (1 + N*SIZE)...((N+1)*SIZE). -/// This is because the genesis hash is assumed to be known -/// and including it would be redundant. -pub fn start_number(cht_size: N, cht_num: N) -> N { - (cht_num * cht_size) + N::one() -} - -/// Get the ending block of a given CHT. -pub fn end_number(cht_size: N, cht_num: N) -> N { - (cht_num + N::one()) * cht_size -} - -/// Convert a block number to a CHT number. -/// Returns `None` for `block_num` == 0, `Some` otherwise. -pub fn block_to_cht_number(cht_size: N, block_num: N) -> Option { - if block_num == N::zero() { - None - } else { - Some((block_num - N::one()) / cht_size) - } -} - -/// Convert header number into CHT key. -pub fn encode_cht_key(number: N) -> Vec { - number.encode() -} - -/// Convert header hash into CHT value. -fn encode_cht_value>(hash: Hash) -> Vec { - hash.as_ref().to_vec() -} - -/// Convert CHT value into block header hash. 
-pub fn decode_cht_value(value: &[u8]) -> Option { - match value.len() { - 32 => Some(H256::from_slice(&value[0..32])), - _ => None, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use sp_runtime::{generic, traits::BlakeTwo256}; - - type Header = generic::Header; - - #[test] - fn is_build_required_works() { - assert_eq!(is_build_required(SIZE, 0u32.into()), None); - assert_eq!(is_build_required(SIZE, 1u32.into()), None); - assert_eq!(is_build_required(SIZE, SIZE), None); - assert_eq!(is_build_required(SIZE, SIZE + 1), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None); - } - - #[test] - fn max_cht_number_works() { - assert_eq!(max_cht_number(SIZE, 0u32.into()), None); - assert_eq!(max_cht_number(SIZE, 1u32.into()), None); - assert_eq!(max_cht_number(SIZE, SIZE), None); - assert_eq!(max_cht_number(SIZE, SIZE + 1), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1)); - } - - #[test] - fn start_number_works() { - assert_eq!(start_number(SIZE, 0u32), 1u32); - assert_eq!(start_number(SIZE, 1u32), SIZE + 1); - assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1); - } - - #[test] - fn end_number_works() { - assert_eq!(end_number(SIZE, 0u32), SIZE); - assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE); - assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE); - } - - #[test] - fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::( - SIZE as _, - 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) - ) - .is_err()); - } - - #[test] - fn build_pairs_fails_when_missing_block() { - assert!(build_pairs::( - SIZE as _, - 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize / 2) - .chain(::std::iter::once(Ok(None))) - .chain( - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) - .take(SIZE as usize / 2 - 1) - ) - ) - .is_err()); - } - - #[test] - fn compute_root_works() { - assert!(compute_root::( - SIZE as _, - 42, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_ok()); - } - - #[test] - #[should_panic] - fn build_proof_panics_when_querying_wrong_block() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_err()); - } - - #[test] - fn build_proof_works() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) - ) - .is_ok()); - } - - #[test] - #[should_panic] - fn for_each_cht_group_panics() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![cht_size * 5, cht_size * 2], - |_, _, _| Ok(()), - (), - ); - } - - #[test] - fn for_each_cht_group_works() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![ - cht_size * 2 + 1, - cht_size * 2 + 2, - cht_size * 2 + 5, - cht_size * 4 + 
1, - cht_size * 4 + 7, - cht_size * 6 + 1, - ], - |_, cht_num, blocks| { - match cht_num { - 2 => assert_eq!( - blocks, - vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5] - ), - 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), - 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), - _ => unreachable!(), - } - - Ok(()) - }, - (), - ); - } -} diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 2f4327dfc4e4a..454920df0e0c3 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -29,7 +29,7 @@ use sp_runtime::{ Justification, Justifications, Storage, }; use sp_state_machine::{ - Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation, StorageCollection, }; use std::{ @@ -39,10 +39,10 @@ use std::{ }; use crate::{ - backend::{self, NewBlockState, ProvideChtRoots}, + backend::{self, NewBlockState}, blockchain::{self, well_known_cache_keys::Id as CacheKeyId, BlockStatus, HeaderBackend}, leaves::LeafSet, - light, UsageInfo, + UsageInfo, }; struct PendingBlock { @@ -109,7 +109,6 @@ struct BlockchainStorage { finalized_number: NumberFor, genesis_hash: Block::Hash, header_cht_roots: HashMap, Block::Hash>, - changes_trie_cht_roots: HashMap, Block::Hash>, leaves: LeafSet>, aux: HashMap, Vec>, } @@ -152,7 +151,6 @@ impl Blockchain { finalized_number: Zero::zero(), genesis_hash: Default::default(), header_cht_roots: HashMap::new(), - changes_trie_cht_roots: HashMap::new(), leaves: LeafSet::new(), aux: HashMap::new(), })); @@ -442,10 +440,6 @@ impl blockchain::Backend for Blockchain { Ok(self.storage.read().finalized_hash.clone()) } - fn cache(&self) -> Option>> { - None - } - fn leaves(&self) -> sp_blockchain::Result> { Ok(self.storage.read().leaves.hashes()) } @@ -466,12 +460,6 @@ impl blockchain::Backend for Blockchain { } } -impl blockchain::ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - None - } -} - impl backend::AuxStore for Blockchain { fn insert_aux< 'a, @@ -499,82 +487,6 @@ impl backend::AuxStore for Blockchain { } } -impl light::Storage for Blockchain -where - Block::Hash: From<[u8; 32]>, -{ - fn import_header( - &self, - header: Block::Header, - _cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> sp_blockchain::Result<()> { - let hash = header.hash(); - self.insert(hash, header, None, None, state)?; - - self.write_aux(aux_ops); - Ok(()) - } - - fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - Blockchain::set_head(self, id) - } - - fn last_finalized(&self) -> sp_blockchain::Result { - Ok(self.storage.read().finalized_hash.clone()) - } - - fn finalize_header(&self, id: BlockId) -> sp_blockchain::Result<()> { - Blockchain::finalize_header(self, id, None) - } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } -} - -impl ProvideChtRoots for Blockchain { - fn header_cht_root( - &self, - _cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage - .read() - .header_cht_roots - .get(&block) - .cloned() - .ok_or_else(|| { - sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block)) - }) - .map(Some) - } - - fn changes_trie_cht_root( - &self, - _cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage - .read() - .changes_trie_cht_roots - .get(&block) - .cloned() - .ok_or_else(|| { - sp_blockchain::Error::Backend(format!( - "Changes trie CHT 
for block {} not exists", - block - )) - }) - .map(Some) - } -} - /// In-memory operation. pub struct BlockImportOperation { pending_block: Option>, @@ -650,13 +562,6 @@ where Ok(()) } - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> sp_blockchain::Result<()> { - Ok(()) - } - fn set_genesis_state( &mut self, storage: Storage, @@ -846,10 +751,6 @@ where None } - fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> { - None - } - fn offchain_storage(&self) -> Option { None } @@ -885,22 +786,6 @@ where impl backend::LocalBackend for Backend where Block::Hash: Ord {} -impl backend::RemoteBackend for Backend -where - Block::Hash: Ord, -{ - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.blockchain - .expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - unimplemented!() - } -} - /// Check that genesis storage is valid. pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 16935b1e846cf..e0466ff39dbdd 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -21,19 +21,16 @@ pub mod backend; pub mod call_executor; -pub mod cht; pub mod client; pub mod execution_extensions; pub mod in_mem; pub mod leaves; -pub mod light; pub mod notifications; pub mod proof_provider; pub use backend::*; pub use call_executor::*; pub use client::*; -pub use light::*; pub use notifications::*; pub use proof_provider::*; pub use sp_blockchain as blockchain; diff --git a/client/api/src/light.rs b/client/api/src/light.rs deleted file mode 100644 index 8638ddf741f30..0000000000000 --- a/client/api/src/light.rs +++ /dev/null @@ -1,372 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Substrate light client interfaces - -use std::{ - collections::{BTreeMap, HashMap}, - future::Future, - sync::Arc, -}; - -use crate::{ - backend::{AuxStore, NewBlockState}, - ProvideChtRoots, UsageInfo, -}; -use sp_blockchain::{ - well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderBackend, - HeaderMetadata, Result as ClientResult, -}; -use sp_core::{storage::PrefixedStorageKey, ChangesTrieConfigurationRange}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; -use sp_state_machine::StorageProof; - -/// Remote call request. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteCallRequest { - /// Call at state of given block. - pub block: Header::Hash, - /// Header of block at which call is performed. - pub header: Header, - /// Method to call. 
- pub method: String, - /// Call data. - pub call_data: Vec, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, -} - -/// Remote canonical header request. -#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] -pub struct RemoteHeaderRequest { - /// The root of CHT this block is included in. - pub cht_root: Header::Hash, - /// Number of the header to query. - pub block: Header::Number, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, -} - -/// Remote storage read request. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadRequest { - /// Read at state of given block. - pub block: Header::Hash, - /// Header of block at which read is performed. - pub header: Header, - /// Storage key to read. - pub keys: Vec>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, -} - -/// Remote storage read child request. -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub struct RemoteReadChildRequest { - /// Read at state of given block. - pub block: Header::Hash, - /// Header of block at which read is performed. - pub header: Header, - /// Storage key for child. - pub storage_key: PrefixedStorageKey, - /// Child storage key to read. - pub keys: Vec>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, -} - -/// Remote key changes read request. -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct RemoteChangesRequest { - /// All changes trie configurations that are valid within [first_block; last_block]. - pub changes_trie_configs: Vec>, - /// Query changes from range of blocks, starting (and including) with this hash... - pub first_block: (Header::Number, Header::Hash), - /// ...ending (and including) with this hash. Should come after first_block and - /// be the part of the same fork. - pub last_block: (Header::Number, Header::Hash), - /// Only use digests from blocks up to this hash. Should be last_block OR come - /// after this block and be the part of the same fork. - pub max_block: (Header::Number, Header::Hash), - /// Known changes trie roots for the range of blocks [tries_roots.0..max_block]. - /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. - pub tries_roots: (Header::Number, Header::Hash, Vec), - /// Optional Child Storage key to read. - pub storage_key: Option, - /// Storage key to read. - pub key: Vec, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, -} - -/// Key changes read proof. -#[derive(Debug, PartialEq, Eq)] -pub struct ChangesProof { - /// Max block that has been used in changes query. - pub max_block: Header::Number, - /// All touched nodes of all changes tries. - pub proof: Vec>, - /// All changes tries roots that have been touched AND are missing from - /// the requester' node. It is a map of block number => changes trie root. - pub roots: BTreeMap, - /// The proofs for all changes tries roots that have been touched AND are - /// missing from the requester' node. It is a map of CHT number => proof. - pub roots_proof: StorageProof, -} - -/// Remote block body request -#[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] -pub struct RemoteBodyRequest { - /// Header of the requested block body - pub header: Header, - /// Number of times to retry request. None means that default RETRY_COUNT is used. 
- pub retry_count: Option, -} - -/// Light client data fetcher. Implementations of this trait must check if remote data -/// is correct (see FetchedDataChecker) and return already checked data. -pub trait Fetcher: Send + Sync { - /// Remote header future. - type RemoteHeaderResult: Future> - + Unpin - + Send - + 'static; - /// Remote storage read future. - type RemoteReadResult: Future, Option>>, ClientError>> - + Unpin - + Send - + 'static; - /// Remote call result future. - type RemoteCallResult: Future, ClientError>> + Unpin + Send + 'static; - /// Remote changes result future. - type RemoteChangesResult: Future, u32)>, ClientError>> - + Unpin - + Send - + 'static; - /// Remote block body result future. - type RemoteBodyResult: Future, ClientError>> - + Unpin - + Send - + 'static; - - /// Fetch remote header. - fn remote_header( - &self, - request: RemoteHeaderRequest, - ) -> Self::RemoteHeaderResult; - /// Fetch remote storage value. - fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult; - /// Fetch remote storage child value. - fn remote_read_child( - &self, - request: RemoteReadChildRequest, - ) -> Self::RemoteReadResult; - /// Fetch remote call result. - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; - /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed - /// at a given blocks range. - fn remote_changes( - &self, - request: RemoteChangesRequest, - ) -> Self::RemoteChangesResult; - /// Fetch remote block body - fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult; -} - -/// Light client remote data checker. -/// -/// Implementations of this trait should not use any prunable blockchain data -/// except that is passed to its methods. -pub trait FetchChecker: Send + Sync { - /// Check remote header proof. - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - header: Option, - remote_proof: StorageProof, - ) -> ClientResult; - /// Check remote storage read proof. - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>>; - /// Check remote storage read proof. - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>>; - /// Check remote method execution proof. - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: StorageProof, - ) -> ClientResult>; - /// Check remote changes query proof. - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - proof: ChangesProof, - ) -> ClientResult, u32)>>; - /// Check remote body proof. - fn check_body_proof( - &self, - request: &RemoteBodyRequest, - body: Vec, - ) -> ClientResult>; -} - -/// Light client blockchain storage. -pub trait Storage: - AuxStore - + HeaderBackend - + HeaderMetadata - + ProvideChtRoots -{ - /// Store new header. Should refuse to revert any finalized blocks. - /// - /// Takes new authorities, the leaf state of the new block, and - /// any auxiliary storage updates to place in the same operation. - fn import_header( - &self, - header: Block::Header, - cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()>; - - /// Set an existing block as new best block. - fn set_head(&self, block: BlockId) -> ClientResult<()>; - - /// Mark historic header as finalized. - fn finalize_header(&self, block: BlockId) -> ClientResult<()>; - - /// Get last finalized header. 
-
-/// Light client blockchain storage.
-pub trait Storage<Block: BlockT>:
-	AuxStore
-	+ HeaderBackend<Block>
-	+ HeaderMetadata<Block, Error = ClientError>
-	+ ProvideChtRoots<Block>
-{
-	/// Store new header. Should refuse to revert any finalized blocks.
-	///
-	/// Takes new authorities, the leaf state of the new block, and
-	/// any auxiliary storage updates to place in the same operation.
-	fn import_header(
-		&self,
-		header: Block::Header,
-		cache: HashMap<well_known_cache_keys::Id, Vec<u8>>,
-		state: NewBlockState,
-		aux_ops: Vec<(Vec<u8>, Option<Vec<u8>>)>,
-	) -> ClientResult<()>;
-
-	/// Set an existing block as new best block.
-	fn set_head(&self, block: BlockId<Block>) -> ClientResult<()>;
-
-	/// Mark historic header as finalized.
-	fn finalize_header(&self, block: BlockId<Block>) -> ClientResult<()>;
-
-	/// Get last finalized header.
-	fn last_finalized(&self) -> ClientResult<Block::Hash>;
-
-	/// Get storage cache.
-	fn cache(&self) -> Option<Arc<dyn BlockchainCache<Block>>>;
-
-	/// Get storage usage statistics.
-	fn usage_info(&self) -> Option<UsageInfo>;
-}
-
-/// Result of a local data lookup: either the data itself, or a request to fetch it remotely.
-#[derive(Debug)]
-pub enum LocalOrRemote<Data, Request> {
-	/// When data is available locally, it is returned.
-	Local(Data),
-	/// When data is unavailable locally, the request to fetch it from remote node is returned.
-	Remote(Request),
-	/// When data is unknown.
-	Unknown,
-}
-
-/// Futures-based blockchain backend that either resolves blockchain data
-/// locally, or fetches required data from remote node.
-pub trait RemoteBlockchain<Block: BlockT>: Send + Sync {
-	/// Get block header.
-	fn header(
-		&self,
-		id: BlockId<Block>,
-	) -> ClientResult<LocalOrRemote<Block::Header, RemoteHeaderRequest<Block::Header>>>;
-}
-
-/// Returns future that resolves header either locally, or remotely.
-pub fn future_header<Block: BlockT, F: Fetcher<Block>>(
-	blockchain: &dyn RemoteBlockchain<Block>,
-	fetcher: &F,
-	id: BlockId<Block>,
-) -> impl Future<Output = Result<Option<Block::Header>, ClientError>> {
-	use futures::future::{ready, Either, FutureExt};
-
-	match blockchain.header(id) {
-		Ok(LocalOrRemote::Remote(request)) =>
-			Either::Left(fetcher.remote_header(request).then(|header| ready(header.map(Some)))),
-		Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))),
-		Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))),
-		Err(err) => Either::Right(ready(Err(err))),
-	}
-}
-
-#[cfg(test)]
-pub mod tests {
-	use super::*;
-	use futures::future::Ready;
-	use parking_lot::Mutex;
-	use sp_blockchain::Error as ClientError;
-	use sp_test_primitives::{Block, Extrinsic, Header};
-
-	#[derive(Debug, thiserror::Error)]
-	#[error("Not implemented on test node")]
-	struct MockError;
-
-	impl Into<ClientError> for MockError {
-		fn into(self) -> ClientError {
-			ClientError::Application(Box::new(self))
-		}
-	}
-
-	pub type OkCallFetcher = Mutex<Vec<u8>>;
-
-	fn not_implemented_in_tests<T>() -> Ready<Result<T, ClientError>> {
-		futures::future::ready(Err(MockError.into()))
-	}
-
-	impl Fetcher<Block> for OkCallFetcher {
-		type RemoteHeaderResult = Ready<Result<Header, ClientError>>;
-		type RemoteReadResult = Ready<Result<HashMap<Vec<u8>, Option<Vec<u8>>>, ClientError>>;
-		type RemoteCallResult = Ready<Result<Vec<u8>, ClientError>>;
-		type RemoteChangesResult = Ready<Result<Vec<(NumberFor<Block>, u32)>, ClientError>>;
-		type RemoteBodyResult = Ready<Result<Vec<Extrinsic>, ClientError>>;
-
-		fn remote_header(&self, _request: RemoteHeaderRequest<Header>) -> Self::RemoteHeaderResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_read(&self, _request: RemoteReadRequest<Header>) -> Self::RemoteReadResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_read_child(
-			&self,
-			_request: RemoteReadChildRequest<Header>,
-		) -> Self::RemoteReadResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_call(&self, _request: RemoteCallRequest<Header>) -> Self::RemoteCallResult {
-			futures::future::ready(Ok((*self.lock()).clone()))
-		}
-
-		fn remote_changes(
-			&self,
-			_request: RemoteChangesRequest<Header>,
-		) -> Self::RemoteChangesResult {
-			not_implemented_in_tests()
-		}
-
-		fn remote_body(&self, _request: RemoteBodyRequest<Header>) -> Self::RemoteBodyResult {
-			not_implemented_in_tests()
-		}
-	}
-}
diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs
index 79444f0069232..9a0d2c6a8132f 100644
--- a/client/api/src/proof_provider.rs
+++ b/client/api/src/proof_provider.rs
@@ -17,9 +17,9 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 //! Proof utilities
-use crate::{ChangesProof, StorageProof};
+use crate::StorageProof;
 use sp_runtime::{generic::BlockId, traits::Block as BlockT};
-use sp_storage::{ChildInfo, PrefixedStorageKey, StorageKey};
+use sp_storage::ChildInfo;
 
 /// Interface for providing block proving utilities.
 pub trait ProofProvider<Block: BlockT> {
@@ -49,27 +49,6 @@ pub trait ProofProvider<Block: BlockT> {
 		method: &str,
 		call_data: &[u8],
 	) -> sp_blockchain::Result<(Vec<u8>, StorageProof)>;
-	/// Reads given header and generates CHT-based header proof.
-	fn header_proof(
-		&self,
-		id: &BlockId<Block>,
-	) -> sp_blockchain::Result<(Block::Header, StorageProof)>;
-
-	/// Get proof for computation of (block, extrinsic) pairs where key has been changed at given
-	/// blocks range. `min` is the hash of the first block, whose changes trie root is known to the
-	/// requester; when we're using changes tries from ascendants of this block, we should provide
-	/// proofs for changes tries roots. `max` is the hash of the last block known to the requester;
-	/// we can't use changes tries from descendants of this block.
-	/// Works only for runtimes that are supporting changes tries.
-	fn key_changes_proof(
-		&self,
-		first: Block::Hash,
-		last: Block::Hash,
-		min: Block::Hash,
-		max: Block::Hash,
-		storage_key: Option<&PrefixedStorageKey>,
-		key: &StorageKey,
-	) -> sp_blockchain::Result<ChangesProof<Block::Header>>;
 	/// Given a `BlockId` iterate over all storage values starting at `start_key` exclusively,
 	/// building proofs until size limit is reached. Returns combined proof and the number of
diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs
index 0055254b67091..5bcfeebc6bad0 100644
--- a/client/basic-authorship/src/basic_authorship.rs
+++ b/client/basic-authorship/src/basic_authorship.rs
@@ -40,8 +40,9 @@ use sp_consensus::{
 use sp_core::traits::SpawnNamed;
 use sp_inherents::InherentData;
 use sp_runtime::{
+	Digest,
 	generic::BlockId,
-	traits::{BlakeTwo256, Block as BlockT, DigestFor, Hash as HashT, Header as HeaderT},
+	traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT},
 };
 use std::{marker::PhantomData, pin::Pin, sync::Arc, time};
 
@@ -261,7 +262,7 @@ where
 	fn propose(
 		self,
 		inherent_data: InherentData,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 		max_duration: time::Duration,
 		block_size_limit: Option<usize>,
 	) -> Self::Proposal {
@@ -309,7 +310,7 @@ where
 	async fn propose_with(
 		self,
 		inherent_data: InherentData,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 		deadline: time::Instant,
 		block_size_limit: Option<usize>,
 	) -> Result<Proposal<Block, backend::TransactionFor<B, Block>, PR::Proof>, sp_blockchain::Error>
@@ -688,12 +689,9 @@ mod tests {
 		api.execute_block(&block_id, proposal.block).unwrap();
 
 		let state = backend.state_at(block_id).unwrap();
-		let changes_trie_state =
-			backend::changes_tries_state_at_block(&block_id, backend.changes_trie_storage())
-				.unwrap();
 
 		let storage_changes = api
-			.into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash)
+			.into_storage_changes(&state, genesis_hash)
 			.unwrap();
 
 		assert_eq!(
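The pattern above repeats throughout the rest of the patch: the `DigestFor<Block>` and `DigestItemFor<Block>` aliases disappear because digests no longer carry the header-hash type parameter. A minimal before/after sketch, assuming only `sp_runtime` imports:

    use sp_runtime::{Digest, DigestItem};

    // Before this change: `let digest: DigestFor<Block> = Default::default();`
    // After: one concrete type serves every block type.
    fn example_digest() -> Digest {
        let mut digest = Digest::default();
        digest.push(DigestItem::Other(b"example".to_vec()));
        digest
    }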
diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs
index e89421edfb168..c86e849f7eaa2 100644
--- a/client/block-builder/src/lib.rs
+++ b/client/block-builder/src/lib.rs
@@ -34,8 +34,9 @@ use sp_api::{
 use sp_blockchain::{ApplyExtrinsicFailed, Error};
 use sp_core::ExecutionContext;
 use sp_runtime::{
+	Digest,
 	generic::BlockId,
-	traits::{Block as BlockT, DigestFor, Hash, HashFor, Header as HeaderT, NumberFor, One},
+	traits::{Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One},
 };
 
 pub use sp_block_builder::BlockBuilder as BlockBuilderApi;
@@ -119,14 +120,14 @@ where
 	fn new_block_at<R: Into<RecordProof>>(
 		&self,
 		parent: &BlockId<Block>,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 		record_proof: R,
 	) -> sp_blockchain::Result<BlockBuilder<Block, RA, B>>;
 
 	/// Create a new block, built on the head of the chain.
 	fn new_block(
 		&self,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 	) -> sp_blockchain::Result<BlockBuilder<Block, RA, B>>;
 }
@@ -159,7 +160,7 @@ where
 		parent_hash: Block::Hash,
 		parent_number: NumberFor<Block>,
 		record_proof: RecordProof,
-		inherent_digests: DigestFor<Block>,
+		inherent_digests: Digest,
 		backend: &'a B,
 	) -> Result<Self, Error> {
 		let header = <<Block as BlockT>::Header as HeaderT>::new(
@@ -237,15 +238,11 @@ where
 		let proof = self.api.extract_proof();
 
 		let state = self.backend.state_at(self.block_id)?;
-		let changes_trie_state = backend::changes_tries_state_at_block(
-			&self.block_id,
-			self.backend.changes_trie_storage(),
-		)?;
 		let parent_hash = self.parent_hash;
 
 		let storage_changes = self
 			.api
-			.into_storage_changes(&state, changes_trie_state.as_ref(), parent_hash)
+			.into_storage_changes(&state, parent_hash)
 			.map_err(|e| sp_blockchain::Error::StorageChanges(e))?;
 
 		Ok(BuiltBlock {
diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs
index a4dbe5012ea19..2311aeed4af22 100644
--- a/client/consensus/aura/src/import_queue.rs
+++ b/client/consensus/aura/src/import_queue.rs
@@ -33,7 +33,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi};
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
 use sp_blockchain::{
 	well_known_cache_keys::{self, Id as CacheKeyId},
-	HeaderBackend, ProvideCache,
+	HeaderBackend,
 };
 use sp_consensus::{CanAuthorWith, Error as ConsensusError};
 use sp_consensus_aura::{
@@ -44,8 +44,9 @@ use sp_consensus_slots::Slot;
 use sp_core::{crypto::Pair, ExecutionContext};
 use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _};
 use sp_runtime::{
+	DigestItem,
 	generic::{BlockId, OpaqueDigestItemId},
-	traits::{Block as BlockT, DigestItemFor, Header},
+	traits::{Block as BlockT, Header},
 };
 use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc};
 
@@ -61,9 +62,8 @@ fn check_header<C, B: BlockT, P: Pair>(
 	hash: B::Hash,
 	authorities: &[AuthorityId<P>],
 	check_for_equivocation: CheckForEquivocation,
-) -> Result<CheckedHeader<B::Header, (Slot, DigestItemFor<B>)>, Error<B>>
+) -> Result<CheckedHeader<B::Header, (Slot, DigestItem)>, Error<B>>
 where
-	DigestItemFor<B>: CompatibleDigestItem<P>,
 	P::Signature: Codec,
 	C: sc_client_api::backend::AuxStore,
 	P::Public: Encode + Decode + PartialEq + Clone,
@@ -193,10 +193,8 @@ where
 		+ Send
 		+ Sync
 		+ sc_client_api::backend::AuxStore
-		+ ProvideCache<B>
 		+ BlockOf,
 	C::Api: BlockBuilderApi<B> + AuraApi<B, AuthorityId<P>> + ApiExt<B>,
-	DigestItemFor<B>: CompatibleDigestItem<P>,
 	P: Pair + Send + Sync + 'static,
 	P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static,
 	P::Signature: Encode + Decode,
@@ -385,7 +383,6 @@ where
 	C: 'static
 		+ ProvideRuntimeApi<B>
 		+ BlockOf
-		+ ProvideCache<B>
 		+ Send
 		+ Sync
 		+ AuxStore
@@ -395,7 +392,6 @@ where
 		+ Send
 		+ Sync
 		+ 'static,
-	DigestItemFor<B>: CompatibleDigestItem<P>,
 	P: Pair + Send + Sync + 'static,
 	P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode,
 	P::Signature: Encode + Decode,
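For readers following the `CompatibleDigestItem` change: the trait is now implemented directly on `sp_runtime::DigestItem` rather than on the removed `DigestItemFor<B>` alias. A sketch of producing and reading back an Aura pre-digest under the new bounds (imports and the `AuthorityPair` choice are assumptions matching this era of the API, not taken from the patch):

    use sp_consensus_aura::{digests::CompatibleDigestItem, sr25519::AuthorityPair};
    use sp_runtime::DigestItem;

    // Round-trip a slot claim through the de-generified DigestItem.
    fn roundtrip_pre_digest() {
        let item = <DigestItem as CompatibleDigestItem<AuthorityPair>>::aura_pre_digest(7.into());
        assert_eq!(item.as_aura_pre_digest(), Some(7.into()));
    }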
diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs
index 946e0b90c4dd4..d5664b9b18bef 100644
--- a/client/consensus/aura/src/lib.rs
+++ b/client/consensus/aura/src/lib.rs
@@ -52,7 +52,7 @@ use sc_consensus_slots::{
 use sc_telemetry::TelemetryHandle;
 use sp_api::ProvideRuntimeApi;
 use sp_application_crypto::{AppKey, AppPublic};
-use sp_blockchain::{HeaderBackend, ProvideCache, Result as CResult};
+use sp_blockchain::{HeaderBackend, Result as CResult};
 use sp_consensus::{
 	BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain,
 };
@@ -61,8 +61,9 @@ use sp_core::crypto::{Pair, Public};
 use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
 use sp_runtime::{
+	DigestItem,
 	generic::BlockId,
-	traits::{Block as BlockT, DigestItemFor, Header, Member, NumberFor, Zero},
+	traits::{Block as BlockT, Header, Member, NumberFor, Zero},
 };
 
 mod import_queue;
@@ -178,7 +179,7 @@ where
 	P::Public: AppPublic + Hash + Member + Encode + Decode,
 	P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
 	B: BlockT,
-	C: ProvideRuntimeApi<B> + BlockOf + ProvideCache<B> + AuxStore + HeaderBackend<B> + Send + Sync,
+	C: ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	SC: SelectChain<B>,
 	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync + 'static,
@@ -267,7 +268,7 @@ pub fn build_aura_worker(
 ) -> impl sc_consensus_slots::SlotWorker<B, <PF::Proposer as Proposer<B>>::Proof>
 where
 	B: BlockT,
-	C: ProvideRuntimeApi<B> + BlockOf + ProvideCache<B> + AuxStore + HeaderBackend<B> + Send + Sync,
+	C: ProvideRuntimeApi<B> + BlockOf + AuxStore + HeaderBackend<B> + Send + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	PF: Environment<B, Error = Error> + Send + Sync + 'static,
 	PF::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
@@ -316,7 +317,7 @@ impl sc_consensus_slots::SimpleSlotWorker<B>
 	for AuraWorker<C, E, I, P, SO, L, BS>
 where
 	B: BlockT,
-	C: ProvideRuntimeApi<B> + BlockOf + ProvideCache<B> + HeaderBackend<B> + Sync,
+	C: ProvideRuntimeApi<B> + BlockOf + HeaderBackend<B> + Sync,
 	C::Api: AuraApi<B, AuthorityId<P>>,
 	E: Environment<B, Error = Error> + Send + Sync,
 	E::Proposer: Proposer<B, Error = Error, Transaction = sp_api::TransactionFor<C, B>>,
@@ -381,8 +382,8 @@ where
 		&self,
 		slot: Slot,
 		_claim: &Self::Claim,
-	) -> Vec<DigestItemFor<B>> {
-		vec![<DigestItemFor<B> as CompatibleDigestItem<P>>::aura_pre_digest(slot)]
+	) -> Vec<DigestItem> {
+		vec![<DigestItem as CompatibleDigestItem<P>>::aura_pre_digest(slot)]
 	}
 
 	fn block_import_params(
@@ -426,7 +427,7 @@ where
 			.map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?;
 
 		let signature_digest_item =
-			<DigestItemFor<B> as CompatibleDigestItem<P>>::aura_seal(signature);
+			<DigestItem as CompatibleDigestItem<P>>::aura_seal(signature);
 
 		let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
 		import_block.post_digests.push(signature_digest_item);
@@ -545,7 +546,7 @@ fn authorities<A, B, C>(client: &C, at: &BlockId<B>) -> Result<Vec<A>, ConsensusError>
 where
 	A: Codec + Debug,
 	B: BlockT,
-	C: ProvideRuntimeApi<B> + BlockOf + ProvideCache<B>,
+	C: ProvideRuntimeApi<B> + BlockOf,
 	C::Api: AuraApi<B, A>,
 {
 	client
@@ -574,7 +575,7 @@ mod tests {
 	use sp_consensus_aura::sr25519::AuthorityPair;
 	use sp_inherents::InherentData;
 	use sp_keyring::sr25519::Keyring;
-	use sp_runtime::traits::{Block as BlockT, DigestFor, Header as _};
+	use sp_runtime::{Digest, traits::{Block as BlockT, Header as _}};
 	use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider;
 	use std::{
 		task::Poll,
@@ -611,7 +612,7 @@ mod tests {
 		fn propose(
 			self,
 			_: InherentData,
-			digests: DigestFor<TestBlock>,
+			digests: Digest,
 			_: Duration,
 			_: Option<usize>,
 		) -> Self::Proposal {
@@ -661,29 +662,25 @@ mod tests {
 		_cfg: &ProtocolConfig,
 		_peer_data: &(),
 	) -> Self::Verifier {
-		match client {
-			PeersClient::Full(client, _) => {
-				let slot_duration = slot_duration(&*client).expect("slot duration available");
-
-				assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION);
-				import_queue::AuraVerifier::new(
-					client,
-					Box::new(|_, _| async {
-						let timestamp = TimestampInherentDataProvider::from_system_time();
-						let slot = InherentDataProvider::from_timestamp_and_duration(
-							*timestamp,
-							Duration::from_secs(6),
-						);
-
-						Ok((timestamp, slot))
-					}),
-					AlwaysCanAuthor,
-					CheckForEquivocation::Yes,
-					None,
-				)
-			},
-			PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"),
-		}
+		let client = client.as_client();
+		let slot_duration = slot_duration(&*client).expect("slot duration available");
+
+		assert_eq!(slot_duration.slot_duration().as_millis() as u64, SLOT_DURATION);
+		import_queue::AuraVerifier::new(
+			client,
+			Box::new(|_, _| async {
+				let timestamp = TimestampInherentDataProvider::from_system_time();
+				let slot = InherentDataProvider::from_timestamp_and_duration(
+					*timestamp,
+					Duration::from_secs(6),
+				);
+
+				Ok((timestamp, slot))
+			}),
+			AlwaysCanAuthor,
+			CheckForEquivocation::Yes,
+			None,
+		)
 	}
 
 	fn make_block_import(
@@ -724,7 +721,7 @@ mod tests {
 		for (peer_id, key) in peers {
 			let mut net = net.lock();
 			let peer = net.peer(*peer_id);
-			let client = peer.client().as_full().expect("full clients are created").clone();
+			let client = peer.client().as_client();
 			let select_chain = peer.select_chain().expect("full client has a select chain");
 			let keystore_path = tempfile::tempdir().expect("Creates keystore path");
 			let keystore = Arc::new(
@@ -823,7 +820,7 @@ mod tests {
 		let mut net = net.lock();
 		let peer = net.peer(3);
-		let client = peer.client().as_full().expect("full clients are created").clone();
+		let client = peer.client().as_client();
 		let environ = DummyFactory(client.clone());
 
 		let worker = AuraWorker {
@@ -875,7 +872,7 @@ mod tests {
 		let mut net = net.lock();
 		let peer = net.peer(3);
-		let client = peer.client().as_full().expect("full clients are created").clone();
+		let client = peer.client().as_client();
 		let environ = DummyFactory(client.clone());
 
 		let mut worker = AuraWorker {
diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs
index 1fde788041155..71f45b67134ae 100644
--- a/client/consensus/babe/src/lib.rs
+++ b/client/consensus/babe/src/lib.rs
@@ -104,7 +104,7 @@ use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi};
 use sp_application_crypto::AppKey;
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
 use sp_blockchain::{
-	Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult,
+	Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult,
 };
 use sp_consensus::{
 	BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer,
@@ -116,8 +116,9 @@ use sp_core::{crypto::Public, ExecutionContext};
 use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider};
 use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr};
 use sp_runtime::{
+	DigestItem,
 	generic::{BlockId, OpaqueDigestItemId},
-	traits::{Block as BlockT, DigestItemFor, Header, Zero},
+	traits::{Block as BlockT, Header, Zero},
 };
 
 pub use sc_consensus_slots::SlotProportion;
@@ -465,7 +466,6 @@ pub fn start_babe(
 where
 	B: BlockT,
 	C: ProvideRuntimeApi<B>
-		+ ProvideCache<B>
 		+ ProvideUncles<B>
 		+ BlockchainEvents<B>
 		+ HeaderBackend<B>
@@ -539,7 +539,6 @@ async fn answer_requests<B: BlockT, C>(
 	epoch_changes: SharedEpochChanges<B, Epoch>,
 ) where
 	C: ProvideRuntimeApi<B>
-		+ ProvideCache<B>
 		+ ProvideUncles<B>
 		+ BlockchainEvents<B>
 		+ HeaderBackend<B>
@@ -678,7 +677,6 @@ impl sc_consensus_slots::SimpleSlotWorker<B>
 	for BabeSlotWorker<B, C, E, I, SO, L>
 where
 	B: BlockT,
 	C: ProvideRuntimeApi<B>
-		+ ProvideCache<B>
 		+ HeaderBackend<B>
 		+ HeaderMetadata<B, Error = ClientError>,
 	C::Api: BabeApi<B>,
@@ -778,8 +776,8 @@ where
 		&self,
 		_slot: Slot,
 		claim: &Self::Claim,
-	) -> Vec<DigestItemFor<B>> {
-		vec![<DigestItemFor<B> as CompatibleDigestItem>::babe_pre_digest(claim.0.clone())]
+	) -> Vec<DigestItem> {
+		vec![<DigestItem as CompatibleDigestItem>::babe_pre_digest(claim.0.clone())]
 	}
 
 	fn block_import_params(
@@ -821,7 +819,7 @@ where
 			.try_into()
 			.map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?;
 		let digest_item =
-			<DigestItemFor<B> as CompatibleDigestItem>::babe_seal(signature.into());
+			<DigestItem as CompatibleDigestItem>::babe_seal(signature.into());
 
 		let mut import_block = BlockImportParams::new(BlockOrigin::Own, header);
 		import_block.post_digests.push(digest_item);
@@ -921,10 +919,7 @@ pub fn find_pre_digest<B: BlockT>(header: &B::Header)
 fn find_authorities_change<B: BlockT>(
 	header: &B::Header,
-) -> Result<Option<NextEpochDescriptor>, Error<B>>
-where
-	DigestItemFor<B>: CompatibleDigestItem,
-{
+) -> Result<Option<NextEpochDescriptor>, Error<B>> {
 	let mut epoch_digest: Option<_> = None;
 	for log in header.digest().logs() {
 		trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
@@ -943,10 +938,7 @@
 /// Extract the BABE config change digest from the given header, if it exists.
 fn find_next_config_digest<B: BlockT>(
 	header: &B::Header,
-) -> Result<Option<NextConfigDescriptor>, Error<B>>
-where
-	DigestItemFor<B>: CompatibleDigestItem,
-{
+) -> Result<Option<NextConfigDescriptor>, Error<B>> {
 	let mut config_digest: Option<_> = None;
 	for log in header.digest().logs() {
 		trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log);
@@ -1132,8 +1124,7 @@ where
 		+ ProvideRuntimeApi<Block>
 		+ Send
 		+ Sync
-		+ AuxStore
-		+ ProvideCache<Block>,
+		+ AuxStore,
 	Client::Api: BlockBuilderApi<Block> + BabeApi<Block>,
 	SelectChain: sp_consensus::SelectChain<Block>,
 	CAW: CanAuthorWith<Block> + Send + Sync,
@@ -1332,7 +1323,6 @@ where
 		+ HeaderMetadata<Block, Error = ClientError>
 		+ AuxStore
 		+ ProvideRuntimeApi<Block>
-		+ ProvideCache<Block>
 		+ Send
 		+ Sync,
 	Client::Api: BabeApi<Block> + ApiExt<Block>,
@@ -1399,7 +1389,6 @@ where
 		+ HeaderMetadata<Block, Error = ClientError>
 		+ AuxStore
 		+ ProvideRuntimeApi<Block>
-		+ ProvideCache<Block>
 		+ Send
 		+ Sync,
 	Client::Api: BabeApi<Block> + ApiExt<Block>,
@@ -1756,7 +1745,6 @@ where
 		+ Sync
 		+ 'static,
 	Client: ProvideRuntimeApi<Block>
-		+ ProvideCache<Block>
 		+ HeaderBackend<Block>
 		+ HeaderMetadata<Block, Error = ClientError>
 		+ AuxStore
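A quick illustration of the digest handling these hunks simplify: pre-runtime items are read straight off `sp_runtime::DigestItem`, with no `DigestItemFor` indirection. This helper is ours, written only to show the shape of the new API:

    use sp_runtime::{traits::Header as HeaderT, ConsensusEngineId, DigestItem};

    // Return the first pre-runtime digest for a given engine, if any.
    fn pre_runtime_for<H: HeaderT>(header: &H, engine: ConsensusEngineId) -> Option<Vec<u8>> {
        header.digest().logs().iter().find_map(|log| match log {
            DigestItem::PreRuntime(id, data) if *id == engine => Some(data.clone()),
            _ => None,
        })
    }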
diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs
index c033f4535be0b..82203148f99a8 100644
--- a/client/consensus/babe/src/tests.rs
+++ b/client/consensus/babe/src/tests.rs
@@ -43,13 +43,13 @@ use sp_consensus_babe::{
 use sp_core::crypto::Pair;
 use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore};
 use sp_runtime::{
-	generic::DigestItem,
-	traits::{Block as BlockT, DigestFor},
+	generic::{Digest, DigestItem},
+	traits::{Block as BlockT},
 };
 use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider;
 use std::{cell::RefCell, task::Poll, time::Duration};
 
-type Item = DigestItem<Hash>;
+type Item = DigestItem;
 
 type Error = sp_blockchain::Error;
@@ -108,7 +108,7 @@ impl Environment<TestBlock> for DummyFactory {
 impl DummyProposer {
 	fn propose_with(
 		&mut self,
-		pre_digests: DigestFor<TestBlock>,
+		pre_digests: Digest,
 	) -> future::Ready<
 		Result<
 			Proposal<
@@ -181,7 +181,7 @@ impl Proposer<TestBlock> for DummyProposer {
 	fn propose(
 		mut self,
 		_: InherentData,
-		pre_digests: DigestFor<TestBlock>,
+		pre_digests: Digest,
 		_: Duration,
 		_: Option<usize>,
 	) -> Self::Proposal {
@@ -295,7 +295,7 @@ impl TestNetFactory for BabeTestNet {
 		Option<BoxJustificationImport<Block>>,
 		Option<PeerData>,
 	) {
-		let client = client.as_full().expect("only full clients are tested");
+		let client = client.as_client();
 		let config = Config::get_or_compute(&*client).expect("config available");
 		let (block_import, link) = crate::block_import(config, client.clone(), client.clone())
@@ -320,7 +320,7 @@ impl TestNetFactory for BabeTestNet {
 	) -> Self::Verifier {
 		use substrate_test_runtime_client::DefaultTestClientBuilderExt;
 
-		let client = client.as_full().expect("only full clients are used in test");
+		let client = client.as_client();
 		trace!(target: "babe", "Creating a verifier");
 
 		// ensure block import and verifier are linked correctly.
@@ -395,7 +395,7 @@ fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static
 	for (peer_id, seed) in peers {
 		let mut net = net.lock();
 		let peer = net.peer(*peer_id);
-		let client = peer.client().as_full().expect("Only full clients are used in tests").clone();
+		let client = peer.client().as_client();
 		let select_chain = peer.select_chain().expect("Full client has select_chain");
 
 		let keystore_path = tempfile::tempdir().expect("Creates keystore path");
@@ -679,7 +679,7 @@ fn importing_block_one_sets_genesis_epoch() {
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
-	let client = peer.client().as_full().expect("Only full clients are used in tests").clone();
+	let client = peer.client().as_client();
 
 	let mut proposer_factory = DummyFactory {
 		client: client.clone(),
@@ -721,7 +721,7 @@ fn importing_epoch_change_block_prunes_tree() {
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
 
-	let client = peer.client().as_full().expect("Only full clients are used in tests").clone();
+	let client = peer.client().as_client();
 	let mut block_import = data.block_import.lock().take().expect("import set up during init");
 	let epoch_changes = data.link.epoch_changes.clone();
@@ -836,7 +836,7 @@ fn verify_slots_are_strictly_increasing() {
 	let peer = net.peer(0);
 	let data = peer.data.as_ref().expect("babe link set up during initialization");
 
-	let client = peer.client().as_full().expect("Only full clients are used in tests").clone();
+	let client = peer.client().as_client();
 	let mut block_import = data.block_import.lock().take().expect("import set up during init");
 
 	let mut proposer_factory = DummyFactory {
diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs
index af118312dd07c..fe23fe81c231f 100644
--- a/client/consensus/babe/src/verification.rs
+++ b/client/consensus/babe/src/verification.rs
@@ -32,7 +32,7 @@ use sp_consensus_babe::{
 };
 use sp_consensus_slots::Slot;
 use sp_core::{Pair, Public};
-use sp_runtime::traits::{DigestItemFor, Header};
+use sp_runtime::{DigestItem, traits::Header};
 
 /// BABE verification parameters
 pub(super) struct VerificationParams<'a, B: 'a + BlockT> {
@@ -61,10 +61,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> {
 /// with each having different validation logic.
 pub(super) fn check_header<B: BlockT>(
 	params: VerificationParams<B>,
-) -> Result<CheckedHeader<B::Header, VerifiedHeaderInfo<B>>, Error<B>>
-where
-	DigestItemFor<B>: CompatibleDigestItem,
-{
+) -> Result<CheckedHeader<B::Header, VerifiedHeaderInfo>, Error<B>> {
 	let VerificationParams { mut header, pre_digest, slot_now, epoch } = params;
 
 	let authorities = &epoch.authorities;
@@ -137,9 +134,9 @@ where
 	Ok(CheckedHeader::Checked(header, info))
 }
 
-pub(super) struct VerifiedHeaderInfo<B: BlockT> {
-	pub(super) pre_digest: DigestItemFor<B>,
-	pub(super) seal: DigestItemFor<B>,
+pub(super) struct VerifiedHeaderInfo {
+	pub(super) pre_digest: DigestItem,
+	pub(super) seal: DigestItem,
 	pub(super) author: AuthorityId,
 }
diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs
index d828e54bc7e3e..b7f33f4a8a46e 100644
--- a/client/consensus/common/src/block_import.rs
+++ b/client/consensus/common/src/block_import.rs
@@ -20,7 +20,8 @@
 use serde::{Deserialize, Serialize};
 use sp_runtime::{
-	traits::{Block as BlockT, DigestItemFor, HashFor, Header as HeaderT, NumberFor},
+	DigestItem,
+	traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor},
 	Justification, Justifications,
 };
 use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc};
@@ -122,7 +123,7 @@ pub struct BlockCheckParams<Block: BlockT> {
 /// Precomputed storage.
 pub enum StorageChanges<Block: BlockT, Transaction> {
 	/// Changes coming from block execution.
-	Changes(sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>),
+	Changes(sp_state_machine::StorageChanges<Transaction, HashFor<Block>>),
 	/// Whole new state.
 	Import(ImportedState<Block>),
 }
@@ -175,7 +176,7 @@ pub struct BlockImportParams<Block: BlockT, Transaction> {
 	pub justifications: Option<Justifications>,
 	/// Digest items that have been added after the runtime for external
 	/// work, like a consensus signature.
-	pub post_digests: Vec<DigestItemFor<Block>>,
+	pub post_digests: Vec<DigestItem>,
 	/// The body of the block.
 	pub body: Option<Vec<Block::Extrinsic>>,
 	/// Indexed transaction body of the block.
diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs
index 33a4c8616f6d2..bdc6120398dfc 100644
--- a/client/consensus/manual-seal/src/consensus.rs
+++ b/client/consensus/manual-seal/src/consensus.rs
@@ -21,7 +21,7 @@
 use super::Error;
 use sc_consensus::BlockImportParams;
 use sp_inherents::InherentData;
-use sp_runtime::traits::{Block as BlockT, DigestFor};
+use sp_runtime::{Digest, traits::Block as BlockT};
 
 pub mod babe;
 
@@ -36,7 +36,7 @@ pub trait ConsensusDataProvider<B: BlockT>: Send + Sync {
 		&self,
 		parent: &B::Header,
 		inherents: &InherentData,
-	) -> Result<DigestFor<B>, Error>;
+	) -> Result<Digest, Error>;
 
 	/// set up the necessary import params.
 	fn append_block_import(
diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs
index 1d3afe392d62f..711a22245e552 100644
--- a/client/consensus/manual-seal/src/consensus/babe.rs
+++ b/client/consensus/manual-seal/src/consensus/babe.rs
@@ -48,8 +48,9 @@ use sp_consensus_babe::{
 use sp_consensus_slots::Slot;
 use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier};
 use sp_runtime::{
+	DigestItem,
 	generic::{BlockId, Digest},
-	traits::{Block as BlockT, DigestFor, DigestItemFor, Header, Zero},
+	traits::{Block as BlockT, Header, Zero},
 };
 use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER};
 
@@ -197,7 +198,7 @@ where
 		&self,
 		parent: &B::Header,
 		inherents: &InherentData,
-	) -> Result<DigestFor<B>, Error> {
+	) -> Result<Digest, Error> {
 		let slot = inherents
 			.babe_inherent_data()?
 			.ok_or_else(|| Error::StringError("No babe inherent data".into()))?;
@@ -207,7 +208,7 @@ where
 		let logs = if let Some((predigest, _)) =
 			authorship::claim_slot(slot, &epoch, &self.keystore)
 		{
-			vec![<DigestItemFor<B> as CompatibleDigestItem>::babe_pre_digest(predigest)]
+			vec![<DigestItem as CompatibleDigestItem>::babe_pre_digest(predigest)]
 		} else {
 			// well we couldn't claim a slot because this is an existing chain and we're not in the
 			// authorities. we need to tell BabeBlockImport that the epoch has changed, and we put
@@ -244,13 +245,13 @@ where
 			});
 
 			vec![
-				DigestItemFor::<B>::PreRuntime(BABE_ENGINE_ID, predigest.encode()),
-				DigestItemFor::<B>::Consensus(BABE_ENGINE_ID, next_epoch.encode()),
+				DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode()),
+				DigestItem::Consensus(BABE_ENGINE_ID, next_epoch.encode()),
 			]
 		},
 		ViableEpochDescriptor::UnimportedGenesis(_) => {
 			// since this is the genesis, secondary predigest works for now.
-			vec![DigestItemFor::<B>::PreRuntime(BABE_ENGINE_ID, predigest.encode())]
+			vec![DigestItem::PreRuntime(BABE_ENGINE_ID, predigest.encode())]
 		},
 	}
 };
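Since `create_digest` now returns a plain `Digest`, assembling one is just a matter of pushing `DigestItem`s; a self-contained sketch (the engine id literal and helper name are illustrative):

    use codec::Encode;
    use sp_runtime::{generic::Digest, DigestItem};

    // Wrap an already-encoded pre-digest into the inherent digest for a block.
    fn example_inherent_digest(pre_digest: impl Encode) -> Digest {
        Digest { logs: vec![DigestItem::PreRuntime(*b"BABE", pre_digest.encode())] }
    }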
diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs
index 7b1012888e869..bc2b772c8e0f2 100644
--- a/client/consensus/pow/src/lib.rs
+++ b/client/consensus/pow/src/lib.rs
@@ -55,7 +55,7 @@ use sc_consensus::{
 };
 use sp_api::ProvideRuntimeApi;
 use sp_block_builder::BlockBuilder as BlockBuilderApi;
-use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache};
+use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend};
 use sp_consensus::{
 	CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle,
 };
@@ -240,7 +240,7 @@ where
 	B: BlockT,
 	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync,
 	I::Error: Into<ConsensusError>,
-	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + ProvideCache<B> + BlockOf,
+	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
 	C::Api: BlockBuilderApi<B>,
 	Algorithm: PowAlgorithm<B>,
 	CAW: CanAuthorWith<B>,
@@ -319,7 +319,7 @@ where
 	I: BlockImport<B, Transaction = sp_api::TransactionFor<C, B>> + Send + Sync,
 	I::Error: Into<ConsensusError>,
 	S: SelectChain<B>,
-	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + ProvideCache<B> + BlockOf,
+	C: ProvideRuntimeApi<B> + Send + Sync + HeaderBackend<B> + AuxStore + BlockOf,
 	C::Api: BlockBuilderApi<B>,
 	Algorithm: PowAlgorithm<B> + Send + Sync,
 	Algorithm::Difficulty: 'static + Send,
@@ -428,7 +428,7 @@ impl<B: BlockT, Algorithm> PowVerifier<B, Algorithm> {
 	fn check_header(
 		&self,
 		mut header: B::Header,
-	) -> Result<(B::Header, DigestItem<B::Hash>), Error<B>>
+	) -> Result<(B::Header, DigestItem), Error<B>>
 	where
 		Algorithm: PowAlgorithm<B>,
 	{
@@ -630,7 +630,7 @@ where
 		},
 	};
 
-	let mut inherent_digest = Digest::<B::Hash>::default();
+	let mut inherent_digest = Digest::default();
 	if let Some(pre_runtime) = &pre_runtime {
 		inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec()));
 	}
@@ -703,7 +703,7 @@ fn find_pre_digest<B: BlockT>(header: &B::Header) -> Result<Option<Vec<u8>>, Error<B>>
 /// Fetch PoW seal.
 fn fetch_seal<B: BlockT>(
-	digest: Option<&DigestItem<B::Hash>>,
+	digest: Option<&DigestItem>,
 	hash: B::Hash,
 ) -> Result<Vec<u8>, Error<B>> {
 	match digest {
diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs
index bfaa388014ef0..a319fcdb5b2c4 100644
--- a/client/consensus/slots/src/lib.rs
+++ b/client/consensus/slots/src/lib.rs
@@ -45,7 +45,7 @@ use sp_consensus_slots::Slot;
 use sp_inherents::CreateInherentDataProviders;
 use sp_runtime::{
 	generic::BlockId,
-	traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor},
+	traits::{Block as BlockT, HashFor, Header as HeaderT},
 };
 use sp_timestamp::Timestamp;
 use std::{fmt::Debug, ops::Deref, time::Duration};
@@ -54,7 +54,7 @@ use std::{fmt::Debug, ops::Deref, time::Duration};
 ///
 /// See [`sp_state_machine::StorageChanges`] for more information.
 pub type StorageChanges<Transaction, Block> =
-	sp_state_machine::StorageChanges<Transaction, HashFor<Block>, NumberFor<Block>>;
+	sp_state_machine::StorageChanges<Transaction, HashFor<Block>>;
 
 /// The result of [`SlotWorker::on_slot`].
 #[derive(Debug, Clone)]
@@ -145,7 +145,7 @@ pub trait SimpleSlotWorker<B: BlockT> {
 		&self,
 		slot: Slot,
 		claim: &Self::Claim,
-	) -> Vec<DigestItemFor<B>>;
+	) -> Vec<DigestItem>;
 
 	/// Returns a function which produces a `BlockImportParams`.
 	fn block_import_params(
diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs
deleted file mode 100644
index 795cb8f901183..0000000000000
--- a/client/db/src/cache/list_cache.rs
+++ /dev/null
@@ -1,2351 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! List-based cache.
-//!
-//! Maintains several lists, containing nodes that are inserted whenever
-//! the cached value at a new block differs from the value at the previous block.
-//! Example:
-//! B1(a) <--- B2(b) <--- B3(b) <--- B4(c)
-//!            N1(b) <-------------- N2(c)
-//!
-//! There's a single list for all finalized blocks and >= 0 lists for unfinalized
-//! blocks.
-//! When a new non-final block is inserted (with a value that differs from the value
-//! at its parent), it starts a new unfinalized fork.
-//! When a new final block is inserted (with a value that differs from the value at
-//! its parent), a new entry is appended to the finalized fork.
-//! When an existing non-final block is finalized (with a value that differs from the
-//! value at its parent), a new entry is appended to the finalized fork AND the
-//! unfinalized fork is dropped.
-//!
-//! Entries from abandoned unfinalized forks (forks that are forking from block B
-//! which is an ascendant of the best finalized block) are deleted when block F with
-//! number B.number (i.e. 'parallel' canon block) is finalized.
-//!
-//! Finalized entry E1 is pruned when block B is finalized so that:
-//! EntryAt(B.number - prune_depth).points_to(E1)
-
-use std::collections::{BTreeMap, BTreeSet};
-
-use log::warn;
-
-use sp_blockchain::{Error as ClientError, Result as ClientResult};
-use sp_runtime::traits::{Block as BlockT, Bounded, CheckedSub, NumberFor, Zero};
-
-use crate::cache::{
-	list_entry::{Entry, StorageEntry},
-	list_storage::{Metadata, Storage, StorageTransaction},
-	CacheItemT, ComplexBlockId, EntryType,
-};
-
-/// Pruning strategy.
-#[derive(Debug, Clone, Copy)]
-pub enum PruningStrategy<N> {
-	/// Prune entries when they're too far behind best finalized block.
-	ByDepth(N),
-	/// Do not prune old entries at all.
-	NeverPrune,
-}
-
-/// List-based cache.
-pub struct ListCache<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> {
-	/// Cache storage.
-	storage: S,
-	/// Pruning strategy.
-	pruning_strategy: PruningStrategy<NumberFor<Block>>,
-	/// Best finalized block.
-	best_finalized_block: ComplexBlockId<Block>,
-	/// Best finalized entry (if exists).
-	best_finalized_entry: Option<Entry<Block, T>>,
-	/// All unfinalized 'forks'.
-	unfinalized: Vec<Fork<Block, T>>,
-}
-
-/// All possible list cache operations that could be performed after transaction is committed.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub enum CommitOperation<Block: BlockT, T: CacheItemT> {
-	/// New block is appended to the fork without changing the cached value.
-	AppendNewBlock(usize, ComplexBlockId<Block>),
-	/// New block is appended to the fork with a different value.
-	AppendNewEntry(usize, Entry<Block, T>),
-	/// New fork is added with the given head entry.
-	AddNewFork(Entry<Block, T>),
-	/// New block is finalized and possibly:
-	/// - new entry is finalized AND/OR
-	/// - some forks are destroyed
-	BlockFinalized(ComplexBlockId<Block>, Option<Entry<Block, T>>, BTreeSet<usize>),
-	/// When best block is reverted - contains the forks that have to be updated
-	/// (they're either destroyed, or their best entry is updated to an earlier block).
-	BlockReverted(BTreeMap<usize, Option<Fork<Block, T>>>),
-}
-
-/// A set of commit operations.
-#[derive(Debug)]
-pub struct CommitOperations<Block: BlockT, T: CacheItemT> {
-	operations: Vec<CommitOperation<Block, T>>,
-}
-
-/// Single fork of list-based cache.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct Fork<Block: BlockT, T> {
-	/// The best block of this fork. We do not save this field in the database to avoid
-	/// extra updates => it could be None after restart. It will either be filled when
-	/// the block is appended to this fork, or the whole fork will be abandoned when a
-	/// block from another fork is finalized.
-	best_block: Option<ComplexBlockId<Block>>,
-	/// The head entry of this fork.
-	head: Entry<Block, T>,
-}
-
-/// Outcome of Fork::try_append_or_fork.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub enum ForkAppendResult<Block: BlockT> {
-	/// New entry should be appended to the end of the fork.
-	Append,
-	/// New entry should be forked from the fork, starting with entry at given block.
-	Fork(ComplexBlockId<Block>),
-}
-
-impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S> {
-	/// Create new db list cache entry.
-	pub fn new(
-		storage: S,
-		pruning_strategy: PruningStrategy<NumberFor<Block>>,
-		best_finalized_block: ComplexBlockId<Block>,
-	) -> ClientResult<Self> {
-		let (best_finalized_entry, unfinalized) =
-			storage.read_meta().and_then(|meta| read_forks(&storage, meta))?;
-
-		Ok(ListCache {
-			storage,
-			pruning_strategy,
-			best_finalized_block,
-			best_finalized_entry,
-			unfinalized,
-		})
-	}
-
-	/// Get reference to the storage.
-	pub fn storage(&self) -> &S {
-		&self.storage
-	}
-
-	/// Get unfinalized forks reference.
-	#[cfg(test)]
-	pub fn unfinalized(&self) -> &[Fork<Block, T>] {
-		&self.unfinalized
-	}
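A usage sketch for the two types above, borrowing `DummyStorage` from this file's own tests (the wrapper function and block type are assumptions; the tests later in this file use the same construction):

    // Build a cache over u64 values that keeps entries reachable within the
    // last 1024 finalized blocks and prunes anything older.
    fn example_cache(
        best_finalized: ComplexBlockId<Block>,
    ) -> ClientResult<ListCache<Block, u64, DummyStorage>> {
        ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), best_finalized)
    }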
-	/// Get value valid at block.
-	pub fn value_at_block(
-		&self,
-		at: &ComplexBlockId<Block>,
-	) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>, T)>> {
-		let head = if at.number <= self.best_finalized_block.number {
-			// if the block is older than the best known finalized block
-			// => we should search for the finalized value
-			//
-			// BUT since we're not guaranteeing to provide correct values for forks
-			// behind the finalized block, check if the block is finalized first
-			if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? {
-				return Err(ClientError::NotInFinalizedChain)
-			}
-
-			self.best_finalized_entry.as_ref()
-		} else if self.unfinalized.is_empty() {
-			// there are no unfinalized entries
-			// => we should search for the finalized value
-			self.best_finalized_entry.as_ref()
-		} else {
-			// there are unfinalized entries
-			// => find the fork containing given block and read from this fork
-			// IF there's no matching fork, ensure that this isn't a block from a fork that has
-			// forked behind the best finalized block and search at finalized fork
-
-			match self.find_unfinalized_fork(&at)? {
-				Some(fork) => Some(&fork.head),
-				None => match self.best_finalized_entry.as_ref() {
-					Some(best_finalized_entry)
-						if chain::is_connected_to_block(
-							&self.storage,
-							&at,
-							&best_finalized_entry.valid_from,
-						)? =>
-						Some(best_finalized_entry),
-					_ => None,
-				},
-			}
-		};
-
-		match head {
-			Some(head) => head
-				.search_best_before(&self.storage, at.number)
-				.map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))),
-			None => Ok(None),
-		}
-	}
-
-	/// When new block is inserted into database.
-	///
-	/// None passed as value means that the value has not changed since previous block.
-	pub fn on_block_insert<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		value: Option<T>,
-		entry_type: EntryType,
-		operations: &mut CommitOperations<Block, T>,
-	) -> ClientResult<()> {
-		Ok(operations
-			.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?))
-	}
-
-	/// When previously inserted block is finalized.
-	pub fn on_block_finalize<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		operations: &mut CommitOperations<Block, T>,
-	) -> ClientResult<()> {
-		Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?))
-	}
-
-	/// When block is reverted.
-	pub fn on_block_revert<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		reverted_block: &ComplexBlockId<Block>,
-		operations: &mut CommitOperations<Block, T>,
-	) -> ClientResult<()> {
-		Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?)))
-	}
-	/// When transaction is committed.
-	pub fn on_transaction_commit(&mut self, ops: CommitOperations<Block, T>) {
-		for op in ops.operations {
-			match op {
-				CommitOperation::AppendNewBlock(index, best_block) => {
-					let mut fork = self.unfinalized.get_mut(index).expect(
-						"ListCache is a crate-private type;
-							internal clients of ListCache are committing transaction while cache is locked;
-							CommitOperation holds valid references while cache is locked; qed",
-					);
-					fork.best_block = Some(best_block);
-				},
-				CommitOperation::AppendNewEntry(index, entry) => {
-					let mut fork = self.unfinalized.get_mut(index).expect(
-						"ListCache is a crate-private type;
-							internal clients of ListCache are committing transaction while cache is locked;
-							CommitOperation holds valid references while cache is locked; qed",
-					);
-					fork.best_block = Some(entry.valid_from.clone());
-					fork.head = entry;
-				},
-				CommitOperation::AddNewFork(entry) => {
-					self.unfinalized
-						.push(Fork { best_block: Some(entry.valid_from.clone()), head: entry });
-				},
-				CommitOperation::BlockFinalized(block, finalizing_entry, forks) => {
-					self.best_finalized_block = block;
-					if let Some(finalizing_entry) = finalizing_entry {
-						self.best_finalized_entry = Some(finalizing_entry);
-					}
-					for fork_index in forks.iter().rev() {
-						self.unfinalized.remove(*fork_index);
-					}
-				},
-				CommitOperation::BlockReverted(forks) => {
-					for (fork_index, updated_fork) in forks.into_iter().rev() {
-						match updated_fork {
-							Some(updated_fork) => self.unfinalized[fork_index] = updated_fork,
-							None => {
-								self.unfinalized.remove(fork_index);
-							},
-						}
-					}
-				},
-			}
-		}
-	}
-
-	fn do_on_block_insert<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		value: Option<T>,
-		entry_type: EntryType,
-		operations: &CommitOperations<Block, T>,
-	) -> ClientResult<Option<CommitOperation<Block, T>>> {
-		// this guarantee is currently provided by LightStorage && we're relying on it here
-		let prev_operation = operations.operations.last();
-		debug_assert!(
-			entry_type != EntryType::Final ||
-				self.unfinalized.is_empty() ||
-				self.best_finalized_block.hash == parent.hash ||
-				match prev_operation {
-					Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) =>
-						best_finalized_block.hash == parent.hash,
-					_ => false,
-				}
-		);
-
-		// we do not store any values behind finalized
-		if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
-			return Ok(None)
-		}
-
-		// if the block is not final, it is possibly appended to/forking from existing unfinalized
-		// fork
-		let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
-		if !is_final {
-			let mut fork_and_action = None;
-
-			// when value hasn't changed and block isn't final, there's nothing we need to do
-			if value.is_none() {
-				return Ok(None)
-			}
-
-			// first: try to find fork that is known to have the best block we're appending to
-			for (index, fork) in self.unfinalized.iter().enumerate() {
-				if fork.try_append(&parent) {
-					fork_and_action = Some((index, ForkAppendResult::Append));
-					break
-				}
-			}
-
-			// if not found, check cases:
-			// - we're appending to the fork for the first time after restart;
-			// - we're forking existing unfinalized fork from the middle;
-			if fork_and_action.is_none() {
-				let best_finalized_entry_block =
-					self.best_finalized_entry.as_ref().map(|f| f.valid_from.number);
-				for (index, fork) in self.unfinalized.iter().enumerate() {
-					if let Some(action) =
-						fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)?
-					{
-						fork_and_action = Some((index, action));
-						break
-					}
-				}
-			}
-
-			// if we have found matching unfinalized fork => early exit
-			match fork_and_action {
-				// append to unfinalized fork
-				Some((index, ForkAppendResult::Append)) => {
-					let new_storage_entry = match self.unfinalized[index].head.try_update(value) {
-						Some(new_storage_entry) => new_storage_entry,
-						None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))),
-					};
-
-					tx.insert_storage_entry(&block, &new_storage_entry);
-					let operation =
-						CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block));
-					tx.update_meta(
-						self.best_finalized_entry.as_ref(),
-						&self.unfinalized,
-						&operation,
-					);
-					return Ok(Some(operation))
-				},
-				// fork from the middle of unfinalized fork
-				Some((_, ForkAppendResult::Fork(prev_valid_from))) => {
-					// it is possible that we're inserting extra (but still required) fork here
-					let new_storage_entry = StorageEntry {
-						prev_valid_from: Some(prev_valid_from),
-						value: value.expect("checked above that !value.is_none(); qed"),
-					};
-
-					tx.insert_storage_entry(&block, &new_storage_entry);
-					let operation =
-						CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
-					tx.update_meta(
-						self.best_finalized_entry.as_ref(),
-						&self.unfinalized,
-						&operation,
-					);
-					return Ok(Some(operation))
-				},
-				None => (),
-			}
-		}
-
-		// if we're here, then one of the following is true:
-		// - either we're inserting a final block => all ancestors are already finalized AND the
-		//   only thing we can do is to try to update the last finalized entry
-		// - or we're inserting non-final blocks that have no ancestors in any known unfinalized
-		//   forks
-
-		let new_storage_entry = match self.best_finalized_entry.as_ref() {
-			Some(best_finalized_entry) => best_finalized_entry.try_update(value),
-			None if value.is_some() => Some(StorageEntry {
-				prev_valid_from: None,
-				value: value.expect("value.is_some(); qed"),
-			}),
-			None => None,
-		};
-
-		if !is_final {
-			return Ok(match new_storage_entry {
-				Some(new_storage_entry) => {
-					tx.insert_storage_entry(&block, &new_storage_entry);
-					let operation =
-						CommitOperation::AddNewFork(new_storage_entry.into_entry(block));
-					tx.update_meta(
-						self.best_finalized_entry.as_ref(),
-						&self.unfinalized,
-						&operation,
-					);
-					Some(operation)
-				},
-				None => None,
-			})
-		}
-
-		// cleanup database from abandoned unfinalized forks and obsolete finalized entries
-		let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation);
-		self.prune_finalized_entries(tx, &block);
-
-		match new_storage_entry {
-			Some(new_storage_entry) => {
-				tx.insert_storage_entry(&block, &new_storage_entry);
-				let operation = CommitOperation::BlockFinalized(
-					block.clone(),
-					Some(new_storage_entry.into_entry(block)),
-					abandoned_forks,
-				);
-				tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
-				Ok(Some(operation))
-			},
-			None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))),
-		}
-	}
-
-	fn do_on_block_finalize<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		operations: &CommitOperations<Block, T>,
-	) -> ClientResult<Option<CommitOperation<Block, T>>> {
-		// this guarantee is currently provided by db backend && we're relying on it here
-		let prev_operation = operations.operations.last();
-		debug_assert!(
-			self.best_finalized_block.hash == parent.hash ||
-				match prev_operation {
-					Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) =>
-						best_finalized_block.hash == parent.hash,
-					_ => false,
-				}
-		);
-
-		// there could be at most one entry that is finalizing
-		let finalizing_entry =
-			self.storage.read_entry(&block)?.map(|entry| entry.into_entry(block.clone()));
-
-		// cleanup database from abandoned unfinalized forks and obsolete finalized entries
-		let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation);
-		self.prune_finalized_entries(tx, &block);
-
-		let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks);
-		tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
-
-		Ok(Some(operation))
-	}
-
-	fn do_on_block_revert<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		reverted_block: &ComplexBlockId<Block>,
-	) -> ClientResult<CommitOperation<Block, T>> {
-		// can't revert finalized blocks
-		debug_assert!(self.best_finalized_block.number < reverted_block.number);
-
-		// iterate all unfinalized forks and truncate/destroy if required
-		let mut updated = BTreeMap::new();
-		for (index, fork) in self.unfinalized.iter().enumerate() {
-			// we only need to truncate the fork if its head is at or above the reverted block
-			if fork.head.valid_from.number < reverted_block.number {
-				continue
-			}
-
-			// we only need to truncate the fork if its head is connected to the reverted block
-			if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)?
-			{
-				continue
-			}
-
-			let updated_fork = fork.truncate(
-				&self.storage,
-				tx,
-				reverted_block.number,
-				self.best_finalized_block.number,
-			)?;
-			updated.insert(index, updated_fork);
-		}
-
-		// schedule commit operation and update meta
-		let operation = CommitOperation::BlockReverted(updated);
-		tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation);
-
-		Ok(operation)
-	}
-
-	/// Prune old finalized entries.
-	fn prune_finalized_entries<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		block: &ComplexBlockId<Block>,
-	) {
-		let prune_depth = match self.pruning_strategy {
-			PruningStrategy::ByDepth(prune_depth) => prune_depth,
-			PruningStrategy::NeverPrune => return,
-		};
-
-		let mut do_pruning = || -> ClientResult<()> {
-			// calculate last ancient block number
-			let ancient_block = match block.number.checked_sub(&prune_depth) {
-				Some(number) => match self.storage.read_id(number)? {
-					Some(hash) => ComplexBlockId::new(hash, number),
-					None => return Ok(()),
-				},
-				None => return Ok(()),
-			};
-
-			// if there's an entry at this block:
-			// - remove reference from this entry to the previous entry
-			// - destroy fork starting with previous entry
-			let current_entry = match self.storage.read_entry(&ancient_block)? {
-				Some(current_entry) => current_entry,
-				None => return Ok(()),
-			};
-			let first_entry_to_truncate = match current_entry.prev_valid_from {
-				Some(prev_valid_from) => prev_valid_from,
-				None => return Ok(()),
-			};
-
-			// truncate ancient entry
-			tx.insert_storage_entry(
-				&ancient_block,
-				&StorageEntry { prev_valid_from: None, value: current_entry.value },
-			);
-
-			// destroy 'fork' ending with previous entry
-			destroy_fork(first_entry_to_truncate, &self.storage, tx, None)
-		};
-
-		if let Err(error) = do_pruning() {
-			warn!(target: "db", "Failed to prune ancient cache entries: {}", error);
-		}
-	}
-
-	/// Try to destroy abandoned forks (forked before best finalized block) when block is finalized.
-	fn destroy_abandoned_forks<Tx: StorageTransaction<Block, T>>(
-		&self,
-		tx: &mut Tx,
-		block: &ComplexBlockId<Block>,
-		prev_operation: Option<&CommitOperation<Block, T>>,
-	) -> BTreeSet<usize> {
-		// if some block has been finalized already => take it into account
-		let prev_abandoned_forks = match prev_operation {
-			Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) =>
-				Some(abandoned_forks),
-			_ => None,
-		};
-
-		let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new());
-		let live_unfinalized = self.unfinalized.iter().enumerate().filter(|(idx, _)| {
-			prev_abandoned_forks
-				.map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx))
-				.unwrap_or(true)
-		});
-		for (index, fork) in live_unfinalized {
-			if fork.head.valid_from.number == block.number {
-				destroyed.insert(index);
-				if fork.head.valid_from.hash != block.hash {
-					if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) {
-						warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error);
-					}
-				}
-			}
-		}
-
-		destroyed
-	}
-
-	/// Search unfinalized fork where given block belongs.
-	fn find_unfinalized_fork(
-		&self,
-		block: &ComplexBlockId<Block>,
-	) -> ClientResult<Option<&Fork<Block, T>>> {
-		for unfinalized in &self.unfinalized {
-			if unfinalized.matches(&self.storage, block)? {
-				return Ok(Some(&unfinalized))
-			}
-		}
-
-		Ok(None)
-	}
-}
-
-impl<Block: BlockT, T: CacheItemT> Fork<Block, T> {
-	/// Get reference to the head entry of this fork.
-	pub fn head(&self) -> &Entry<Block, T> {
-		&self.head
-	}
-
-	/// Check if the block is part of the fork.
-	pub fn matches<S: Storage<Block, T>>(
-		&self,
-		storage: &S,
-		block: &ComplexBlockId<Block>,
-	) -> ClientResult<bool> {
-		let range = self.head.search_best_range_before(storage, block.number)?;
-		match range {
-			None => Ok(false),
-			Some((begin, end)) =>
-				chain::is_connected_to_range(storage, block, (&begin, end.as_ref())),
-		}
-	}
-
-	/// Try to append NEW block to the fork. This method will only 'work' (return true) when block
-	/// is actually appended to the fork AND the best known block of the fork is known (i.e. some
-	/// block has already been appended to this fork after last restart).
-	pub fn try_append(&self, parent: &ComplexBlockId<Block>) -> bool {
-		// when the best block of the fork is known, the check is trivial
-		//
-		// most of calls will hopefully end here, because best_block is only unknown
-		// after restart and until new block is appended to the fork
-		self.best_block.as_ref() == Some(parent)
-	}
-
-	/// Try to append new block to the fork OR fork it.
-	pub fn try_append_or_fork<S: Storage<Block, T>>(
-		&self,
-		storage: &S,
-		parent: &ComplexBlockId<Block>,
-		best_finalized_entry_block: Option<NumberFor<Block>>,
-	) -> ClientResult<Option<ForkAppendResult<Block>>> {
-		// try to find entries that are (possibly) surrounding the parent block
-		let range = self.head.search_best_range_before(storage, parent.number)?;
-		let begin = match range {
-			Some((begin, _)) => begin,
-			None => return Ok(None),
-		};
-
-		// check if the parent is connected to the beginning of the range
-		if !chain::is_connected_to_block(storage, parent, &begin)? {
-			return Ok(None)
-		}
-
-		// the block is connected to the begin-entry. If begin is the head entry
-		// => we need to append new block to the fork
-		if begin == self.head.valid_from {
-			return Ok(Some(ForkAppendResult::Append))
-		}
-
-		// the parent block belongs to this fork AND it is located after last finalized entry
-		// => we need to make a new fork
-		if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) {
-			return Ok(Some(ForkAppendResult::Fork(begin)))
-		}
-
-		Ok(None)
-	}
-
-	/// Destroy fork by deleting all unfinalized entries.
-	pub fn destroy<S: Storage<Block, T>, Tx: StorageTransaction<Block, T>>(
-		&self,
-		storage: &S,
-		tx: &mut Tx,
-		best_finalized_block: Option<NumberFor<Block>>,
-	) -> ClientResult<()> {
-		destroy_fork(self.head.valid_from.clone(), storage, tx, best_finalized_block)
-	}
-
-	/// Truncate fork by deleting all entries that are descendants of given block.
-	pub fn truncate<S: Storage<Block, T>, Tx: StorageTransaction<Block, T>>(
-		&self,
-		storage: &S,
-		tx: &mut Tx,
-		reverting_block: NumberFor<Block>,
-		best_finalized_block: NumberFor<Block>,
-	) -> ClientResult<Option<Fork<Block, T>>> {
-		let mut current = self.head.valid_from.clone();
-		loop {
-			// read pointer to previous entry
-			let entry = storage.require_entry(&current)?;
-
-			// truncation stops when we have reached the ancestor of truncated block
-			if current.number < reverting_block {
-				// if we have reached finalized block => destroy fork
-				if chain::is_finalized_block(storage, &current, best_finalized_block)? {
-					return Ok(None)
-				}
-
-				// else fork needs to be updated
-				return Ok(Some(Fork { best_block: None, head: entry.into_entry(current) }))
-			}
-
-			tx.remove_storage_entry(&current);
-
-			// truncation also stops when there are no more entries in the list
-			current = match entry.prev_valid_from {
-				Some(prev_valid_from) => prev_valid_from,
-				None => return Ok(None),
-			};
-		}
-	}
-}
-
-impl<Block: BlockT, T: CacheItemT> Default for CommitOperations<Block, T> {
-	fn default() -> Self {
-		CommitOperations { operations: Vec::new() }
-	}
-}
-
-// This should never be allowed for non-test code to avoid revealing its internals.
-#[cfg(test)]
-impl<Block: BlockT, T: CacheItemT> From<Vec<CommitOperation<Block, T>>>
-	for CommitOperations<Block, T>
-{
-	fn from(operations: Vec<CommitOperation<Block, T>>) -> Self {
-		CommitOperations { operations }
-	}
-}
-
-impl<Block: BlockT, T: CacheItemT> CommitOperations<Block, T> {
-	/// Append operation to the set.
-	fn append(&mut self, new_operation: Option<CommitOperation<Block, T>>) {
-		let new_operation = match new_operation {
-			Some(new_operation) => new_operation,
-			None => return,
-		};
-
-		let last_operation = match self.operations.pop() {
-			Some(last_operation) => last_operation,
-			None => {
-				self.operations.push(new_operation);
-				return
-			},
-		};
-
-		// we are able (and obliged) to merge two consecutive block finalization operations
-		match last_operation {
-			CommitOperation::BlockFinalized(
-				old_finalized_block,
-				old_finalized_entry,
-				old_abandoned_forks,
-			) => match new_operation {
-				CommitOperation::BlockFinalized(
-					new_finalized_block,
-					new_finalized_entry,
-					new_abandoned_forks,
-				) => {
-					self.operations.push(CommitOperation::BlockFinalized(
-						new_finalized_block,
-						new_finalized_entry,
-						new_abandoned_forks,
-					));
-				},
-				_ => {
-					self.operations.push(CommitOperation::BlockFinalized(
-						old_finalized_block,
-						old_finalized_entry,
-						old_abandoned_forks,
-					));
-					self.operations.push(new_operation);
-				},
-			},
-			_ => {
-				self.operations.push(last_operation);
-				self.operations.push(new_operation);
-			},
-		}
-	}
-}
-
-/// Destroy fork by deleting all unfinalized entries.
-pub fn destroy_fork<
-	Block: BlockT,
-	T: CacheItemT,
-	S: Storage<Block, T>,
-	Tx: StorageTransaction<Block, T>,
->(
-	head_valid_from: ComplexBlockId<Block>,
-	storage: &S,
-	tx: &mut Tx,
-	best_finalized_block: Option<NumberFor<Block>>,
-) -> ClientResult<()> {
-	let mut current = head_valid_from;
-	loop {
-		// optionally: deletion stops when we found entry at finalized block
-		if let Some(best_finalized_block) = best_finalized_block {
-			if chain::is_finalized_block(storage, &current, best_finalized_block)? {
-				return Ok(())
-			}
-		}
-
-		// read pointer to previous entry
-		let entry = storage.require_entry(&current)?;
-		tx.remove_storage_entry(&current);
-
-		// deletion stops when there are no more entries in the list
-		current = match entry.prev_valid_from {
-			Some(prev_valid_from) => prev_valid_from,
-			None => return Ok(()),
-		};
-	}
-}
-
-/// Blockchain related functions.
-mod chain {
-	use super::*;
-	use sp_runtime::traits::Header as HeaderT;
-
-	/// Is block1 connected to both ends of the range?
-	pub fn is_connected_to_range<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
-		storage: &S,
-		block: &ComplexBlockId<Block>,
-		range: (&ComplexBlockId<Block>, Option<&ComplexBlockId<Block>>),
-	) -> ClientResult<bool> {
-		let (begin, end) = range;
-		Ok(is_connected_to_block(storage, block, begin)? &&
-			match end {
-				Some(end) => is_connected_to_block(storage, block, end)?,
-				None => true,
-			})
-	}
-
-	/// Is block1 directly connected (i.e. part of the same fork) to block2?
-	pub fn is_connected_to_block<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
-		storage: &S,
-		block1: &ComplexBlockId<Block>,
-		block2: &ComplexBlockId<Block>,
-	) -> ClientResult<bool> {
-		let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) };
-		let mut current = storage
-			.read_header(&end.hash)?
-			.ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?;
-		while *current.number() > begin.number {
-			current = storage
-				.read_header(current.parent_hash())?
-				.ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?;
-		}
-
-		Ok(begin.hash == current.hash())
-	}
-
-	/// Returns true if the given block is finalized.
-	pub fn is_finalized_block<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
-		storage: &S,
-		block: &ComplexBlockId<Block>,
-		best_finalized_block: NumberFor<Block>,
-	) -> ClientResult<bool> {
-		if block.number > best_finalized_block {
-			return Ok(false)
-		}
-
-		storage.read_id(block.number).map(|hash| hash.as_ref() == Some(&block.hash))
-	}
-}
-
-/// Read list cache forks at the given block IDs.
-/// Read list cache forks at blocks IDs.
-fn read_forks<Block: BlockT, T: CacheItemT, S: Storage<Block, T>>(
-    storage: &S,
-    meta: Metadata<Block>,
-) -> ClientResult<(Option<Entry<Block, T>>, Vec<Fork<Block, T>>)> {
-    let finalized = match meta.finalized {
-        Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)),
-        None => None,
-    };
-
-    let unfinalized = meta
-        .unfinalized
-        .into_iter()
-        .map(|unfinalized| {
-            storage.require_entry(&unfinalized).map(|storage_entry| Fork {
-                best_block: None,
-                head: storage_entry.into_entry(unfinalized),
-            })
-        })
-        .collect::<Result<_, _>>()?;
-
-    Ok((finalized, unfinalized))
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage};
-    use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header};
-    use substrate_test_runtime_client::runtime::H256;
-
-    type Block = RawBlock<ExtrinsicWrapper<u64>>;
-
-    fn test_id(number: u64) -> ComplexBlockId<Block> {
-        ComplexBlockId::new(H256::from_low_u64_be(number), number)
-    }
-
-    fn correct_id(number: u64) -> ComplexBlockId<Block> {
-        ComplexBlockId::new(test_header(number).hash(), number)
-    }
-
-    fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId<Block> {
-        ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number)
-    }
-
-    fn test_header(number: u64) -> Header {
-        Header {
-            parent_hash: if number == 0 {
-                Default::default()
-            } else {
-                test_header(number - 1).hash()
-            },
-            number,
-            state_root: Default::default(),
-            extrinsics_root: Default::default(),
-            digest: Default::default(),
-        }
-    }
-
-    fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header {
-        if fork_from == number {
-            test_header(number)
-        } else {
-            Header {
-                parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(),
-                number,
-                state_root: H256::from_low_u64_be(1 + fork_nonce),
-                extrinsics_root: Default::default(),
-                digest: Default::default(),
-            }
-        }
-    }
-
-    #[test]
-    fn list_value_at_block_works() {
-        // when block is earlier than best finalized block AND it is not finalized
-        // --- 50 ---
-        // ----------> [100]
-        assert!(ListCache::<_, u64, _>::new(
-            DummyStorage::new(),
-            PruningStrategy::ByDepth(1024),
-            test_id(100)
-        )
-        .unwrap()
-        .value_at_block(&test_id(50))
-        .is_err());
-        // when block is earlier than best finalized block AND it is finalized AND value is some
-        // [30] ---- 50 ---> [100]
-        assert_eq!(
-            ListCache::new(
-                DummyStorage::new()
-                    .with_meta(Some(test_id(100)), Vec::new())
-                    .with_id(50, H256::from_low_u64_be(50))
-                    .with_entry(
-                        test_id(100),
-                        StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }
-                    )
-                    .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }),
-                PruningStrategy::ByDepth(1024),
-                test_id(100)
-            )
-            .unwrap()
-            .value_at_block(&test_id(50))
-            .unwrap(),
-            Some((test_id(30), Some(test_id(100)), 30))
-        );
-        // when block is the best finalized block AND value is some
-        // ---> [100]
-        assert_eq!(
-            ListCache::new(
-                DummyStorage::new()
-                    .with_meta(Some(test_id(100)), Vec::new())
-                    .with_id(100, H256::from_low_u64_be(100))
-                    .with_entry(
-                        test_id(100),
-                        StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }
-                    )
-                    .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }),
-                PruningStrategy::ByDepth(1024),
-                test_id(100)
-            )
-            .unwrap()
-            .value_at_block(&test_id(100))
-            .unwrap(),
-            Some((test_id(100), None, 100))
-        );
-        // when block is parallel to the best finalized block
-        // ---- 100
-        // ---> [100]
-        assert!(ListCache::new(
-            DummyStorage::new()
-                .with_meta(Some(test_id(100)), Vec::new())
-                .with_id(50, H256::from_low_u64_be(50))
.with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) - .is_err()); - - // when block is later than last finalized block AND there are no forks AND finalized value - // is Some ---> [100] --- 200 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 } - ), - PruningStrategy::ByDepth(1024), - test_id(100) - ) - .unwrap() - .value_at_block(&test_id(200)) - .unwrap(), - Some((test_id(100), None, 100)) - ); - - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is Some - // --- 3 - // ---> [2] /---------> [4] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 2, 3)) - .unwrap(), - Some((correct_id(2), None, 2)) - ); - // when block is later than last finalized block AND there are no matching forks - // AND block is not connected to finalized block - // --- 2 --- 3 - // 1 /---> [2] ---------> [4] - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 1, 3)) - .unwrap(), - None - ); - - // when block is later than last finalized block AND it appends to unfinalized fork from the - // end AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&correct_id(5)) - .unwrap(), - Some((correct_id(4), None, 4)) - ); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 } - ) - .with_entry(correct_id(2), StorageEntry { 
prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), - test_id(2) - ) - .unwrap() - .value_at_block(&fork_id(0, 2, 3)) - .unwrap(), - Some((correct_id(2), None, 2)) - ); - } - - #[test] - fn list_on_block_insert_works() { - let nfin = EntryType::NonFinal; - let fin = EntryType::Final; - - // when trying to insert block < finalized number - let mut ops = Default::default(); - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(49), - test_id(50), - Some(50), - nfin, - &mut ops, - ) - .unwrap() - .is_none()); - // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(99), - test_id(100), - Some(100), - nfin, - &Default::default(), - ) - .unwrap() - .is_none()); - - // when trying to insert non-final block AND it appends to the best block of unfinalized - // fork AND new value is the same as in the fork' best block - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![test_id(4)]) - .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - PruningStrategy::ByDepth(1024), - test_id(2), - ) - .unwrap(); - cache.unfinalized[0].best_block = Some(test_id(4)); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - test_id(4), - test_id(5), - Some(4), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized - // fork AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - test_id(4), - test_id(5), - Some(5), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] }) - ); - - // when trying to insert non-final block AND it is the first block that appends to the best - // block of unfinalized fork AND new value is the same as in the fork' best block - let cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), - test_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(4), - nfin, - &Default::default(), - ) - .unwrap(), - Some(CommitOperation::AppendNewBlock(0, correct_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it is the first block that appends to the best - // block of unfinalized fork AND new value is the same 
as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(5), - nfin, - &Default::default(), - ) - .unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] }) - ); - - // when trying to insert non-final block AND it forks unfinalized fork - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }, - ) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(3), - fork_id(0, 3, 4), - Some(14), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), - ); - assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { - finalized: Some(correct_id(2)), - unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] - }) - ); - - // when trying to insert non-final block AND there are no unfinalized forks - // AND value is the same as last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - nfin, - &Default::default() - ) - .unwrap(), - None, - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND there are no unfinalized forks - // AND value differs from last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(3), - nfin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(3)] }) - ); - - // when inserting finalized entry AND there are no previous finalized entries - let cache = - ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - 
correct_id(3), - Some(3), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) - ); - // when inserting finalized entry AND value is the same as in previous finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when inserting finalized entry AND value differs from previous finalized - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(3), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] }) - ); - - // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_insert( - &mut tx, - correct_id(2), - correct_id(3), - Some(2), - fin, - &Default::default() - ) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - None, - vec![0].into_iter().collect() - )), - ); - } - - #[test] - fn list_on_block_finalized_works() { - // finalization does not finalizes entry if it does not exists - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }), - ); - // finalization finalizes entry - let 
cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(4), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(5), - Some(Entry { valid_from: correct_id(5), value: 5 }), - vec![0].into_iter().collect(), - )), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] }) - ); - // finalization removes abandoned forks - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache - .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - None, - vec![0].into_iter().collect() - )), - ); - } - - #[test] - fn list_transaction_commit_works() { - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }, - ) - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }, - ), - PruningStrategy::ByDepth(1024), - correct_id(2), - ) - .unwrap(); - - // when new block is appended to unfinalized fork - cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); - // when new entry is appended to unfinalized fork - cache.on_transaction_commit( - vec![CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 })] - .into(), - ); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); - assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); - // when new fork is added - cache.on_transaction_commit( - vec![CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 })] - .into(), - ); - assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); - assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); - // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit( - vec![CommitOperation::BlockFinalized( - correct_id(20), - Some(Entry { valid_from: correct_id(20), value: 20 }), - vec![0, 1, 2].into_iter().collect(), - )] - .into(), - ); - assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!( - cache.best_finalized_entry, - Some(Entry { valid_from: correct_id(20), value: 20 }) - ); - assert!(cache.unfinalized.is_empty()); - } - - #[test] - fn list_find_unfinalized_fork_works() { - // ----------> [3] - // --- [2] ---------> 4 ---> [5] - 
assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&correct_id(4)).into()) - .unwrap() - .unwrap() - .head - .valid_from, - correct_id(5) - ); - // --- [2] ---------------> [5] - // ----------> [3] ---> 4 - assert_eq!( - ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry( - correct_id(2), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&fork_id(0, 1, 4)).into()) - .unwrap() - .unwrap() - .head - .valid_from, - fork_id(0, 1, 3) - ); - // --- [2] ---------------> [5] - // ----------> [3] - // -----------------> 4 - assert!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry( - fork_id(0, 1, 3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 } - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 } - ) - .with_entry( - correct_id(2), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 } - ) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)) - .with_header(fork_header(1, 1, 2)) - .with_header(fork_header(1, 1, 3)) - .with_header(fork_header(1, 1, 4)), - PruningStrategy::ByDepth(1024), - correct_id(0) - ) - .unwrap() - .find_unfinalized_fork((&fork_id(1, 1, 4)).into()) - .unwrap() - .is_none()); - } - - #[test] - fn fork_matches_works() { - // when block is not within list range - let storage = DummyStorage::new() - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, (&test_id(20)).into()) - .unwrap(), - false - ); - // when block is not connected to the begin block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry 
{ valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&fork_id(0, 2, 4)).into()) - .unwrap(), - false - ); - // when block is not connected to the end block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 3, 4)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&fork_id(0, 3, 4)).into()) - .unwrap(), - false - ); - // when block is connected to the begin block AND end is open - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) - .with_header(test_header(5)) - .with_header(test_header(6)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&correct_id(6)).into()) - .unwrap(), - true - ); - // when block is connected to the begin block AND to the end block - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .matches(&storage, (&correct_id(4)).into()) - .unwrap(), - true - ); - } - - #[test] - fn fork_try_append_works() { - // when best block is unknown - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), - false - ); - // when best block is known but different - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(101)), - false - ); - // when best block is known and the same - assert_eq!( - Fork::<_, u64> { - best_block: Some(test_id(100)), - head: Entry { valid_from: test_id(100), value: 0 } - } - .try_append(&test_id(100)), - true - ); - } - - #[test] - fn fork_try_append_or_fork_works() { - // when there's no entry before parent - let storage = DummyStorage::new() - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!( - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append_or_fork(&storage, &test_id(30), None) - .unwrap(), - None - ); - // when parent does not belong to the fork - let storage = DummyStorage::new() - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) - .unwrap(), - None - ); - // when the entry before parent is the head entry - let storage = DummyStorage::new() - 
.with_entry( - ComplexBlockId::new(test_header(5).hash(), 5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_header(test_header(6)) - .with_header(test_header(5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(5), value: 100 } - } - .try_append_or_fork(&storage, &correct_id(6), None) - .unwrap(), - Some(ForkAppendResult::Append) - ); - // when the parent located after last finalized entry - let storage = DummyStorage::new() - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(6), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), None) - .unwrap(), - Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3))) - ); - // when the parent located before last finalized entry - let storage = DummyStorage::new() - .with_entry( - correct_id(6), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!( - Fork::<_, u64> { - best_block: None, - head: Entry { valid_from: correct_id(6), value: 100 } - } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)) - .unwrap(), - None - ); - } - - #[test] - fn fork_destroy_works() { - // when we reached finalized entry without iterations - let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert!(tx.removed_entries().is_empty()); - // when we reach finalized entry with iterations - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 }) - .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: 5 }) - .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert_eq!( - *tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash, test_id(20).hash] - .into_iter() - .collect() - ); - // when we reach beginning of fork before finalized block - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry( - test_id(100), - StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }, - ) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: 
Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)) - .unwrap(); - assert_eq!( - *tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash].into_iter().collect() - ); - } - - #[test] - fn is_connected_to_block_fails() { - // when storage returns error - assert!(chain::is_connected_to_block::<_, u64, _>( - &FaultyStorage, - (&test_id(1)).into(), - &test_id(100), - ) - .is_err(),); - // when there's no header in the storage - assert!(chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new(), - (&test_id(1)).into(), - &test_id(100), - ) - .is_err(),); - } - - #[test] - fn is_connected_to_block_works() { - // when without iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new().with_header(test_header(1)), - (&test_id(1)).into(), - &correct_id(1) - ) - .unwrap(), - false - ); - // when with ASC iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&test_id(0)).into(), - &correct_id(2) - ) - .unwrap(), - false - ); - // when with DESC iterations we end up with different block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), - &test_id(0) - ) - .unwrap(), - false - ); - // when without iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new().with_header(test_header(1)), - (&correct_id(1)).into(), - &correct_id(1) - ) - .unwrap(), - true - ); - // when with ASC iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(0)).into(), - &correct_id(2) - ) - .unwrap(), - true - ); - // when with DESC iterations we end up with the same block - assert_eq!( - chain::is_connected_to_block::<_, u64, _>( - &DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), - &correct_id(0) - ) - .unwrap(), - true - ); - } - - #[test] - fn is_finalized_block_fails() { - // when storage returns error - assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); - } - - #[test] - fn is_finalized_block_works() { - // when number of block is larger than last finalized block - assert_eq!( - chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), - false - ); - // when there's no hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), - false - ); - // when there's different hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>( - &DummyStorage::new().with_id(1, H256::from_low_u64_be(2)), - &test_id(1), - 100 - ) - .unwrap(), - false - ); - // when there's the same hash for this block number in the database - assert_eq!( - chain::is_finalized_block::<_, u64, _>( - &DummyStorage::new().with_id(1, H256::from_low_u64_be(1)), - &test_id(1), - 100 - ) - .unwrap(), - true - ); - } - - #[test] - fn read_forks_fails() { - // when storage returns error during finalized entry read - 
assert!(read_forks::( - &FaultyStorage, - Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } - ) - .is_err()); - // when storage returns error during unfinalized entry read - assert!(read_forks::( - &FaultyStorage, - Metadata { finalized: None, unfinalized: vec![test_id(1)] } - ) - .is_err()); - // when finalized entry is not found - assert!(read_forks::( - &DummyStorage::new(), - Metadata { finalized: Some(test_id(1)), unfinalized: vec![] } - ) - .is_err()); - // when unfinalized entry is not found - assert!(read_forks::( - &DummyStorage::new(), - Metadata { finalized: None, unfinalized: vec![test_id(1)] } - ) - .is_err()); - } - - #[test] - fn read_forks_works() { - let storage = DummyStorage::new() - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: 11 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: 0 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 33 }); - let expected = ( - Some(Entry { valid_from: test_id(10), value: 11 }), - vec![ - Fork { best_block: None, head: Entry { valid_from: test_id(20), value: 0 } }, - Fork { best_block: None, head: Entry { valid_from: test_id(30), value: 33 } }, - ], - ); - - assert_eq!( - expected, - read_forks( - &storage, - Metadata { - finalized: Some(test_id(10)), - unfinalized: vec![test_id(20), test_id(30)], - } - ) - .unwrap() - ); - } - - #[test] - fn ancient_entries_are_pruned_when_pruning_enabled() { - fn do_test(strategy: PruningStrategy) { - let cache = ListCache::new( - DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30, H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) - .with_entry( - test_id(20), - StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }, - ) - .with_entry( - test_id(30), - StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }, - ), - strategy, - test_id(9), - ) - .unwrap(); - let mut tx = DummyTransaction::new(); - - // when finalizing entry #10: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(10)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #19: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(19)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #20: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(20)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is - // enabled) - cache.prune_finalized_entries(&mut tx, &test_id(30)); - match strategy { - PruningStrategy::NeverPrune => { - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - }, - PruningStrategy::ByDepth(_) => { - assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!( - *tx.inserted_entries(), - vec![test_id(20).hash].into_iter().collect() - ); - }, - } - } - - do_test(PruningStrategy::ByDepth(10)); - do_test(PruningStrategy::NeverPrune) - } - - #[test] - fn revert_block_works() { - // 1 -> (2) -> 3 -> 4 -> 5 - // \ - // -> 5'' - // \ - // -> (3') -> 4' -> 5' - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta( - Some(correct_id(1)), - vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)], - ) - .with_id(1, 
correct_id(1).hash) - .with_entry(correct_id(1), StorageEntry { prev_valid_from: None, value: 1 }) - .with_entry( - correct_id(3), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 }, - ) - .with_entry( - correct_id(4), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 }, - ) - .with_entry( - correct_id(5), - StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 }, - ) - .with_entry( - fork_id(1, 2, 4), - StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 }, - ) - .with_entry( - fork_id(1, 2, 5), - StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 }, - ) - .with_entry( - fork_id(2, 4, 5), - StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 }, - ) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(1, 2, 3)) - .with_header(fork_header(1, 2, 4)) - .with_header(fork_header(1, 2, 5)) - .with_header(fork_header(2, 4, 5)), - PruningStrategy::ByDepth(1024), - correct_id(1), - ) - .unwrap(); - - // when 5 is reverted: entry 5 is truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); - assert_eq!( - op, - CommitOperation::BlockReverted( - vec![( - 0, - Some(Fork { - best_block: None, - head: Entry { valid_from: correct_id(4), value: 4 } - }) - ),] - .into_iter() - .collect() - ) - ); - cache.on_transaction_commit(vec![op].into()); - - // when 3 is reverted: entries 4+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); - assert_eq!( - op, - CommitOperation::BlockReverted(vec![(0, None), (2, None),].into_iter().collect()) - ); - cache.on_transaction_commit(vec![op].into()); - - // when 2 is reverted: entries 4'+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![(0, None),].into_iter().collect())); - cache.on_transaction_commit(vec![op].into()); - } - - #[test] - fn append_commit_operation_works() { - let mut ops = CommitOperations::default(); - ops.append(None); - assert_eq!(ops.operations, Vec::new()); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - ))); - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - )], - ); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - ))); - - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - )], - ); - } -} diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs deleted file mode 100644 index 7cee7a5146260..0000000000000 --- a/client/db/src/cache/list_entry.rs +++ /dev/null @@ -1,187 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! List-cache storage entries.
-
-use codec::{Decode, Encode};
-use sp_blockchain::Result as ClientResult;
-use sp_runtime::traits::{Block as BlockT, NumberFor};
-
-use crate::cache::{list_storage::Storage, CacheItemT, ComplexBlockId};
-
-/// Single list-based cache entry.
-#[derive(Debug)]
-#[cfg_attr(test, derive(PartialEq))]
-pub struct Entry<Block: BlockT, T> {
-    /// First block at which this value became actual.
-    pub valid_from: ComplexBlockId<Block>,
-    /// Value stored at this entry.
-    pub value: T,
-}
-
-/// Internal representation of the single list-based cache entry. The entry points to the
-/// previous entry in the cache, allowing us to traverse back in time in list-style.
-#[derive(Debug, Encode, Decode)]
-#[cfg_attr(test, derive(Clone, PartialEq))]
-pub struct StorageEntry<Block: BlockT, T> {
-    /// None if valid from the beginning.
-    pub prev_valid_from: Option<ComplexBlockId<Block>>,
-    /// Value stored at this entry.
-    pub value: T,
-}
-
-impl<Block: BlockT, T: CacheItemT> Entry<Block, T> {
-    /// Returns Some if the entry should be updated with the new value.
-    pub fn try_update(&self, value: Option<T>) -> Option<StorageEntry<Block, T>> {
-        match value {
-            Some(value) => match self.value == value {
-                true => None,
-                false =>
-                    Some(StorageEntry { prev_valid_from: Some(self.valid_from.clone()), value }),
-            },
-            None => None,
-        }
-    }
-
-    /// Wrapper that calls `search_best_before` to get the range where the given block fits.
-    pub fn search_best_range_before<S: Storage<Block, T>>(
-        &self,
-        storage: &S,
-        block: NumberFor<Block>,
-    ) -> ClientResult<Option<(ComplexBlockId<Block>, Option<ComplexBlockId<Block>>)>> {
-        Ok(self
-            .search_best_before(storage, block)?
-            .map(|(entry, next)| (entry.valid_from, next)))
-    }
-
-    /// Searches the list, ending with THIS entry, for the best entry preceding (or at)
-    /// the given block number.
-    /// If the entry is found, the result is the entry and the block id of the next entry
-    /// (if one exists).
-    /// NOTE that this function does not check that the passed block is actually linked to
-    /// the blocks it found.
-    pub fn search_best_before<S: Storage<Block, T>>(
-        &self,
-        storage: &S,
-        block: NumberFor<Block>,
-    ) -> ClientResult<Option<(Entry<Block, T>, Option<ComplexBlockId<Block>>)>> {
-        // we're looking for the best value
-        let mut next = None;
-        let mut current = self.valid_from.clone();
-        if block >= self.valid_from.number {
-            let value = self.value.clone();
-            return Ok(Some((Entry { valid_from: current, value }, next)))
-        }
-
-        // else - travel back in time
-        loop {
-            let entry = storage.require_entry(&current)?;
-            if block >= current.number {
-                return Ok(Some((Entry { valid_from: current, value: entry.value }, next)))
-            }
-
-            next = Some(current);
-            current = match entry.prev_valid_from {
-                Some(prev_valid_from) => prev_valid_from,
-                None => return Ok(None),
-            };
-        }
-    }
-}
-
-impl<Block: BlockT, T> StorageEntry<Block, T> {
-    /// Converts storage entry into an entry, valid from given block.
-    pub fn into_entry(self, valid_from: ComplexBlockId<Block>) -> Entry<Block, T> {
-        Entry { valid_from, value: self.value }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage};
-    use substrate_test_runtime_client::runtime::{Block, H256};
-
-    fn test_id(number: u64) -> ComplexBlockId<Block> {
-        ComplexBlockId::new(H256::from_low_u64_be(number), number)
-    }
-
-    #[test]
-    fn entry_try_update_works() {
-        // when trying to update with None value
-        assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: 42 }.try_update(None), None);
-        // when trying to update with the same Some value
-        assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None);
-        // when trying to update with different Some value
-        assert_eq!(
-            Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)),
-            Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })
-        );
-    }
-
-    #[test]
-    fn entry_search_best_before_fails() {
-        // when storage returns error
-        assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 }
-            .search_best_before(&FaultyStorage, 50)
-            .is_err());
-    }
-
-    #[test]
-    fn entry_search_best_before_works() {
-        // when block is better than our best block
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(&DummyStorage::new(), 150)
-                .unwrap(),
-            Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))
-        );
-        // when block is found between two entries
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(
-                    &DummyStorage::new()
-                        .with_entry(
-                            test_id(100),
-                            StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
-                        )
-                        .with_entry(
-                            test_id(50),
-                            StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }
-                        ),
-                    75
-                )
-                .unwrap(),
-            Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))
-        );
-        // when block is not found
-        assert_eq!(
-            Entry::<_, u64> { valid_from: test_id(100), value: 100 }
-                .search_best_before(
-                    &DummyStorage::new()
-                        .with_entry(
-                            test_id(100),
-                            StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }
-                        )
-                        .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }),
-                    30
-                )
-                .unwrap(),
-            None
-        );
-    }
-}
diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs
deleted file mode 100644
index bb47b8dab5a7f..0000000000000
--- a/client/db/src/cache/list_storage.rs
+++ /dev/null
@@ -1,441 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! List-cache storage definition and implementation.
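The list_entry tests above exercise Entry::search_best_before, the backward walk over prev_valid_from links. Here is a minimal self-contained model of that walk, assuming entries keyed simply by their valid_from block number; ToyEntry and best_before are illustrative names, not part of the deleted code.

    use std::collections::HashMap;

    // Illustrative stand-in for StorageEntry: a value plus an optional back-pointer.
    struct ToyEntry { prev: Option<u64>, value: u64 }

    /// Walks `prev` links from `head` until it finds the first entry whose
    /// valid_from number is <= `block`, like `Entry::search_best_before`.
    fn best_before(entries: &HashMap<u64, ToyEntry>, head: u64, block: u64) -> Option<(u64, u64)> {
        let mut current = head;
        loop {
            let entry = entries.get(&current)?;
            if block >= current {
                return Some((current, entry.value));
            }
            current = entry.prev?;
        }
    }

    fn main() {
        // Entries valid from blocks 30, 50 and 100, linked newest -> oldest.
        let mut entries = HashMap::new();
        entries.insert(100, ToyEntry { prev: Some(50), value: 100 });
        entries.insert(50, ToyEntry { prev: Some(30), value: 50 });
        entries.insert(30, ToyEntry { prev: None, value: 30 });
        assert_eq!(best_before(&entries, 100, 75), Some((50, 50)));    // between entries
        assert_eq!(best_before(&entries, 100, 150), Some((100, 100))); // at or above head
        assert_eq!(best_before(&entries, 100, 10), None);              // before first entry
    }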
- -use std::sync::Arc; - -use crate::utils::{self, meta_keys}; -use codec::{Decode, Encode}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_database::{Database, Transaction}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; - -use crate::{ - cache::{ - list_cache::{CommitOperation, Fork}, - list_entry::{Entry, StorageEntry}, - CacheItemT, ComplexBlockId, - }, - DbHash, -}; - -/// Single list-cache metadata. -#[derive(Debug)] -#[cfg_attr(test, derive(Clone, PartialEq))] -pub struct Metadata { - /// Block at which best finalized entry is stored. - pub finalized: Option>, - /// A set of blocks at which best unfinalized entries are stored. - pub unfinalized: Vec>, -} - -/// Readonly list-cache storage trait. -pub trait Storage { - /// Reads hash of the block at given number. - fn read_id(&self, at: NumberFor) -> ClientResult>; - - /// Reads header of the block with given hash. - fn read_header(&self, at: &Block::Hash) -> ClientResult>; - - /// Reads cache metadata: best finalized entry (if some) and the list. - fn read_meta(&self) -> ClientResult>; - - /// Reads cache entry from the storage. - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>>; - - /// Reads referenced (and thus existing) cache entry from the storage. - fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { - self.read_entry(at).and_then(|entry| { - entry.ok_or_else(|| { - ClientError::from(ClientError::Backend(format!( - "Referenced cache entry at {:?} is not found", - at - ))) - }) - }) - } -} - -/// List-cache storage transaction. -pub trait StorageTransaction { - /// Insert storage entry at given block. - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); - - /// Delete storage entry at given block. - fn remove_storage_entry(&mut self, at: &ComplexBlockId); - - /// Update metadata of the cache. - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ); -} - -/// A set of columns used by the DbStorage. -#[derive(Debug)] -pub struct DbColumns { - /// Column holding cache meta. - pub meta: u32, - /// Column holding the mapping of { block number => block hash } for blocks of the best chain. - pub key_lookup: u32, - /// Column holding the mapping of { block hash => block header }. - pub header: u32, - /// Column holding cache entries. - pub cache: u32, -} - -/// Database-backed list cache storage. -pub struct DbStorage { - name: Vec, - meta_key: Vec, - db: Arc>, - columns: DbColumns, -} - -impl DbStorage { - /// Create new database-backed list cache storage. - pub fn new(name: Vec, db: Arc>, columns: DbColumns) -> Self { - let meta_key = meta::key(&name); - DbStorage { name, meta_key, db, columns } - } - - /// Get reference to the database. - pub fn db(&self) -> &Arc> { - &self.db - } - - /// Get reference to the database columns. - pub fn columns(&self) -> &DbColumns { - &self.columns - } - - /// Encode block id for storing as a key in cache column. - /// We append prefix to the actual encoding to allow several caches - /// store entries in the same column. 
- pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { - let mut encoded = self.name.clone(); - encoded.extend(block.hash.as_ref()); - encoded - } -} - -impl Storage for DbStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_header::( - &*self.db, - self.columns.key_lookup, - self.columns.header, - BlockId::Number(at), - ) - .map(|maybe_header| maybe_header.map(|header| header.hash())) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - utils::read_header::( - &*self.db, - self.columns.key_lookup, - self.columns.header, - BlockId::Hash(*at), - ) - } - - fn read_meta(&self) -> ClientResult> { - match self.db.get(self.columns.meta, &self.meta_key) { - Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { finalized: None, unfinalized: Vec::new() }), - } - } - - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>> { - match self.db.get(self.columns.cache, &self.encode_block_id(at)) { - Some(entry) => StorageEntry::::decode(&mut &entry[..]) - .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) - .map(Some), - None => Ok(None), - } - } -} - -/// Database-backed list cache storage transaction. -pub struct DbStorageTransaction<'a> { - storage: &'a DbStorage, - tx: &'a mut Transaction, -} - -impl<'a> DbStorageTransaction<'a> { - /// Create new database transaction. - pub fn new(storage: &'a DbStorage, tx: &'a mut Transaction) -> Self { - DbStorageTransaction { storage, tx } - } -} - -impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.set_from_vec( - self.storage.columns.cache, - &self.storage.encode_block_id(at), - entry.encode(), - ); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.tx.remove(self.storage.columns.cache, &self.storage.encode_block_id(at)); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.tx.set_from_vec( - self.storage.columns.meta, - &self.storage.meta_key, - meta::encode(best_finalized_entry, unfinalized, operation), - ); - } -} - -/// Metadata related functions. -mod meta { - use super::*; - - /// Convert cache name into cache metadata key. - pub fn key(name: &[u8]) -> Vec { - let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); - key_name.extend_from_slice(name); - key_name - } - - /// Encode cache metadata 'applying' commit operation before encoding. 
- pub fn encode( - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - op: &CommitOperation, - ) -> Vec { - let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); - let mut unfinalized = - unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); - - match op { - CommitOperation::AppendNewBlock(_, _) => (), - CommitOperation::AppendNewEntry(index, ref entry) => { - unfinalized[*index] = &entry.valid_from; - }, - CommitOperation::AddNewFork(ref entry) => { - unfinalized.push(&entry.valid_from); - }, - CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { - if let Some(finalizing_entry) = finalizing_entry.as_ref() { - finalized = Some(&finalizing_entry.valid_from); - } - for fork_index in forks.iter().rev() { - unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(ref forks) => { - for (fork_index, updated_fork) in forks.iter().rev() { - match updated_fork { - Some(updated_fork) => - unfinalized[*fork_index] = &updated_fork.head().valid_from, - None => { - unfinalized.remove(*fork_index); - }, - } - } - }, - } - - (finalized, unfinalized).encode() - } - - /// Decode meta information. - pub fn decode(encoded: &[u8]) -> ClientResult> { - let input = &mut &*encoded; - let finalized: Option> = Decode::decode(input).map_err(|_| { - ClientError::from(ClientError::Backend("Error decoding cache meta".into())) - })?; - let unfinalized: Vec> = Decode::decode(input).map_err(|_| { - ClientError::from(ClientError::Backend("Error decoding cache meta".into())) - })?; - - Ok(Metadata { finalized, unfinalized }) - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use std::collections::{HashMap, HashSet}; - - pub struct FaultyStorage; - - impl Storage for FaultyStorage { - fn read_id(&self, _at: NumberFor) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_header(&self, _at: &Block::Hash) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_meta(&self) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_entry( - &self, - _at: &ComplexBlockId, - ) -> ClientResult>> { - Err(ClientError::Backend("TestError".into())) - } - } - - pub struct DummyStorage { - meta: Metadata, - ids: HashMap, Block::Hash>, - headers: HashMap, - entries: HashMap>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - meta: Metadata { finalized: None, unfinalized: Vec::new() }, - ids: HashMap::new(), - headers: HashMap::new(), - entries: HashMap::new(), - } - } - - pub fn with_meta( - mut self, - finalized: Option>, - unfinalized: Vec>, - ) -> Self { - self.meta.finalized = finalized; - self.meta.unfinalized = unfinalized; - self - } - - pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { - self.ids.insert(at, id); - self - } - - pub fn with_header(mut self, header: Block::Header) -> Self { - self.headers.insert(header.hash(), header); - self - } - - pub fn with_entry( - mut self, - at: ComplexBlockId, - entry: StorageEntry, - ) -> Self { - self.entries.insert(at.hash, entry); - self - } - } - - impl Storage for DummyStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - Ok(self.ids.get(&at).cloned()) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - Ok(self.headers.get(&at).cloned()) - } - - fn read_meta(&self) -> ClientResult> { - Ok(self.meta.clone()) - } - - fn read_entry( - &self, - at: &ComplexBlockId, - ) -> ClientResult>> { - Ok(self.entries.get(&at.hash).cloned()) - } - } - - pub 
struct DummyTransaction<Block: BlockT> {
-	updated_meta: Option<Metadata<Block, u64>>,
-	inserted_entries: HashSet<Block::Hash>,
-	removed_entries: HashSet<Block::Hash>,
-}
-
-impl<Block: BlockT> DummyTransaction<Block> {
-	pub fn new() -> Self {
-		DummyTransaction {
-			updated_meta: None,
-			inserted_entries: HashSet::new(),
-			removed_entries: HashSet::new(),
-		}
-	}
-
-	pub fn inserted_entries(&self) -> &HashSet<Block::Hash> {
-		&self.inserted_entries
-	}
-
-	pub fn removed_entries(&self) -> &HashSet<Block::Hash> {
-		&self.removed_entries
-	}
-
-	pub fn updated_meta(&self) -> &Option<Metadata<Block, u64>> {
-		&self.updated_meta
-	}
-}
-
-impl<Block: BlockT> StorageTransaction<Block, u64> for DummyTransaction<Block> {
-	fn insert_storage_entry(
-		&mut self,
-		at: &ComplexBlockId<Block>,
-		_entry: &StorageEntry<NumberFor<Block>, u64>,
-	) {
-		self.inserted_entries.insert(at.hash);
-	}
-
-	fn remove_storage_entry(&mut self, at: &ComplexBlockId<Block>) {
-		self.removed_entries.insert(at.hash);
-	}
-
-	fn update_meta(
-		&mut self,
-		best_finalized_entry: Option<&Entry<Block, u64>>,
-		unfinalized: &[Fork<Block, u64>],
-		operation: &CommitOperation<Block, u64>,
-	) {
-		self.updated_meta = Some(
-			meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(),
-		);
-	}
-}
-}
diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs
deleted file mode 100644
index 5502896aced2c..0000000000000
--- a/client/db/src/cache/mod.rs
+++ /dev/null
@@ -1,413 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! DB-backed cache of blockchain data.
-
-use parking_lot::RwLock;
-use std::{
-	collections::{hash_map::Entry, HashMap},
-	sync::Arc,
-};
-
-use crate::{
-	utils::{self, COLUMN_META},
-	DbHash,
-};
-use codec::{Decode, Encode};
-use sc_client_api::blockchain::{
-	well_known_cache_keys::{self, Id as CacheKeyId},
-	Cache as BlockchainCache,
-};
-use sp_blockchain::{HeaderMetadataCache, Result as ClientResult};
-use sp_database::{Database, Transaction};
-use sp_runtime::{
-	generic::BlockId,
-	traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero},
-};
-
-use self::list_cache::{ListCache, PruningStrategy};
-
-mod list_cache;
-mod list_entry;
-mod list_storage;
-
-/// Minimal post-finalization age of finalized blocks before they'll be pruned.
-const PRUNE_DEPTH: u32 = 1024;
-
-/// The type of entry that is inserted to the cache.
-#[derive(Clone, Copy, Debug, PartialEq)]
-pub enum EntryType {
-	/// Non-final entry.
-	NonFinal,
-	/// Final entry.
-	Final,
-	/// Genesis entry (inserted during cache initialization).
-	Genesis,
-}
-
-/// Block identifier that holds both hash and number.
-#[derive(Clone, Debug, Encode, Decode, PartialEq)]
-pub struct ComplexBlockId<Block: BlockT> {
-	/// Hash of the block.
-	pub(crate) hash: Block::Hash,
-	/// Number of the block.
-	pub(crate) number: NumberFor<Block>,
-}
-
-impl<Block: BlockT> ComplexBlockId<Block> {
-	/// Create new complex block id.
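// A dependency-free sketch (not from the original patch) of the ordering
// semantics given to `ComplexBlockId` just below: ids are ordered by block
// *number* only, so two forks at the same height are neither less nor greater
// than each other. The `Id` type here is an illustrative simplification.
#[derive(Clone, Debug, PartialEq)]
struct Id {
	hash: [u8; 32],
	number: u64,
}

impl PartialOrd for Id {
	// Only the number participates in the ordering, mirroring the deleted impl.
	fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
		self.number.partial_cmp(&other.number)
	}
}

fn main() {
	let a = Id { hash: [1; 32], number: 5 };
	let b = Id { hash: [2; 32], number: 5 };
	// Different hashes at the same height: not equal, but neither is "less".
	assert!(a != b);
	assert!(!(a < b) && !(a > b));
}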
- pub fn new(hash: Block::Hash, number: NumberFor) -> Self { - ComplexBlockId { hash, number } - } -} - -impl ::std::cmp::PartialOrd for ComplexBlockId { - fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { - self.number.partial_cmp(&other.number) - } -} - -/// All cache items must implement this trait. -pub trait CacheItemT: Clone + Decode + Encode + PartialEq {} - -impl CacheItemT for T where T: Clone + Decode + Encode + PartialEq {} - -/// Database-backed blockchain data cache. -pub struct DbCache { - cache_at: HashMap, self::list_storage::DbStorage>>, - header_metadata_cache: Arc>, - db: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, -} - -impl DbCache { - /// Create new cache. - pub fn new( - db: Arc>, - header_metadata_cache: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, - ) -> Self { - Self { - cache_at: HashMap::new(), - db, - header_metadata_cache, - key_lookup_column, - header_column, - cache_column, - genesis_hash, - best_finalized_block, - } - } - - /// Set genesis block hash. - pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) { - self.genesis_hash = genesis_hash; - } - - /// Begin cache transaction. - pub fn transaction<'a>( - &'a mut self, - tx: &'a mut Transaction, - ) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: HashMap::new(), - best_finalized_block: None, - } - } - - /// Begin cache transaction with given ops. - pub fn transaction_with_ops<'a>( - &'a mut self, - tx: &'a mut Transaction, - ops: DbCacheTransactionOps, - ) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: ops.cache_at_ops, - best_finalized_block: ops.best_finalized_block, - } - } - - /// Run post-commit cache operations. - pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { - for (name, ops) in ops.cache_at_ops.into_iter() { - self.get_cache(name)?.on_transaction_commit(ops); - } - if let Some(best_finalized_block) = ops.best_finalized_block { - self.best_finalized_block = best_finalized_block; - } - Ok(()) - } - - /// Creates `ListCache` with the given name or returns a reference to the existing. - pub(crate) fn get_cache( - &mut self, - name: CacheKeyId, - ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { - get_cache_helper( - &mut self.cache_at, - name, - &self.db, - self.key_lookup_column, - self.header_column, - self.cache_column, - &self.best_finalized_block, - ) - } -} - -// This helper is needed because otherwise the borrow checker will require to -// clone all parameters outside of the closure. 
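// The `get_cache_helper` function that follows is an instance of the standard
// HashMap entry API: get a mutable reference if present, otherwise initialize
// in place and return the new reference, all under a single borrow. A minimal
// std-only sketch; `expensive_init` and the key/value types are illustrative.
use std::collections::{hash_map::Entry, HashMap};

fn get_or_init<'a>(map: &'a mut HashMap<u32, String>, key: u32) -> &'a mut String {
	match map.entry(key) {
		// Already present: hand back a mutable reference into the map.
		Entry::Occupied(entry) => entry.into_mut(),
		// Missing: build the value once, insert it, and return the reference.
		Entry::Vacant(entry) => entry.insert(expensive_init(key)),
	}
}

fn expensive_init(key: u32) -> String {
	format!("cache-{}", key)
}

fn main() {
	let mut map = HashMap::new();
	get_or_init(&mut map, 7).push_str("-touched");
	assert_eq!(map[&7], "cache-7-touched");
}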
-fn get_cache_helper<'a, Block: BlockT>(
-	cache_at: &'a mut HashMap<CacheKeyId, ListCache<Block, Vec<u8>, self::list_storage::DbStorage>>,
-	name: CacheKeyId,
-	db: &Arc<dyn Database<DbHash>>,
-	key_lookup: u32,
-	header: u32,
-	cache: u32,
-	best_finalized_block: &ComplexBlockId<Block>,
-) -> ClientResult<&'a mut ListCache<Block, Vec<u8>, self::list_storage::DbStorage>> {
-	match cache_at.entry(name) {
-		Entry::Occupied(entry) => Ok(entry.into_mut()),
-		Entry::Vacant(entry) => {
-			let cache = ListCache::new(
-				self::list_storage::DbStorage::new(
-					name.to_vec(),
-					db.clone(),
-					self::list_storage::DbColumns { meta: COLUMN_META, key_lookup, header, cache },
-				),
-				cache_pruning_strategy(name),
-				best_finalized_block.clone(),
-			)?;
-			Ok(entry.insert(cache))
-		},
-	}
-}
-
-/// Cache operations that are to be committed after database transaction is committed.
-#[derive(Default)]
-pub struct DbCacheTransactionOps<Block: BlockT> {
-	cache_at_ops: HashMap<CacheKeyId, self::list_cache::CommitOperations<Block, Vec<u8>>>,
-	best_finalized_block: Option<ComplexBlockId<Block>>,
-}
-
-impl<Block: BlockT> DbCacheTransactionOps<Block> {
-	/// Empty transaction ops.
-	pub fn empty() -> DbCacheTransactionOps<Block> {
-		DbCacheTransactionOps { cache_at_ops: HashMap::new(), best_finalized_block: None }
-	}
-}
-
-/// Database-backed blockchain data cache transaction valid for single block import.
-pub struct DbCacheTransaction<'a, Block: BlockT> {
-	cache: &'a mut DbCache<Block>,
-	tx: &'a mut Transaction<DbHash>,
-	cache_at_ops: HashMap<CacheKeyId, self::list_cache::CommitOperations<Block, Vec<u8>>>,
-	best_finalized_block: Option<ComplexBlockId<Block>>,
-}
-
-impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> {
-	/// Convert transaction into post-commit operations set.
-	pub fn into_ops(self) -> DbCacheTransactionOps<Block> {
-		DbCacheTransactionOps {
-			cache_at_ops: self.cache_at_ops,
-			best_finalized_block: self.best_finalized_block,
-		}
-	}
-
-	/// When new block is inserted into database.
-	pub fn on_block_insert(
-		mut self,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		data_at: HashMap<CacheKeyId, Vec<u8>>,
-		entry_type: EntryType,
-	) -> ClientResult<Self> {
-		// prepare list of caches that are not updated
-		// (we might still need to do some cache maintenance in this case)
-		let missed_caches = self
-			.cache
-			.cache_at
-			.keys()
-			.filter(|cache| !data_at.contains_key(*cache))
-			.cloned()
-			.collect::<Vec<_>>();
-
-		let mut insert_op = |name: CacheKeyId,
-		                     value: Option<Vec<u8>>|
-		 -> Result<(), sp_blockchain::Error> {
-			let cache = self.cache.get_cache(name)?;
-			let cache_ops = self.cache_at_ops.entry(name).or_default();
-			cache.on_block_insert(
-				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
-				parent.clone(),
-				block.clone(),
-				value,
-				entry_type,
-				cache_ops,
-			)?;
-
-			Ok(())
-		};
-
-		data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?;
-		missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?;
-
-		match entry_type {
-			EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block),
-			EntryType::NonFinal => (),
-		}
-
-		Ok(self)
-	}
-
-	/// When previously inserted block is finalized.
-	pub fn on_block_finalize(
-		mut self,
-		parent: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-	) -> ClientResult<Self> {
-		for (name, cache) in self.cache.cache_at.iter() {
-			let cache_ops = self.cache_at_ops.entry(*name).or_default();
-			cache.on_block_finalize(
-				&mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx),
-				parent.clone(),
-				block.clone(),
-				cache_ops,
-			)?;
-		}
-
-		self.best_finalized_block = Some(block);
-
-		Ok(self)
-	}
-
-	/// When block is reverted.
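// A simplified, std-only sketch (not from the original patch) of the staging
// pattern `DbCacheTransaction` uses above: mutations are collected while the
// database transaction is open and are applied to the in-memory view only
// after the database commit succeeds. All names here are illustrative.
use std::collections::HashMap;

#[derive(Default)]
struct Cache {
	entries: HashMap<String, u64>,
}

#[derive(Default)]
struct PendingOps {
	// Staged writes; nothing touches `Cache` until `commit`.
	writes: Vec<(String, u64)>,
}

impl Cache {
	fn transaction(&self) -> PendingOps {
		PendingOps::default()
	}

	fn commit(&mut self, ops: PendingOps) {
		for (key, value) in ops.writes {
			self.entries.insert(key, value);
		}
	}
}

fn main() {
	let mut cache = Cache::default();
	let mut ops = cache.transaction();
	ops.writes.push(("best_finalized".into(), 42));
	// ... the real code would first commit the database transaction here ...
	cache.commit(ops);
	assert_eq!(cache.entries["best_finalized"], 42);
}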
- pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId) -> ClientResult { - for (name, cache) in self.cache.cache_at.iter() { - let cache_ops = self.cache_at_ops.entry(*name).or_default(); - cache.on_block_revert( - &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), - reverted_block, - cache_ops, - )?; - } - - Ok(self) - } -} - -/// Synchronous implementation of database-backed blockchain data cache. -pub struct DbCacheSync(pub RwLock>); - -impl BlockchainCache for DbCacheSync { - fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { - let mut cache = self.0.write(); - let genesis_hash = cache.genesis_hash; - let cache_contents = vec![(*key, data)].into_iter().collect(); - let db = cache.db.clone(); - let mut dbtx = Transaction::new(); - let tx = cache.transaction(&mut dbtx); - let tx = tx.on_block_insert( - ComplexBlockId::new(Default::default(), Zero::zero()), - ComplexBlockId::new(genesis_hash, Zero::zero()), - cache_contents, - EntryType::Genesis, - )?; - let tx_ops = tx.into_ops(); - db.commit(dbtx)?; - cache.commit(tx_ops)?; - - Ok(()) - } - - fn get_at( - &self, - key: &CacheKeyId, - at: &BlockId, - ) -> ClientResult< - Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, - > { - let mut cache = self.0.write(); - let header_metadata_cache = cache.header_metadata_cache.clone(); - let cache = cache.get_cache(*key)?; - let storage = cache.storage(); - let db = storage.db(); - let columns = storage.columns(); - let at = match *at { - BlockId::Hash(hash) => match header_metadata_cache.header_metadata(hash) { - Some(metadata) => ComplexBlockId::new(hash, metadata.number), - None => { - let header = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone()), - )?; - ComplexBlockId::new(hash, *header.number()) - }, - }, - BlockId::Number(number) => { - let hash = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Number(number.clone()), - )? - .hash(); - ComplexBlockId::new(hash, number) - }, - }; - - cache.value_at_block(&at).map(|block_and_value| { - block_and_value.map(|(begin_block, end_block, value)| { - ( - (begin_block.number, begin_block.hash), - end_block.map(|end_block| (end_block.number, end_block.hash)), - value, - ) - }) - }) - } -} - -/// Get pruning strategy for given cache. -fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy { - // the cache is mostly used to store data from consensus engines - // this kind of data is only required for non-finalized blocks - // => by default we prune finalized cached entries - - match cache { - // we need to keep changes tries configurations forever (or at least until changes tries, - // that were built using this configuration, are pruned) to make it possible to refer - // to old changes tries - well_known_cache_keys::CHANGES_TRIE_CONFIG => PruningStrategy::NeverPrune, - _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), - } -} diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs deleted file mode 100644 index 3a3c5918535f9..0000000000000 --- a/client/db/src/changes_tries_storage.rs +++ /dev/null @@ -1,1168 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! DB-backed changes tries storage. - -use crate::{ - cache::{ - ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType, - }, - utils::{self, meta_keys, Meta}, - Database, DbHash, -}; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use parking_lot::RwLock; -use sc_client_api::backend::PrunableStateChangesTrieStorage; -use sp_blockchain::{ - well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderMetadataCache, - Result as ClientResult, -}; -use sp_core::{ - convert_hash, storage::PrefixedStorageKey, ChangesTrieConfiguration, - ChangesTrieConfigurationRange, -}; -use sp_database::Transaction; -use sp_runtime::{ - generic::{BlockId, ChangesTrieSignal, DigestItem}, - traits::{Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero}, -}; -use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction}; -use sp_trie::MemoryDB; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -/// Extract new changes trie configuration (if available) from the header. -pub fn extract_new_configuration( - header: &Header, -) -> Option<&Option> { - header - .digest() - .log(DigestItem::as_changes_trie_signal) - .and_then(ChangesTrieSignal::as_new_configuration) -} - -/// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is -/// currently guaranteed because import lock is held during block import/finalization. -pub struct DbChangesTrieStorageTransaction { - /// Cache operations that must be performed after db transaction is committed. - cache_ops: DbCacheTransactionOps, - /// New configuration (if changed at current block). - new_config: Option>, -} - -impl DbChangesTrieStorageTransaction { - /// Consume self and return transaction with given new configuration. - pub fn with_new_config(mut self, new_config: Option>) -> Self { - self.new_config = new_config; - self - } -} - -impl From> for DbChangesTrieStorageTransaction { - fn from(cache_ops: DbCacheTransactionOps) -> Self { - DbChangesTrieStorageTransaction { cache_ops, new_config: None } - } -} - -/// Changes tries storage. -/// -/// Stores all tries in separate DB column. -/// Lock order: meta, tries_meta, cache, build_cache. -pub struct DbChangesTrieStorage { - db: Arc>, - meta_column: u32, - changes_tries_column: u32, - key_lookup_column: u32, - header_column: u32, - meta: Arc, Block::Hash>>>, - tries_meta: RwLock>, - min_blocks_to_keep: Option, - /// The cache stores all ever existing changes tries configurations. - cache: DbCacheSync, - /// Build cache is a map of block => set of storage keys changed at this block. - /// They're used to build digest blocks - instead of reading+parsing tries from db - /// we just use keys sets from the cache. 
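// `extract_new_configuration` above is an instance of a common digest-scanning
// pattern: walk the header's digest items and return the first one a
// projection accepts. A dependency-free sketch; the `Item` enum and the
// projection are illustrative stand-ins for `DigestItem` and
// `as_changes_trie_signal`.
#[derive(Debug, PartialEq)]
enum Item {
	Seal(u64),
	NewConfiguration(Option<u32>),
}

impl Item {
	fn as_new_configuration(&self) -> Option<&Option<u32>> {
		match self {
			Item::NewConfiguration(config) => Some(config),
			_ => None,
		}
	}
}

fn extract_new_configuration(digest: &[Item]) -> Option<&Option<u32>> {
	// find_map returns the first item for which the projection yields Some.
	digest.iter().find_map(Item::as_new_configuration)
}

fn main() {
	let digest = vec![Item::Seal(1), Item::NewConfiguration(Some(4))];
	assert_eq!(extract_new_configuration(&digest), Some(&Some(4)));
	assert_eq!(extract_new_configuration(&[Item::Seal(2)]), None);
}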
-	build_cache: RwLock<ChangesTrieBuildCache<Block::Hash, NumberFor<Block>>>,
-}
-
-/// Persistent struct that contains all the changes tries metadata.
-#[derive(Decode, Encode, Debug)]
-struct ChangesTriesMeta<Block: BlockT> {
-	/// Oldest unpruned max-level (or skewed) digest trie blocks range.
-	/// The range is inclusive from both sides.
-	/// Is None only if:
-	/// 1) we haven't yet finalized any blocks (except genesis);
-	/// 2) best_finalized_block - min_blocks_to_keep points to the range where changes tries are
-	///    disabled;
-	/// 3) changes tries pruning is disabled.
-	pub oldest_digest_range: Option<(NumberFor<Block>, NumberFor<Block>)>,
-	/// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range.
-	/// It is guaranteed that we have no changes tries before (and including) this block.
-	/// It is guaranteed that all existing changes tries after this block are not yet pruned (if
-	/// created).
-	pub oldest_pruned_digest_range_end: NumberFor<Block>,
-}
-
-impl<Block: BlockT> DbChangesTrieStorage<Block> {
-	/// Create new changes trie storage.
-	pub fn new(
-		db: Arc<dyn Database<DbHash>>,
-		header_metadata_cache: Arc<HeaderMetadataCache<Block>>,
-		meta_column: u32,
-		changes_tries_column: u32,
-		key_lookup_column: u32,
-		header_column: u32,
-		cache_column: u32,
-		meta: Arc<RwLock<Meta<NumberFor<Block>, Block::Hash>>>,
-		min_blocks_to_keep: Option<u32>,
-	) -> ClientResult<Self> {
-		let (finalized_hash, finalized_number, genesis_hash) = {
-			let meta = meta.read();
-			(meta.finalized_hash, meta.finalized_number, meta.genesis_hash)
-		};
-		let tries_meta = read_tries_meta(&*db, meta_column)?;
-		Ok(Self {
-			db: db.clone(),
-			meta_column,
-			changes_tries_column,
-			key_lookup_column,
-			header_column,
-			meta,
-			min_blocks_to_keep,
-			cache: DbCacheSync(RwLock::new(DbCache::new(
-				db.clone(),
-				header_metadata_cache,
-				key_lookup_column,
-				header_column,
-				cache_column,
-				genesis_hash,
-				ComplexBlockId::new(finalized_hash, finalized_number),
-			))),
-			build_cache: RwLock::new(ChangesTrieBuildCache::new()),
-			tries_meta: RwLock::new(tries_meta),
-		})
-	}
-
-	/// Commit new changes trie.
-	pub fn commit(
-		&self,
-		tx: &mut Transaction<DbHash>,
-		mut changes_trie: MemoryDB<HashFor<Block>>,
-		parent_block: ComplexBlockId<Block>,
-		block: ComplexBlockId<Block>,
-		new_header: &Block::Header,
-		finalized: bool,
-		new_configuration: Option<Option<ChangesTrieConfiguration>>,
-		cache_tx: Option<DbChangesTrieStorageTransaction<Block>>,
-	) -> ClientResult<DbChangesTrieStorageTransaction<Block>> {
-		// insert changes trie, associated with block, into DB
-		for (key, (val, _)) in changes_trie.drain() {
-			tx.set(self.changes_tries_column, key.as_ref(), &val);
-		}
-
-		// if configuration has not been changed AND block is not finalized => nothing to do here
-		let new_configuration = match new_configuration {
-			Some(new_configuration) => new_configuration,
-			None if !finalized => return Ok(DbCacheTransactionOps::empty().into()),
-			None =>
-				return self.finalize(
-					tx,
-					parent_block.hash,
-					block.hash,
-					block.number,
-					Some(new_header),
-					cache_tx,
-				),
-		};
-
-		// update configuration cache
-		let mut cache_at = HashMap::new();
-		cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode());
-		Ok(DbChangesTrieStorageTransaction::from(match cache_tx {
-			Some(cache_tx) => self
-				.cache
-				.0
-				.write()
-				.transaction_with_ops(tx, cache_tx.cache_ops)
-				.on_block_insert(
-					parent_block,
-					block,
-					cache_at,
-					if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
-				)?
-				.into_ops(),
-			None => self
-				.cache
-				.0
-				.write()
-				.transaction(tx)
-				.on_block_insert(
-					parent_block,
-					block,
-					cache_at,
-					if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal },
-				)?
- .into_ops(), - }) - .with_new_config(Some(new_configuration))) - } - - /// Called when block is finalized. - pub fn finalize( - &self, - tx: &mut Transaction, - parent_block_hash: Block::Hash, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option>, - ) -> ClientResult> { - // prune obsolete changes tries - self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; - - // if we have inserted the block that we're finalizing in the same transaction - // => then we have already finalized it from the commit() call - if cache_tx.is_some() { - if let Some(new_header) = new_header { - if new_header.hash() == block_hash { - return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")) - } - } - } - - // and finalize configuration cache entries - let block = ComplexBlockId::new(block_hash, block_num); - let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); - let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); - Ok(match cache_tx { - Some(cache_tx) => DbChangesTrieStorageTransaction::from( - self.cache - .0 - .write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_finalize(parent_block, block)? - .into_ops(), - ) - .with_new_config(cache_tx.new_config), - None => DbChangesTrieStorageTransaction::from( - self.cache - .0 - .write() - .transaction(tx) - .on_block_finalize(parent_block, block)? - .into_ops(), - ), - }) - } - - /// When block is reverted. - pub fn revert( - &self, - tx: &mut Transaction, - block: &ComplexBlockId, - ) -> ClientResult> { - Ok(self.cache.0.write().transaction(tx).on_block_revert(block)?.into_ops().into()) - } - - /// When transaction has been committed. - pub fn post_commit(&self, tx: Option>) { - if let Some(tx) = tx { - self.cache.0.write().commit(tx.cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there is tx; qed", - ); - } - } - - /// Commit changes into changes trie build cache. - pub fn commit_build_cache( - &self, - cache_update: ChangesTrieCacheAction>, - ) { - self.build_cache.write().perform(cache_update); - } - - /// Prune obsolete changes tries. 
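// The pruning guard in the `prune` function that follows boils down to simple
// window arithmetic: a digest range ending at `range_end` may only be pruned
// once the just-finalized block is more than `min_blocks_to_keep` blocks past
// it. A std-only restatement (not from the original patch) of that comparison:
fn may_prune(finalized: u64, range_end: u64, min_blocks_to_keep: u64) -> bool {
	// Mirrors `block_num <= end || block_num - end <= min_blocks_to_keep => keep`.
	finalized > range_end && finalized - range_end > min_blocks_to_keep
}

fn main() {
	// With an 8-block retention window, a range ending at #10 becomes
	// prunable only once block #19 or later is finalized.
	assert!(!may_prune(18, 10, 8));
	assert!(may_prune(19, 10, 8));
}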
- fn prune( - &self, - tx: &mut Transaction, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option<&DbChangesTrieStorageTransaction>, - ) -> ClientResult<()> { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return Ok(()), - }; - - let mut tries_meta = self.tries_meta.write(); - let mut next_digest_range_start = block_num; - loop { - // prune oldest digest if it is known - // it could be unknown if: - // 1) either we're finalizing block#1 - // 2) or we are (or were) in period where changes tries are disabled - if let Some((begin, end)) = tries_meta.oldest_digest_range { - if block_num <= end || block_num - end <= min_blocks_to_keep.into() { - break - } - - tries_meta.oldest_pruned_digest_range_end = end; - sp_state_machine::prune_changes_tries( - &*self, - begin, - end, - &sp_state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.remove(self.changes_tries_column, node.as_ref()), - ); - - next_digest_range_start = end + One::one(); - } - - // proceed to the next configuration range - let next_digest_range_start_hash = match block_num == next_digest_range_start { - true => block_hash, - false => utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(next_digest_range_start), - )? - .hash(), - }; - - let config_for_new_block = new_header - .map(|header| *header.number() == next_digest_range_start) - .unwrap_or(false); - let next_config = match cache_tx { - Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { - let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed"); - Ok(ChangesTrieConfigurationRange { - zero: (block_num, block_hash), - end: None, - config, - }) - }, - _ if config_for_new_block => self.configuration_at(&BlockId::Hash( - *new_header - .expect("config_for_new_block is only true when new_header is passed; qed") - .parent_hash(), - )), - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash)), - }; - let next_config = match next_config { - Ok(next_config) => next_config, - Err(ClientError::UnknownBlock(_)) => break, // No block means nothing to prune. - Err(e) => return Err(e), - }; - if let Some(config) = next_config.config { - let mut oldest_digest_range = config - .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) - .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); - - if let Some(end) = next_config.end { - if end.0 < oldest_digest_range.1 { - oldest_digest_range.1 = end.0; - } - } - - tries_meta.oldest_digest_range = Some(oldest_digest_range); - continue - } - - tries_meta.oldest_digest_range = None; - break - } - - write_tries_meta(tx, self.meta_column, &*tries_meta); - Ok(()) - } -} - -impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn storage( - &self, - ) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { - self - } - - fn configuration_at( - &self, - at: &BlockId, - ) -> ClientResult, Block::Hash>> { - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
- .and_then(|(zero, end, encoded)| { - Decode::decode(&mut &encoded[..]) - .ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config }) - }) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) - } - - fn oldest_pruned_digest_range_end(&self) -> NumberFor { - self.tries_meta.read().oldest_pruned_digest_range_end - } -} - -impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> - for DbChangesTrieStorage -{ - fn build_anchor( - &self, - hash: Block::Hash, - ) -> Result>, String> { - utils::read_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Hash(hash), - ) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| { - maybe_header - .map(|header| sp_state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - }) - .ok_or_else(|| format!("Unknown header: {}", hash)) - }) - } - - fn root( - &self, - anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!( - "Can't get changes trie root at {} using anchor at {}", - block, anchor.number - )) - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - } else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(current_num), - ) - .map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Hash(current_hash), - ) - .map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok(utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - block_id, - ) - .map_err(|e| e.to_string())? - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned()) - } -} - -impl sp_state_machine::ChangesTrieStorage, NumberFor> - for DbChangesTrieStorage -where - Block: BlockT, -{ - fn as_roots_storage( - &self, - ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap, HashSet>>), - ) -> bool { - self.build_cache.read().with_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { - Ok(self.db.get(self.changes_tries_column, key.as_ref())) - } -} - -/// Read changes tries metadata from database. 
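// The non-finalized branch of `root` above resolves a block hash by walking
// parent links back from the anchor until the target number is reached. A
// self-contained sketch over a toy header map; `Hash`, `Header`, and `hash_at`
// are simplified stand-ins, not part of the original code.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Hash(u8);

struct Header {
	parent: Hash,
	number: u64,
}

fn hash_at(headers: &HashMap<Hash, Header>, anchor: Hash, target: u64) -> Option<Hash> {
	let mut current = anchor;
	let mut number = headers.get(&current)?.number;
	// Walk backwards one parent at a time until the target height.
	while number > target {
		current = headers.get(&current)?.parent;
		number -= 1;
	}
	(number == target).then(|| current)
}

fn main() {
	let mut headers = HashMap::new();
	headers.insert(Hash(3), Header { parent: Hash(2), number: 3 });
	headers.insert(Hash(2), Header { parent: Hash(1), number: 2 });
	headers.insert(Hash(1), Header { parent: Hash(0), number: 1 });
	assert_eq!(hash_at(&headers, Hash(3), 1), Some(Hash(1)));
}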
-fn read_tries_meta<Block: BlockT>(
-	db: &dyn Database<DbHash>,
-	meta_column: u32,
-) -> ClientResult<ChangesTriesMeta<Block>> {
-	match db.get(meta_column, meta_keys::CHANGES_TRIES_META) {
-		Some(h) => Decode::decode(&mut &h[..]).map_err(|err| {
-			ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))
-		}),
-		None => Ok(ChangesTriesMeta {
-			oldest_digest_range: None,
-			oldest_pruned_digest_range_end: Zero::zero(),
-		}),
-	}
-}
-
-/// Write changes tries metadata to database.
-fn write_tries_meta<Block: BlockT>(
-	tx: &mut Transaction<DbHash>,
-	meta_column: u32,
-	meta: &ChangesTriesMeta<Block>,
-) {
-	tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode());
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-	use crate::{
-		tests::{insert_header, prepare_changes, Block},
-		Backend,
-	};
-	use hash_db::EMPTY_PREFIX;
-	use sc_client_api::backend::{
-		Backend as ClientBackend, BlockImportOperation, NewBlockState,
-		PrunableStateChangesTrieStorage,
-	};
-	use sp_blockchain::HeaderBackend as BlockchainHeaderBackend;
-	use sp_core::H256;
-	use sp_runtime::{
-		testing::{Digest, Header},
-		traits::{BlakeTwo256, Hash},
-	};
-	use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage};
-
-	fn changes(number: u64) -> Option<Vec<(Vec<u8>, Vec<u8>)>> {
-		Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())])
-	}
-
-	fn insert_header_with_configuration_change(
-		backend: &Backend<Block>,
-		number: u64,
-		parent_hash: H256,
-		changes: Option<Vec<(Vec<u8>, Vec<u8>)>>,
-		new_configuration: Option<ChangesTrieConfiguration>,
-	) -> H256 {
-		let mut digest = Digest::default();
-		let mut changes_trie_update = Default::default();
-		if let Some(changes) = changes {
-			let (root, update) = prepare_changes(changes);
-			digest.push(DigestItem::ChangesTrieRoot(root));
-			changes_trie_update = update;
-		}
-		digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(
-			new_configuration,
-		)));
-
-		let header = Header {
-			number,
-			parent_hash,
-			state_root: BlakeTwo256::trie_root(Vec::new()),
-			digest,
-			extrinsics_root: Default::default(),
-		};
-		let header_hash = header.hash();
-
-		let block_id = if number == 0 {
-			BlockId::Hash(Default::default())
-		} else {
-			BlockId::Number(number - 1)
-		};
-		let mut op = backend.begin_operation().unwrap();
-		backend.begin_state_operation(&mut op, block_id).unwrap();
-		op.set_block_data(header, None, None, None, NewBlockState::Best).unwrap();
-		op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear))
-			.unwrap();
-		backend.commit_operation(op).unwrap();
-
-		header_hash
-	}
-
-	#[test]
-	fn changes_trie_storage_works() {
-		let backend = Backend::<Block>::new_test(1000, 100);
-		backend.changes_tries_storage.meta.write().finalized_number = 1000;
-
-		let check_changes = |backend: &Backend<Block>,
-		                     block: u64,
-		                     changes: Vec<(Vec<u8>, Vec<u8>)>| {
-			let (changes_root, mut changes_trie_update) = prepare_changes(changes);
-			let anchor = sp_state_machine::ChangesTrieAnchorBlockId {
-				hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(),
-				number: block,
-			};
-			assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root)));
-
-			let storage = backend.changes_tries_storage.storage();
-			for (key, (val, _)) in changes_trie_update.drain() {
-				assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val)));
-			}
-		};
-
-		let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())];
-		let changes1 = vec![
-			(b"key_at_1".to_vec(), b"val_at_1".to_vec()),
-			(b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()),
-		];
-		let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())];
-
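// The `read_tries_meta`/`write_tries_meta` pair above persist the metadata
// struct as a SCALE blob under a fixed meta key. A minimal round-trip sketch,
// assuming the `parity-scale-codec` crate (imported as `codec`, as in this
// repo) with its `derive` feature; `TriesMeta` is an illustrative stand-in.
use codec::{Decode, Encode};

#[derive(Encode, Decode, Debug, PartialEq)]
struct TriesMeta {
	oldest_digest_range: Option<(u64, u64)>,
	oldest_pruned_digest_range_end: u64,
}

fn main() {
	let meta = TriesMeta { oldest_digest_range: Some((7, 10)), oldest_pruned_digest_range_end: 6 };
	let bytes = meta.encode();
	// Decoding consumes from a mutable byte slice, as in the deleted code.
	let decoded = TriesMeta::decode(&mut &bytes[..]).unwrap();
	assert_eq!(meta, decoded);
}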
let block0 = insert_header( - &backend, - 0, - Default::default(), - Some(changes0.clone()), - Default::default(), - ); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header( - &backend, - 0, - Default::default(), - Some(changes0.clone()), - Default::default(), - ); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = - insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); - let block2_1_1 = - insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = - insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); - let block2_2_1 = - insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = prepare_changes(changes1); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = 
sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let parent_hash = |number| { - if number == 0 { - Default::default() - } else { - backend - .blockchain() - .header(BlockId::Number(number - 1)) - .unwrap() - .unwrap() - .hash() - } - }; - - let insert_regular_header = |with_changes, number| { - insert_header( - &backend, - number, - parent_hash(number), - if with_changes { changes(number) } else { None }, - Default::default(), - ); - }; - - let is_pruned = |number| { - let trie_root = backend - .blockchain() - .header(BlockId::Number(number)) - .unwrap() - .unwrap() - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned(); - match trie_root { - Some(trie_root) => - backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), - None => true, - } - }; - - let finalize_block = |number| { - let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); - let mut tx = Transaction::new(); - let cache_ops = backend - .changes_tries_storage - .finalize(&mut tx, *header.parent_hash(), header.hash(), number, None, None) - .unwrap(); - backend.storage.db.commit(tx).unwrap(); - backend.changes_tries_storage.post_commit(Some(cache_ops)); - }; - - // configuration ranges: - // (0; 6] - None - // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 - // [18; 21] - None - // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 - // [33; ... - Some(1) - let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); - let config_at_17 = None; - let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); - let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); - - (0..6).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); - (7..17).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change( - &backend, - 17, - parent_hash(17), - changes(17), - config_at_17, - ); - (18..21).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); - (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change( - &backend, - 32, - parent_hash(32), - changes(32), - config_at_32, - ); - (33..50).for_each(|number| insert_regular_header(true, number)); - - // when only genesis is finalized, nothing is pruned - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [1; 18] are finalized, nothing is pruned - (1..=18).for_each(|number| finalize_block(number)); - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 19 is finalized, changes tries for blocks [7; 10] are pruned - finalize_block(19); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| 
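// The `changes_tries_are_pruned_on_finalization` expectations quoted in the
// comments of this test follow from simple digest arithmetic: with a
// configuration (digest_interval, digest_levels) activated at block `zero`, a
// max-level digest is built every interval^levels blocks, i.e. at
// zero + k * interval^levels. A std-only check under that assumption:
fn max_level_digest_blocks(zero: u64, interval: u64, levels: u32, up_to: u64) -> Vec<u64> {
	let step = interval.pow(levels);
	(1u64..)
		.map(|k| zero + k * step)
		.take_while(|block| *block <= up_to)
		.collect()
}

fn main() {
	// Config (2, 2) activated at #6: max-level digests at #10 and #14.
	assert_eq!(max_level_digest_blocks(6, 2, 2, 16), vec![10, 14]);
	// Config (8, 1) activated at #21: a max-level digest at #29.
	assert_eq!(max_level_digest_blocks(21, 8, 1, 31), vec![29]);
}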
assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [20; 22] are finalized, nothing is pruned - (20..=22).for_each(|number| finalize_block(number)); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 23 is finalized, changes tries for blocks [11; 14] are pruned - finalize_block(23); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [24; 25] are finalized, nothing is pruned - (24..=25).for_each(|number| finalize_block(number)); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 26 is finalized, changes tries for blocks [15; 17] are pruned - finalize_block(26); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [27; 37] are finalized, nothing is pruned - (27..=37).for_each(|number| finalize_block(number)); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 38 is finalized, changes tries for blocks [22; 29] are pruned - finalize_block(38); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [39; 40] are finalized, nothing is pruned - (39..=40).for_each(|number| finalize_block(number)); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 41 is finalized, changes tries for blocks [30; 32] are pruned - finalize_block(41); - (0..=32).for_each(|number| assert!(is_pruned(number))); - (33..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 42 is finalized, changes trie for block 33 is pruned - finalize_block(42); - (0..=33).for_each(|number| assert!(is_pruned(number))); - (34..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 43 is finalized, changes trie for block 34 is pruned - finalize_block(43); - (0..=34).for_each(|number| assert!(is_pruned(number))); - (35..50).for_each(|number| assert!(!is_pruned(number))); - } - - #[test] - fn changes_tries_configuration_is_updated_on_block_insert() { - let backend = Backend::::new_test(1000, 100); - - // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 }); - let config_at_3 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); - let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { digest_interval: 8, digest_levels: 1 }); - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = - insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = - 
insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = - insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); - let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let block7 = - insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); - - // test configuration cache - let storage = &backend.changes_tries_storage; - assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, - config_at_7.clone(), - ); - } - - #[test] - fn test_finalize_several_configuration_change_blocks_in_single_operation() { - let mut backend = Backend::::new_test(10, 10); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let configs = - (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); - - // insert unfinalized headers - let block0 = insert_header_with_configuration_change( - &backend, - 0, - Default::default(), - None, - configs[0].clone(), - ); - let block1 = insert_header_with_configuration_change( - &backend, - 1, - block0, - changes(1), - configs[1].clone(), - ); - let block2 = insert_header_with_configuration_change( - &backend, - 2, - block1, - changes(2), - configs[2].clone(), - ); - - let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); - let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change( - &backend, - 2, - block1, - changes(8), - side_config2_1.clone(), - ); - let _ = insert_header_with_configuration_change( - &backend, - 3, - block2_1, - changes(9), - side_config2_2.clone(), - ); - - // insert finalized header => 4 headers are finalized at once - let header3 = Header { - number: 3, - parent_hash: block2, - state_root: Default::default(), - digest: Digest { - logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - configs[3].clone(), - ))], - }, - extrinsics_root: Default::default(), - }; - let block3 = header3.hash(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - op.set_block_data(header3, None, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - - // insert more unfinalized headers - let block4 = insert_header_with_configuration_change( - &backend, - 4, - block3, - changes(4), - configs[4].clone(), - ); - let block5 = insert_header_with_configuration_change( - &backend, - 5, - block4, - changes(5), - configs[5].clone(), - ); - let block6 = insert_header_with_configuration_change( - &backend, - 6, - block5, - changes(6), - configs[6].clone(), - ); - - // 
insert finalized header => 4 headers are finalized at once - let header7 = Header { - number: 7, - parent_hash: block6, - state_root: Default::default(), - digest: Digest { - logs: vec![DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - configs[7].clone(), - ))], - }, - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); - op.mark_finalized(BlockId::Hash(block5), None).unwrap(); - op.mark_finalized(BlockId::Hash(block6), None).unwrap(); - op.set_block_data(header7, None, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - } - - #[test] - fn changes_tries_configuration_is_reverted() { - let backend = Backend::::new_test(10, 10); - - let config0 = Some(ChangesTrieConfiguration::new(2, 5)); - let block0 = - insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); - let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = - insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - let just1 = Some((*b"TEST", vec![42])); - backend.finalize_block(BlockId::Number(1), just1).unwrap(); - let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = - insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); - let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); - let _ = - insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); - let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = - insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); - let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = - insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); - - // before truncate there are 2 unfinalized forks - block2_1+block2_3 - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 4], - ); - - // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 - backend.revert(1, false).unwrap(); - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 3], - ); - - // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl - // specifics), the 1st one points to the block #3 because it isn't truncated - backend.revert(1, false).unwrap(); - assert_eq!( - backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 2], - ); - - // after truncating block2 - there are no unfinalized forks - backend.revert(1, false).unwrap(); - assert!(backend - .changes_tries_storage - .cache - .0 - .write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>() - .is_empty(),); - } -} diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 3b8936c0f7bac..d30c3e5df9330 100644 --- 
a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -28,14 +28,11 @@ #![warn(missing_docs)] -pub mod light; pub mod offchain; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub mod bench; -mod cache; -mod changes_tries_storage; mod children; #[cfg(feature = "with-parity-db")] mod parity_db; @@ -56,7 +53,6 @@ use std::{ }; use crate::{ - changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}, stats::StateUsageStats, storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}, utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}, @@ -64,8 +60,7 @@ use crate::{ use codec::{Decode, Encode}; use hash_db::Prefix; use sc_client_api::{ - backend::{NewBlockState, ProvideChtRoots, PrunableStateChangesTrieStorage}, - cht, + backend::NewBlockState, leaves::{FinalizationDisplaced, LeafSet}, utils::is_descendent_of, IoInfo, MemoryInfo, MemorySize, UsageInfo, @@ -79,11 +74,10 @@ use sp_blockchain::{ use sp_core::{ offchain::OffchainOverlayedChange, storage::{well_known_keys, ChildInfo}, - ChangesTrieConfiguration, }; use sp_database::Transaction; use sp_runtime::{ - generic::{BlockId, DigestItem}, + generic::BlockId, traits::{ Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, @@ -91,7 +85,7 @@ use sp_runtime::{ Justification, Justifications, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, }; @@ -104,7 +98,6 @@ pub use sp_database::Database; #[cfg(any(feature = "with-kvdb-rocksdb", test))] pub use bench::BenchmarkingState; -const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; const CACHE_HEADERS: usize = 8; /// Default value for storage cache child ratio. @@ -406,11 +399,9 @@ pub(crate) mod columns { pub const HEADER: u32 = 4; pub const BODY: u32 = 5; pub const JUSTIFICATIONS: u32 = 6; - pub const CHANGES_TRIE: u32 = 7; pub const AUX: u32 = 8; /// Offchain workers local storage pub const OFFCHAIN: u32 = 9; - pub const CACHE: u32 = 10; /// Transactions pub const TRANSACTION: u32 = 11; } @@ -506,13 +497,6 @@ impl BlockchainDb { let mut meta = self.meta.write(); meta.block_gap = gap; } - - // Get block changes trie root, if available. 
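// The `ProvideChtRoots` implementation removed just below computed a CHT
// (Canonical Hash Trie) root over a fixed-size window of block hashes. A
// sketch of the window arithmetic under the assumption used here: block 0 has
// no CHT, and CHT `n` covers `cht_size` consecutive blocks starting at
// n * cht_size + 1. Function names mirror the deleted `cht` helpers but the
// bodies are a simplified reconstruction.
fn block_to_cht_number(cht_size: u64, block: u64) -> Option<u64> {
	if block == 0 {
		return None
	}
	Some((block - 1) / cht_size)
}

fn start_number(cht_size: u64, cht_number: u64) -> u64 {
	cht_number * cht_size + 1
}

fn main() {
	// With a window of 2048 blocks, CHT #0 covers blocks 1..=2048.
	assert_eq!(block_to_cht_number(2048, 1), Some(0));
	assert_eq!(block_to_cht_number(2048, 2048), Some(0));
	assert_eq!(block_to_cht_number(2048, 2049), Some(1));
	assert_eq!(start_number(2048, 1), 2049);
}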
- fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block).map(|header| { - header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) - }) - } } impl sc_client_api::blockchain::HeaderBackend for BlockchainDb { @@ -646,10 +630,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb Option>> { - None - } - fn leaves(&self) -> ClientResult> { Ok(self.leaves.read().hashes()) } @@ -702,12 +682,6 @@ impl sc_client_api::blockchain::Backend for BlockchainDb sc_client_api::blockchain::ProvideCache for BlockchainDb { - fn cache(&self) -> Option>> { - None - } -} - impl HeaderMetadata for BlockchainDb { type Error = sp_blockchain::Error; @@ -745,62 +719,6 @@ impl HeaderMetadata for BlockchainDb { } } -impl ProvideChtRoots for BlockchainDb { - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - let cht_number = match cht::block_to_cht_number(cht_size, block) { - Some(number) => number, - None => return Ok(None), - }; - - let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); - - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - cht::compute_root::, _>( - cht::size(), - cht_number, - cht_range.map(|num| self.hash(num)), - ) - .map(Some) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - let cht_number = match cht::block_to_cht_number(cht_size, block) { - Some(number) => number, - None => return Ok(None), - }; - - let cht_start: NumberFor = cht::start_number(cht::size(), cht_number); - - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - cht::compute_root::, _>( - cht::size(), - cht_number, - cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - ) - .map(Some) - } -} - /// Database transaction pub struct BlockImportOperation { old_state: SyncingCachingState, Block>, @@ -808,9 +726,6 @@ pub struct BlockImportOperation { storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, offchain_storage_updates: OffchainChangesCollection, - changes_trie_updates: MemoryDB>, - changes_trie_build_cache_update: Option>>, - changes_trie_config_update: Option>, pending_block: Option>, aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, @@ -858,25 +773,14 @@ impl BlockImportOperation { ) }); - let mut changes_trie_config = None; let (root, transaction) = self.old_state.full_storage_root( storage.top.iter().map(|(k, v)| { - if &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some(Decode::decode(&mut &v[..])); - } (&k[..], Some(&v[..])) }), child_delta, ); - let changes_trie_config = match changes_trie_config { - Some(Ok(c)) => Some(c), - Some(Err(_)) => return Err(sp_blockchain::Error::InvalidState.into()), - None => None, - }; - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); Ok(root) } } @@ -899,11 +803,6 @@ impl sc_client_api::backend::BlockImportOperation leaf_state: NewBlockState, ) -> ClientResult<()> { assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - if let Some(changes_trie_config_update) = - changes_tries_storage::extract_new_configuration(&header) - { - self.changes_trie_config_update = Some(changes_trie_config_update.clone()); - } self.pending_block = Some(PendingBlock { header, body, indexed_body, justifications, leaf_state }); Ok(()) @@ -930,15 +829,6 @@ impl sc_client_api::backend::BlockImportOperation Ok(root) } - fn update_changes_trie( - &mut self, - update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - self.changes_trie_updates = update.0; - self.changes_trie_build_cache_update = Some(update.1); - Ok(()) - } - fn insert_aux(&mut self, ops: I) -> ClientResult<()> where I: IntoIterator, Option>)>, @@ -1095,7 +985,6 @@ impl FrozenForDuration { pub struct Backend { storage: Arc>, offchain_storage: offchain::LocalStorage, - changes_tries_storage: DbChangesTrieStorage, blockchain: BlockchainDb, canonicalization_delay: u64, shared_cache: SharedCache, @@ -1155,7 +1044,6 @@ impl Backend { ) -> ClientResult { let is_archive_pruning = config.state_pruning.is_archive(); let blockchain = BlockchainDb::new(db.clone(), config.transaction_storage.clone())?; - let meta = blockchain.meta.clone(); let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from_state_db(e); let state_db: StateDb<_, _> = StateDb::new( config.state_pruning.clone(), @@ -1166,22 +1054,10 @@ impl Backend { let storage_db = StorageDb { db: db.clone(), state_db, prefix_keys: !db.supports_ref_counting() }; let offchain_storage = offchain::LocalStorage::new(db.clone()); - let changes_tries_storage = DbChangesTrieStorage::new( - db, - blockchain.header_metadata_cache.clone(), - columns::META, - columns::CHANGES_TRIE, - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta, - if is_archive_pruning { None } else { Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) }, - )?; let backend = Backend { storage: Arc::new(storage_db), offchain_storage, - changes_tries_storage, blockchain, canonicalization_delay, shared_cache: new_shared_cache( @@ -1318,7 +1194,6 @@ impl Backend { header: &Block::Header, last_finalized: Option, justification: Option, - changes_trie_cache_ops: &mut Option>, finalization_displaced: &mut Option>>, ) -> ClientResult> { // TODO: ensure best chain contains this block. 
@@ -1328,10 +1203,8 @@ impl Backend { self.note_finalized( transaction, - false, header, *hash, - changes_trie_cache_ops, finalization_displaced, with_state, )?; @@ -1400,7 +1273,6 @@ impl Backend { (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap.clone()) }; - let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; @@ -1410,7 +1282,6 @@ impl Backend { &block_header, Some(last_finalized_hash), justification, - &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, )?); last_finalized_hash = block_hash; @@ -1475,11 +1346,6 @@ impl Backend { ); transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - // for tests, because config is set from within the reset_storage - if operation.changes_trie_config_update.is_none() { - operation.changes_trie_config_update = Some(None); - } - if operation.commit_state { transaction.set_from_vec(columns::META, meta_keys::FINALIZED_STATE, lookup_key); } else { @@ -1578,7 +1444,6 @@ impl Backend { let header = &pending_block.header; let is_best = pending_block.leaf_state.is_best(); - let changes_trie_updates = operation.changes_trie_updates; debug!(target: "db", "DB Commit {:?} ({}), best={}, state={}, existing={}", hash, number, is_best, operation.commit_state, existing_header, @@ -1593,10 +1458,8 @@ impl Backend { self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; self.note_finalized( &mut transaction, - true, header, hash, - &mut changes_trie_cache_ops, &mut finalization_displaced_leaves, operation.commit_state, )?; @@ -1606,21 +1469,6 @@ impl Backend { } if !existing_header { - let changes_trie_config_update = operation.changes_trie_config_update; - changes_trie_cache_ops = Some(self.changes_tries_storage.commit( - &mut transaction, - changes_trie_updates, - cache::ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - cache::ComplexBlockId::new(hash, number), - header, - finalized, - changes_trie_config_update, - changes_trie_cache_ops, - )?); - { let mut leaves = self.blockchain.leaves.write(); leaves.import(hash, number, parent_hash); @@ -1747,11 +1595,6 @@ impl Backend { ); } - if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { - self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); - } - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - if let Some((enacted, retracted)) = cache_update { self.shared_cache.write().sync(&enacted, &retracted); } @@ -1770,10 +1613,8 @@ impl Backend { fn note_finalized( &self, transaction: &mut Transaction, - is_inserted: bool, f_header: &Block::Header, f_hash: Block::Hash, - changes_trie_cache_ops: &mut Option>, displaced: &mut Option>>, with_state: bool, ) -> ClientResult<()> { @@ -1798,18 +1639,6 @@ impl Backend { apply_state_commit(transaction, commit); } - if !f_num.is_zero() { - let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( - transaction, - *f_header.parent_hash(), - f_hash, - f_num, - if is_inserted { Some(&f_header) } else { None }, - changes_trie_cache_ops.take(), - )?; - *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); - } - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); self.prune_blocks(transaction, f_num, &new_displaced)?; match displaced { 
@@ -2036,9 +1865,6 @@ impl sc_client_api::backend::Backend for Backend { storage_updates: Default::default(), child_storage_updates: Default::default(), offchain_storage_updates: Default::default(), - changes_trie_config_update: None, - changes_trie_updates: MemoryDB::default(), - changes_trie_build_cache_update: None, aux_ops: Vec::new(), finalized_blocks: Vec::new(), set_head: None, @@ -2089,19 +1915,16 @@ impl sc_client_api::backend::Backend for Backend { let header = self.blockchain.expect_header(block)?; let mut displaced = None; - let mut changes_trie_cache_ops = None; let m = self.finalize_block_with_transaction( &mut transaction, &hash, &header, None, justification, - &mut changes_trie_cache_ops, &mut displaced, )?; self.storage.db.commit(transaction)?; self.blockchain.update_meta(m); - self.changes_tries_storage.post_commit(changes_trie_cache_ops); Ok(()) } @@ -2148,10 +1971,6 @@ impl sc_client_api::backend::Backend for Backend { Ok(()) } - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - Some(&self.changes_tries_storage) - } - fn offchain_storage(&self) -> Option { Some(self.offchain_storage.clone()) } @@ -2208,7 +2027,6 @@ impl sc_client_api::backend::Backend for Backend { return Ok(c.saturated_into::>()) } let mut transaction = Transaction::new(); - let removed_number = best_number; let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else(|| { sp_blockchain::Error::UnknownBlock(format!( @@ -2241,10 +2059,6 @@ impl sc_client_api::backend::Backend for Backend { let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; - let changes_trie_cache_ops = self.changes_tries_storage.revert( - &mut transaction, - &cache::ComplexBlockId::new(removed.hash(), removed_number), - )?; if update_finalized { transaction.set_from_vec( columns::META, @@ -2283,7 +2097,6 @@ impl sc_client_api::backend::Backend for Backend { best_hash, ); self.storage.db.commit(transaction)?; - self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); self.blockchain.update_meta(MetaUpdate { hash: best_hash, number: best_number, @@ -2345,11 +2158,6 @@ impl sc_client_api::backend::Backend for Backend { apply_state_commit(&mut transaction, commit); } transaction.remove(columns::KEY_LOOKUP, hash.as_ref()); - let changes_trie_cache_ops = self - .changes_tries_storage - .revert(&mut transaction, &cache::ComplexBlockId::new(*hash, hdr.number))?; - - self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); leaves.revert(hash.clone(), hdr.number); leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); self.storage.db.commit(transaction)?; @@ -2461,32 +2269,16 @@ pub(crate) mod tests { use sp_blockchain::{lowest_common_ancestor, tree_route}; use sp_core::H256; use sp_runtime::{ - generic::DigestItem, testing::{Block as RawBlock, ExtrinsicWrapper, Header}, traits::{BlakeTwo256, Hash}, ConsensusEngineId, }; - use sp_state_machine::{TrieDBMut, TrieMut}; const CONS0_ENGINE_ID: ConsensusEngineId = *b"CON0"; const CONS1_ENGINE_ID: ConsensusEngineId = *b"CON1"; pub(crate) type Block = RawBlock>; - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { - let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); - { - let mut trie = - TrieDBMut::::new(&mut changes_trie_update, &mut changes_root); - for (key, value) in changes { - trie.insert(&key, &value).unwrap(); - } - } - - (changes_root, changes_trie_update) - } - pub fn insert_header( backend: 
&Backend, number: u64, @@ -2501,20 +2293,14 @@ pub(crate) mod tests { backend: &Backend, number: u64, parent_hash: H256, - changes: Option, Vec)>>, + _changes: Option, Vec)>>, extrinsics_root: H256, body: Vec>, transaction_index: Option>, ) -> H256 { use sp_runtime::testing::Digest; - let mut digest = Digest::default(); - let mut changes_trie_update = Default::default(); - if let Some(changes) = changes { - let (root, update) = prepare_changes(changes); - digest.push(DigestItem::ChangesTrieRoot(root)); - changes_trie_update = update; - } + let digest = Digest::default(); let header = Header { number, parent_hash, @@ -2535,8 +2321,6 @@ pub(crate) mod tests { if let Some(index) = transaction_index { op.update_transaction_index(index).unwrap(); } - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) - .unwrap(); backend.commit_operation(op).unwrap(); header_hash @@ -3241,38 +3025,6 @@ pub(crate) mod tests { } } - #[test] - fn header_cht_root_works() { - use sc_client_api::ProvideChtRoots; - - let backend = Backend::::new_test(10, 10); - - // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = - insert_header(&backend, 0, Default::default(), None, Default::default()); - let cht_size: u64 = cht::size(); - for i in 1..1 + cht_size + cht_size + 1 { - prev_hash = insert_header(&backend, i, prev_hash, None, Default::default()); - } - - let blockchain = backend.blockchain(); - - let cht_root_1 = blockchain - .header_cht_root(cht_size, cht::start_number(cht_size, 0)) - .unwrap() - .unwrap(); - let cht_root_2 = blockchain - .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap() - .unwrap(); - let cht_root_3 = blockchain - .header_cht_root(cht_size, cht::end_number(cht_size, 0)) - .unwrap() - .unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - } - #[test] fn prune_blocks_on_finalize() { for storage in &[TransactionStorageMode::BlockBody, TransactionStorageMode::StorageChain] { diff --git a/client/db/src/light.rs b/client/db/src/light.rs deleted file mode 100644 index 48cf0489cf2a0..0000000000000 --- a/client/db/src/light.rs +++ /dev/null @@ -1,1329 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! RocksDB-based light client blockchain storage. 
- -use parking_lot::RwLock; -use std::{collections::HashMap, convert::TryInto, sync::Arc}; - -use crate::{ - cache::{ComplexBlockId, DbCache, DbCacheSync, EntryType as CacheEntryType}, - utils::{self, block_id_to_lookup_key, meta_keys, read_db, read_meta, DatabaseType, Meta}, - DatabaseSettings, DbHash, FrozenForDuration, -}; -use codec::{Decode, Encode}; -use log::{debug, trace, warn}; -use sc_client_api::{ - backend::{AuxStore, NewBlockState, ProvideChtRoots}, - blockchain::{BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo}, - cht, Storage, UsageInfo, -}; -use sp_blockchain::{ - well_known_cache_keys, CachedHeaderMetadata, Error as ClientError, - HeaderBackend as BlockchainHeaderBackend, HeaderMetadata, HeaderMetadataCache, - Result as ClientResult, -}; -use sp_database::{Database, Transaction}; -use sp_runtime::{ - generic::{BlockId, DigestItem}, - traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, Zero}, -}; - -pub(crate) mod columns { - pub const META: u32 = crate::utils::COLUMN_META; - pub const KEY_LOOKUP: u32 = 1; - pub const HEADER: u32 = 2; - pub const CACHE: u32 = 3; - pub const CHT: u32 = 4; - pub const AUX: u32 = 5; -} - -/// Prefix for headers CHT. -const HEADER_CHT_PREFIX: u8 = 0; -/// Prefix for changes tries roots CHT. -const CHANGES_TRIE_CHT_PREFIX: u8 = 1; - -/// Light blockchain storage. Stores most recent headers + CHTs for older headers. -/// Locks order: meta, cache. -pub struct LightStorage { - db: Arc>, - meta: RwLock, Block::Hash>>, - cache: Arc>, - header_metadata_cache: Arc>, - io_stats: FrozenForDuration, -} - -impl LightStorage { - /// Create new storage with given settings. - pub fn new(config: DatabaseSettings) -> ClientResult { - let db = crate::utils::open_database::(&config, DatabaseType::Light)?; - Self::from_kvdb(db as Arc<_>) - } - - /// Create new memory-backed `LightStorage` for tests. 
- #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test() -> Self { - let db = Arc::new(sp_database::MemDb::default()); - Self::from_kvdb(db as Arc<_>).expect("failed to create test-db") - } - - fn from_kvdb(db: Arc>) -> ClientResult { - let meta = read_meta::(&*db, columns::HEADER)?; - let header_metadata_cache = Arc::new(HeaderMetadataCache::default()); - let cache = DbCache::new( - db.clone(), - header_metadata_cache.clone(), - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta.genesis_hash, - ComplexBlockId::new(meta.finalized_hash, meta.finalized_number), - ); - - Ok(LightStorage { - db, - meta: RwLock::new(meta), - cache: Arc::new(DbCacheSync(RwLock::new(cache))), - header_metadata_cache, - io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - }) - } - - #[cfg(test)] - pub(crate) fn cache(&self) -> &DbCacheSync { - &self.cache - } - - fn update_meta( - &self, - hash: Block::Hash, - number: NumberFor, - is_best: bool, - is_finalized: bool, - ) { - let mut meta = self.meta.write(); - - if number.is_zero() { - meta.genesis_hash = hash; - meta.finalized_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } -} - -impl BlockchainHeaderBackend for LightStorage -where - Block: BlockT, -{ - fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } - - fn info(&self) -> BlockchainInfo { - let meta = self.meta.read(); - BlockchainInfo { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash.clone(), - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - finalized_state: if meta.finalized_hash != Default::default() { - Some((meta.genesis_hash, Zero::zero())) - } else { - None - }, - number_leaves: 1, - block_gap: None, - } - } - - fn status(&self, id: BlockId) -> ClientResult { - let exists = match id { - BlockId::Hash(_) => - read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some(), - BlockId::Number(n) => n <= self.meta.read().best_number, - }; - match exists { - true => Ok(BlockStatus::InChain), - false => Ok(BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(lookup_key) = - block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? - { - let number = utils::lookup_key_to_number(&lookup_key)?; - Ok(Some(number)) - } else { - Ok(None) - } - } - - fn hash(&self, number: NumberFor) -> ClientResult> { - Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone())) - } -} - -impl HeaderMetadata for LightStorage { - type Error = ClientError; - - fn header_metadata( - &self, - hash: Block::Hash, - ) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).map_or_else( - || { - self.header(BlockId::hash(hash))? 
- .map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache - .insert_header_metadata(header_metadata.hash, header_metadata.clone()); - header_metadata - }) - .ok_or_else(|| { - ClientError::UnknownBlock(format!("header not found in db: {}", hash)) - }) - }, - Ok, - ) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.header_metadata_cache.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.header_metadata_cache.remove_header_metadata(hash); - } -} - -impl LightStorage { - // Get block changes trie root, if available. - fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block).map(|header| { - header.and_then(|header| header.digest().log(DigestItem::as_changes_trie_root).cloned()) - }) - } - - /// Handle setting head within a transaction. `route_to` should be the last - /// block that existed in the database. `best_to` should be the best block - /// to be set. - /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction( - &self, - transaction: &mut Transaction, - route_to: Block::Hash, - best_to: (NumberFor, Block::Hash), - ) -> ClientResult<()> { - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; - - // handle reorg. - let meta = self.meta.read(); - if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route(self, meta.best_hash, route_to)?; - - // update block number to hash lookup entries. - for retracted in tree_route.retracted() { - if retracted.hash == meta.finalized_hash { - // TODO: can we recover here? - warn!( - "Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash) - ); - } - - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - retracted.number, - )?; - } - - for enacted in tree_route.enacted() { - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - enacted.number, - enacted.hash, - )?; - } - } - - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - )?; - - Ok(()) - } - - // Note that a block is finalized. Only call with child of last finalized block. 
- fn note_finalized( - &self, - transaction: &mut Transaction, - header: &Block::Header, - hash: Block::Hash, - ) -> ClientResult<()> { - let meta = self.meta.read(); - if &meta.finalized_hash != header.parent_hash() { - return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( - "Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash - )) - .into()) - } - - let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - // build new CHT(s) if required - if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) { - let new_cht_start: NumberFor = cht::start_number(cht::size(), new_cht_number); - - let mut current_num = new_cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - let new_header_cht_root = cht::compute_root::, _>( - cht::size(), - new_cht_number, - cht_range.map(|num| self.hash(num)), - )?; - transaction.set( - columns::CHT, - &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, - new_header_cht_root.as_ref(), - ); - - // if the header includes changes trie root, let's build a changes tries roots CHT - if header.digest().log(DigestItem::as_changes_trie_root).is_some() { - let mut current_num = new_cht_start; - let cht_range = std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let new_changes_trie_cht_root = - cht::compute_root::, _>( - cht::size(), - new_cht_number, - cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), - )?; - transaction.set( - columns::CHT, - &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, - new_changes_trie_cht_root.as_ref(), - ); - } - - // prune headers that are replaced with CHT - let mut prune_block = new_cht_start; - let new_cht_end = cht::end_number(cht::size(), new_cht_number); - trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", - new_cht_start, new_cht_end, new_cht_number); - - while prune_block <= new_cht_end { - if let Some(hash) = self.hash(prune_block)? { - let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? - .expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. q.e.d."); - utils::remove_key_mappings( - transaction, - columns::KEY_LOOKUP, - prune_block, - hash, - )?; - transaction.remove(columns::HEADER, &lookup_key); - } - prune_block += One::one(); - } - } - - Ok(()) - } - - /// Read CHT root of given type for the block. - fn read_cht_root( - &self, - cht_type: u8, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); - - let meta = self.meta.read(); - let max_cht_number = cht::max_cht_number(cht_size, meta.finalized_number); - let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; - match max_cht_number { - Some(max_cht_number) if cht_number <= max_cht_number => (), - _ => return Ok(None), - } - - let cht_start = cht::start_number(cht_size, cht_number); - self.db - .get(columns::CHT, &cht_key(cht_type, cht_start)?) 
- .ok_or_else(no_cht_for_block) - .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) - .map(Some) - } -} - -impl AuxStore for LightStorage -where - Block: BlockT, -{ - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - for (k, v) in insert { - transaction.set(columns::AUX, k, v); - } - for k in delete { - transaction.remove(columns::AUX, k); - } - self.db.commit(transaction)?; - - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.db.get(columns::AUX, key)) - } -} - -impl Storage for LightStorage -where - Block: BlockT, -{ - fn import_header( - &self, - header: Block::Header, - mut cache_at: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - - let hash = header.hash(); - let number = *header.number(); - let parent_hash = *header.parent_hash(); - - for (key, maybe_val) in aux_ops { - match maybe_val { - Some(val) => transaction.set_from_vec(columns::AUX, &key, val), - None => transaction.remove(columns::AUX, &key), - } - } - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash)?; - - if leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; - } - - utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; - transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); - - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache - .insert_header_metadata(header.hash().clone(), header_metadata); - - let is_genesis = number.is_zero(); - if is_genesis { - self.cache.0.write().set_genesis_hash(hash); - transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - } - - let finalized = match leaf_state { - _ if is_genesis => true, - NewBlockState::Final => true, - _ => false, - }; - - if finalized { - self.note_finalized(&mut transaction, &header, hash)?; - } - - // update changes trie configuration cache - if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { - if let Some(new_configuration) = - crate::changes_tries_storage::extract_new_configuration(&header) - { - cache_at - .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - } - } - - { - let mut cache = self.cache.0.write(); - let cache_ops = cache - .transaction(&mut transaction) - .on_block_insert( - ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - ComplexBlockId::new(hash, number), - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(); - - debug!("Light DB Commit {:?} ({})", hash, number); - - self.db.commit(transaction)?; - cache.commit(cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there are cache_ops; qed", - ); - } - - self.update_meta(hash, number, leaf_state.is_best(), finalized); - - Ok(()) - } - - fn set_head(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? 
{ - let hash = header.hash(); - let number = header.number(); - - let mut transaction = Transaction::new(); - self.set_head_with_transaction( - &mut transaction, - hash.clone(), - (number.clone(), hash.clone()), - )?; - self.db.commit(transaction)?; - self.update_meta(hash, header.number().clone(), true, false); - - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot set head {:?}", id))) - } - } - - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let mut transaction = Transaction::new(); - let hash = header.hash(); - let number = *header.number(); - self.note_finalized(&mut transaction, &header, hash.clone())?; - { - let mut cache = self.cache.0.write(); - let cache_ops = cache - .transaction(&mut transaction) - .on_block_finalize( - ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - ComplexBlockId::new(hash, number), - )? - .into_ops(); - - self.db.commit(transaction)?; - cache.commit(cache_ops).expect( - "only fails if cache with given name isn't loaded yet; cache is already loaded \ - because there are cache_ops; qed", - ); - } - self.update_meta(hash, header.number().clone(), false, true); - - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot finalize block {:?}", id))) - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - Some(self.cache.clone()) - } - - fn usage_info(&self) -> Option { - use sc_client_api::{IoInfo, MemoryInfo, MemorySize}; - - // TODO: reimplement IO stats - let database_cache = MemorySize::from_bytes(0); - let io_stats = self.io_stats.take_or_else(|| kvdb::IoStats::empty()); - - Some(UsageInfo { - memory: MemoryInfo { - database_cache, - state_cache: Default::default(), - state_db: Default::default(), - }, - io: IoInfo { - transactions: io_stats.transactions, - bytes_read: io_stats.bytes_read, - bytes_written: io_stats.bytes_written, - writes: io_stats.writes, - reads: io_stats.reads, - average_transaction_size: io_stats.avg_transaction_size() as u64, - // Light client does not track those - state_reads: 0, - state_writes: 0, - state_reads_cache: 0, - state_writes_cache: 0, - state_writes_nodes: 0, - }, - }) - } -} - -impl ProvideChtRoots for LightStorage -where - Block: BlockT, -{ - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } -} - -/// Build the key for inserting header-CHT at given block. 
-fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { - let mut key = [cht_type; 5]; - key[1..].copy_from_slice(&utils::number_index_key(block)?); - Ok(key) -} - -#[cfg(test)] -pub(crate) mod tests { - use super::*; - use sc_client_api::cht; - use sp_blockchain::{lowest_common_ancestor, tree_route}; - use sp_core::ChangesTrieConfiguration; - use sp_runtime::{ - generic::{ChangesTrieSignal, DigestItem}, - testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash}, - }; - - type Block = RawBlock>; - type AuthorityId = sp_core::ed25519::Public; - - pub fn default_header(parent: &Hash, number: u64) -> Header { - Header { - number: number.into(), - parent_hash: *parent, - state_root: Hash::random(), - digest: Default::default(), - extrinsics_root: Default::default(), - } - } - - fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { - let mut header = default_header(parent, number); - header - .digest - .logs - .push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); - header - } - - fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { - let mut header = default_header(parent, number); - header.extrinsics_root = extrinsics_root; - header - } - - pub fn insert_block Header>( - db: &LightStorage, - cache: HashMap>, - mut header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Best, Vec::new()).unwrap(); - hash - } - - fn insert_final_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Final, Vec::new()).unwrap(); - hash - } - - fn insert_non_best_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Normal, Vec::new()).unwrap(); - hash - } - - #[test] - fn returns_known_header() { - let db = LightStorage::new_test(); - let known_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); - let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); - assert_eq!(header_by_hash, header_by_number); - } - - #[test] - fn does_not_return_unknown_header() { - let db = LightStorage::::new_test(); - assert!(db.header(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap().is_none()); - assert!(db.header(BlockId::Number(0)).unwrap().is_none()); - } - - #[test] - fn returns_info() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let info = db.info(); - assert_eq!(info.best_hash, genesis_hash); - assert_eq!(info.best_number, 0); - assert_eq!(info.genesis_hash, genesis_hash); - let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - let info = db.info(); - assert_eq!(info.best_hash, best_hash); - assert_eq!(info.best_number, 1); - assert_eq!(info.genesis_hash, genesis_hash); - } - - #[test] - fn returns_block_status() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!( - 
db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), - BlockStatus::Unknown - ); - assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); - } - - #[test] - fn returns_block_hash() { - let db = LightStorage::new_test(); - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); - assert_eq!(db.hash(1).unwrap(), None); - } - - #[test] - fn import_header_works() { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - - let genesis_hash = - insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(raw_db.count(columns::HEADER), 1); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); - - let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - assert_eq!(raw_db.count(columns::HEADER), 2); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 4); - } - - #[test] - fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>( - header_producer: F, - ) -> (Arc, LightStorage) { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - - // insert genesis block header (never pruned) - let mut prev_hash = - insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); - - // insert SIZE blocks && ensure that nothing is pruned - - for number in 0..cht::size() { - prev_hash = - insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert next SIZE blocks && ensure that nothing is pruned - for number in 0..(cht_size as _) { - prev_hash = insert_block(&db, HashMap::new(), || { - header_producer(&prev_hash, 1 + cht_size + number) - }); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of - // this CHT are pruned nothing is yet finalized, so nothing is pruned. - prev_hash = insert_block(&db, HashMap::new(), || { - header_producer(&prev_hash, 1 + cht_size + cht_size) - }); - assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // now finalize the block. 
- for i in (0..(ucht_size + ucht_size)).map(|i| i + 1) { - db.finalize_header(BlockId::Number(i as _)).unwrap(); - } - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - (raw_db, db) - } - - // when headers are created without changes tries roots - let (raw_db, db) = insert_headers(default_header); - let cht_size: u64 = cht::size(); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), (2 * (1 + cht_size + 1)) as usize); - assert_eq!(raw_db.count(columns::CHT), 1); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).is_err()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - - // when headers are created with changes tries roots - let (raw_db, db) = insert_headers(header_with_changes_trie); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::CHT), 2); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - } - - #[test] - fn get_cht_fails_for_genesis_block() { - assert!(LightStorage::::new_test().header_cht_root(cht::size(), 0).is_err()); - } - - #[test] - fn get_cht_fails_for_non_existent_cht() { - let cht_size: u64 = cht::size(); - assert!(LightStorage::::new_test() - .header_cht_root(cht_size, cht_size / 2) - .unwrap() - .is_none()); - } - - #[test] - fn get_cht_works() { - let db = LightStorage::new_test(); - - // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || { - header_with_changes_trie(&Default::default(), 0) - }); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - for i in 1..1 + ucht_size + ucht_size + 1 { - prev_hash = insert_block(&db, HashMap::new(), || { - header_with_changes_trie(&prev_hash, i as u64) - }); - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - } - - let cht_root_1 = - db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db - .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap() - .unwrap(); - let cht_root_3 = - db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - - let cht_root_1 = db - .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)) - .unwrap() - .unwrap(); - let cht_root_2 = db - .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) - .unwrap() - .unwrap(); - let cht_root_3 = db - .changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)) - .unwrap() - .unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - } - - #[test] - fn tree_route_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. 
- let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. - let b1 = insert_block(&db, HashMap::new(), || { - header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) - }); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let tree_route = tree_route(&db, a3, b2).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2, a1] - ); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![b1, b2] - ); - } - - { - let tree_route = tree_route(&db, a1, a3).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!( - tree_route.enacted().iter().map(|r| r.hash).collect::>(), - vec![a2, a3] - ); - } - - { - let tree_route = tree_route(&db, a3, a1).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!( - tree_route.retracted().iter().map(|r| r.hash).collect::>(), - vec![a3, a2] - ); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = tree_route(&db, a2, a2).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn lowest_common_ancestor_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. - let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. 
- let b1 = insert_block(&db, HashMap::new(), || { - header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) - }); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let lca = lowest_common_ancestor(&db, a3, b2).unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor(&db, a1, a3).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a3, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a3).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - - { - let lca = lowest_common_ancestor(&db, a2, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a2).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - } - - #[test] - fn authorities_are_cached() { - let db = LightStorage::new_test(); - - fn run_checks( - db: &LightStorage, - max: u64, - checks: &[(u64, Option>)], - ) { - for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { - let actual = authorities(db.cache(), BlockId::Number(*at)); - assert_eq!(*expected, actual); - } - } - - fn same_authorities() -> HashMap> { - HashMap::new() - } - - fn make_authorities( - authorities: Vec, - ) -> HashMap> { - let mut map = HashMap::new(); - map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - map - } - - fn authorities( - cache: &dyn BlockchainCache, - at: BlockId, - ) -> Option> { - cache - .get_at(&well_known_cache_keys::AUTHORITIES, &at) - .unwrap_or(None) - .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) - } - - let auth1 = || AuthorityId::from_raw([1u8; 32]); - let auth2 = || AuthorityId::from_raw([2u8; 32]); - let auth3 = || AuthorityId::from_raw([3u8; 32]); - let auth4 = || AuthorityId::from_raw([4u8; 32]); - let auth5 = || AuthorityId::from_raw([5u8; 32]); - let auth6 = || AuthorityId::from_raw([6u8; 32]); - - let (hash2, hash6) = { - // first few blocks are instantly finalized - // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) - let checks = vec![ - (0, None), - (1, None), - (2, Some(vec![auth1()])), - (3, Some(vec![auth1()])), - (4, Some(vec![auth1(), auth2()])), - (5, Some(vec![auth1(), auth2()])), - (6, Some(vec![auth1(), auth2()])), - ]; - - let hash0 = insert_final_block(&db, same_authorities(), || { - default_header(&Default::default(), 0) - }); - run_checks(&db, 0, &checks); - let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); - run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash1, 2) - }); - run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash2, 3) - }); - run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash3, 4) - }); - run_checks(&db, 4, &checks); - let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash4, 5) - }); - run_checks(&db, 5, &checks); - let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); - run_checks(&db, 6, &checks); - - (hash2, hash6) - }; - - { - // some older non-best blocks are inserted - // ... 
-> B2(1) -> B2_1(1) -> B2_2(2) - // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { - default_header(&hash2, 3) - }); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = - insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { - default_header(&hash2_1, 4) - }); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); - } - - let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { - // inserting non-finalized blocks - // B6(None) -> B7(3) -> B8(3) - // \> B6_1(4) -> B6_2(4) - // \> B6_1_1(5) - // \> B6_1_2(6) -> B6_1_3(7) - - let hash7 = - insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = - insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = - insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { - default_header(&hash6_1, 8) - }); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { - default_header(&hash6_1, 8) - }); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = - insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - 
assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - - (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) - }; - - { - // finalize block hash6_1 - db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - // finalize block hash6_2 - db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - } - } - - #[test] - fn database_is_reopened() { - let db = LightStorage::new_test(); - let hash0 = - insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); - - let db = db.db; - let db = LightStorage::from_kvdb(db).unwrap(); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash::(hash0)).unwrap().unwrap().hash(), hash0); - } - - #[test] - fn aux_store_works() { - let db = LightStorage::::new_test(); - - // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()) - .unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), None); - - // delete aux1 + insert aux3 using import operation - db.import_header( - default_header(&Default::default(), 0), - HashMap::new(), - NewBlockState::Best, - vec![(vec![3], Some(vec![103])), (vec![1], None)], - ) - .unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), None); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103])); - } - - #[test] - fn cache_can_be_initialized_after_genesis_inserted() { - let (genesis_hash, storage) = { - let db = LightStorage::::new_test(); - - // before cache is initialized => Err - assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); - - // insert genesis block (no value for cache is provided) - let mut genesis_hash = None; - insert_block(&db, HashMap::new(), || { - let header = default_header(&Default::default(), 0); - genesis_hash = Some(header.hash()); - header - }); - - // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), None); - - // initialize cache - db.cache().initialize(b"test", vec![42]).unwrap(); - - // after genesis is inserted + cache is initialized => Some - assert_eq!( - 
db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), - Some(((0, genesis_hash.unwrap()), None, vec![42])), - ); - - (genesis_hash, db.db) - }; - - // restart && check that after restart value is read from the cache - let db = - LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); - assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), - Some(((0, genesis_hash.unwrap()), None, vec![42])), - ); - } - - #[test] - fn changes_trie_configuration_is_tracked_on_light_client() { - let db = LightStorage::::new_test(); - - let new_config = Some(ChangesTrieConfiguration::new(2, 2)); - - // insert block#0 && block#1 (no value for cache is provided) - let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!( - db.cache() - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)) - .unwrap() - .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), - None, - ); - - // insert configuration at block#1 (starts from block#2) - insert_block(&db, HashMap::new(), || { - let mut header = default_header(&hash0, 1); - header.digest_mut().push(DigestItem::ChangesTrieSignal( - ChangesTrieSignal::NewConfiguration(new_config.clone()), - )); - header - }); - assert_eq!( - db.cache() - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)) - .unwrap() - .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), - Some(new_config), - ); - } -} diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index 1b645ca9fb2b9..61c0b94dc701c 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -16,7 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . use crate::{ - columns, light, + columns, utils::{DatabaseType, NUM_COLUMNS}, }; /// A `Database` adapter for parity-db. @@ -61,10 +61,6 @@ pub fn open>( state_col.preimage = true; state_col.uniform = true; }, - DatabaseType::Light => { - config.columns[light::columns::HEADER as usize].compression = - parity_db::CompressionType::Lz4; - }, } let db = if create { diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 0e895eaaf3851..b098a7864bafb 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -56,10 +56,6 @@ pub mod meta_keys { pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; /// Block gap. pub const BLOCK_GAP: &[u8; 3] = b"gap"; - /// Meta information prefix for list-based caches. - pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; - /// Meta information for changes tries key. - pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie"; /// Genesis block hash. pub const GENESIS_HASH: &[u8; 3] = b"gen"; /// Leaves prefix list key. @@ -95,8 +91,6 @@ pub type NumberIndexKey = [u8; 4]; pub enum DatabaseType { /// Full node database. Full, - /// Light node database. - Light, } /// Convert block number into short lookup key (LE representation) for @@ -124,19 +118,6 @@ where Ok(lookup_key) } -/// Convert block lookup key into block number. -/// all block lookup keys start with the block number. -pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result -where - N: From, -{ - if key.len() < 4 { - return Err(sp_blockchain::Error::Backend("Invalid block key".into())) - } - Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32)) - .map(Into::into) -} - /// Delete number to hash mapping in DB transaction. 
pub fn remove_number_to_key_mapping>( transaction: &mut Transaction, @@ -147,18 +128,6 @@ pub fn remove_number_to_key_mapping>( Ok(()) } -/// Remove key mappings. -pub fn remove_key_mappings, H: AsRef<[u8]>>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, -) -> sp_blockchain::Result<()> { - remove_number_to_key_mapping(transaction, key_lookup_col, number)?; - transaction.remove(key_lookup_col, hash.as_ref()); - Ok(()) -} - /// Place a number mapping into the database. This maps number to current perceived /// block hash at that position. pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( @@ -357,18 +326,6 @@ fn open_kvdb_rocksdb( other_col_budget, ); }, - DatabaseType::Light => { - let col_budget = cache_size / (NUM_COLUMNS as usize); - for i in 0..NUM_COLUMNS { - memory_budget.insert(i, col_budget); - } - log::trace!( - target: "db", - "Open RocksDB light database at {:?}, column cache: {} MiB", - path, - col_budget, - ); - }, } db_config.memory_budget = memory_budget; @@ -424,8 +381,7 @@ fn maybe_migrate_to_type_subdir( // See if there's a file identifying a rocksdb or paritydb folder in the parent dir and // the target path ends in a role specific directory if (basedir.join("db_version").exists() || basedir.join("metadata").exists()) && - (p.ends_with(DatabaseType::Full.as_str()) || - p.ends_with(DatabaseType::Light.as_str())) + (p.ends_with(DatabaseType::Full.as_str())) { // Try to open the database to check if the current `DatabaseType` matches the type of // database stored in the target directory and close the database on success. @@ -501,18 +457,6 @@ pub fn read_header( } } -/// Required header from the database. -pub fn require_header( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, -) -> sp_blockchain::Result { - read_header(db, col_index, col, id).and_then(|header| { - header.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id))) - }) -} - /// Read meta from the database. 
pub fn read_meta( db: &dyn Database, @@ -598,7 +542,6 @@ impl DatabaseType { pub fn as_str(&self) -> &'static str { match *self { DatabaseType::Full => "full", - DatabaseType::Light => "light", } } } @@ -669,23 +612,12 @@ mod tests { assert!(old_db_path.join(db_type.as_str()).join(db_check_file).exists()); } - check_dir_for_db_type( - DatabaseType::Light, - DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, - "db_version", - ); check_dir_for_db_type( DatabaseType::Full, DatabaseSource::RocksDb { path: PathBuf::new(), cache_size: 128 }, "db_version", ); - #[cfg(feature = "with-parity-db")] - check_dir_for_db_type( - DatabaseType::Light, - DatabaseSource::ParityDb { path: PathBuf::new() }, - "metadata", - ); #[cfg(feature = "with-parity-db")] check_dir_for_db_type( DatabaseType::Full, @@ -709,16 +641,8 @@ mod tests { assert!(!old_db_path.join("light/db_version").exists()); assert!(!old_db_path.join("full/db_version").exists()); } - let source = DatabaseSource::RocksDb { - path: old_db_path.join(DatabaseType::Light.as_str()), - cache_size: 128, - }; - let settings = db_settings(source); - let db_res = open_database::(&settings, DatabaseType::Light); - assert!(db_res.is_err(), "Opening a light database in full role should fail"); // assert nothing was changed assert!(old_db_path.join("db_version").exists()); - assert!(!old_db_path.join("light/db_version").exists()); assert!(!old_db_path.join("full/db_version").exists()); } } @@ -735,7 +659,6 @@ mod tests { #[test] fn database_type_as_str_works() { assert_eq!(DatabaseType::Full.as_str(), "full"); - assert_eq!(DatabaseType::Light.as_str(), "light"); } #[test] diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index fe964f47ba374..3a911e0a0f14d 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -40,7 +40,7 @@ use tracing_subscriber::layer::SubscriberExt; use crate::WasmExecutionMethod; -pub type TestExternalities = CoreTestExternalities; +pub type TestExternalities = CoreTestExternalities; type HostFunctions = sp_io::SubstrateHostFunctions; /// Simple macro that runs a given method as test with the available wasm execution methods. 
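For context on the lookup-key scheme touched in `client/db/src/utils.rs` above: the removed `lookup_key_to_number` helper is the inverse of the retained `number_index_key`. Despite the "LE representation" wording in the retained doc comment, both directions pack the block number into the first four bytes of the lookup key in big-endian order, so lexicographic key order matches block-number order. A self-contained sketch of the round trip (plain Rust, not part of this patch; the two helpers are re-declared here over `u32` purely for illustration):

fn number_index_key(n: u32) -> [u8; 4] {
	// Most significant byte first, matching the decoder removed above.
	[(n >> 24) as u8, ((n >> 16) & 0xff) as u8, ((n >> 8) & 0xff) as u8, (n & 0xff) as u8]
}

fn lookup_key_to_number(key: &[u8]) -> Result<u32, String> {
	// All block lookup keys start with the big-endian block number,
	// so the number is recoverable from the key prefix alone.
	if key.len() < 4 {
		return Err("Invalid block key".into())
	}
	Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32))
}

fn main() {
	let key = number_index_key(0x0102_0304);
	assert_eq!(lookup_key_to_number(&key).unwrap(), 0x0102_0304);
}

The encoding itself is untouched by this patch; only the decode direction is dropped, along with its light-client callers.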
diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d54f7234b44b4..000f7397ac9d9 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -19,7 +19,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::Decode; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_consensus::{ @@ -35,7 +35,7 @@ use sp_core::hashing::twox_128; use sp_finality_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, - traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}, + traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, Justification, }; @@ -89,7 +89,6 @@ impl JustificationImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, SC: SelectChain, @@ -229,7 +228,6 @@ pub fn find_forced_change( impl GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, @@ -515,7 +513,6 @@ where impl BlockImport for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, Client::Api: GrandpaApi, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 452659ced6a70..e7618929fd089 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -58,7 +58,7 @@ use futures::{prelude::*, StreamExt}; use log::{debug, error, info}; -use parity_scale_codec::{Decode, Encode}; +use parity_scale_codec::Decode; use parking_lot::RwLock; use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ @@ -77,7 +77,7 @@ use sp_core::crypto::Public; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, DigestFor, NumberFor, Zero}, + traits::{Block as BlockT, NumberFor, Zero}, }; pub use finality_grandpa::BlockNumberOps; @@ -718,7 +718,6 @@ where SC: SelectChain + 'static, VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, - DigestFor: Encode, C: ClientForGrandpa + 'static, C::Api: GrandpaApi, { diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 1aef7cd1b017a..77318244eefa7 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -43,6 +43,7 @@ use sp_finality_grandpa::{ use sp_keyring::Ed25519Keyring; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ + codec::Encode, generic::{BlockId, DigestItem}, traits::{Block as BlockT, Header as HeaderT}, Justifications, @@ -139,26 +140,20 @@ impl TestNetFactory for GrandpaTestNet { &self, client: PeersClient, ) -> (BlockImportAdapter, Option>, PeerData) { - match client { - PeersClient::Full(ref client, ref backend) => { - let (import, link) = block_import( - client.clone(), - &self.test_config, - LongestChain::new(backend.clone()), - None, - ) - .expect("Could not create block import for fresh peer."); - let justification_import = Box::new(import.clone()); - ( - BlockImportAdapter::new(import), - Some(justification_import), - Mutex::new(Some(link)), - ) - }, - PeersClient::Light(..) 
=> { - panic!("Light client is not used in tests."); - }, - } + let (client, backend) = (client.as_client(), client.as_backend()); + let (import, link) = block_import( + client.clone(), + &self.test_config, + LongestChain::new(backend.clone()), + None, + ) + .expect("Could not create block import for fresh peer."); + let justification_import = Box::new(import.clone()); + ( + BlockImportAdapter::new(import), + Some(justification_import), + Mutex::new(Some(link)), + ) } fn peer(&mut self, i: usize) -> &mut GrandpaPeer { @@ -466,7 +461,7 @@ fn finalize_3_voters_1_full_observer() { // all peers should have stored the justification for the best finalized block #20 for peer_id in 0..4 { - let client = net.lock().peers[peer_id].client().as_full().unwrap(); + let client = net.lock().peers[peer_id].client().as_client(); let justification = crate::aux_schema::best_justification::<_, Block>(&*client).unwrap().unwrap(); @@ -539,7 +534,7 @@ fn transition_3_voters_twice_1_full_observer() { net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { - let full_client = peer.client().as_full().expect("only full clients are used in test"); + let full_client = peer.client().as_client(); assert_eq!(full_client.chain_info().best_number, 1, "Peer #{} failed to sync", i); let set: AuthoritySet = @@ -614,7 +609,7 @@ fn transition_3_voters_twice_1_full_observer() { .take_while(|n| future::ready(n.header.number() < &30)) .for_each(move |_| future::ready(())) .map(move |()| { - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); @@ -835,7 +830,7 @@ fn force_change_to_new_set() { for (i, peer) in net.lock().peers().iter().enumerate() { assert_eq!(peer.client().info().best_number, 26, "Peer #{} failed to sync", i); - let full_client = peer.client().as_full().expect("only full clients are used in test"); + let full_client = peer.client().as_client(); let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); @@ -861,7 +856,7 @@ fn allows_reimporting_change_blocks() { let client = net.peer(0).client().clone(); let (mut block_import, ..) = net.make_block_import(client.clone()); - let full_client = client.as_full().unwrap(); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); @@ -908,7 +903,7 @@ fn test_bad_justification() { let client = net.peer(0).client().clone(); let (mut block_import, ..) = net.make_block_import(client.clone()); - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); @@ -1148,7 +1143,7 @@ fn voter_persists_its_votes() { .await; let block_30_hash = - net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); + net.lock().peer(0).client().as_client().hash(30).unwrap().unwrap(); // we restart alice's voter abort.abort(); @@ -1581,7 +1576,7 @@ fn imports_justification_for_regular_blocks_on_import() { let client = net.peer(0).client().clone(); let (mut block_import, ..) 
= net.make_block_import(client.clone()); - let full_client = client.as_full().expect("only full clients are used in test"); + let full_client = client.as_client(); let builder = full_client .new_block_at(&BlockId::Number(0), Default::default(), false) .unwrap(); diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml deleted file mode 100644 index cc567c60524a1..0000000000000 --- a/client/light/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -description = "components for a light client" -name = "sc-light" -version = "4.0.0-dev" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -authors = ["Parity Technologies "] -edition = "2018" -homepage = "https://substrate.io" -repository = "https://github.com/paritytech/substrate/" -documentation = "https://docs.rs/sc-light" -readme = "README.md" - -[dependencies] -parking_lot = "0.11.1" -hash-db = "0.15.2" -sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } -sp-externalities = { version = "0.10.0-dev", path = "../../primitives/externalities" } -sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } -sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } -sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } -sc-client-api = { version = "4.0.0-dev", path = "../api" } -sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "2.0.0" } -sc-executor = { version = "0.10.0-dev", path = "../executor" } - -[features] -default = [] diff --git a/client/light/README.md b/client/light/README.md deleted file mode 100644 index 1ba1f155b1652..0000000000000 --- a/client/light/README.md +++ /dev/null @@ -1,3 +0,0 @@ -Light client components. - -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file diff --git a/client/light/src/backend.rs b/client/light/src/backend.rs deleted file mode 100644 index 3091dce625a3f..0000000000000 --- a/client/light/src/backend.rs +++ /dev/null @@ -1,578 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client backend. Only stores headers and justifications of blocks. -//! Everything else is requested from full nodes on demand. 
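// A stand-alone sketch (simplified, hypothetical types) of the state model the
// backend below implements: the genesis state is held in memory, and every
// other state read fails locally because the data only exists on full nodes.
use std::collections::HashMap;

enum GenesisOrUnavailable {
    Genesis(HashMap<Vec<u8>, Vec<u8>>),
    Unavailable,
}

impl GenesisOrUnavailable {
    fn storage(&self, key: &[u8]) -> Result<Option<Vec<u8>>, &'static str> {
        match self {
            // Genesis values were imported with the genesis block and can
            // always be answered from memory.
            GenesisOrUnavailable::Genesis(state) => Ok(state.get(key).cloned()),
            // The state exists, but not here: surface an explicit error
            // rather than pretending the key is absent.
            GenesisOrUnavailable::Unavailable => Err("state not available on light client"),
        }
    }
}

fn main() {
    let genesis =
        GenesisOrUnavailable::Genesis(HashMap::from([(b":code".to_vec(), vec![1, 2, 3])]));
    assert_eq!(genesis.storage(b":code").unwrap(), Some(vec![1, 2, 3]));
    assert!(GenesisOrUnavailable::Unavailable.storage(b":code").is_err());
}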
- -use parking_lot::RwLock; -use std::{ - collections::{HashMap, HashSet}, - sync::Arc, -}; - -use codec::{Decode, Encode}; - -use super::blockchain::Blockchain; -use hash_db::Hasher; -use sc_client_api::{ - backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, - PrunableStateChangesTrieStorage, RemoteBackend, - }, - blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, - in_mem::check_genesis_storage, - light::Storage as BlockchainStorage, - UsageInfo, -}; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_core::{ - offchain::storage::InMemOffchainStorage, - storage::{well_known_keys, ChildInfo}, - ChangesTrieConfiguration, -}; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}, - Justification, Justifications, Storage, -}; -use sp_state_machine::{ - Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, - IndexOperation, StorageCollection, TrieBackend, -}; - -const IN_MEMORY_EXPECT_PROOF: &str = - "InMemory state backend has Void error type and always succeeds; qed"; - -/// Light client backend. -pub struct Backend { - blockchain: Arc>, - genesis_state: RwLock>>, - import_lock: RwLock<()>, -} - -/// Light block (header and justification) import operation. -pub struct ImportOperation { - header: Option, - cache: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec>, - set_head: Option>, - storage_update: Option>>, - changes_trie_config_update: Option>, - _phantom: std::marker::PhantomData, -} - -/// Either in-memory genesis state, or locally-unavailable state. -pub enum GenesisOrUnavailableState { - /// Genesis state - storage values are stored in-memory. - Genesis(InMemoryBackend), - /// We know that state exists, but all calls will fail with error, because it - /// isn't locally available. - Unavailable, -} - -impl Backend { - /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { blockchain, genesis_state: RwLock::new(None), import_lock: Default::default() } - } - - /// Get shared blockchain reference. 
- pub fn blockchain(&self) -> &Arc> { - &self.blockchain - } -} - -impl AuxStore for Backend { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >( - &self, - insert: I, - delete: D, - ) -> ClientResult<()> { - self.blockchain.storage().insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.blockchain.storage().get_aux(key) - } -} - -impl ClientBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type BlockImportOperation = ImportOperation; - type Blockchain = Blockchain; - type State = GenesisOrUnavailableState>; - type OffchainStorage = InMemOffchainStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(ImportOperation { - header: None, - cache: Default::default(), - leaf_state: NewBlockState::Normal, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - storage_update: None, - changes_trie_config_update: None, - _phantom: Default::default(), - }) - } - - fn begin_state_operation( - &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId, - ) -> ClientResult<()> { - Ok(()) - } - - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { - if !operation.finalized_blocks.is_empty() { - for block in operation.finalized_blocks { - self.blockchain.storage().finalize_header(block)?; - } - } - - if let Some(header) = operation.header { - let is_genesis_import = header.number().is_zero(); - if let Some(new_config) = operation.changes_trie_config_update { - operation - .cache - .insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); - } - self.blockchain.storage().import_header( - header, - operation.cache, - operation.leaf_state, - operation.aux_ops, - )?; - - // when importing genesis block => remember its state - if is_genesis_import { - *self.genesis_state.write() = operation.storage_update.take(); - } - } else { - for (key, maybe_val) in operation.aux_ops { - match maybe_val { - Some(val) => self - .blockchain - .storage() - .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, - None => - self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, - } - } - } - - if let Some(set_head) = operation.set_head { - self.blockchain.storage().set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block( - &self, - block: BlockId, - _justification: Option, - ) -> ClientResult<()> { - self.blockchain.storage().finalize_header(block) - } - - fn append_justification( - &self, - _block: BlockId, - _justification: Justification, - ) -> ClientResult<()> { - Ok(()) - } - - fn blockchain(&self) -> &Blockchain { - &self.blockchain - } - - fn usage_info(&self) -> Option { - self.blockchain.storage().usage_info() - } - - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - None - } - - fn offchain_storage(&self) -> Option { - None - } - - fn state_at(&self, block: BlockId) -> ClientResult { - let block_number = self.blockchain.expect_block_number_from_id(&block)?; - - // special case for genesis block - if block_number.is_zero() { - if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)) - } - } - - // else return unavailable state. We do not return error here, because error - // would mean that we do not know this state at all. 
But we know that it exists - Ok(GenesisOrUnavailableState::Unavailable) - } - - fn revert( - &self, - _n: NumberFor, - _revert_finalized: bool, - ) -> ClientResult<(NumberFor, HashSet)> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn remove_leaf_block(&self, _hash: &Block::Hash) -> ClientResult<()> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } -} - -impl RemoteBackend for Backend> -where - Block: BlockT, - S: BlockchainStorage + 'static, - Block::Hash: Ord, -{ - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() && - self.blockchain - .expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - self.blockchain.clone() - } -} - -impl BlockImportOperation for ImportOperation -where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, -{ - type State = GenesisOrUnavailableState>; - - fn state(&self) -> ClientResult> { - // None means 'locally-stateless' backend - Ok(None) - } - - fn set_block_data( - &mut self, - header: Block::Header, - _body: Option>, - _indexed_body: Option>>, - _justifications: Option, - state: NewBlockState, - ) -> ClientResult<()> { - self.leaf_state = state; - self.header = Some(header); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.cache = cache; - } - - fn update_db_storage( - &mut self, - _update: >>::Transaction, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn set_genesis_state(&mut self, input: Storage, commit: bool) -> ClientResult { - check_genesis_storage(&input)?; - - // changes trie configuration - let changes_trie_config = input - .top - .iter() - .find(|(k, _)| &k[..] 
== well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| { - Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis") - }); - self.changes_trie_config_update = Some(changes_trie_config); - - // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, _> = HashMap::new(); - storage.insert(None, input.top); - - // create a list of children keys to re-compute roots for - let child_delta = input - .children_default - .iter() - .map(|(_storage_key, storage_child)| (&storage_child.child_info, std::iter::empty())); - - // make sure to persist the child storage - for (_child_key, storage_child) in input.children_default.clone() { - storage.insert(Some(storage_child.child_info), storage_child.data); - } - - let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); - if commit { - self.storage_update = Some(storage_update); - } - - Ok(storage_root) - } - - fn reset_storage(&mut self, _input: Storage) -> ClientResult { - Err(ClientError::NotAvailableOnLightClient) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where - I: IntoIterator, Option>)>, - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn mark_finalized( - &mut self, - block: BlockId, - _justifications: Option, - ) -> ClientResult<()> { - self.finalized_blocks.push(block); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - self.set_head = Some(block); - Ok(()) - } - - fn update_transaction_index( - &mut self, - _index: Vec, - ) -> sp_blockchain::Result<()> { - // noop for the light client - Ok(()) - } -} - -impl std::fmt::Debug for GenesisOrUnavailableState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.fmt(f), - GenesisOrUnavailableState::Unavailable => write!(f, "Unavailable"), - } - } -} - -impl StateBackend for GenesisOrUnavailableState -where - H::Out: Ord + codec::Codec, -{ - type Error = ClientError; - type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn child_storage(&self, child_info: &ChildInfo, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_storage_key(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_child_storage_key( - &self, - child_info: &ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - 
Ok(state.next_child_storage_key(child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_key_values_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn apply_to_key_values_while, Vec) -> bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - start_at: Option<&[u8]>, - action: A, - allow_missing: bool, - ) -> ClientResult { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok(state - .apply_to_key_values_while(child_info, prefix, start_at, action, allow_missing) - .expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn apply_to_keys_while bool>( - &self, - child_info: Option<&ChildInfo>, - prefix: Option<&[u8]>, - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.apply_to_keys_while(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_child_keys_with_prefix( - &self, - child_info: &ChildInfo, - prefix: &[u8], - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn storage_root<'a>( - &self, - delta: impl Iterator)>, - ) -> (H::Out, Self::Transaction) - where - H::Out: Ord, - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), - GenesisOrUnavailableState::Unavailable => Default::default(), - } - } - - fn child_storage_root<'a>( - &self, - child_info: &ChildInfo, - delta: impl Iterator)>, - ) -> (H::Out, bool, Self::Transaction) - where - H::Out: Ord, - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(child_info, delta); - (root, is_equal, Default::default()) - }, - GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), - } - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn register_overlay_stats(&self, _stats: &sp_state_machine::StateMachineStats) {} - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - sp_state_machine::UsageInfo::empty() - } - - fn as_trie_backend(&self) -> Option<&TrieBackend> { - match self { - GenesisOrUnavailableState::Genesis(ref state) => state.as_trie_backend(), - GenesisOrUnavailableState::Unavailable => None, - } - } -} diff --git a/client/light/src/blockchain.rs b/client/light/src/blockchain.rs deleted file mode 100644 index 24d9ef4fd4b95..0000000000000 --- a/client/light/src/blockchain.rs +++ /dev/null @@ -1,219 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client blockchain backend. Only stores headers and justifications of recent -//! blocks. CHT roots are stored for headers of ancient blocks. - -use std::sync::Arc; - -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}, - Justifications, -}; - -use sc_client_api::light::RemoteHeaderRequest; -pub use sc_client_api::{ - backend::{AuxStore, NewBlockState, ProvideChtRoots}, - blockchain::{ - well_known_cache_keys, Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - }, - cht, - light::{LocalOrRemote, RemoteBlockchain, Storage}, -}; -use sp_blockchain::{ - CachedHeaderMetadata, Error as ClientError, HeaderMetadata, Result as ClientResult, -}; - -/// Light client blockchain. -pub struct Blockchain { - storage: S, -} - -impl Blockchain { - /// Create new light blockchain backed with given storage. - pub fn new(storage: S) -> Self { - Self { storage } - } - - /// Get storage reference. - pub fn storage(&self) -> &S { - &self.storage - } -} - -impl BlockchainHeaderBackend for Blockchain -where - Block: BlockT, - S: Storage, -{ - fn header(&self, id: BlockId) -> ClientResult> { - match RemoteBlockchain::header(self, id)? 
{ - LocalOrRemote::Local(header) => Ok(Some(header)), - LocalOrRemote::Remote(_) => Err(ClientError::NotAvailableOnLightClient), - LocalOrRemote::Unknown => Ok(None), - } - } - - fn info(&self) -> BlockchainInfo { - self.storage.info() - } - - fn status(&self, id: BlockId) -> ClientResult { - self.storage.status(id) - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - self.storage.number(hash) - } - - fn hash( - &self, - number: <::Header as HeaderT>::Number, - ) -> ClientResult> { - self.storage.hash(number) - } -} - -impl HeaderMetadata for Blockchain -where - Block: BlockT, - S: Storage, -{ - type Error = ClientError; - - fn header_metadata( - &self, - hash: Block::Hash, - ) -> Result, Self::Error> { - self.storage.header_metadata(hash) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.storage.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.storage.remove_header_metadata(hash) - } -} - -impl BlockchainBackend for Blockchain -where - Block: BlockT, - S: Storage, -{ - fn body(&self, _id: BlockId) -> ClientResult>> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn justifications(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn last_finalized(&self) -> ClientResult { - self.storage.last_finalized() - } - - fn cache(&self) -> Option>> { - self.storage.cache() - } - - fn leaves(&self) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn indexed_transaction(&self, _hash: &Block::Hash) -> ClientResult>> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn block_indexed_body( - &self, - _id: BlockId, - ) -> sp_blockchain::Result>>> { - Err(ClientError::NotAvailableOnLightClient) - } -} - -impl, Block: BlockT> ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - self.storage.cache() - } -} - -impl RemoteBlockchain for Blockchain -where - S: Storage, -{ - fn header( - &self, - id: BlockId, - ) -> ClientResult>> { - // first, try to read header from local storage - if let Some(local_header) = self.storage.header(id)? { - return Ok(LocalOrRemote::Local(local_header)) - } - - // we need to know block number to check if it's a part of CHT - let number = match id { - BlockId::Hash(hash) => match self.storage.number(hash)? { - Some(number) => number, - None => return Ok(LocalOrRemote::Unknown), - }, - BlockId::Number(number) => number, - }; - - // if the header is genesis (never pruned), non-canonical, or from future => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown - { - return Ok(LocalOrRemote::Unknown) - } - - Ok(LocalOrRemote::Remote(RemoteHeaderRequest { - cht_root: match self.storage.header_cht_root(cht::size(), number)? 
{ - Some(cht_root) => cht_root, - None => return Ok(LocalOrRemote::Unknown), - }, - block: number, - retry_count: None, - })) - } -} - -impl, Block: BlockT> ProvideChtRoots for Blockchain { - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage().header_cht_root(cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage().changes_trie_cht_root(cht_size, block) - } -} diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs deleted file mode 100644 index a0776131e406d..0000000000000 --- a/client/light/src/call_executor.rs +++ /dev/null @@ -1,206 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Methods that light client could use to execute runtime calls. - -use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; - -use codec::{Decode, Encode}; -use hash_db::Hasher; -use sp_core::{ - convert_hash, - traits::{CodeExecutor, SpawnNamed}, - NativeOrEncoded, -}; -use sp_externalities::Extensions; -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, -}; -use sp_state_machine::{ - create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionManager, - ExecutionStrategy, OverlayedChanges, StorageProof, -}; - -use sp_api::{ProofRecorder, StorageTransactionCache}; - -use sp_blockchain::{Error as ClientError, Result as ClientResult}; - -use sc_client_api::{ - backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, -}; -use sc_executor::RuntimeVersion; - -/// Call executor that is able to execute calls only on genesis state. -/// -/// Trying to execute call on non-genesis state leads to error. -pub struct GenesisCallExecutor { - backend: Arc, - local: L, -} - -impl GenesisCallExecutor { - /// Create new genesis call executor. 
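// A minimal sketch of the dispatch rule this executor applies: calls run
// against the local executor only when the requested state is locally
// available (i.e. at genesis), and error out otherwise. The `SimpleExecutor`
// trait and all names here are illustrative, not the real `CallExecutor` API.
trait SimpleExecutor {
    fn call(&self, method: &str, data: &[u8]) -> Result<Vec<u8>, String>;
}

struct GenesisOnly<E> {
    local: E,
    genesis_state_available: bool,
}

impl<E: SimpleExecutor> SimpleExecutor for GenesisOnly<E> {
    fn call(&self, method: &str, data: &[u8]) -> Result<Vec<u8>, String> {
        if self.genesis_state_available {
            // Genesis state is held locally, so the call can run as usual.
            self.local.call(method, data)
        } else {
            // All other states live on full nodes; fail instead of guessing.
            Err("state not available on light client".into())
        }
    }
}

fn main() {
    struct Echo;
    impl SimpleExecutor for Echo {
        fn call(&self, _m: &str, d: &[u8]) -> Result<Vec<u8>, String> {
            Ok(d.to_vec())
        }
    }
    let exec = GenesisOnly { local: Echo, genesis_state_available: true };
    assert_eq!(exec.call("test", b"xyz").unwrap(), b"xyz");
}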
- pub fn new(backend: Arc, local: L) -> Self { - Self { backend, local } - } -} - -impl Clone for GenesisCallExecutor { - fn clone(&self) -> Self { - GenesisCallExecutor { backend: self.backend.clone(), local: self.local.clone() } - } -} - -impl CallExecutor for GenesisCallExecutor -where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, -{ - type Error = ClientError; - - type Backend = B; - - fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> ClientResult> { - if self.backend.is_local_state_available(id) { - self.local.call(id, method, call_data, strategy, extensions) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn contextual_call< - EM: Fn( - Result, Self::Error>, - Result, Self::Error>, - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - _: Option<&RefCell>>, - _manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> ClientResult> - where - ExecutionManager: Clone, - { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - if self.backend.is_local_state_available(at) { - CallExecutor::contextual_call::< - fn( - Result, Local::Error>, - Result, Local::Error>, - ) -> Result, Local::Error>, - _, - NC, - >( - &self.local, - at, - method, - call_data, - changes, - None, - ExecutionManager::NativeWhenPossible, - native_call, - recorder, - extensions, - ) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn prove_execution( - &self, - at: &BlockId, - method: &str, - call_data: &[u8], - ) -> ClientResult<(Vec, StorageProof)> { - if self.backend.is_local_state_available(at) { - self.local.prove_execution(at, method, call_data) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - if self.backend.is_local_state_available(id) { - self.local.runtime_version(id) - } else { - Err(ClientError::NotAvailableOnLightClient) - } - } -} - -/// Check remote contextual execution proof using given backend. -/// -/// Proof should include the method execution proof. -pub fn check_execution_proof( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest
<Header::Hash>
, - remote_proof: StorageProof, -) -> ClientResult> -where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, -{ - let local_state_root = request.header.state_root(); - let root: H::Out = convert_hash(&local_state_root); - - // prepare execution environment - let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; - - // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code - .runtime_code() - .map_err(|_e| ClientError::RuntimeCodeMissing)?; - - // execute method - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle, - &request.method, - &request.call_data, - &runtime_code, - ) - .map_err(Into::into) -} diff --git a/client/light/src/lib.rs b/client/light/src/lib.rs deleted file mode 100644 index 4b084cda0f8b1..0000000000000 --- a/client/light/src/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Light client components. - -use sp_runtime::traits::{Block as BlockT, HashFor}; -use std::sync::Arc; - -pub mod backend; -pub mod blockchain; -pub mod call_executor; - -pub use backend::*; -pub use blockchain::*; -pub use call_executor::*; - -use sc_client_api::light::Storage as BlockchainStorage; - -/// Create an instance of light client backend. -pub fn new_light_backend(blockchain: Arc>) -> Arc>> -where - B: BlockT, - S: BlockchainStorage, -{ - Arc::new(Backend::new(blockchain)) -} diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 7b334175a2805..a11cec623328e 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -20,14 +20,13 @@ use crate::{ bitswap::Bitswap, config::ProtocolId, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - light_client_requests, peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, - request_responses, DhtEvent, ObservedRole, + peer_info, request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; use codec::Encode; -use futures::{channel::oneshot, stream::StreamExt}; +use futures::channel::oneshot; use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, identify::IdentifyInfo, @@ -76,10 +75,6 @@ pub struct Behaviour { #[behaviour(ignore)] events: VecDeque>, - /// Light client request handling. - #[behaviour(ignore)] - light_client_request_sender: light_client_requests::sender::LightClientRequestSender, - /// Protocol name used to send out block requests via /// [`request_responses::RequestResponsesBehaviour`]. 
#[behaviour(ignore)] @@ -198,7 +193,6 @@ impl Behaviour { substrate: Protocol, user_agent: String, local_public_key: PublicKey, - light_client_request_sender: light_client_requests::sender::LightClientRequestSender, disco_config: DiscoveryConfig, block_request_protocol_config: request_responses::ProtocolConfig, state_request_protocol_config: request_responses::ProtocolConfig, @@ -233,7 +227,6 @@ impl Behaviour { request_response_protocols.into_iter(), peerset, )?, - light_client_request_sender, events: VecDeque::new(), block_request_protocol_name, state_request_protocol_name, @@ -316,14 +309,6 @@ impl Behaviour { pub fn put_value(&mut self, key: record::Key, value: Vec) { self.discovery.put_value(key, value); } - - /// Issue a light client request. - pub fn light_client_request( - &mut self, - r: light_client_requests::sender::Request, - ) -> Result<(), light_client_requests::sender::SendRequestError> { - self.light_client_request_sender.request(r) - } } fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { @@ -436,15 +421,11 @@ impl NetworkBehaviourEventProcess> for Behavi CustomMessageOutcome::NotificationsReceived { remote, messages } => { self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, - CustomMessageOutcome::PeerNewBest(peer_id, number) => { - self.light_client_request_sender.update_best_block(&peer_id, number); - }, + CustomMessageOutcome::PeerNewBest(_peer_id, _number) => {}, CustomMessageOutcome::SyncConnected(peer_id) => { - self.light_client_request_sender.inject_connected(peer_id); self.events.push_back(BehaviourOut::SyncConnected(peer_id)) }, CustomMessageOutcome::SyncDisconnected(peer_id) => { - self.light_client_request_sender.inject_disconnected(peer_id); self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) }, CustomMessageOutcome::None => {}, @@ -534,23 +515,9 @@ impl NetworkBehaviourEventProcess for Behaviour { impl Behaviour { fn poll( &mut self, - cx: &mut Context, + _cx: &mut Context, _: &mut impl PollParameters, ) -> Poll>> { - use light_client_requests::sender::OutEvent; - while let Poll::Ready(Some(event)) = self.light_client_request_sender.poll_next_unpin(cx) { - match event { - OutEvent::SendRequest { target, request, pending_response, protocol_name } => - self.request_responses.send_request( - &target, - &protocol_name, - request, - pending_response, - IfDisconnected::ImmediateError, - ), - } - } - if let Some(event) = self.events.pop_front() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 76c806ccbf7b6..8ef52e46fd071 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -23,7 +23,6 @@ pub use crate::{ chain::Client, - on_demand_layer::{AlwaysBadChecker, OnDemand}, request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, @@ -83,11 +82,6 @@ pub struct Params { /// Client that contains the blockchain. pub chain: Arc>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. - /// If `Some`, the network worker will process these requests and answer them. - /// Normally used only for light clients. - pub on_demand: Option>>, - /// Pool of transactions. 
/// /// The network worker will fetch transactions from this object in order to propagate them on diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 2f81ddfa1fb13..cb16eb163ee46 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -247,7 +247,6 @@ mod behaviour; mod chain; mod discovery; -mod on_demand_layer; mod peer_info; mod protocol; mod request_responses; diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index e18b783f219be..58d5575cbfc74 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -20,8 +20,6 @@ /// For incoming light client requests. pub mod handler; -/// For outgoing light client requests. -pub mod sender; use crate::{config::ProtocolId, request_responses::ProtocolConfig}; @@ -48,268 +46,3 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::{config::ProtocolId, request_responses::IncomingRequest}; - - use assert_matches::assert_matches; - use futures::{ - channel::oneshot, - executor::{block_on, LocalPool}, - prelude::*, - task::Spawn, - }; - use libp2p::PeerId; - use sc_client_api::{ - light::{ - self, ChangesProof, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadRequest, - }, - FetchChecker, RemoteReadChildRequest, StorageProof, - }; - use sp_blockchain::Error as ClientError; - use sp_core::storage::ChildInfo; - use sp_runtime::{ - generic::Header, - traits::{BlakeTwo256, Block as BlockT, NumberFor}, - }; - use std::{collections::HashMap, sync::Arc}; - - pub struct DummyFetchChecker { - pub ok: bool, - pub _mark: std::marker::PhantomData, - } - - impl FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - header: Option, - _remote_proof: StorageProof, - ) -> Result { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - _: StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect()), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &RemoteCallRequest, - _: StorageProof, - ) -> Result, ClientError> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &RemoteChangesRequest, - _: ChangesProof, - ) -> Result, u32)>, ClientError> { - match self.ok { - true => Ok(vec![(100u32.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &RemoteBodyRequest, - body: Vec, - ) -> Result, ClientError> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - pub fn protocol_id() -> ProtocolId { - ProtocolId::from("test") - } - - pub fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { - let cfg = sc_peerset::SetConfig { - in_peers: 
128, - out_peers: 128, - bootnodes: Default::default(), - reserved_only: false, - reserved_nodes: Default::default(), - }; - sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { sets: vec![cfg] }) - } - - pub fn dummy_header() -> sp_test_primitives::Header { - sp_test_primitives::Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - type Block = - sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - - fn send_receive(request: sender::Request, pool: &LocalPool) { - let client = Arc::new(substrate_test_runtime_client::new()); - let (handler, protocol_config) = - handler::LightClientRequestHandler::new(&protocol_id(), client); - pool.spawner().spawn_obj(handler.run().boxed().into()).unwrap(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = sender::LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - sender.inject_connected(PeerId::random()); - - sender.request(request).unwrap(); - let sender::OutEvent::SendRequest { pending_response, request, .. } = - block_on(sender.next()).unwrap(); - let (tx, rx) = oneshot::channel(); - block_on(protocol_config.inbound_queue.unwrap().send(IncomingRequest { - peer: PeerId::random(), - payload: request, - pending_response: tx, - })) - .unwrap(); - pool.spawner() - .spawn_obj( - async move { - pending_response.send(Ok(rx.await.unwrap().result.unwrap())).unwrap(); - } - .boxed() - .into(), - ) - .unwrap(); - - pool.spawner() - .spawn_obj(sender.for_each(|_| future::ready(())).boxed().into()) - .unwrap(); - } - - #[test] - fn send_receive_call() { - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - - let mut pool = LocalPool::new(); - send_receive(sender::Request::Call { request, sender: chan.0 }, &pool); - assert_eq!(vec![42], pool.run_until(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_execution_proof` - } - - #[test] - fn send_receive_read() { - let chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Read { request, sender: chan.0 }, &pool); - assert_eq!( - Some(vec![42]), - pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() - ); - // ^--- from `DummyFetchChecker::check_read_proof` - } - - #[test] - fn send_receive_read_child() { - let chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::ReadChild { request, sender: chan.0 }, &pool); - assert_eq!( - Some(vec![42]), - pool.run_until(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap() - ); - // ^--- from `DummyFetchChecker::check_read_child_proof` - } - - #[test] - fn send_receive_header() { - sp_tracing::try_init_simple(); - let chan = oneshot::channel(); - let request = 
light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Header { request, sender: chan.0 }, &pool); - // The remote does not know block 1: - assert_matches!(pool.run_until(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); - } - - #[test] - fn send_receive_changes() { - let chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - let mut pool = LocalPool::new(); - send_receive(sender::Request::Changes { request, sender: chan.0 }, &pool); - assert_eq!(vec![(100, 2)], pool.run_until(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_changes_proof` - } -} diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs index 43504edddd73a..f1dd5f1f2cb74 100644 --- a/client/network/src/light_client_requests/handler.rs +++ b/client/network/src/light_client_requests/handler.rs @@ -32,17 +32,17 @@ use codec::{self, Decode, Encode}; use futures::{channel::mpsc, prelude::*}; use log::{debug, trace}; use prost::Message; -use sc_client_api::{light, StorageProof}; +use sc_client_api::StorageProof; use sc_peerset::ReputationChange; use sp_core::{ hexdisplay::HexDisplay, - storage::{ChildInfo, ChildType, PrefixedStorageKey, StorageKey}, + storage::{ChildInfo, ChildType, PrefixedStorageKey}, }; use sp_runtime::{ generic::BlockId, - traits::{Block, Zero}, + traits::Block, }; -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; const LOG_TARGET: &str = "light-client-request-handler"; @@ -137,12 +137,12 @@ impl LightClientRequestHandler { self.on_remote_call_request(&peer, r)?, Some(schema::v1::light::request::Request::RemoteReadRequest(r)) => self.on_remote_read_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteHeaderRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), Some(schema::v1::light::request::Request::RemoteReadChildRequest(r)) => self.on_remote_read_child_request(&peer, r)?, - Some(schema::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, r)?, + Some(schema::v1::light::request::Request::RemoteChangesRequest(_r)) => + return Err(HandleRequestError::BadRequest("Not supported.")), None => return Err(HandleRequestError::BadRequest("Remote request without request data.")), }; @@ -285,106 +285,6 @@ impl LightClientRequestHandler { Ok(schema::v1::light::Response { response: Some(response) }) } - - fn on_remote_header_request( - &mut self, - peer: &PeerId, - request: &schema::v1::light::RemoteHeaderRequest, - ) -> Result { - trace!("Remote header proof request from {} ({:?}).", peer, request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - let (header, proof) = match self.client.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), - Err(error) => { - trace!( - "Remote header proof request from {} ({:?}) failed 
with: {}.", - peer, - request.block, - error - ); - (Default::default(), StorageProof::empty()) - }, - }; - - let response = { - let r = schema::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; - schema::v1::light::response::Response::RemoteHeaderResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } - - fn on_remote_changes_request( - &mut self, - peer: &PeerId, - request: &schema::v1::light::RemoteChangesRequest, - ) -> Result { - trace!( - "Remote changes proof request from {} for key {} ({:?}..{:?}).", - peer, - if !request.storage_key.is_empty() { - format!( - "{} : {}", - HexDisplay::from(&request.storage_key), - HexDisplay::from(&request.key) - ) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last, - ); - - let first = Decode::decode(&mut request.first.as_ref())?; - let last = Decode::decode(&mut request.last.as_ref())?; - let min = Decode::decode(&mut request.min.as_ref())?; - let max = Decode::decode(&mut request.max.as_ref())?; - let key = StorageKey(request.key.clone()); - let storage_key = if request.storage_key.is_empty() { - None - } else { - Some(PrefixedStorageKey::new_ref(&request.storage_key)) - }; - - let proof = - match self.client.key_changes_proof(first, last, min, max, storage_key, &key) { - Ok(proof) => proof, - Err(error) => { - trace!( - "Remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}.", - peer, - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&key.0)), - request.first, - request.last, - error, - ); - - light::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - }, - }; - - let response = { - let r = schema::v1::light::RemoteChangesResponse { - max: proof.max_block.encode(), - proof: proof.proof, - roots: proof - .roots - .into_iter() - .map(|(k, v)| schema::v1::light::Pair { fst: k.encode(), snd: v.encode() }) - .collect(), - roots_proof: proof.roots_proof.encode(), - }; - schema::v1::light::response::Response::RemoteChangesResponse(r) - }; - - Ok(schema::v1::light::Response { response: Some(response) }) - } } #[derive(derive_more::Display, derive_more::From)] diff --git a/client/network/src/light_client_requests/sender.rs b/client/network/src/light_client_requests/sender.rs deleted file mode 100644 index 284db827594b4..0000000000000 --- a/client/network/src/light_client_requests/sender.rs +++ /dev/null @@ -1,1294 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! Helper for outgoing light client requests. -//! -//! Call [`LightClientRequestSender::request`](sender::LightClientRequestSender::request) -//! to send out light client requests. It will: -//! -//! 1. Build the request. -//! -//! 2. 
Forward the request to [`crate::request_responses::RequestResponsesBehaviour`] via -//! [`OutEvent::SendRequest`](sender::OutEvent::SendRequest). -//! -//! 3. Wait for the response and forward the response via the [`futures::channel::oneshot::Sender`] -//! provided earlier with [`LightClientRequestSender::request`](sender::LightClientRequestSender:: -//! request). - -use crate::{ - config::ProtocolId, - protocol::message::BlockAttributes, - request_responses::{OutboundFailure, RequestFailure}, - schema, PeerId, -}; -use codec::{self, Decode, Encode}; -use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use prost::Message; -use sc_client_api::light::{self, RemoteBodyRequest}; -use sc_peerset::ReputationChange; -use sp_blockchain::Error as ClientError; -use sp_runtime::traits::{Block, Header, NumberFor}; -use std::{ - collections::{BTreeMap, HashMap, VecDeque}, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -mod rep { - use super::*; - - /// Reputation change for a peer when a request timed out. - pub const TIMEOUT: ReputationChange = - ReputationChange::new(-(1 << 8), "light client request timeout"); - /// Reputation change for a peer when a request is refused. - pub const REFUSED: ReputationChange = - ReputationChange::new(-(1 << 8), "light client request refused"); -} - -/// Configuration options for [`LightClientRequestSender`]. -#[derive(Debug, Clone)] -struct Config { - max_pending_requests: usize, - light_protocol: String, - block_protocol: String, -} - -impl Config { - /// Create a new [`LightClientRequestSender`] configuration. - pub fn new(id: &ProtocolId) -> Self { - Self { - max_pending_requests: 128, - light_protocol: super::generate_protocol_name(id), - block_protocol: crate::block_request_handler::generate_protocol_name(id), - } - } -} - -/// State machine helping to send out light client requests. -pub struct LightClientRequestSender { - /// This behaviour's configuration. - config: Config, - /// Verifies that received responses are correct. - checker: Arc>, - /// Peer information (addresses, their best block, etc.) - peers: HashMap>, - /// Pending (local) requests. - pending_requests: VecDeque>, - /// Requests on their way to remote peers. - sent_requests: FuturesUnordered< - BoxFuture< - 'static, - (SentRequest, Result, RequestFailure>, oneshot::Canceled>), - >, - >, - /// Handle to use for reporting misbehaviour of peers. - peerset: sc_peerset::PeersetHandle, -} - -/// Augments a pending light client request with metadata. -#[derive(Debug)] -struct PendingRequest { - /// Remaining attempts. - attempts_left: usize, - /// The actual request. - request: Request, -} - -impl PendingRequest { - fn new(req: Request) -> Self { - Self { - // Number of retries + one for the initial attempt. - attempts_left: req.retries() + 1, - request: req, - } - } - - fn into_sent(self, peer_id: PeerId) -> SentRequest { - SentRequest { attempts_left: self.attempts_left, request: self.request, peer: peer_id } - } -} - -/// Augments a light client request with metadata that is currently being send to a remote. -#[derive(Debug)] -struct SentRequest { - /// Remaining attempts. - attempts_left: usize, - /// The actual request. - request: Request, - /// The peer that the request is send to. 
- peer: PeerId, -} - -impl SentRequest { - fn into_pending(self) -> PendingRequest { - PendingRequest { attempts_left: self.attempts_left, request: self.request } - } -} - -impl Unpin for LightClientRequestSender {} - -impl LightClientRequestSender -where - B: Block, -{ - /// Construct a new light client handler. - pub fn new( - id: &ProtocolId, - checker: Arc>, - peerset: sc_peerset::PeersetHandle, - ) -> Self { - Self { - config: Config::new(id), - checker, - peers: Default::default(), - pending_requests: Default::default(), - sent_requests: Default::default(), - peerset, - } - } - - /// We rely on external information about peers best blocks as we lack the - /// means to determine it ourselves. - pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { - if let Some(info) = self.peers.get_mut(peer) { - log::trace!("new best block for {:?}: {:?}", peer, num); - info.best_block = Some(num) - } - } - - /// Issue a new light client request. - pub fn request(&mut self, req: Request) -> Result<(), SendRequestError> { - if self.pending_requests.len() >= self.config.max_pending_requests { - return Err(SendRequestError::TooManyRequests) - } - self.pending_requests.push_back(PendingRequest::new(req)); - Ok(()) - } - - /// Remove the given peer. - /// - /// In-flight requests to the given peer might fail and be retried. See - /// [`::poll_next`]. - fn remove_peer(&mut self, peer: PeerId) { - self.peers.remove(&peer); - } - - /// Process a local request's response from remote. - /// - /// If successful, this will give us the actual, checked data we should be - /// sending back to the client, otherwise an error. - fn on_response( - &mut self, - peer: PeerId, - request: &Request, - response: Response, - ) -> Result, Error> { - log::trace!("response from {}", peer); - match response { - Response::Light(r) => self.on_response_light(request, r), - Response::Block(r) => self.on_response_block(request, r), - } - } - - fn on_response_light( - &mut self, - request: &Request, - response: schema::v1::light::Response, - ) -> Result, Error> { - use schema::v1::light::response::Response; - match response.response { - Some(Response::RemoteCallResponse(response)) => { - if let Request::Call { request, .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_execution_proof(request, proof)?; - Ok(Reply::VecU8(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - Some(Response::RemoteReadResponse(response)) => match request { - Request::Read { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - }, - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - }, - _ => Err(Error::UnexpectedResponse), - }, - Some(Response::RemoteChangesResponse(response)) => { - if let Request::Changes { request, .. 
} = request { - let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; - let roots = { - let mut r = BTreeMap::new(); - for pair in response.roots { - let k = Decode::decode(&mut pair.fst.as_ref())?; - let v = Decode::decode(&mut pair.snd.as_ref())?; - r.insert(k, v); - } - r - }; - let reply = self.checker.check_changes_proof( - &request, - light::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - }, - )?; - Ok(Reply::VecNumberU32(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - Some(Response::RemoteHeaderResponse(response)) => { - if let Request::Header { request, .. } = request { - let header = if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) - }; - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_header_proof(&request, header, proof)?; - Ok(Reply::Header(reply)) - } else { - Err(Error::UnexpectedResponse) - } - }, - None => Err(Error::UnexpectedResponse), - } - } - - fn on_response_block( - &mut self, - request: &Request, - response: schema::v1::BlockResponse, - ) -> Result, Error> { - let request = if let Request::Body { request, .. } = &request { - request - } else { - return Err(Error::UnexpectedResponse) - }; - - let body: Vec<_> = match response.blocks.into_iter().next() { - Some(b) => b.body, - None => return Err(Error::UnexpectedResponse), - }; - - let body = body - .into_iter() - .map(|extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) - .collect::>()?; - - let body = self.checker.check_body_proof(&request, body)?; - Ok(Reply::Extrinsics(body)) - } - - /// Signal that the node is connected to the given peer. - pub fn inject_connected(&mut self, peer: PeerId) { - let prev_entry = self.peers.insert(peer, Default::default()); - debug_assert!( - prev_entry.is_none(), - "Expect `inject_connected` to be called for disconnected peer.", - ); - } - - /// Signal that the node disconnected from the given peer. - pub fn inject_disconnected(&mut self, peer: PeerId) { - self.remove_peer(peer) - } -} - -impl Stream for LightClientRequestSender { - type Item = OutEvent; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // If we have received responses to previously sent requests, check them and pass them on. - while let Poll::Ready(Some((sent_request, request_result))) = - self.sent_requests.poll_next_unpin(cx) - { - if let Some(info) = self.peers.get_mut(&sent_request.peer) { - if info.status != PeerStatus::Busy { - // If we get here, something is wrong with our internal handling of peer status - // information. At any time, a single peer processes at most one request from - // us. A malicious peer should not be able to get us here. It is our own fault - // and must be fixed! - panic!("unexpected peer status {:?} for {}", info.status, sent_request.peer); - } - - info.status = PeerStatus::Idle; // Make peer available again. 
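The status flag set here enforces the sender's core invariant: each peer processes at most one of our requests at a time, so an incoming response always maps to exactly one outstanding request. A minimal, self-contained sketch of that bookkeeping (illustrative only, with a made-up `Peers` map keyed by an integer peer id; not code from this patch):

use std::collections::HashMap;

#[derive(Debug, Clone, PartialEq, Eq)]
enum PeerStatus {
    /// The peer has no request from us in flight.
    Idle,
    /// The peer is processing our single outstanding request.
    Busy,
}

struct Peers(HashMap<u64, PeerStatus>);

impl Peers {
    /// Mark a peer busy before dispatching; refuse if it already has an
    /// outstanding request (the real sender treats that case as a bug).
    fn try_dispatch(&mut self, peer: u64) -> bool {
        match self.0.get_mut(&peer) {
            Some(status) if *status == PeerStatus::Idle => {
                *status = PeerStatus::Busy;
                true
            },
            _ => false,
        }
    }

    /// Once the response or failure arrives, the peer is available again.
    fn on_response(&mut self, peer: u64) {
        if let Some(status) = self.0.get_mut(&peer) {
            *status = PeerStatus::Idle;
        }
    }
}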
- } - - let request_result = match request_result { - Ok(r) => r, - Err(oneshot::Canceled) => { - log::debug!("Oneshot for request to peer {} was canceled.", sent_request.peer); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("no response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - continue - }, - }; - - let decoded_request_result = request_result.map(|response| { - if sent_request.request.is_block_request() { - schema::v1::BlockResponse::decode(&response[..]).map(|r| Response::Block(r)) - } else { - schema::v1::light::Response::decode(&response[..]).map(|r| Response::Light(r)) - } - }); - - let response = match decoded_request_result { - Ok(Ok(response)) => response, - Ok(Err(e)) => { - log::debug!( - "Failed to decode response from peer {}: {:?}.", - sent_request.peer, - e - ); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("invalid response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - continue - }, - Err(e) => { - log::debug!("Request to peer {} failed with {:?}.", sent_request.peer, e); - - match e { - RequestFailure::NotConnected => { - self.remove_peer(sent_request.peer); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::UnknownProtocol => { - debug_assert!( - false, - "Light client and block request protocol should be known when \ - sending requests.", - ); - }, - RequestFailure::Refused => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, rep::REFUSED); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Obsolete => { - debug_assert!( - false, - "Can not receive `RequestFailure::Obsolete` after dropping the \ - response receiver.", - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::Timeout) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer(sent_request.peer, rep::TIMEOUT); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::UnsupportedProtocols) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal( - "peer does not support light client or block request protocol", - ), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::DialFailure) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("failed to dial peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - RequestFailure::Network(OutboundFailure::ConnectionClosed) => { - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("connection to peer closed"), - ); - self.pending_requests.push_back(sent_request.into_pending()); - }, - } - - continue - }, - }; - - match self.on_response(sent_request.peer, &sent_request.request, response) { - Ok(reply) => sent_request.request.return_reply(Ok(reply)), - Err(Error::UnexpectedResponse) => { - log::debug!("Unexpected response from peer {}.", sent_request.peer); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("unexpected response from peer"), - ); - 
self.pending_requests.push_back(sent_request.into_pending()); - }, - Err(other) => { - log::debug!( - "error handling response from peer {}: {}", - sent_request.peer, - other - ); - self.remove_peer(sent_request.peer); - self.peerset.report_peer( - sent_request.peer, - ReputationChange::new_fatal("invalid response from peer"), - ); - self.pending_requests.push_back(sent_request.into_pending()) - }, - } - } - - // If we have a pending request to send, try to find an available peer and send it. - while let Some(mut pending_request) = self.pending_requests.pop_front() { - if pending_request.attempts_left == 0 { - pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); - continue - } - - let protocol = if pending_request.request.is_block_request() { - self.config.block_protocol.clone() - } else { - self.config.light_protocol.clone() - }; - - // Out of all idle peers, find one who's best block is high enough, choose any idle peer - // if none exists. - let mut peer = None; - for (peer_id, peer_info) in self.peers.iter_mut() { - if peer_info.status == PeerStatus::Idle { - match peer_info.best_block { - Some(n) if n >= pending_request.request.required_block() => { - peer = Some((*peer_id, peer_info)); - break - }, - _ => peer = Some((*peer_id, peer_info)), - } - } - } - - // Break in case there is no idle peer. - let (peer_id, peer_info) = match peer { - Some((peer_id, peer_info)) => (peer_id, peer_info), - None => { - self.pending_requests.push_front(pending_request); - log::debug!("No peer available to send request to."); - - break - }, - }; - - let request_bytes = match pending_request.request.serialize_request() { - Ok(bytes) => bytes, - Err(error) => { - log::debug!("failed to serialize request: {}", error); - pending_request.request.return_reply(Err(ClientError::RemoteFetchFailed)); - continue - }, - }; - - let (tx, rx) = oneshot::channel(); - - peer_info.status = PeerStatus::Busy; - - pending_request.attempts_left -= 1; - - self.sent_requests - .push(async move { (pending_request.into_sent(peer_id), rx.await) }.boxed()); - - return Poll::Ready(Some(OutEvent::SendRequest { - target: peer_id, - request: request_bytes, - pending_response: tx, - protocol_name: protocol, - })) - } - - Poll::Pending - } -} - -/// Events returned by [`LightClientRequestSender`]. -#[derive(Debug)] -pub enum OutEvent { - /// Emit a request to be send out on the network e.g. via [`crate::request_responses`]. - SendRequest { - /// The remote peer to send the request to. - target: PeerId, - /// The encoded request. - request: Vec, - /// The [`oneshot::Sender`] channel to pass the response to. - pending_response: oneshot::Sender, RequestFailure>>, - /// The name of the protocol to use to send the request. - protocol_name: String, - }, -} - -/// Incoming response from remote. -#[derive(Debug, Clone)] -pub enum Response { - /// Incoming light response from remote. - Light(schema::v1::light::Response), - /// Incoming block response from remote. - Block(schema::v1::BlockResponse), -} - -/// Error returned by [`LightClientRequestSender::request`]. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum SendRequestError { - /// There are currently too many pending request. - #[display(fmt = "too many pending requests")] - TooManyRequests, -} - -/// Error type to propagate errors internally. -#[derive(Debug, derive_more::Display, derive_more::From)] -enum Error { - /// The response type does not correspond to the issued request. 
- #[display(fmt = "unexpected response")] - UnexpectedResponse, - /// Encoding or decoding of some data failed. - #[display(fmt = "codec error: {}", _0)] - Codec(codec::Error), - /// The chain client errored. - #[display(fmt = "client error: {}", _0)] - Client(ClientError), -} - -/// The data to send back to the light client over the oneshot channel. -// It is unified here in order to be able to return it as a function -// result instead of delivering it to the client as a side effect of -// response processing. -#[derive(Debug)] -enum Reply { - VecU8(Vec), - VecNumberU32(Vec<(::Number, u32)>), - MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header), - Extrinsics(Vec), -} - -/// Information we have about some peer. -#[derive(Debug)] -struct PeerInfo { - best_block: Option>, - status: PeerStatus, -} - -impl Default for PeerInfo { - fn default() -> Self { - PeerInfo { best_block: None, status: PeerStatus::Idle } - } -} - -/// A peer is either idle or busy processing a request from us. -#[derive(Debug, Clone, PartialEq, Eq)] -enum PeerStatus { - /// The peer is available. - Idle, - /// We wait for the peer to return us a response for the given request ID. - Busy, -} - -/// The possible light client requests we support. -/// -/// The associated `oneshot::Sender` will be used to convey the result of -/// their request back to them (cf. `Reply`). -// This is modeled after light_dispatch.rs's `RequestData` which is not -// used because we currently only support a subset of those. -#[derive(Debug)] -pub enum Request { - /// Remote body request. - Body { - /// Request. - request: RemoteBodyRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>>, - }, - /// Remote header request. - Header { - /// Request. - request: light::RemoteHeaderRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender>, - }, - /// Remote read request. - Read { - /// Request. - request: light::RemoteReadRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>>, - }, - /// Remote read child request. - ReadChild { - /// Request. - request: light::RemoteReadChildRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, Option>>, ClientError>>, - }, - /// Remote call request. - Call { - /// Request. - request: light::RemoteCallRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, ClientError>>, - }, - /// Remote changes request. - Changes { - /// Request. - request: light::RemoteChangesRequest, - /// [`oneshot::Sender`] to return response. - sender: oneshot::Sender, u32)>, ClientError>>, - }, -} - -impl Request { - fn is_block_request(&self) -> bool { - matches!(self, Request::Body { .. }) - } - - fn required_block(&self) -> NumberFor { - match self { - Request::Body { request, .. } => *request.header.number(), - Request::Header { request, .. } => request.block, - Request::Read { request, .. } => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), - Request::Call { request, .. } => *request.header.number(), - Request::Changes { request, .. } => request.max_block.0, - } - } - - fn retries(&self) -> usize { - let rc = match self { - Request::Body { request, .. } => request.retry_count, - Request::Header { request, .. } => request.retry_count, - Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, - Request::Call { request, .. 
} => request.retry_count, - Request::Changes { request, .. } => request.retry_count, - }; - rc.unwrap_or(0) - } - - fn serialize_request(&self) -> Result, prost::EncodeError> { - let request = match self { - Request::Body { request, .. } => { - let rq = schema::v1::BlockRequest { - fields: BlockAttributes::BODY.to_be_u32(), - from_block: Some(schema::v1::block_request::FromBlock::Hash( - request.header.hash().encode(), - )), - to_block: Default::default(), - direction: schema::v1::Direction::Ascending as i32, - max_blocks: 1, - support_multiple_justifications: true, - }; - - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - return Ok(buf) - }, - Request::Header { request, .. } => { - let r = schema::v1::light::RemoteHeaderRequest { block: request.block.encode() }; - schema::v1::light::request::Request::RemoteHeaderRequest(r) - }, - Request::Read { request, .. } => { - let r = schema::v1::light::RemoteReadRequest { - block: request.block.encode(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadRequest(r) - }, - Request::ReadChild { request, .. } => { - let r = schema::v1::light::RemoteReadChildRequest { - block: request.block.encode(), - storage_key: request.storage_key.clone().into_inner(), - keys: request.keys.clone(), - }; - schema::v1::light::request::Request::RemoteReadChildRequest(r) - }, - Request::Call { request, .. } => { - let r = schema::v1::light::RemoteCallRequest { - block: request.block.encode(), - method: request.method.clone(), - data: request.call_data.clone(), - }; - schema::v1::light::request::Request::RemoteCallRequest(r) - }, - Request::Changes { request, .. } => { - let r = schema::v1::light::RemoteChangesRequest { - first: request.first_block.1.encode(), - last: request.last_block.1.encode(), - min: request.tries_roots.1.encode(), - max: request.max_block.1.encode(), - storage_key: request - .storage_key - .clone() - .map(|s| s.into_inner()) - .unwrap_or_default(), - key: request.key.clone(), - }; - schema::v1::light::request::Request::RemoteChangesRequest(r) - }, - }; - - let rq = schema::v1::light::Request { request: Some(request) }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - Ok(buf) - } - - fn return_reply(self, result: Result, ClientError>) { - fn send(item: T, sender: oneshot::Sender) { - let _ = sender.send(item); // It is okay if the other end already hung up. 
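`serialize_request` above uses the usual prost encoding pattern: size the buffer with `encoded_len`, then `encode` into it so no reallocation happens mid-write. A generic sketch of just that step (illustrative only; `encode_request` is a made-up helper, not part of this patch):

use prost::Message;

fn encode_request<M: Message>(msg: &M) -> Result<Vec<u8>, prost::EncodeError> {
    // Reserve the exact encoded size up front; `encode` then fills the
    // buffer without further allocation.
    let mut buf = Vec::with_capacity(msg.encoded_len());
    msg.encode(&mut buf)?;
    Ok(buf)
}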
- } - match self { - Request::Body { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - }, - Request::Header { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for header request: {:?}, {:?}", reply, request) - }, - }, - Request::Read { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - }, - Request::ReadChild { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for read child request: {:?}, {:?}", reply, request) - }, - }, - Request::Call { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - }, - Request::Changes { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => { - log::error!("invalid reply for changes request: {:?}, {:?}", reply, request) - }, - }, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - light_client_requests::tests::{dummy_header, peerset, protocol_id, DummyFetchChecker}, - request_responses::OutboundFailure, - }; - - use assert_matches::assert_matches; - use futures::{channel::oneshot, executor::block_on, poll}; - use sc_client_api::StorageProof; - use sp_core::storage::ChildInfo; - use sp_runtime::{generic::Header, traits::BlakeTwo256}; - use std::{collections::HashSet, iter::FromIterator}; - - fn empty_proof() -> Vec { - StorageProof::empty().encode() - } - - #[test] - fn removes_peer_if_told() { - let peer = PeerId::random(); - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(DummyFetchChecker { ok: true, _mark: std::marker::PhantomData }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len()); - - sender.inject_disconnected(peer); - assert_eq!(0, sender.peers.len()); - } - - type Block = - sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - - #[test] - fn body_request_fields_encoded_properly() { - let (sender, _receiver) = oneshot::channel(); - let request = Request::::Body { - request: RemoteBodyRequest { header: dummy_header(), retry_count: None }, - sender, - }; - let serialized_request = request.serialize_request().unwrap(); - let deserialized_request = - schema::v1::BlockRequest::decode(&serialized_request[..]).unwrap(); - assert!(BlockAttributes::from_be_u32(deserialized_request.fields) - .unwrap() - .contains(BlockAttributes::BODY)); - } - - #[test] - fn disconnects_from_peer_if_request_times_out() { - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer0); - sender.inject_connected(peer1); - - assert_eq!( - 
HashSet::from_iter(&[peer0.clone(), peer1.clone()]), - sender.peers.keys().collect::>(), - "Expect knowledge of two peers." - ); - - assert!(sender.pending_requests.is_empty(), "Expect no pending request."); - assert!(sender.sent_requests.is_empty(), "Expect no sent request."); - - // Issue a request! - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); - - let OutEvent::SendRequest { target, pending_response, .. } = - block_on(sender.next()).unwrap(); - assert!(target == peer0 || target == peer1, "Expect request to originate from known peer."); - - // And we should have one busy peer. - assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = - sender.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); - idle.len() == 1 && - busy.len() == 1 && (idle[0].0 == &peer0 || busy[0].0 == &peer0) && - (idle[0].0 == &peer1 || busy[0].0 == &peer1) - }); - - assert_eq!(0, sender.pending_requests.len(), "Expect no pending request."); - assert_eq!(1, sender.sent_requests.len(), "Expect one request to be sent."); - - // Report first attempt as timed out. - pending_response - .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) - .unwrap(); - - // Expect a new request to be issued. - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - - assert_eq!(1, sender.peers.len(), "Expect peer to be removed."); - assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); - assert_eq!(1, sender.sent_requests.len(), "Expect new request to be issued."); - - // Report second attempt as timed out. - pending_response - .send(Err(RequestFailure::Network(OutboundFailure::Timeout))) - .unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt.", - ); - assert_matches!( - block_on(chan.1).unwrap(), - Err(ClientError::RemoteFetchFailed), - "Expect request failure to be reported.", - ); - assert_eq!(0, sender.peers.len(), "Expect no peer to be left"); - assert_eq!(0, sender.pending_requests.len(), "Expect no request to be pending."); - assert_eq!(0, sender.sent_requests.len(), "Expect no other request to be in progress."); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let peer = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: false, - // ^--- Making sure the response data check fails. - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len(), "Expect one pending request."); - assert_eq!(0, sender.sent_requests.len(), "Expect zero sent requests."); - - let OutEvent::SendRequest { pending_response, .. 
} = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - pending_response.send(Ok(response)).unwrap(); - - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert!(sender.peers.is_empty(), "Expect no peers to be left."); - assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); - assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let peer = PeerId::random(); - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - let response = { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - pending_response.send(Ok(response)).unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert!(sender.peers.is_empty(), "Expect no peers to be left."); - assert_eq!(1, sender.pending_requests.len(), "Expect request to be pending again."); - assert_eq!(0, sender.sent_requests.len(), "Expect no request to be sent."); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let peers = (0..4).map(|_| PeerId::random()).collect::>(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: false, - // ^--- Making sure the response data check fails. 
- _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - for peer in &peers { - sender.inject_connected(*peer); - } - assert_eq!(4, sender.peers.len(), "Expect four peers."); - - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(3), // Attempt up to three retries. - }; - sender.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let mut pending_response = match block_on(sender.next()).unwrap() { - OutEvent::SendRequest { pending_response, .. } => Some(pending_response), - }; - assert_eq!(0, sender.pending_requests.len(), "Expect zero pending requests."); - assert_eq!(1, sender.sent_requests.len(), "Expect one sent request."); - - for (i, _peer) in peers.iter().enumerate() { - // Construct an invalid response - let response = { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - let response = schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - }; - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - pending_response.take().unwrap().send(Ok(response)).unwrap(); - - if i < 3 { - pending_response = match block_on(sender.next()).unwrap() { - OutEvent::SendRequest { pending_response, .. } => Some(pending_response), - }; - assert_matches!(chan.1.try_recv(), Ok(None)) - } else { - // Last peer and last attempt. - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) - } - } - } - - fn issue_request(request: Request) { - let peer = PeerId::random(); - - let (_peer_set, peer_set_handle) = peerset(); - let mut sender = LightClientRequestSender::::new( - &protocol_id(), - Arc::new(crate::light_client_requests::tests::DummyFetchChecker { - ok: true, - _mark: std::marker::PhantomData, - }), - peer_set_handle, - ); - - sender.inject_connected(peer); - assert_eq!(1, sender.peers.len(), "Expect one peer."); - - let response = match request { - Request::Body { .. } => unimplemented!(), - Request::Header { .. } => { - let r = schema::v1::light::RemoteHeaderResponse { - header: dummy_header().encode(), - proof: empty_proof(), - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteHeaderResponse(r)), - } - }, - Request::Read { .. } => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }, - Request::ReadChild { .. } => { - let r = schema::v1::light::RemoteReadResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteReadResponse(r)), - } - }, - Request::Call { .. } => { - let r = schema::v1::light::RemoteCallResponse { proof: empty_proof() }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteCallResponse(r)), - } - }, - Request::Changes { .. 
} => { - let r = schema::v1::light::RemoteChangesResponse { - max: std::iter::repeat(1).take(32).collect(), - proof: Vec::new(), - roots: Vec::new(), - roots_proof: empty_proof(), - }; - schema::v1::light::Response { - response: Some(schema::v1::light::response::Response::RemoteChangesResponse(r)), - } - }, - }; - - let response = { - let mut data = Vec::new(); - response.encode(&mut data).unwrap(); - data - }; - - sender.request(request).unwrap(); - - assert_eq!(1, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()); - let OutEvent::SendRequest { pending_response, .. } = block_on(sender.next()).unwrap(); - assert_eq!(0, sender.pending_requests.len()); - assert_eq!(1, sender.sent_requests.len()); - - pending_response.send(Ok(response)).unwrap(); - assert_matches!( - block_on(async { poll!(sender.next()) }), - Poll::Pending, - "Expect sender to not issue another attempt, given that there is no peer left.", - ); - - assert_eq!(0, sender.pending_requests.len()); - assert_eq!(0, sender.sent_requests.len()) - } - - #[test] - fn receives_remote_call_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - issue_request(Request::Call { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::Read { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_child_response() { - let mut chan = oneshot::channel(); - let child_info = ChildInfo::new_default(&b":child_storage:default:sub"[..]); - let request = light::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: child_info.prefixed_storage_key(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::ReadChild { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_header_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - issue_request(Request::Header { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_changes_response() { - let mut chan = oneshot::channel(); - let request = light::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - issue_request(Request::Changes { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } -} diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs deleted file mode 100644 index eaeb0bee98f2c..0000000000000 --- a/client/network/src/on_demand_layer.rs +++ /dev/null @@ -1,241 +0,0 @@ -// This file is part of 
Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! On-demand requests service. - -use crate::light_client_requests; - -use futures::{channel::oneshot, prelude::*}; -use parking_lot::Mutex; -use sc_client_api::{ - ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, - RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, - StorageProof, -}; -use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_blockchain::Error as ClientError; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use std::{ - collections::HashMap, - pin::Pin, - sync::Arc, - task::{Context, Poll}, -}; - -/// Implements the `Fetcher` trait of the client. Makes it possible for the light client to perform -/// network requests for some state. -/// -/// This implementation stores all the requests in a queue. The network, in parallel, is then -/// responsible for pulling elements out of that queue and fulfilling them. -pub struct OnDemand { - /// Objects that checks whether what has been retrieved is correct. - checker: Arc>, - - /// Queue of requests. Set to `Some` at initialization, then extracted by the network. - /// - /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method - /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in - /// asynchronous Rust at the moment - requests_queue: - Mutex>>>, - - /// Sending side of `requests_queue`. - requests_send: TracingUnboundedSender>, -} - -#[derive(Debug, thiserror::Error)] -#[error("AlwaysBadChecker")] -struct ErrorAlwaysBadChecker; - -impl Into for ErrorAlwaysBadChecker { - fn into(self) -> ClientError { - ClientError::Application(Box::new(self)) - } -} - -/// Dummy implementation of `FetchChecker` that always assumes that responses are bad. -/// -/// Considering that it is the responsibility of the client to build the fetcher, it can use this -/// implementation if it knows that it will never perform any request. 
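`AlwaysBadChecker`, defined next, is a null object: a stand-in checker for nodes that must supply a `FetchChecker` but never issue light requests, so any response that does arrive is rejected. The pattern, distilled to a self-contained form (illustrative only; `Checker`, `RejectedResponse` and `check` are made-up stand-ins for the real `FetchChecker` methods):

use std::fmt;

#[derive(Debug)]
struct RejectedResponse;

impl fmt::Display for RejectedResponse {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "response rejected: this checker never accepts")
    }
}

impl std::error::Error for RejectedResponse {}

trait Checker {
    fn check(&self, proof: Vec<u8>) -> Result<Vec<u8>, Box<dyn std::error::Error>>;
}

/// Null-object checker: always errors, regardless of the proof.
struct AlwaysBad;

impl Checker for AlwaysBad {
    fn check(&self, _proof: Vec<u8>) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
        Err(Box::new(RejectedResponse))
    }
}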
-#[derive(Default, Clone)] -pub struct AlwaysBadChecker; - -impl FetchChecker for AlwaysBadChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - _remote_header: Option, - _remote_proof: StorageProof, - ) -> Result { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_read_proof( - &self, - _request: &RemoteReadRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_read_child_proof( - &self, - _request: &RemoteReadChildRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_execution_proof( - &self, - _request: &RemoteCallRequest, - _remote_proof: StorageProof, - ) -> Result, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_changes_proof( - &self, - _request: &RemoteChangesRequest, - _remote_proof: ChangesProof, - ) -> Result, u32)>, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } - - fn check_body_proof( - &self, - _request: &RemoteBodyRequest, - _body: Vec, - ) -> Result, ClientError> { - Err(ErrorAlwaysBadChecker.into()) - } -} - -impl OnDemand -where - B::Header: HeaderT, -{ - /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { - let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); - let requests_queue = Mutex::new(Some(requests_queue)); - - Self { checker, requests_queue, requests_send } - } - - /// Get checker reference. - pub fn checker(&self) -> &Arc> { - &self.checker - } - - /// Extracts the queue of requests. - /// - /// Whenever one of the methods of the `Fetcher` trait is called, an element is pushed on this - /// channel. - /// - /// If this function returns `None`, that means that the receiver has already been extracted in - /// the past, and therefore that something already handles the requests. 
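The take-once behaviour described here comes from keeping the receiver in a `Mutex<Option<..>>` and `take()`-ing it on first access. A minimal sketch with std types (illustrative only; the real code uses `parking_lot::Mutex` and a tracing-instrumented unbounded channel):

use std::sync::{mpsc, Mutex};

struct Queue {
    receiver: Mutex<Option<mpsc::Receiver<String>>>,
}

impl Queue {
    fn new() -> (mpsc::Sender<String>, Self) {
        let (tx, rx) = mpsc::channel();
        (tx, Queue { receiver: Mutex::new(Some(rx)) })
    }

    /// The first caller gets the receiver; later callers get `None`,
    /// signalling that something else already handles the requests.
    fn extract_receiver(&self) -> Option<mpsc::Receiver<String>> {
        self.receiver.lock().unwrap().take()
    }
}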
- pub(crate) fn extract_receiver( - &self, - ) -> Option>> { - self.requests_queue.lock().take() - } -} - -impl Fetcher for OnDemand -where - B: BlockT, - B::Header: HeaderT, -{ - type RemoteHeaderResult = RemoteResponse; - type RemoteReadResult = RemoteResponse, Option>>>; - type RemoteCallResult = RemoteResponse>; - type RemoteChangesResult = RemoteResponse, u32)>>; - type RemoteBodyResult = RemoteResponse>; - - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Header { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Read { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read_child( - &self, - request: RemoteReadChildRequest, - ) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::ReadChild { request, sender }); - RemoteResponse { receiver } - } - - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Call { request, sender }); - RemoteResponse { receiver } - } - - fn remote_changes( - &self, - request: RemoteChangesRequest, - ) -> Self::RemoteChangesResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Changes { request, sender }); - RemoteResponse { receiver } - } - - fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_requests::sender::Request::Body { request, sender }); - RemoteResponse { receiver } - } -} - -/// Future for an on-demand remote call response. 
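Every `Fetcher` method above follows one shape: open a oneshot channel, enqueue the request variant together with the sender half, and hand back the receiver wrapped in the `RemoteResponse` future defined next. A self-contained approximation (illustrative only; this `Request::Call` is a simplified stand-in for the real request enum):

use futures::channel::{mpsc, oneshot};

enum Request {
    Call { data: Vec<u8>, sender: oneshot::Sender<Result<Vec<u8>, String>> },
}

struct OnDemand {
    requests_send: mpsc::UnboundedSender<Request>,
}

impl OnDemand {
    fn remote_call(&self, data: Vec<u8>) -> oneshot::Receiver<Result<Vec<u8>, String>> {
        let (sender, receiver) = oneshot::channel();
        // If the network side is gone the send fails; the receiver then
        // resolves as cancelled, which callers surface as a fetch error.
        let _ = self.requests_send.unbounded_send(Request::Call { data, sender });
        receiver
    }
}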
-pub struct RemoteResponse { - receiver: oneshot::Receiver>, -} - -impl Future for RemoteResponse { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match self.receiver.poll_unpin(cx) { - Poll::Ready(Ok(res)) => Poll::Ready(res), - Poll::Ready(Err(_)) => Poll::Ready(Err(ClientError::RemoteFetchCancelled)), - Poll::Pending => Poll::Pending, - } - } -} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 90e647505fa1f..caf4db89f653a 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -33,11 +33,9 @@ use crate::{ config::{parse_str_addr, Params, TransportConfig}, discovery::DiscoveryConfig, error::Error, - light_client_requests, network_state::{ NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, }, - on_demand_layer::AlwaysBadChecker, protocol::{ self, event::Event, @@ -238,12 +236,6 @@ impl NetworkWorker { } })?; - let checker = params - .on_demand - .as_ref() - .map(|od| od.checker().clone()) - .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); - let num_connected = Arc::new(AtomicUsize::new(0)); let is_major_syncing = Arc::new(AtomicBool::new(false)); @@ -255,14 +247,6 @@ impl NetworkWorker { params.network_config.client_version, params.network_config.node_name ); - let light_client_request_sender = { - light_client_requests::sender::LightClientRequestSender::new( - ¶ms.protocol_id, - checker, - peerset_handle.clone(), - ) - }; - let discovery_config = { let mut config = DiscoveryConfig::new(local_public.clone()); config.with_permanent_addresses(known_addresses); @@ -347,7 +331,6 @@ impl NetworkWorker { protocol, user_agent, local_public, - light_client_request_sender, discovery_config, params.block_request_protocol_config, params.state_request_protocol_config, @@ -447,7 +430,6 @@ impl NetworkWorker { service, import_queue: params.import_queue, from_service, - light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, tx_handler_controller, @@ -1464,8 +1446,6 @@ pub struct NetworkWorker { import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. from_service: TracingUnboundedReceiver>, - /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, /// Senders for events that happen on the network. event_streams: out_events::OutChannels, /// Prometheus network metrics. @@ -1489,23 +1469,6 @@ impl Future for NetworkWorker { this.import_queue .poll_actions(cx, &mut NetworkLink { protocol: &mut this.network_service }); - // Check for new incoming light client requests. - if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { - while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - let result = this.network_service.behaviour_mut().light_client_request(rq); - match result { - Ok(()) => {}, - Err(light_client_requests::sender::SendRequestError::TooManyRequests) => { - warn!("Couldn't start light client request: too many pending requests"); - }, - } - - if let Some(metrics) = this.metrics.as_ref() { - metrics.issued_light_requests.inc(); - } - } - } - // At the time of writing of this comment, due to a high volume of messages, the network // worker sometimes takes a long time to process the loop below. When that happens, the // rest of the polling is frozen. 
In order to avoid negative side-effects caused by this diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 69b172d07edfe..1c66986e422fc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -116,7 +116,6 @@ fn build_test_full_node( }), network_config: config, chain: client.clone(), - on_demand: None, transaction_pool: Arc::new(crate::config::EmptyTransactionPool), protocol_id, import_queue, diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index fb0012aaf5baf..7f8cc0de3208d 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -48,13 +48,13 @@ use sc_consensus::{ }; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ - block_request_handler::{self, BlockRequestHandler}, + block_request_handler::BlockRequestHandler, config::{ MultiaddrWithPeerId, NetworkConfiguration, NonDefaultSetConfig, NonReservedPeerMode, ProtocolConfig, ProtocolId, Role, SyncMode, TransportConfig, }, - light_client_requests::{self, handler::LightClientRequestHandler}, - state_request_handler::{self, StateRequestHandler}, + light_client_requests::handler::LightClientRequestHandler, + state_request_handler::StateRequestHandler, warp_request_handler, Multiaddr, NetworkService, NetworkWorker, }; use sc_service::client::Client; @@ -133,25 +133,20 @@ pub type PeersFullClient = Client< Block, substrate_test_runtime_client::runtime::RuntimeApi, >; -pub type PeersLightClient = Client< - substrate_test_runtime_client::LightBackend, - substrate_test_runtime_client::LightExecutor, - Block, - substrate_test_runtime_client::runtime::RuntimeApi, ->; #[derive(Clone)] -pub enum PeersClient { - Full(Arc, Arc), - Light(Arc, Arc), +pub struct PeersClient { + client: Arc, + backend: Arc, } impl PeersClient { - pub fn as_full(&self) -> Option> { - match *self { - PeersClient::Full(ref client, _) => Some(client.clone()), - _ => None, - } + pub fn as_client(&self) -> Arc { + self.client.clone() + } + + pub fn as_backend(&self) -> Arc { + self.backend.clone() } pub fn as_block_import(&self) -> BlockImportAdapter { @@ -159,27 +154,18 @@ impl PeersClient { } pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { - match *self { - PeersClient::Full(ref client, _) => client.get_aux(key), - PeersClient::Light(ref client, _) => client.get_aux(key), - } + self.client.get_aux(key) } pub fn info(&self) -> BlockchainInfo { - match *self { - PeersClient::Full(ref client, _) => client.chain_info(), - PeersClient::Light(ref client, _) => client.chain_info(), - } + self.client.info() } pub fn header( &self, block: &BlockId, ) -> ClientResult::Header>> { - match *self { - PeersClient::Full(ref client, _) => client.header(block), - PeersClient::Light(ref client, _) => client.header(block), - } + self.client.header(block) } pub fn has_state_at(&self, block: &BlockId) -> bool { @@ -187,33 +173,19 @@ impl PeersClient { Some(header) => header, None => return false, }; - match self { - PeersClient::Full(_client, backend) => - backend.have_state_at(&header.hash(), *header.number()), - PeersClient::Light(_client, backend) => - backend.have_state_at(&header.hash(), *header.number()), - } + self.backend.have_state_at(&header.hash(), *header.number()) } pub fn justifications(&self, block: &BlockId) -> ClientResult> { - match *self { - PeersClient::Full(ref client, _) => client.justifications(block), - PeersClient::Light(ref client, _) => client.justifications(block), - } + self.client.justifications(block) } pub fn 
finality_notification_stream(&self) -> FinalityNotifications { - match *self { - PeersClient::Full(ref client, _) => client.finality_notification_stream(), - PeersClient::Light(ref client, _) => client.finality_notification_stream(), - } + self.client.finality_notification_stream() } pub fn import_notification_stream(&self) -> ImportNotifications { - match *self { - PeersClient::Full(ref client, _) => client.import_notification_stream(), - PeersClient::Light(ref client, _) => client.import_notification_stream(), - } + self.client.import_notification_stream() } pub fn finalize_block( @@ -222,12 +194,7 @@ impl PeersClient { justification: Option, notify: bool, ) -> ClientResult<()> { - match *self { - PeersClient::Full(ref client, ref _backend) => - client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => - client.finalize_block(id, justification, notify), - } + self.client.finalize_block(id, justification, notify) } } @@ -240,10 +207,7 @@ impl BlockImport for PeersClient { &mut self, block: BlockCheckParams, ) -> Result { - match self { - PeersClient::Full(client, _) => client.check_block(block).await, - PeersClient::Light(client, _) => client.check_block(block).await, - } + self.client.check_block(block).await } async fn import_block( @@ -251,12 +215,7 @@ impl BlockImport for PeersClient { block: BlockImportParams, cache: HashMap>, ) -> Result { - match self { - PeersClient::Full(client, _) => - client.import_block(block.clear_storage_changes_and_mutate(), cache).await, - PeersClient::Light(client, _) => - client.import_block(block.clear_storage_changes_and_mutate(), cache).await, - } + self.client.import_block(block.clear_storage_changes_and_mutate(), cache).await } } @@ -370,8 +329,7 @@ where BlockBuilder, ) -> Block, { - let full_client = - self.client.as_full().expect("blocks could only be generated by full clients"); + let full_client = self.client.as_client(); let mut at = full_client.header(&at).unwrap().unwrap().hash(); for _ in 0..count { let builder = @@ -773,10 +731,10 @@ where let client = Arc::new(c); let (block_import, justification_import, data) = - self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + self.make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); let verifier = self.make_verifier( - PeersClient::Full(client.clone(), backend.clone()), + PeersClient { client: client.clone(), backend: backend.clone() }, &Default::default(), &data, ); @@ -861,7 +819,6 @@ where }), network_config, chain: client.clone(), - on_demand: None, transaction_pool: Arc::new(EmptyTransactionPool), protocol_id, import_queue, @@ -892,7 +849,7 @@ where peers.push(Peer { data, - client: PeersClient::Full(client.clone(), backend.clone()), + client: PeersClient { client: client.clone(), backend: backend.clone() }, select_chain: Some(longest_chain), backend: Some(backend), imported_blocks_stream, @@ -905,94 +862,6 @@ where }); } - /// Add a light peer. 
- fn add_light_peer(&mut self) { - let (c, backend) = substrate_test_runtime_client::new_light(); - let client = Arc::new(c); - let (block_import, justification_import, data) = - self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Light(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(verifier); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - &sp_core::testing::TaskExecutor::new(), - None, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = - NetworkConfiguration::new("test-node", "test-client", Default::default(), None); - network_config.transport = TransportConfig::MemoryOnly; - network_config.listen_addresses = vec![listen_addr.clone()]; - network_config.allow_non_globals_in_dht = true; - - let protocol_id = ProtocolId::from("test-protocol-name"); - - let block_request_protocol_config = - block_request_handler::generate_protocol_config(&protocol_id); - let state_request_protocol_config = - state_request_handler::generate_protocol_config(&protocol_id); - - let light_client_request_protocol_config = - light_client_requests::generate_protocol_config(&protocol_id); - - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Light, - executor: None, - transactions_handler_executor: Box::new(|task| { - async_std::task::spawn(task); - }), - network_config, - chain: client.clone(), - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id, - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator), - metrics_registry: None, - block_request_protocol_config, - state_request_protocol_config, - light_client_request_protocol_config, - warp_sync: None, - }) - .unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - peer.network.add_known_address( - network.service().local_peer_id().clone(), - listen_addr.clone(), - ); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = - Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - verifier, - select_chain: None, - backend: None, - block_import, - client: PeersClient::Light(client, backend), - imported_blocks_stream, - finality_notification_stream, - network, - listen_addr, - }); - }); - } - /// Used to spawn background tasks, e.g. the block request protocol handler. fn spawn_task(&self, f: BoxFuture<'static, ()>) { async_std::task::spawn(f); diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index f3af7f8ff6fc3..79330d145f937 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -20,7 +20,6 @@ use super::*; use futures::{executor::block_on, Future}; use sp_consensus::{block_validation::Validation, BlockOrigin}; use sp_runtime::Justifications; -use std::time::Duration; use substrate_test_runtime::Header; fn test_ancestor_search_when_common_is(n: usize) { @@ -391,35 +390,6 @@ fn own_blocks_are_announced() { (net.peers()[2].blockchain_canon_equals(peer0)); } -#[test] -fn blocks_are_not_announced_by_light_nodes() { - sp_tracing::try_init_simple(); - let mut net = TestNet::new(0); - - // full peer0 is connected to light peer - // light peer1 is connected to full peer2 - net.add_full_peer(); - net.add_light_peer(); - - // Sync between 0 and 1. 
- net.peer(0).push_blocks(1, false); - assert_eq!(net.peer(0).client.info().best_number, 1); - net.block_until_sync(); - assert_eq!(net.peer(1).client.info().best_number, 1); - - // Add another node and remove node 0. - net.add_full_peer(); - net.peers.remove(0); - - // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. - let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut delay).poll(cx) - })); - assert_eq!(net.peer(1).client.info().best_number, 0); -} - #[test] fn can_sync_small_non_best_forks() { sp_tracing::try_init_simple(); @@ -483,72 +453,6 @@ fn can_sync_small_non_best_forks() { })); } -#[test] -fn can_not_sync_from_light_peer() { - sp_tracing::try_init_simple(); - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // generate some blocks on #0 - net.peer(0).push_blocks(1, false); - - // and let the light client sync from this node - net.block_until_sync(); - - // ensure #0 && #1 have the same best block - let full0_info = net.peer(0).client.info(); - let light_info = net.peer(1).client.info(); - assert_eq!(full0_info.best_number, 1); - assert_eq!(light_info.best_number, 1); - assert_eq!(light_info.best_hash, full0_info.best_hash); - - // add new full client (#2) && remove #0 - net.add_full_peer(); - net.peers.remove(0); - - // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds - let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut test_finished).poll(cx) - })); -} - -#[test] -fn light_peer_imports_header_from_announce() { - sp_tracing::try_init_simple(); - - fn import_with_announce(net: &mut TestNet, hash: H256) { - net.peer(0).announce_block(hash, None); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { - Poll::Ready(()) - } else { - Poll::Pending - } - })); - } - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // let them connect to each other - net.block_until_sync(); - - // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); - import_with_announce(&mut net, new_hash); - - // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); - import_with_announce(&mut net, known_stale_hash); -} - #[test] fn can_sync_explicit_forks() { sp_tracing::try_init_simple(); @@ -1177,16 +1081,14 @@ fn syncs_indexed_blocks() { assert!(net .peer(0) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_some()); assert!(net .peer(1) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_none()); @@ -1195,8 +1097,7 @@ fn syncs_indexed_blocks() { assert!(net .peer(1) .client() - .as_full() - .unwrap() + .as_client() .indexed_transaction(&indexed_key) .unwrap() .is_some()); diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs deleted file mode 100644 index 2d15c819e1dab..0000000000000 --- a/client/rpc/src/chain/chain_light.rs +++ /dev/null @@ -1,114 +0,0 @@ -// This file is part of Substrate. 
-
-// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-//! Blockchain API backend for light nodes.
-
-use futures::{future::ready, FutureExt, TryFutureExt};
-use jsonrpc_pubsub::manager::SubscriptionManager;
-use std::sync::Arc;
-
-use sc_client_api::light::{Fetcher, RemoteBlockchain, RemoteBodyRequest};
-use sp_runtime::{
-	generic::{BlockId, SignedBlock},
-	traits::Block as BlockT,
-};
-
-use super::{client_err, error::FutureResult, ChainBackend};
-use sc_client_api::BlockchainEvents;
-use sp_blockchain::HeaderBackend;
-
-/// Blockchain API backend for light nodes. Reads all the data from local
-/// database, if available, or fetches it from remote node otherwise.
-pub struct LightChain<Block: BlockT, Client, F> {
-	/// Substrate client.
-	client: Arc<Client>,
-	/// Current subscriptions.
-	subscriptions: SubscriptionManager,
-	/// Remote blockchain reference
-	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
-	/// Remote fetcher reference.
-	fetcher: Arc<F>,
-}
-
-impl<Block: BlockT, Client, F: Fetcher<Block>> LightChain<Block, Client, F> {
-	/// Create new Chain API RPC handler.
-	pub fn new(
-		client: Arc<Client>,
-		subscriptions: SubscriptionManager,
-		remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
-		fetcher: Arc<F>,
-	) -> Self {
-		Self { client, subscriptions, remote_blockchain, fetcher }
-	}
-}
-
-impl<Block, Client, F> ChainBackend<Client, Block> for LightChain<Block, Client, F>
-where
-	Block: BlockT + 'static,
-	Block::Header: Unpin,
-	Client: BlockchainEvents<Block> + HeaderBackend<Block> + Send + Sync + 'static,
-	F: Fetcher<Block> + Send + Sync + 'static,
-{
-	fn client(&self) -> &Arc<Client> {
-		&self.client
-	}
-
-	fn subscriptions(&self) -> &SubscriptionManager {
-		&self.subscriptions
-	}
-
-	fn header(&self, hash: Option<Block::Hash>) -> FutureResult<Option<Block::Header>> {
-		let hash = self.unwrap_or_best(hash);
-
-		let fetcher = self.fetcher.clone();
-		let maybe_header = sc_client_api::light::future_header(
-			&*self.remote_blockchain,
-			&*fetcher,
-			BlockId::Hash(hash),
-		);
-
-		maybe_header.then(move |result| ready(result.map_err(client_err))).boxed()
-	}
-
-	fn block(&self, hash: Option<Block::Hash>) -> FutureResult<Option<SignedBlock<Block>>> {
-		let fetcher = self.fetcher.clone();
-		self.header(hash)
-			.and_then(move |header| async move {
-				match header {
-					Some(header) => {
-						let body = fetcher
-							.remote_body(RemoteBodyRequest {
-								header: header.clone(),
-								retry_count: Default::default(),
-							})
-							.await;
-
-						body.map(|body| {
-							Some(SignedBlock {
-								block: Block::new(header, body),
-								justifications: None,
-							})
-						})
-						.map_err(client_err)
-					},
-					None => Ok(None),
-				}
-			})
-			.boxed()
-	}
-}
diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs
index a06c3a094b40f..80dab3ec5d4e5 100644
--- a/client/rpc/src/chain/mod.rs
+++ b/client/rpc/src/chain/mod.rs
@@ -19,7 +19,6 @@
 //! Substrate blockchain API.
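Note on the removal of `chain_light.rs` above and of the `new_light` constructor below: `LightChain` was the fetch-on-demand RPC backend, resolving `header`/`block` queries through `RemoteBlockchain` and `Fetcher` network requests, while the surviving `FullChain` backend answers the same calls from the local database. A minimal wiring sketch of what remains (the `client` and `subscriptions` bindings are placeholders standing in for a node's real values, not part of this patch):

// Sketch only; `client`/`subscriptions` are assumed to be in scope.
let chain_rpc = sc_rpc::chain::new_full(client.clone(), subscriptions.clone());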
 mod chain_full;
-mod chain_light;
 
 #[cfg(test)]
 mod tests;
@@ -34,7 +33,6 @@ use std::sync::Arc;
 use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId};
 
 use sc_client_api::{
-	light::{Fetcher, RemoteBlockchain},
 	BlockchainEvents,
 };
 use sp_rpc::{list::ListOrValue, number::NumberOrHex};
@@ -206,29 +204,6 @@ where
 	Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)) }
 }
 
-/// Create new state API that works on light node.
-pub fn new_light<Block, Client, F: Fetcher<Block>>(
-	client: Arc<Client>,
-	subscriptions: SubscriptionManager,
-	remote_blockchain: Arc<dyn RemoteBlockchain<Block>>,
-	fetcher: Arc<F>,
-) -> Chain<Block, Client>
-where
-	Block: BlockT + 'static,
-	Block::Header: Unpin,
-	Client: BlockBackend<Block> + HeaderBackend<Block> + BlockchainEvents<Block> + 'static,
-	F: Send + Sync + 'static,
-{
-	Chain {
-		backend: Box::new(self::chain_light::LightChain::new(
-			client,
-			subscriptions,
-			remote_blockchain,
-			fetcher,
-		)),
-	}
-}
-
 /// Chain API with subscriptions support.
 pub struct Chain<Block: BlockT, Client> {
 	backend: Box<dyn ChainBackend<Client, Block>>,
diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs
index 97f77a4077962..bd72343583361 100644
--- a/client/rpc/src/state/state_full.rs
+++ b/client/rpc/src/state/state_full.rs
@@ -27,8 +27,7 @@ use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, Subscripti
 use log::warn;
 use rpc::Result as RpcResult;
 use std::{
-	collections::{BTreeMap, HashMap},
-	ops::Range,
+	collections::HashMap,
 	sync::Arc,
 };
@@ -45,7 +44,7 @@ use sp_core::{
 };
 use sp_runtime::{
 	generic::BlockId,
-	traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion},
+	traits::Block as BlockT,
 };
 use sp_version::RuntimeVersion;
@@ -66,14 +65,6 @@ use std::marker::PhantomData;
 struct QueryStorageRange<Block: BlockT> {
 	/// Hashes of all the blocks in the range.
 	pub hashes: Vec<Block::Hash>,
-	/// Number of the first block in the range.
-	pub first_number: NumberFor<Block>,
-	/// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at
-	/// each state to get changes.
-	pub unfiltered_range: Range<usize>,
-	/// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter
-	/// blocks-with-changes by using changes tries.
-	pub filtered_range: Option<Range<usize>>,
 }
 
 /// State API backend for full nodes.
@@ -107,10 +98,8 @@ where
 		Ok(hash.unwrap_or_else(|| self.client.info().best_hash))
 	}
 
-	/// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges.
-	/// Blocks that contain changes within filtered subrange could be filtered using changes tries.
-	/// Blocks that contain changes within unfiltered subrange must be filtered manually.
-	fn split_query_storage_range(
+	/// Validates block range.
+	fn query_storage_range(
 		&self,
 		from: Block::Hash,
 		to: Option<Block::Hash>,
@@ -156,22 +145,8 @@ where
 			hashes
 		};
 
-		// check if we can filter blocks-with-changes from some (sub)range using changes tries
-		let changes_trie_range = self
-			.client
-			.max_key_changes_range(from_number, BlockId::Hash(to_meta.hash))
-			.map_err(client_err)?;
-		let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| {
-			// avoids a corner case where begin < from_number (happens when querying genesis)
-			begin.checked_sub(&from_number).map(|x| x.saturated_into::<usize>())
-		});
-		let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin);
-
 		Ok(QueryStorageRange {
 			hashes,
-			first_number: from_number,
-			unfiltered_range,
-			filtered_range,
 		})
 	}
 
@@ -183,8 +158,8 @@ where
 		last_values: &mut HashMap<StorageKey, Option<StorageData>>,
 		changes: &mut Vec<StorageChangeSet<Block::Hash>>,
 	) -> Result<()> {
-		for block in range.unfiltered_range.start..range.unfiltered_range.end {
-			let block_hash = range.hashes[block].clone();
+		for block_hash in &range.hashes {
+			let block_hash = block_hash.clone();
 			let mut block_changes =
 				StorageChangeSet { block: block_hash.clone(), changes: Vec::new() };
 			let id = BlockId::hash(block_hash);
@@ -207,57 +182,6 @@ where
 		}
 		Ok(())
 	}
-
-	/// Iterates through all blocks that are changing keys within range.filtered_range and collects
-	/// these changes.
-	fn query_storage_filtered(
-		&self,
-		range: &QueryStorageRange<Block>,
-		keys: &[StorageKey],
-		last_values: &HashMap<StorageKey, Option<StorageData>>,
-		changes: &mut Vec<StorageChangeSet<Block::Hash>>,
-	) -> Result<()> {
-		let (begin, end) = match range.filtered_range {
-			Some(ref filtered_range) => (
-				range.first_number + filtered_range.start.saturated_into(),
-				BlockId::Hash(range.hashes[filtered_range.end - 1].clone()),
-			),
-			None => return Ok(()),
-		};
-		let mut changes_map: BTreeMap<NumberFor<Block>, StorageChangeSet<Block::Hash>> =
-			BTreeMap::new();
-		for key in keys {
-			let mut last_block = None;
-			let mut last_value = last_values.get(key).cloned().unwrap_or_default();
-			let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?;
-			for (block, _) in key_changes.into_iter().rev() {
-				if last_block == Some(block) {
-					continue
-				}
-
-				let block_hash =
-					range.hashes[(block - range.first_number).saturated_into::<usize>()].clone();
-				let id = BlockId::Hash(block_hash);
-				let value_at_block = self.client.storage(&id, key).map_err(client_err)?;
-				if last_value == value_at_block {
-					continue
-				}
-
-				changes_map
-					.entry(block)
-					.or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() })
-					.changes
-					.push((key.clone(), value_at_block.clone()));
-				last_block = Some(block);
-				last_value = value_at_block;
-			}
-		}
-		if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) {
-			changes.reserve(additional_capacity);
-		}
-		changes.extend(changes_map.into_iter().map(|(_, cs)| cs));
-		Ok(())
-	}
 }
 
 impl<BE, Block, Client> StateBackend<Block, Client> for FullState<BE, Block, Client>
@@ -430,11 +354,10 @@ where
 		keys: Vec<StorageKey>,
 	) -> FutureResult<Vec<StorageChangeSet<Block::Hash>>> {
 		let call_fn = move || {
-			let range = self.split_query_storage_range(from, to)?;
+			let range = self.query_storage_range(from, to)?;
 			let mut changes = Vec::new();
 			let mut last_values = HashMap::new();
 			self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?;
-			self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?;
 			Ok(changes)
 		};
@@ -768,30 +691,6 @@ where
 	}
 }
 
-/// Splits passed range into two subranges where:
-/// - first range has at least one element in it;
-/// - second range (optionally) starts at given `middle` element.
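With the filtered path deleted above, `query_storage` degrades to a plain scan: every block in the validated range is visited and each key is read directly (the deletion of the `split_range` helper that fed the filtered path continues below). A self-contained sketch of that algorithm, with `storage` standing in for the client's per-block storage lookup; all names here are illustrative, not from this patch:

use std::collections::HashMap;

// Illustrative stand-alone version of the unfiltered scan: visit every block
// in the (already validated) range, read each key directly, and record a
// (key, value) pair whenever the value differs from the last one seen.
fn scan_changes(
	hashes: &[[u8; 32]],
	keys: &[Vec<u8>],
	storage: impl Fn(&[u8; 32], &[u8]) -> Option<Vec<u8>>,
) -> Vec<([u8; 32], Vec<(Vec<u8>, Option<Vec<u8>>)>)> {
	let mut last_values: HashMap<Vec<u8>, Option<Vec<u8>>> = HashMap::new();
	let mut changes = Vec::new();
	for hash in hashes {
		let mut block_changes = Vec::new();
		for key in keys {
			let value = storage(hash, key);
			// The map misses every key at the first block, so initial values
			// are always reported, matching query_storage_unfiltered above.
			if last_values.get(key) != Some(&value) {
				block_changes.push((key.clone(), value.clone()));
				last_values.insert(key.clone(), value);
			}
		}
		if !block_changes.is_empty() {
			changes.push((*hash, block_changes));
		}
	}
	changes
}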
-pub(crate) fn split_range(
-	size: usize,
-	middle: Option<usize>,
-) -> (Range<usize>, Option<Range<usize>>) {
-	// check if we can filter blocks-with-changes from some (sub)range using changes tries
-	let range2_begin = match middle {
-		// some of required changes tries are pruned => use available tries
-		Some(middle) if middle != 0 => Some(middle),
-		// all required changes tries are available, but we still want values at first block
-		// => do 'unfiltered' read for the first block and 'filtered' for the rest
-		Some(_) if size > 1 => Some(1),
-		// range contains single element => do not use changes tries
-		Some(_) => None,
-		// changes tries are not available => do 'unfiltered' read for the whole range
-		None => None,
-	};
-	let range1 = 0..range2_begin.unwrap_or(size);
-	let range2 = range2_begin.map(|begin| begin..size);
-	(range1, range2)
-}
-
 fn invalid_block_range<Block: BlockT>(
 	from: &CachedHeaderMetadata<Block>,
 	to: &CachedHeaderMetadata<Block>,
diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs
index 712fe00c54386..9d5919ec27f4e 100644
--- a/client/rpc/src/state/tests.rs
+++ b/client/rpc/src/state/tests.rs
@@ -17,16 +17,15 @@
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 use self::error::Error;
-use super::{state_full::split_range, *};
+use super::*;
 use crate::testing::TaskExecutor;
 use assert_matches::assert_matches;
 use futures::{executor, StreamExt};
 use sc_block_builder::BlockBuilderProvider;
 use sc_rpc_api::DenyUnsafe;
 use sp_consensus::BlockOrigin;
-use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration};
+use sp_core::{hash::H256, storage::ChildInfo};
 use sp_io::hashing::blake2_256;
-use sp_runtime::generic::BlockId;
 use std::sync::Arc;
 use substrate_test_runtime_client::{prelude::*, runtime};
@@ -336,7 +335,7 @@ fn should_send_initial_storage_changes_and_notifications() {
 
 #[test]
 fn should_query_storage() {
-	fn run_tests(mut client: Arc<TestClient>, has_changes_trie_config: bool) {
+	fn run_tests(mut client: Arc<TestClient>) {
 		let (api, _child) = new_full(
 			client.clone(),
 			SubscriptionManager::new(Arc::new(TaskExecutor)),
@@ -369,13 +368,6 @@ fn should_query_storage() {
 		let block2_hash = add_block(1);
 		let genesis_hash = client.genesis_hash();
 
-		if has_changes_trie_config {
-			assert_eq!(
-				client.max_key_changes_range(1, BlockId::Hash(block1_hash)).unwrap(),
-				Some((0, BlockId::Hash(block1_hash))),
-			);
-		}
-
 		let mut expected = vec![
 			StorageChangeSet {
 				block: genesis_hash,
@@ -519,26 +511,15 @@ fn should_query_storage() {
 		);
 	}
 
-	run_tests(Arc::new(substrate_test_runtime_client::new()), false);
+	run_tests(Arc::new(substrate_test_runtime_client::new()));
 	run_tests(
 		Arc::new(
 			TestClientBuilder::new()
-				.changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2)))
 				.build(),
 		),
-		true,
 	);
 }
 
-#[test]
-fn should_split_ranges() {
-	assert_eq!(split_range(1, None), (0..1, None));
-	assert_eq!(split_range(100, None), (0..100, None));
-	assert_eq!(split_range(1, Some(0)), (0..1, None));
-	assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100)));
-	assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100)));
-}
-
 #[test]
 fn should_return_runtime_version() {
 	let client = Arc::new(substrate_test_runtime_client::new());
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs
index bcb05ce743701..d7f8a5bc09169 100644
--- a/client/service/src/builder.rs
+++ b/client/service/src/builder.rs
@@ -30,7 +30,7 @@ use log::info;
 use prometheus_endpoint::Registry;
 use sc_chain_spec::get_extension;
 use sc_client_api::{
-	execution_extensions::ExecutionExtensions, light::RemoteBlockchain,
+
execution_extensions::ExecutionExtensions, proof_provider::ProofProvider, BadBlocks, BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider, }; @@ -40,7 +40,7 @@ use sc_executor::RuntimeVersionOf; use sc_keystore::LocalKeystore; use sc_network::{ block_request_handler::{self, BlockRequestHandler}, - config::{OnDemand, Role, SyncMode}, + config::{Role, SyncMode}, light_client_requests::{self, handler::LightClientRequestHandler}, state_request_handler::{self, StateRequestHandler}, warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, @@ -381,23 +381,19 @@ where pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { /// The service configuration. pub config: Configuration, - /// A shared client returned by `new_full_parts`/`new_light_parts`. + /// A shared client returned by `new_full_parts`. pub client: Arc, - /// A shared backend returned by `new_full_parts`/`new_light_parts`. + /// A shared backend returned by `new_full_parts`. pub backend: Arc, - /// A task manager returned by `new_full_parts`/`new_light_parts`. + /// A task manager returned by `new_full_parts`. pub task_manager: &'a mut TaskManager, - /// A shared keystore returned by `new_full_parts`/`new_light_parts`. + /// A shared keystore returned by `new_full_parts`. pub keystore: SyncCryptoStorePtr, - /// An optional, shared data fetcher for light clients. - pub on_demand: Option>>, /// A shared transaction pool. pub transaction_pool: Arc, /// A RPC extension builder. Use `NoopRpcExtensionBuilder` if you just want to pass in the /// extensions directly. pub rpc_extensions_builder: Box + Send>, - /// An optional, shared remote blockchain instance. Used for light clients. - pub remote_blockchain: Option>>, /// A shared network instance. pub network: Arc::Hash>>, /// A Sender for RPC requests. @@ -474,12 +470,10 @@ where mut config, task_manager, client, - on_demand: _, backend, keystore, transaction_pool, rpc_extensions_builder, - remote_blockchain: _, network, system_rpc_tx, telemetry, @@ -719,7 +713,7 @@ where pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// The service configuration. pub config: &'a Configuration, - /// A shared client returned by `new_full_parts`/`new_light_parts`. + /// A shared client returned by `new_full_parts`. pub client: Arc, /// A shared transaction pool. pub transaction_pool: Arc, @@ -727,8 +721,6 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { pub spawn_handle: SpawnTaskHandle, /// An import queue. pub import_queue: TImpQu, - /// An optional, shared data fetcher for light clients. - pub on_demand: Option>>, /// A block announce validator builder. 
pub block_announce_validator_builder: Option) -> Box + Send> + Send>>, @@ -767,7 +759,6 @@ where transaction_pool, spawn_handle, import_queue, - on_demand, block_announce_validator_builder, warp_sync, } = params; @@ -863,7 +854,6 @@ where }, network_config: config.network.clone(), chain: client.clone(), - on_demand, transaction_pool: transaction_pool_adapter as _, import_queue: Box::new(import_queue), protocol_id, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index d7a8b6f227e8f..d2c474fc2b877 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -28,7 +28,7 @@ use sp_core::{ use sp_externalities::Extensions; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, NumberFor}, + traits::Block as BlockT, }; use sp_state_machine::{ self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, @@ -153,8 +153,6 @@ where extensions: Option, ) -> sp_blockchain::Result> { let mut changes = OverlayedChanges::default(); - let changes_trie = - backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let state = self.backend.state_at(*at)?; let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = @@ -168,7 +166,6 @@ where let return_data = StateMachine::new( &state, - changes_trie, &mut changes, &self.executor, method, @@ -208,8 +205,6 @@ where where ExecutionManager: Clone, { - let changes_trie_state = - backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); let state = self.backend.state_at(*at)?; @@ -243,7 +238,6 @@ where let mut state_machine = StateMachine::new( &backend, - changes_trie_state, changes, &self.executor, method, @@ -262,7 +256,6 @@ where None => { let mut state_machine = StateMachine::new( &state, - changes_trie_state, changes, &self.executor, method, @@ -286,11 +279,9 @@ where fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let changes_trie_state = - backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; let state = self.backend.state_at(*id)?; let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new(&mut overlay, &mut cache, &state, changes_trie_state, None); + let mut ext = Ext::new(&mut overlay, &mut cache, &state, None); let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); let runtime_code = state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; @@ -317,7 +308,7 @@ where state_runtime_code.runtime_code().map_err(sp_blockchain::Error::RuntimeCode)?; let runtime_code = self.check_override(runtime_code, at)?; - sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _, _>( + sp_state_machine::prove_execution_on_trie_backend( &trie_backend, &mut Default::default(), &self.executor, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 4e3cb0aaf234b..598203e5c2677 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -23,7 +23,6 @@ use super::{ genesis, }; use codec::{Decode, Encode}; -use hash_db::Prefix; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; use prometheus_endpoint::Registry; @@ -31,18 +30,16 @@ use rand::Rng; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, 
RecordProof}; use sc_client_api::{ backend::{ - self, apply_aux, changes_tries_state_at_block, BlockImportOperation, ClientImportOperation, - Finalizer, ImportSummary, LockImportRun, NewBlockState, PrunableStateChangesTrieStorage, + self, apply_aux, BlockImportOperation, ClientImportOperation, + Finalizer, ImportSummary, LockImportRun, NewBlockState, StorageProvider, }, - cht, client::{ BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo, FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications, ProvideUncles, }, execution_extensions::ExecutionExtensions, - light::ChangesProof, notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeyIterator, ProofProvider, UsageProvider, }; @@ -56,35 +53,33 @@ use sp_api::{ ProvideRuntimeApi, }; use sp_blockchain::{ - self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, Cache, - CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata, ProvideCache, + self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, + CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata, }; use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_core::{ - convert_hash, - storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, - ChangesTrieConfiguration, NativeOrEncoded, + storage::{well_known_keys, ChildInfo, StorageData, StorageKey}, + NativeOrEncoded, }; #[cfg(feature = "test-helpers")] use sp_keystore::SyncCryptoStorePtr; use sp_runtime::{ - generic::{BlockId, DigestItem, SignedBlock}, + generic::{BlockId, SignedBlock}, traits::{ - Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One, + Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - BuildStorage, Justification, Justifications, + Digest, BuildStorage, Justification, Justifications, }; use sp_state_machine::{ - key_changes, key_changes_proof, prove_child_read, prove_range_read_with_size, prove_read, - read_range_proof_check, Backend as StateBackend, ChangesTrieAnchorBlockId, - ChangesTrieConfigurationRange, ChangesTrieRootsStorage, ChangesTrieStorage, DBValue, + prove_child_read, prove_range_read_with_size, prove_read, + read_range_proof_check, Backend as StateBackend, }; use sp_trie::StorageProof; use std::{ - collections::{BTreeMap, HashMap, HashSet}, + collections::HashMap, marker::PhantomData, panic::UnwindSafe, path::PathBuf, @@ -409,250 +404,6 @@ where self.executor.runtime_version(id) } - /// Reads given header and generates CHT-based header proof for CHT of given size. 
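For context on the CHT code removed below, continuing from the doc line above: a Canonical Hash Trie groups a fixed window of block hashes (2048 by default in `sc_client_api::cht`, assuming its published constants) under a single trie root, so header proofs are always built over whole windows. A standalone recomputation of that window arithmetic under those assumptions:

// Standalone sketch of the CHT window arithmetic (assumed semantics of the
// deleted sc_client_api::cht helpers; block 0 belongs to no CHT).
fn block_to_cht_number(cht_size: u64, block_num: u64) -> Option<u64> {
	if block_num == 0 {
		None
	} else {
		Some((block_num - 1) / cht_size)
	}
}

fn start_number(cht_size: u64, cht_num: u64) -> u64 {
	cht_num * cht_size + 1
}

fn main() {
	let cht_size = 2048;
	// Block 5000 falls into CHT #2, which covers blocks 4097..=6144, so a
	// header proof for it was built from that whole 2048-hash window.
	assert_eq!(block_to_cht_number(cht_size, 5000), Some(2));
	assert_eq!(start_number(cht_size, 2), 4097);
	assert_eq!(start_number(cht_size, 3) - 1, 6144);
}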
- pub fn header_proof_with_cht_size( - &self, - id: &BlockId, - cht_size: NumberFor, - ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - let proof_error = || { - sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)) - }; - let header = self.backend.blockchain().expect_header(*id)?; - let block_num = *header.number(); - let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; - let cht_start = cht::start_number(cht_size, cht_num); - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let headers = cht_range.map(|num| self.block_hash(num)); - let proof = cht::build_proof::, _, _>( - cht_size, - cht_num, - std::iter::once(block_num), - headers, - )?; - Ok((header, proof)) - } - - /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. - pub fn key_changes_proof_with_cht_size( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey, - cht_size: NumberFor, - ) -> sp_blockchain::Result> { - struct AccessedRootsRecorder<'a, Block: BlockT> { - storage: &'a dyn ChangesTrieStorage, NumberFor>, - min: NumberFor, - required_roots_proofs: Mutex, Block::Hash>>, - } - - impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> - for AccessedRootsRecorder<'a, Block> - { - fn build_anchor( - &self, - hash: Block::Hash, - ) -> Result>, String> { - self.storage.build_anchor(hash) - } - - fn root( - &self, - anchor: &ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - let root = self.storage.root(anchor, block)?; - if block < self.min { - if let Some(ref root) = root { - self.required_roots_proofs.lock().insert(block, root.clone()); - } - } - Ok(root) - } - } - - impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> - for AccessedRootsRecorder<'a, Block> - { - fn as_roots_storage( - &self, - ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap, HashSet>>), - ) -> bool { - self.storage.with_cached_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) - } - } - - let first_number = - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(first))?; - let (storage, configs) = self.require_changes_trie(first_number, last, true)?; - let min_number = - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; - - let recording_storage = AccessedRootsRecorder:: { - storage: storage.storage(), - min: min_number, - required_roots_proofs: Mutex::new(BTreeMap::new()), - }; - - let max_number = std::cmp::min( - self.backend.blockchain().info().best_number, - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?, - ); - - // fetch key changes proof - let mut proof = Vec::new(); - for (config_zero, config_end, config) in configs { - let last_number = - self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(last))?; - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: config_zero, - end: config_end.map(|(config_end_number, _)| config_end_number), - }; - let proof_range = key_changes_proof::, _>( - config_range, - &recording_storage, - first_number, - 
&ChangesTrieAnchorBlockId { hash: convert_hash(&last), number: last_number }, - max_number, - storage_key, - &key.0, - ) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; - proof.extend(proof_range); - } - - // now gather proofs for all changes tries roots that were touched during key_changes_proof - // execution AND are unknown (i.e. replaced with CHT) to the requester - let roots = recording_storage.required_roots_proofs.into_inner(); - let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; - - Ok(ChangesProof { - max_block: max_number, - proof, - roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), - roots_proof, - }) - } - - /// Generate CHT-based proof for roots of changes tries at given blocks. - fn changes_trie_roots_proof>>( - &self, - cht_size: NumberFor, - blocks: I, - ) -> sp_blockchain::Result { - // most probably we have touched several changes tries that are parts of the single CHT - // => GroupBy changes tries by CHT number and then gather proof for the whole group at once - let mut proofs = Vec::new(); - - cht::for_each_cht_group::( - cht_size, - blocks, - |_, cht_num, cht_blocks| { - let cht_proof = - self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; - proofs.push(cht_proof); - Ok(()) - }, - (), - )?; - - Ok(StorageProof::merge(proofs)) - } - - /// Generates CHT-based proof for roots of changes tries at given blocks - /// (that are part of single CHT). - fn changes_trie_roots_proof_at_cht( - &self, - cht_size: NumberFor, - cht_num: NumberFor, - blocks: Vec>, - ) -> sp_blockchain::Result { - let cht_start = cht::start_number(cht_size, cht_num); - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let roots = cht_range.map(|num| { - self.header(&BlockId::Number(num)).map(|block| { - block - .and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned()) - }) - }); - let proof = cht::build_proof::, _, _>( - cht_size, cht_num, blocks, roots, - )?; - Ok(proof) - } - - /// Returns changes trie storage and all configurations that have been active - /// in the range [first; last]. - /// - /// Configurations are returned in descending order (and obviously never overlap). - /// If fail_if_disabled is false, returns maximal consequent configurations ranges, - /// starting from last and stopping on either first, or when CT have been disabled. - /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled - /// inside first..last blocks range. 
- fn require_changes_trie( - &self, - first: NumberFor, - last: Block::Hash, - fail_if_disabled: bool, - ) -> sp_blockchain::Result<( - &dyn PrunableStateChangesTrieStorage, - Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, - )> { - let storage = self - .backend - .changes_trie_storage() - .ok_or_else(|| sp_blockchain::Error::ChangesTriesNotSupported)?; - - let mut configs = Vec::with_capacity(1); - let mut current = last; - loop { - let config_range = storage.configuration_at(&BlockId::Hash(current))?; - match config_range.config { - Some(config) => configs.push((config_range.zero.0, config_range.end, config)), - None if !fail_if_disabled => return Ok((storage, configs)), - None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), - } - - if config_range.zero.0 < first { - break - } - - current = *self - .backend - .blockchain() - .expect_header(BlockId::Hash(config_range.zero.1))? - .parent_hash(); - } - - Ok((storage, configs)) - } - /// Apply a checked and validated block to an operation. If a justification is provided /// then `finalized` *must* be true. fn apply_block( @@ -807,7 +558,7 @@ where sc_consensus::StorageChanges::Changes(storage_changes) => { self.backend .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; - let (main_sc, child_sc, offchain_sc, tx, _, changes_trie_tx, tx_index) = + let (main_sc, child_sc, offchain_sc, tx, _, tx_index) = storage_changes.into_inner(); if self.config.offchain_indexing_api { @@ -818,9 +569,6 @@ where operation.op.update_storage(main_sc.clone(), child_sc.clone())?; operation.op.update_transaction_index(tx_index)?; - if let Some(changes_trie_transaction) = changes_trie_tx { - operation.op.update_changes_trie(changes_trie_transaction)?; - } Some((main_sc, child_sc)) }, sc_consensus::StorageChanges::Import(changes) => { @@ -972,11 +720,8 @@ where )?; let state = self.backend.state_at(at)?; - let changes_trie_state = - changes_tries_state_at_block(&at, self.backend.changes_trie_storage())?; - let gen_storage_changes = runtime_api - .into_storage_changes(&state, changes_trie_state.as_ref(), *parent_hash) + .into_storage_changes(&state, *parent_hash) .map_err(sp_blockchain::Error::Storage)?; if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root @@ -1325,25 +1070,6 @@ where .map(|(r, p)| (r, StorageProof::merge(vec![p, code_proof]))) } - fn header_proof( - &self, - id: &BlockId, - ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - self.header_proof_with_cht_size(id, cht::size()) - } - - fn key_changes_proof( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result> { - self.key_changes_proof_with_cht_size(first, last, min, max, storage_key, key, cht::size()) - } - fn read_proof_collection( &self, id: &BlockId, @@ -1418,7 +1144,7 @@ where fn new_block_at>( &self, parent: &BlockId, - inherent_digests: DigestFor, + inherent_digests: Digest, record_proof: R, ) -> sp_blockchain::Result> { sc_block_builder::BlockBuilder::new( @@ -1433,7 +1159,7 @@ where fn new_block( &self, - inherent_digests: DigestFor, + inherent_digests: Digest, ) -> sp_blockchain::Result> { let info = self.chain_info(); sc_block_builder::BlockBuilder::new( @@ -1581,89 +1307,6 @@ where .child_storage_hash(child_info, &key.0) .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) 
} - - fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> sp_blockchain::Result, BlockId)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - if first > last_number { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed( - "Invalid changes trie range".into(), - )) - } - - let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { - Some((storage, configs)) => (storage, configs), - None => return Ok(None), - }; - - let first_available_changes_trie = configs.last().map(|config| config.0); - match first_available_changes_trie { - Some(first_available_changes_trie) => { - let oldest_unpruned = storage.oldest_pruned_digest_range_end(); - let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); - Ok(Some((first, last))) - }, - None => Ok(None), - } - } - - fn key_changes( - &self, - first: NumberFor, - last: BlockId, - storage_key: Option<&PrefixedStorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result, u32)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; - - let mut result = Vec::new(); - let best_number = self.backend.blockchain().info().best_number; - for (config_zero, config_end, config) in configs { - let range_first = ::std::cmp::max(first, config_zero + One::one()); - let range_anchor = match config_end { - Some((config_end_number, config_end_hash)) => - if last_number > config_end_number { - ChangesTrieAnchorBlockId { - hash: config_end_hash, - number: config_end_number, - } - } else { - ChangesTrieAnchorBlockId { - hash: convert_hash(&last_hash), - number: last_number, - } - }, - None => - ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, - }; - - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: config_zero.clone(), - end: config_end.map(|(config_end_number, _)| config_end_number), - }; - let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( - config_range, - storage.storage(), - range_first, - &range_anchor, - best_number, - storage_key, - &key.0, - ) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } } impl HeaderMetadata for Client @@ -1791,16 +1434,6 @@ where } } -impl ProvideCache for Client -where - B: backend::Backend, - Block: BlockT, -{ - fn cache(&self) -> Option>> { - self.backend.blockchain().cache() - } -} - impl ProvideRuntimeApi for Client where B: backend::Backend, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index a1ff8da4085c9..15c76cd03d1d3 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -68,7 +68,7 @@ use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use sc_network::config::{OnDemand, TransactionImport, TransactionImportFuture}; +pub use sc_network::config::{TransactionImport, TransactionImportFuture}; pub use sc_rpc::Metadata as RpcMetadata; pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; diff --git a/client/service/test/Cargo.toml 
b/client/service/test/Cargo.toml index aeee4a5f90728..7d9ebe304c06a 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -19,7 +19,6 @@ tokio = { version = "1.10.0", features = ["time"] } log = "0.4.8" fdlimit = "0.2.1" parking_lot = "0.11.1" -sc-light = { version = "4.0.0-dev", path = "../../light" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 33cbefbb06a95..8f028c3691768 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -27,14 +27,14 @@ use sc_client_db::{ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, }; -use sc_service::client::{self, new_in_mem, Client, LocalCallExecutor}; +use sc_service::client::{new_in_mem, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain}; -use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; +use sp_core::{testing::TaskExecutor, H256}; use sp_runtime::{ generic::BlockId, traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, - ConsensusEngineId, DigestItem, Justifications, + ConsensusEngineId, Justifications, }; use sp_state_machine::{ backend::Backend as _, ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine, @@ -42,14 +42,13 @@ use sp_state_machine::{ use sp_storage::{ChildInfo, StorageKey}; use sp_trie::{trie_types::Layout, TrieConfiguration}; use std::{ - collections::{HashMap, HashSet}, + collections::HashSet, sync::Arc, }; use substrate_test_runtime::TestAPI; use substrate_test_runtime_client::{ prelude::*, runtime::{ - self, genesismap::{insert_genesis_block, GenesisConfig}, Block, BlockNumber, Digest, Hash, Header, RuntimeApi, Transfer, }, @@ -57,6 +56,8 @@ use substrate_test_runtime_client::{ Sr25519Keyring, TestClientBuilder, TestClientBuilderExt, }; +mod db; + const TEST_ENGINE_ID: ConsensusEngineId = *b"TEST"; pub struct ExecutorDispatch; @@ -77,86 +78,6 @@ fn executor() -> sc_executor::NativeElseWasmExecutor { sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) } -pub fn prepare_client_with_key_changes() -> ( - client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::ExecutorDispatch, - Block, - RuntimeApi, - >, - Vec, - Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, -) { - // prepare block structure - let blocks_transfers = vec![ - vec![ - (AccountKeyring::Alice, AccountKeyring::Dave), - (AccountKeyring::Bob, AccountKeyring::Dave), - ], - vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], - vec![], - vec![(AccountKeyring::Alice, AccountKeyring::Dave)], - ]; - - // prepare client ang import blocks - let mut local_roots = Vec::new(); - let config = Some(ChangesTrieConfiguration::new(4, 2)); - let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); - let mut nonces: HashMap<_, u64> = Default::default(); - for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { - let mut builder = remote_client.new_block(Default::default()).unwrap(); - for (from, to) in block_transfers { - builder - .push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: 
*nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(), - }) - .unwrap(); - } - let block = builder.build().unwrap().block; - block_on(remote_client.import(BlockOrigin::Own, block)).unwrap(); - - let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header - .digest() - .log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref())) - .unwrap(); - local_roots.push(trie_root); - } - - // prepare test cases - let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); - let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = - blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = - blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); - let test_cases = vec![ - (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), - (1, 3, alice.clone(), vec![(1, 0)]), - (2, 4, alice.clone(), vec![(4, 0)]), - (2, 3, alice.clone(), vec![]), - (1, 4, bob.clone(), vec![(1, 1)]), - (1, 1, bob.clone(), vec![(1, 1)]), - (2, 4, bob.clone(), vec![]), - (1, 4, charlie.clone(), vec![(2, 0)]), - (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), - (3, 4, dave.clone(), vec![(4, 0)]), - (1, 4, eve.clone(), vec![(2, 0)]), - (1, 1, eve.clone(), vec![]), - (3, 4, eve.clone(), vec![]), - (1, 4, ferdie.clone(), vec![]), - ]; - - (remote_client, local_roots, test_cases) -} - fn construct_block( backend: &InMemoryBackend, number: BlockNumber, @@ -184,7 +105,6 @@ fn construct_block( StateMachine::new( backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_initialize_block", @@ -199,7 +119,6 @@ fn construct_block( for tx in transactions.iter() { StateMachine::new( backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "BlockBuilder_apply_extrinsic", @@ -214,7 +133,6 @@ fn construct_block( let ret_data = StateMachine::new( backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "BlockBuilder_finalize_block", @@ -248,7 +166,6 @@ fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec(), &mut overlay, &executor(), "Core_execute_block", @@ -283,7 +199,6 @@ fn construct_genesis_should_work_with_native() { #[test] fn construct_genesis_should_work_with_wasm() { let mut storage = GenesisConfig::new( - None, vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 1000, @@ -302,7 +217,6 @@ fn construct_genesis_should_work_with_wasm() { let _ = StateMachine::new( &backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_execute_block", @@ -318,7 +232,6 @@ fn construct_genesis_should_work_with_wasm() { #[test] fn construct_genesis_with_bad_transaction_should_panic() { let mut storage = GenesisConfig::new( - None, vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], 68, @@ -337,7 +250,6 @@ fn construct_genesis_with_bad_transaction_should_panic() { let r = StateMachine::new( &backend, - 
sp_state_machine::disabled_changes_trie_state::<_, u64>(), &mut overlay, &executor(), "Core_execute_block", @@ -906,23 +818,6 @@ fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { ); } -#[test] -fn key_changes_works() { - let (client, _, test_cases) = prepare_client_with_key_changes(); - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let end = client.block_hash(end).unwrap().unwrap(); - let actual_result = - client.key_changes(begin, BlockId::Hash(end), None, &StorageKey(key)).unwrap(); - if actual_result != expected_result { - panic!( - "Failed test {}: actual = {:?}, expected = {:?}", - index, actual_result, expected_result, - ); - } - } -} - #[test] fn import_with_justification() { let mut client = substrate_test_runtime_client::new(); @@ -1536,152 +1431,6 @@ fn returns_status_for_pruned_blocks() { ); } -#[test] -fn imports_blocks_with_changes_tries_config_change() { - // create client with initial 4^2 configuration - let mut client = TestClientBuilder::with_default_backend() - .changes_trie_config(Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - })) - .build(); - - // =================================================================== - // blocks 1,2,3,4,5,6,7,8,9,10 are empty - // block 11 changes the key - // block 12 is the L1 digest that covers this change - // blocks 13,14,15,16,17,18,19,20,21,22 are empty - // block 23 changes the configuration to 5^1 AND is skewed digest - // =================================================================== - // blocks 24,25 are changing the key - // block 26 is empty - // block 27 changes the key - // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to - // `3^1` - // =================================================================== - // block 29 is empty - // block 30 changes the key - // block 31 is L1 digest that covers this change - // =================================================================== - (1..11).for_each(|number| { - let block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (11..12).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (12..23).for_each(|number| { - let block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (23..24).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 5, - digest_levels: 1, - })) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (24..26).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, 
block)).unwrap(); - }); - (26..27).for_each(|number| { - let block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (27..28).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (28..29).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 3, - digest_levels: 1, - })) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (29..30).for_each(|number| { - let block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (30..31).for_each(|number| { - let mut block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap(); - block - .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) - .unwrap(); - let block = block.build().unwrap().block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - (31..32).for_each(|number| { - let block = client - .new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap() - .build() - .unwrap() - .block; - block_on(client.import(BlockOrigin::Own, block)).unwrap(); - }); - - // now check that configuration cache works - assert_eq!( - client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(), - vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] - ); -} - #[test] fn storage_keys_iter_prefix_and_start_key_works() { let child_info = ChildInfo::new_default(b"child"); diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index a735c67d846ce..188475ec07239 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -18,7 +18,7 @@ //! Chain api required for the transaction pool. -use codec::{Decode, Encode}; +use codec::Encode; use futures::{ channel::{mpsc, oneshot}, future::{ready, Future, FutureExt, Ready}, @@ -30,14 +30,13 @@ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use prometheus_endpoint::Registry as PrometheusRegistry; use sc_client_api::{ blockchain::HeaderBackend, - light::{Fetcher, RemoteBodyRequest, RemoteCallRequest}, BlockBackend, }; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, - traits::{self, Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT}, + traits::{self, Block as BlockT, BlockIdTo}, transaction_validity::{TransactionSource, TransactionValidity}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; @@ -289,127 +288,3 @@ where validate_transaction_blocking(&*self.client, at, source, uxt) } } - -/// The transaction pool logic for light client. -pub struct LightChainApi { - client: Arc, - fetcher: Arc, - _phantom: PhantomData, -} - -impl LightChainApi { - /// Create new transaction pool logic. 
- pub fn new(client: Arc, fetcher: Arc) -> Self { - LightChainApi { client, fetcher, _phantom: Default::default() } - } -} - -impl graph::ChainApi for LightChainApi -where - Block: BlockT, - Client: HeaderBackend + 'static, - F: Fetcher + 'static, -{ - type Block = Block; - type Error = error::Error; - type ValidationFuture = - Box> + Send + Unpin>; - type BodyFuture = Pin< - Box< - dyn Future::Extrinsic>>>> - + Send, - >, - >; - - fn validate_transaction( - &self, - at: &BlockId, - source: TransactionSource, - uxt: graph::ExtrinsicFor, - ) -> Self::ValidationFuture { - let header_hash = self.client.expect_block_hash_from_id(at); - let header_and_hash = header_hash.and_then(|header_hash| { - self.client - .expect_header(BlockId::Hash(header_hash)) - .map(|header| (header_hash, header)) - }); - let (block, header) = match header_and_hash { - Ok((header_hash, header)) => (header_hash, header), - Err(err) => return Box::new(ready(Err(err.into()))), - }; - let remote_validation_request = self.fetcher.remote_call(RemoteCallRequest { - block, - header, - method: "TaggedTransactionQueue_validate_transaction".into(), - call_data: (source, uxt, block).encode(), - retry_count: None, - }); - let remote_validation_request = remote_validation_request.then(move |result| { - let result: error::Result = - result.map_err(Into::into).and_then(|result| { - Decode::decode(&mut &result[..]).map_err(|e| { - Error::RuntimeApi(format!("Error decoding tx validation result: {:?}", e)) - }) - }); - ready(result) - }); - - Box::new(remote_validation_request) - } - - fn block_id_to_number( - &self, - at: &BlockId, - ) -> error::Result>> { - Ok(self.client.block_number_from_id(at)?) - } - - fn block_id_to_hash( - &self, - at: &BlockId, - ) -> error::Result>> { - Ok(self.client.block_hash_from_id(at)?) - } - - fn hash_and_length( - &self, - ex: &graph::ExtrinsicFor, - ) -> (graph::ExtrinsicHash, usize) { - ex.using_encoded(|x| (<::Hashing as HashT>::hash(x), x.len())) - } - - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - let header = self - .client - .header(*id) - .and_then(|h| h.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("{}", id)))); - let header = match header { - Ok(header) => header, - Err(err) => { - log::warn!(target: "txpool", "Failed to query header: {:?}", err); - return Box::pin(ready(Ok(None))) - }, - }; - - let fetcher = self.fetcher.clone(); - async move { - let transactions = fetcher - .remote_body(RemoteBodyRequest { header, retry_count: None }) - .await - .unwrap_or_else(|e| { - log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e); - Vec::new() - }); - - Ok(Some(transactions)) - } - .boxed() - } - - fn block_header( - &self, - at: &BlockId, - ) -> Result::Header>, Self::Error> { - self.client.header(*at).map_err(Into::into) - } -} diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 8af73c3fe5b48..1f94bd997de21 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -38,7 +38,7 @@ pub mod test_helpers { }; } -pub use crate::api::{FullChainApi, LightChainApi}; +pub use crate::api::FullChainApi; use futures::{ channel::oneshot, future::{self, ready}, @@ -79,9 +79,6 @@ type PolledIterator = Pin = BasicPool, Block>; -/// A transaction pool for a light node. -pub type LightPool = - BasicPool, Block>; /// Basic implementation of transaction pool that can be customized by providing PoolApi. 
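With `LightChainApi` and `LightPool` gone, transaction pools are constructed only through the full-client API, validating transactions via local runtime calls rather than the `remote_call` requests deleted above. A hedged wiring sketch; bindings such as `config`, `task_manager` and `client` are assumptions standing in for a node's real service objects:

// Sketch only: the light-pool constructor no longer exists, so this is the
// single remaining construction path.
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
	config.transaction_pool.clone(),
	config.role.is_authority().into(), // IsValidator
	config.prometheus_registry(),
	task_manager.spawn_essential_handle(),
	client.clone(),
);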
pub struct BasicPool @@ -364,33 +361,6 @@ where } } -impl LightPool -where - Block: BlockT, - Client: sp_blockchain::HeaderBackend + sc_client_api::UsageProvider + 'static, - Fetcher: sc_client_api::Fetcher + 'static, -{ - /// Create new basic transaction pool for a light node with the provided api. - pub fn new_light( - options: graph::Options, - prometheus: Option<&PrometheusRegistry>, - spawner: impl SpawnEssentialNamed, - client: Arc, - fetcher: Arc, - ) -> Self { - let pool_api = Arc::new(LightChainApi::new(client.clone(), fetcher)); - Self::with_revalidation_type( - options, - false.into(), - pool_api, - prometheus, - RevalidationType::Light, - spawner, - client.usage_info().chain.best_number, - ) - } -} - impl FullPool where Block: BlockT, diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index 4b5294835403a..48e9bd48c6035 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -151,7 +151,7 @@ impl Pallet { fn change_authorities(new: WeakBoundedVec) { >::put(&new); - let log: DigestItem = DigestItem::Consensus( + let log = DigestItem::Consensus( AURA_ENGINE_ID, ConsensusLog::AuthoritiesChange(new.into_inner()).encode(), ); @@ -222,7 +222,7 @@ impl OneSessionHandler for Pallet { } fn on_disabled(i: u32) { - let log: DigestItem = DigestItem::Consensus( + let log = DigestItem::Consensus( AURA_ENGINE_ID, ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), ); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 9c755eea6c446..4e91f2218a42f 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -633,7 +633,7 @@ impl Pallet { } fn deposit_consensus(new: U) { - let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); + let log = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); >::deposit_log(log.into()) } diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs index 7c70766623b4d..f27bc450ad146 100644 --- a/frame/beefy-mmr/src/tests.rs +++ b/frame/beefy-mmr/src/tests.rs @@ -40,7 +40,7 @@ fn init_block(block: u64) { BeefyMmr::on_initialize(block); } -pub fn beefy_log(log: ConsensusLog) -> DigestItem { +pub fn beefy_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) } diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs index 3b28d454849cf..6a7118c1f5c96 100644 --- a/frame/beefy/src/lib.rs +++ b/frame/beefy/src/lib.rs @@ -110,7 +110,7 @@ impl Pallet { let next_id = Self::validator_set_id() + 1u64; >::put(next_id); - let log: DigestItem = DigestItem::Consensus( + let log = DigestItem::Consensus( BEEFY_ENGINE_ID, ConsensusLog::AuthoritiesChange(ValidatorSet { validators: new, id: next_id }) .encode(), @@ -163,7 +163,7 @@ impl OneSessionHandler for Pallet { } fn on_disabled(i: u32) { - let log: DigestItem = DigestItem::Consensus( + let log = DigestItem::Consensus( BEEFY_ENGINE_ID, ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), ); diff --git a/frame/beefy/src/tests.rs b/frame/beefy/src/tests.rs index 24f9acaf76bfc..252c03efb54a9 100644 --- a/frame/beefy/src/tests.rs +++ b/frame/beefy/src/tests.rs @@ -20,7 +20,6 @@ use std::vec; use beefy_primitives::ValidatorSet; use codec::Encode; -use sp_core::H256; use sp_runtime::DigestItem; use frame_support::traits::OnInitialize; @@ -32,7 +31,7 @@ fn init_block(block: u64) { Session::on_initialize(block); } -pub fn beefy_log(log: ConsensusLog) -> DigestItem { +pub fn beefy_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) } diff --git a/frame/executive/src/lib.rs 
b/frame/executive/src/lib.rs index b1bdf357ec07d..e77c811a35e2d 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -125,7 +125,6 @@ use frame_support::{ }, weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, }; -use frame_system::DigestOf; use sp_runtime::{ generic::Digest, traits::{ @@ -281,8 +280,8 @@ where Self::initialize_block_impl(header.number(), header.parent_hash(), &digests); } - fn extract_pre_digest(header: &System::Header) -> DigestOf { - let mut digest = >::default(); + fn extract_pre_digest(header: &System::Header) -> Digest { + let mut digest = ::default(); header.digest().logs().iter().for_each(|d| { if d.as_pre_runtime().is_some() { digest.push(d.clone()) @@ -294,7 +293,7 @@ where fn initialize_block_impl( block_number: &System::BlockNumber, parent_hash: &System::Hash, - digest: &Digest, + digest: &Digest, ) { let mut weight = 0; if Self::runtime_upgraded() { diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 9f6967a7d3c85..0e7d885649cc3 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -508,7 +508,7 @@ impl Pallet { /// Deposit one of this module's logs. fn deposit_log(log: ConsensusLog) { - let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); + let log = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); >::deposit_log(log.into()); } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index f1996553f02eb..49e4022a4aaed 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -254,7 +254,7 @@ impl Config for Test { type MaxAuthorities = MaxAuthorities; } -pub fn grandpa_log(log: ConsensusLog) -> DigestItem { +pub fn grandpa_log(log: ConsensusLog) -> DigestItem { DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()) } diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 35552e08fef1e..dc2c056eefa1a 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -1223,7 +1223,7 @@ mod private { pub trait Sealed {} impl Sealed for Vec {} - impl Sealed for Digest {} + impl Sealed for Digest {} impl Sealed for BoundedVec {} impl Sealed for WeakBoundedVec {} impl Sealed for bounded_btree_map::BoundedBTreeMap {} @@ -1263,7 +1263,7 @@ impl StorageDecodeLength for Vec {} /// We abuse the fact that SCALE does not put any marker into the encoding, i.e. we only encode the /// internal vec and we can append to this vec. We have a test that ensures that if the `Digest` /// format ever changes, we need to remove this here. -impl StorageAppend> for Digest {} +impl StorageAppend for Digest {} /// Marker trait that is implemented for types that support the `storage::append` api with a limit /// on the number of element. 
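A small sketch of the narrowed digest API implied by the hunks above and below: `Digest` and `DigestItem` no longer take a `<Hash>` parameter, and the `ChangesTrieRoot` variant is gone, so `StorageAppend` is implemented for the plain types. Assumes the `sp_runtime` re-exports used elsewhere in this patch:

use sp_runtime::{Digest, DigestItem};

fn main() {
	let mut digest = Digest::default();
	// ChangesTrieRoot is no longer a DigestItem variant; Other still is.
	digest.push(DigestItem::Other(b"example".to_vec()));
	assert_eq!(digest.logs().len(), 1);
}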
@@ -1485,8 +1485,8 @@ mod test { fn digest_storage_append_works_as_expected() { TestExternalities::default().execute_with(|| { struct Storage; - impl generator::StorageValue> for Storage { - type Query = Digest; + impl generator::StorageValue for Storage { + type Query = Digest; fn module_prefix() -> &'static [u8] { b"MyModule" @@ -1496,22 +1496,21 @@ mod test { b"Storage" } - fn from_optional_value_to_query(v: Option>) -> Self::Query { + fn from_optional_value_to_query(v: Option) -> Self::Query { v.unwrap() } - fn from_query_to_optional_value(v: Self::Query) -> Option> { + fn from_query_to_optional_value(v: Self::Query) -> Option { Some(v) } } - Storage::append(DigestItem::ChangesTrieRoot(1)); Storage::append(DigestItem::Other(Vec::new())); let value = unhashed::get_raw(&Storage::storage_value_final_key()).unwrap(); let expected = Digest { - logs: vec![DigestItem::ChangesTrieRoot(1), DigestItem::Other(Vec::new())], + logs: vec![DigestItem::Other(Vec::new())], }; assert_eq!(Digest::decode(&mut &value[..]).unwrap(), expected); }); diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index e7371b1099e5e..eddf78ce6b254 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -22,8 +22,8 @@ use codec::Encode; use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{storage, traits::Get, weights::DispatchClass}; -use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin}; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use frame_system::{Call, Pallet as System, RawOrigin}; +use sp_core::storage::well_known_keys; use sp_runtime::traits::Hash; use sp_std::{prelude::*, vec}; @@ -62,23 +62,6 @@ benchmarks! { assert_eq!(current_code.len(), 4_000_000 as usize); } - set_changes_trie_config { - let d = 1000; - - let digest_item = DigestItemOf::::Other(vec![]); - - for i in 0 .. d { - System::::deposit_log(digest_item.clone()); - } - let changes_trie_config = ChangesTrieConfiguration { - digest_interval: d, - digest_levels: d, - }; - }: _(RawOrigin::Root, Some(changes_trie_config)) - verify { - assert_eq!(System::::digest().logs.len(), (d + 1) as usize) - } - #[skip_meta] set_storage { let i in 1 .. 1000; diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 2e7f26eef16f4..774cce771cc2c 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -95,7 +95,7 @@ use frame_support::{ Parameter, }; use scale_info::TypeInfo; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use sp_core::storage::well_known_keys; #[cfg(feature = "std")] use frame_support::traits::GenesisBuild; @@ -405,37 +405,6 @@ pub mod pallet { Ok(().into()) } - /// Set the new changes trie configuration. - /// - /// # - /// - `O(1)` - /// - 1 storage write or delete (codec `O(1)`). 
- /// - 1 call to `deposit_log`: Uses `append` API, so O(1) - /// - Base Weight: 7.218 µs - /// - DB Weight: - /// - Writes: Changes Trie, System Digest - /// # - #[pallet::weight((T::SystemWeightInfo::set_changes_trie_config(), DispatchClass::Operational))] - pub fn set_changes_trie_config( - origin: OriginFor, - changes_trie_config: Option, - ) -> DispatchResultWithPostInfo { - ensure_root(origin)?; - match changes_trie_config.clone() { - Some(changes_trie_config) => storage::unhashed::put_raw( - well_known_keys::CHANGES_TRIE_CONFIG, - &changes_trie_config.encode(), - ), - None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), - } - - let log = generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), - ); - Self::deposit_log(log.into()); - Ok(().into()) - } - /// Set some items of storage. /// /// # @@ -615,7 +584,7 @@ pub mod pallet { /// Digest of the current block, also part of the block header. #[pallet::storage] #[pallet::getter(fn digest)] - pub(super) type Digest = StorageValue<_, DigestOf, ValueQuery>; + pub(super) type Digest = StorageValue<_, generic::Digest, ValueQuery>; /// Events deposited for the current block. /// @@ -664,7 +633,6 @@ pub mod pallet { #[pallet::genesis_config] pub struct GenesisConfig { - pub changes_trie_config: Option, #[serde(with = "sp_core::bytes")] pub code: Vec, } @@ -672,7 +640,7 @@ pub mod pallet { #[cfg(feature = "std")] impl Default for GenesisConfig { fn default() -> Self { - Self { changes_trie_config: Default::default(), code: Default::default() } + Self { code: Default::default() } } } @@ -687,12 +655,6 @@ pub mod pallet { sp_io::storage::set(well_known_keys::CODE, &self.code); sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); - if let Some(ref changes_trie_config) = self.changes_trie_config { - sp_io::storage::set( - well_known_keys::CHANGES_TRIE_CONFIG, - &changes_trie_config.encode(), - ); - } } } } @@ -757,9 +719,6 @@ impl GenesisConfig { } } -pub type DigestOf = generic::Digest<::Hash>; -pub type DigestItemOf = generic::DigestItem<::Hash>; - pub type Key = Vec; pub type KeyValue = (Vec, Vec); @@ -1367,7 +1326,7 @@ impl Pallet { pub fn initialize( number: &T::BlockNumber, parent_hash: &T::Hash, - digest: &DigestOf, + digest: &generic::Digest, kind: InitKind, ) { // populate environment @@ -1407,7 +1366,7 @@ impl Pallet { // stay to be inspected by the client and will be cleared by `Self::initialize`. let number = >::get(); let parent_hash = >::get(); - let mut digest = >::get(); + let digest = >::get(); let extrinsics = (0..ExtrinsicCount::::take().unwrap_or_default()) .map(ExtrinsicData::::take) @@ -1425,17 +1384,6 @@ impl Pallet { let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) .expect("Node is configured to use the same hash; qed"); - let storage_changes_root = sp_io::storage::changes_root(&parent_hash.encode()); - - // we can't compute changes trie root earlier && put it to the Digest - // because it will include all currently existing temporaries. 
- if let Some(storage_changes_root) = storage_changes_root { - let item = generic::DigestItem::ChangesTrieRoot( - T::Hash::decode(&mut &storage_changes_root[..]) - .expect("Node is configured to use the same hash; qed"), - ); - digest.push(item); - } ::new( number, @@ -1452,7 +1400,7 @@ impl Pallet { /// - `O(1)` /// - 1 storage write (codec `O(1)`) /// # - pub fn deposit_log(item: DigestItemOf) { + pub fn deposit_log(item: generic::DigestItem) { >::append(item); } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index bc0f027e1efaa..dd2a7f6c14909 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -288,10 +288,6 @@ fn generate_runtime_api_base_structures() -> Result { fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_state: Option<&#crate_::ChangesTrieState< - #crate_::HashFor, - #crate_::NumberFor, - >>, parent_hash: Block::Hash, ) -> std::result::Result< #crate_::StorageChanges, @@ -299,7 +295,6 @@ fn generate_runtime_api_base_structures() -> Result { > where Self: Sized { self.changes.replace(Default::default()).into_storage_changes( backend, - changes_trie_state, parent_hash, self.storage_transaction_cache.replace(Default::default()), ) diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 77f8a07f85c48..9483d018c4a40 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -116,10 +116,6 @@ fn implement_common_api_traits(block_type: TypePath, self_ty: Type) -> Result, - #crate_::NumberFor<#block_type>, - >>, _: <#block_type as #crate_::BlockT>::Hash, ) -> std::result::Result< #crate_::StorageChanges, diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 82954d193e605..434756acad2c7 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -97,7 +97,7 @@ pub use sp_runtime::{ #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - Backend as StateBackend, ChangesTrieState, InMemoryBackend, OverlayedChanges, StorageProof, + Backend as StateBackend, InMemoryBackend, OverlayedChanges, StorageProof, }; #[cfg(feature = "std")] use sp_std::result; @@ -394,14 +394,12 @@ pub type ProofRecorder = sp_state_machine::ProofRecorder<::Hash> pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< >>::Transaction, HashFor, - NumberFor, >; #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges< >>::Transaction, HashFor, - NumberFor, >; /// Extract the state backend type for a type that implements `ProvideRuntimeApi`. 
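With the changes-trie state parameter removed, extracting storage changes from a runtime API instance needs only the state backend and the parent hash. A hedged sketch of the new call shape (the wrapper function and its names are illustrative, not from the patch):

use sp_api::{ApiExt, BlockT, StorageChanges};

// After this patch, `into_storage_changes` takes only the backend and the
// parent hash; formerly it also took an `Option<&ChangesTrieState<_, _>>`.
fn extract_changes<Block: BlockT, Api: ApiExt<Block>>(
	api: Api,
	backend: &Api::StateBackend,
	parent_hash: Block::Hash,
) -> Result<StorageChanges<Api::StateBackend, Block>, String> {
	api.into_storage_changes(backend, parent_hash)
}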
@@ -514,7 +512,6 @@ pub trait ApiExt { fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, ) -> Result, String> where diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 101f92fd6c7d7..e32290b12a599 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -211,7 +211,7 @@ fn record_proof_works() { None, 8, ); - execution_proof_check_on_trie_backend::<_, u64, _, _>( + execution_proof_check_on_trie_backend( &backend, &mut overlay, &executor, diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index fc70ce845dc98..71c3f36a161b6 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -17,8 +17,6 @@ //! Substrate blockchain trait -use std::sync::Arc; - use log::warn; use parking_lot::RwLock; use sp_runtime::{ @@ -96,8 +94,6 @@ pub trait Backend: fn justifications(&self, id: BlockId) -> Result>; /// Get last finalized block hash. fn last_finalized(&self) -> Result; - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; /// Returns hashes of all blocks that are leaves of the block tree. /// in other words, that have no children, are chain heads. @@ -237,33 +233,6 @@ pub trait Backend: fn block_indexed_body(&self, id: BlockId) -> Result>>>; } -/// Provides access to the optional cache. -pub trait ProvideCache { - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; -} - -/// Blockchain optional data cache. -pub trait Cache: Send + Sync { - /// Initialize genesis value for the given cache. - /// - /// The operation should be performed once before anything else is inserted in the cache. - /// Otherwise cache may end up in inconsistent state. - fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; - /// Returns cached value by the given key. - /// - /// Returned tuple is the range where value has been active and the value itself. - /// Fails if read from cache storage fails or if the value for block is discarded - /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). - fn get_at( - &self, - key: &well_known_cache_keys::Id, - block: &BlockId, - ) -> Result< - Option<((NumberFor, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>, - >; -} - /// Blockchain info #[derive(Debug, Eq, PartialEq)] pub struct Info { diff --git a/primitives/consensus/aura/src/digests.rs b/primitives/consensus/aura/src/digests.rs index eaa29036d98a1..6925862d0ce94 100644 --- a/primitives/consensus/aura/src/digests.rs +++ b/primitives/consensus/aura/src/digests.rs @@ -25,7 +25,6 @@ use crate::AURA_ENGINE_ID; use codec::{Codec, Encode}; use sp_consensus_slots::Slot; use sp_runtime::generic::DigestItem; -use sp_std::fmt::Debug; /// A digest item which is usable with aura consensus. 
pub trait CompatibleDigestItem: Sized { @@ -42,10 +41,9 @@ pub trait CompatibleDigestItem: Sized { fn as_aura_pre_digest(&self) -> Option; } -impl CompatibleDigestItem for DigestItem +impl CompatibleDigestItem for DigestItem where Signature: Codec, - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { fn aura_seal(signature: Signature) -> Self { DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 1c908fe61fc0b..4847adec37f18 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -21,7 +21,7 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; -use codec::{Codec, Decode, Encode, MaxEncodedLen}; +use codec::{Decode, Encode, MaxEncodedLen}; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; @@ -177,10 +177,7 @@ pub trait CompatibleDigestItem: Sized { fn as_next_config_descriptor(&self) -> Option; } -impl CompatibleDigestItem for DigestItem -where - Hash: Send + Sync + Eq + Clone + Codec + 'static, -{ +impl CompatibleDigestItem for DigestItem { fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index d7979baf47c11..93ad387ff35e9 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -25,8 +25,9 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ + Digest, generic::BlockId, - traits::{Block as BlockT, DigestFor, HashFor, NumberFor}, + traits::{Block as BlockT, HashFor}, }; use sp_state_machine::StorageProof; @@ -112,7 +113,7 @@ pub struct Proposal { pub proof: Proof, /// The storage changes while building this block. pub storage_changes: - sp_state_machine::StorageChanges, NumberFor>, + sp_state_machine::StorageChanges>, } /// Error that is returned when [`ProofRecording`] requested to record a proof, @@ -224,7 +225,7 @@ pub trait Proposer { fn propose( self, inherent_data: InherentData, - inherent_digests: DigestFor, + inherent_digests: Digest, max_duration: Duration, block_size_limit: Option, ) -> Self::Proposal; diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs deleted file mode 100644 index f4ce83dc2c877..0000000000000 --- a/primitives/core/src/changes_trie.rs +++ /dev/null @@ -1,321 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2018-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Substrate changes trie configuration. - -use codec::{Decode, Encode}; -use num_traits::Zero; -#[cfg(any(feature = "std", test))] -use serde::{Deserialize, Serialize}; - -/// Substrate changes trie configuration. 
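Both `CompatibleDigestItem` impls above now attach directly to the plain `DigestItem`, so no hash type has to be threaded through consensus code. A short sketch, assuming the post-patch `sp-consensus-aura` API and using `Vec<u8>` as a stand-in for the signature type:

use sp_consensus_aura::digests::CompatibleDigestItem;
use sp_consensus_slots::Slot;
use sp_runtime::generic::DigestItem;

fn roundtrip_pre_digest() -> Option<Slot> {
	// Build an aura pre-runtime digest for slot 42, then read the slot back.
	let item = <DigestItem as CompatibleDigestItem<Vec<u8>>>::aura_pre_digest(Slot::from(42u64));
	item.as_aura_pre_digest()
}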
-#[cfg_attr( - any(feature = "std", test), - derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) -)] -#[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode, scale_info::TypeInfo)] -pub struct ChangesTrieConfiguration { - /// Interval (in blocks) at which level1-digests are created. Digests are not - /// created when this is less or equal to 1. - pub digest_interval: u32, - /// Maximal number of digest levels in hierarchy. 0 means that digests are not - /// created at all (even level1 digests). 1 means only level1-digests are created. - /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. - /// Please ensure that maximum digest interval (i.e. digest_interval^digest_levels) - /// is within `u32` limits. Otherwise you'll never see digests covering such intervals - /// && maximal digests interval will be truncated to the last interval that fits - /// `u32` limits. - pub digest_levels: u32, -} - -/// Substrate changes trie configuration range. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ChangesTrieConfigurationRange { - /// Zero block of configuration. - pub zero: (Number, Hash), - /// Last block of configuration (if configuration has been deactivated at some point). - pub end: Option<(Number, Hash)>, - /// The configuration itself. None if changes tries were disabled in this range. - pub config: Option, -} - -impl ChangesTrieConfiguration { - /// Create new configuration given digest interval and levels. - pub fn new(digest_interval: u32, digest_levels: u32) -> Self { - Self { digest_interval, digest_levels } - } - - /// Is digest build enabled? - pub fn is_digest_build_enabled(&self) -> bool { - self.digest_interval > 1 && self.digest_levels > 0 - } - - /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block(&self, zero: Number, block: Number) -> bool - where - Number: From - + PartialEq - + ::sp_std::ops::Rem - + ::sp_std::ops::Sub - + ::sp_std::cmp::PartialOrd - + Zero, - { - block > zero && - self.is_digest_build_enabled() && - ((block - zero) % self.digest_interval.into()).is_zero() - } - - /// Returns max digest interval. One if digests are not created at all. - pub fn max_digest_interval(&self) -> u32 { - if !self.is_digest_build_enabled() { - return 1 - } - - // we'll get >1 loop iteration only when bad configuration parameters are selected - let mut current_level = self.digest_levels; - loop { - if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { - return max_digest_interval - } - - current_level -= 1; - } - } - - /// Returns max level digest block number that has been created at block <= passed block number. - /// - /// Returns None if digests are not created at all. - pub fn prev_max_level_digest_block(&self, zero: Number, block: Number) -> Option - where - Number: Clone - + From - + PartialOrd - + PartialEq - + ::sp_std::ops::Add - + ::sp_std::ops::Sub - + ::sp_std::ops::Div - + ::sp_std::ops::Mul - + Zero, - { - if block <= zero { - return None - } - - let (next_begin, next_end) = - self.next_max_level_digest_range(zero.clone(), block.clone())?; - - // if 'next' digest includes our block, then it is a also a previous digest - if next_end == block { - return Some(block) - } - - // if previous digest ends at zero block, then there are no previous digest - let prev_end = next_begin - 1.into(); - if prev_end == zero { - None - } else { - Some(prev_end) - } - } - - /// Returns max level digest blocks range (inclusive) which includes passed block. 
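A worked example of the interval arithmetic above, matching the parameters of the deleted tests further below (it would compile only against the pre-patch `sp-core` that still exports `ChangesTrieConfiguration`): with `digest_interval = 8` and `digest_levels = 4`, level-1 digests land every 8 blocks, level-2 every 64, level-3 every 512, and the maximal interval is 8^4 = 4096 blocks.

fn digest_interval_example() {
	let config = ChangesTrieConfiguration::new(8, 4);
	assert!(config.is_digest_build_enabled());
	assert_eq!(config.max_digest_interval(), 4096); // 8.pow(4)
	// Block 64 hosts a level-2 digest that walks the covered blocks in steps of 8.
	assert_eq!(config.digest_level_at_block(0u64, 64u64), Some((2, 64, 8)));
}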
- /// - /// Returns None if digests are not created at all. - /// It will return the first max-level digest if block is <= zero. - pub fn next_max_level_digest_range( - &self, - zero: Number, - mut block: Number, - ) -> Option<(Number, Number)> - where - Number: Clone - + From - + PartialOrd - + PartialEq - + ::sp_std::ops::Add - + ::sp_std::ops::Sub - + ::sp_std::ops::Div - + ::sp_std::ops::Mul, - { - if !self.is_digest_build_enabled() { - return None - } - - if block <= zero { - block = zero.clone() + 1.into(); - } - - let max_digest_interval: Number = self.max_digest_interval().into(); - let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); - if max_digests_since_zero == 0.into() { - return Some((zero.clone() + 1.into(), zero + max_digest_interval)) - } - let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); - Some(if block == last_max_digest_block { - (block.clone() - max_digest_interval + 1.into(), block) - } else { - (last_max_digest_block.clone() + 1.into(), last_max_digest_block + max_digest_interval) - }) - } - - /// Returns Some if digest must be built at given block number. - /// The tuple is: - /// ( - /// digest level - /// digest interval (in blocks) - /// step between blocks we're interested in when digest is built - /// ) - pub fn digest_level_at_block( - &self, - zero: Number, - block: Number, - ) -> Option<(u32, u32, u32)> - where - Number: Clone - + From - + PartialEq - + ::sp_std::ops::Rem - + ::sp_std::ops::Sub - + ::sp_std::cmp::PartialOrd - + Zero, - { - if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { - return None - } - - let relative_block = block - zero; - let mut digest_interval = self.digest_interval; - let mut current_level = 1u32; - let mut digest_step = 1u32; - while current_level < self.digest_levels { - let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) - if (relative_block.clone() % new_digest_interval.into()).is_zero() => - new_digest_interval, - _ => break, - }; - - digest_step = digest_interval; - digest_interval = new_digest_interval; - current_level += 1; - } - - Some((current_level, digest_interval, digest_step)) - } -} - -#[cfg(test)] -mod tests { - use super::ChangesTrieConfiguration; - - fn config(interval: u32, levels: u32) -> ChangesTrieConfiguration { - ChangesTrieConfiguration { digest_interval: interval, digest_levels: levels } - } - - #[test] - fn is_digest_build_enabled_works() { - assert!(!config(0, 100).is_digest_build_enabled()); - assert!(!config(1, 100).is_digest_build_enabled()); - assert!(config(2, 100).is_digest_build_enabled()); - assert!(!config(100, 0).is_digest_build_enabled()); - assert!(config(100, 1).is_digest_build_enabled()); - } - - #[test] - fn is_digest_build_required_at_block_works() { - fn test_with_zero(zero: u64) { - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 8u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 9u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 
4).is_digest_build_required_at_block(zero, zero + 512u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4096u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4103u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4104u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4108u64)); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn digest_level_at_block_works() { - fn test_with_zero(zero: u64) { - assert_eq!(config(8, 4).digest_level_at_block(zero, zero), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); - assert_eq!( - config(8, 4).digest_level_at_block(zero, zero + 4096u64), - Some((4, 4096, 512)) - ); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4112u64), Some((1, 8, 1))); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn max_digest_interval_works() { - assert_eq!(config(0, 0).max_digest_interval(), 1); - assert_eq!(config(2, 2).max_digest_interval(), 4); - assert_eq!(config(8, 4).max_digest_interval(), 4096); - assert_eq!(config(::std::u32::MAX, 1024).max_digest_interval(), ::std::u32::MAX); - } - - #[test] - fn next_max_level_digest_range_works() { - assert_eq!(config(0, 0).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(1, 1).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(2, 1).next_max_level_digest_range(0u64, 16), Some((15, 16))); - assert_eq!(config(4, 1).next_max_level_digest_range(0u64, 16), Some((13, 16))); - assert_eq!(config(32, 1).next_max_level_digest_range(0u64, 16), Some((1, 32))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 10), Some((9, 16))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 8), Some((1, 8))); - assert_eq!(config(2, 1).next_max_level_digest_range(1u64, 1), Some((2, 3))); - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 9), Some((8, 11))); - - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 5), Some((8, 11))); - } - - #[test] - fn prev_max_level_digest_block_works() { - assert_eq!(config(0, 0).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 17), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 33), Some(32)); - assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 8), None); - - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 5), None); - } -} diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index a6229fe43a1a5..bf961b2f4f8dd 100644 --- a/primitives/core/src/lib.rs +++ 
b/primitives/core/src/lib.rs @@ -58,7 +58,6 @@ pub mod hexdisplay; pub mod u32_trait; -mod changes_trie; pub mod ecdsa; pub mod ed25519; pub mod hash; @@ -76,7 +75,6 @@ pub use self::{ hash::{convert_hash, H160, H256, H512}, uint::{U256, U512}, }; -pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index e6a8f8caa8d33..aac45234deadd 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -173,13 +173,6 @@ pub trait Externalities: ExtensionStore { /// operation. fn storage_append(&mut self, key: Vec, value: Vec); - /// Get the changes trie root of the current storage overlay at a block with given `parent`. - /// - /// `parent` expects a SCALE encoded hash. - /// - /// The returned hash is defined by the `Block` and is SCALE encoded. - fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; - /// Start a new nested transaction. /// /// This allows to either commit or roll back all changes made after this call to the diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 78e6f0c847952..d93076b1d1cfe 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -195,18 +195,6 @@ pub trait Storage { self.storage_root() } - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns `Some(Vec)` which holds the SCALE encoded hash or `None` when - /// changes trie is disabled. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") - } - /// Get the next key in storage after the given one in lexicographic order. fn next_key(&mut self, key: &[u8]) -> Option> { self.next_storage_key(&key) @@ -1497,7 +1485,7 @@ pub fn oom(_: core::alloc::Layout) -> ! { /// Type alias for Externalities implementation used in tests. #[cfg(feature = "std")] -pub type TestExternalities = sp_state_machine::TestExternalities; +pub type TestExternalities = sp_state_machine::TestExternalities; /// The host functions Substrate provides for the Wasm runtime environment. /// diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 82c50fffeb8d7..1749cc4853672 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -31,7 +31,7 @@ use std::{ sync::{Arc, Mutex}, }; -type TestExternalities = sp_state_machine::TestExternalities; +type TestExternalities = sp_state_machine::TestExternalities; fn call_wasm_method_with_result( binary: &[u8], diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index 87af9bc77a5fa..fc284d7ff9ab7 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -26,48 +26,44 @@ use crate::{ codec::{Decode, Encode, Error, Input}, scale_info::{ build::{Fields, Variants}, - meta_type, Path, Type, TypeInfo, TypeParameter, + Path, Type, TypeInfo, }, ConsensusEngineId, }; -use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; +use sp_core::RuntimeDebug; /// Generic header digest. 
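Following the removal of `storage_changes_root` from the host interface above, `sp_io::TestExternalities` no longer carries a block-number parameter; a minimal usage sketch under that assumption (key and value are arbitrary placeholders):

fn externalities_roundtrip() {
	let mut ext = sp_io::TestExternalities::default();
	ext.execute_with(|| {
		sp_io::storage::set(b"key", b"value");
		assert_eq!(sp_io::storage::get(b"key"), Some(b"value".to_vec()));
	});
}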
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] -pub struct Digest { +pub struct Digest { /// A list of logs in the digest. - #[cfg_attr( - feature = "std", - serde(bound(serialize = "Hash: codec::Codec", deserialize = "Hash: codec::Codec")) - )] - pub logs: Vec>, + pub logs: Vec, } -impl Default for Digest { +impl Default for Digest { fn default() -> Self { Self { logs: Vec::new() } } } -impl Digest { +impl Digest { /// Get reference to all digest items. - pub fn logs(&self) -> &[DigestItem] { + pub fn logs(&self) -> &[DigestItem] { &self.logs } /// Push new digest item. - pub fn push(&mut self, item: DigestItem) { + pub fn push(&mut self, item: DigestItem) { self.logs.push(item); } /// Pop a digest item. - pub fn pop(&mut self) -> Option> { + pub fn pop(&mut self) -> Option { self.logs.pop() } /// Get reference to the first digest item that matches the passed predicate. - pub fn log) -> Option<&T>>( + pub fn log Option<&T>>( &self, predicate: F, ) -> Option<&T> { @@ -75,7 +71,7 @@ impl Digest { } /// Get a conversion of the first digest item that successfully converts using the function. - pub fn convert_first) -> Option>( + pub fn convert_first Option>( &self, predicate: F, ) -> Option { @@ -87,12 +83,7 @@ impl Digest { /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] -pub enum DigestItem { - /// System digest item that contains the root of changes trie at given - /// block. It is created for every block iff runtime supports changes - /// trie creation. - ChangesTrieRoot(Hash), - +pub enum DigestItem { /// A pre-runtime digest. /// /// These are messages from the consensus engine to the runtime, although @@ -116,10 +107,6 @@ pub enum DigestItem { /// by runtimes. Seal(ConsensusEngineId, Vec), - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(ChangesTrieSignal), - /// Some other thing. Unsupported and experimental. Other(Vec), @@ -132,25 +119,8 @@ pub enum DigestItem { RuntimeEnvironmentUpdated, } -/// Available changes trie signals. -#[derive(PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] -#[cfg_attr(feature = "std", derive(Debug, parity_util_mem::MallocSizeOf))] -pub enum ChangesTrieSignal { - /// New changes trie configuration is enacted, starting from **next block**. - /// - /// The block that emits this signal will contain changes trie (CT) that covers - /// blocks range [BEGIN; current block], where BEGIN is (order matters): - /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created using current - /// configuration AND the last top level digest CT has been created at block - /// LAST_TOP_LEVEL_DIGEST_BLOCK; - /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change before and - /// the last configuration change happened at block LAST_CONFIGURATION_CHANGE_BLOCK; - /// - 1 otherwise. 
- NewConfiguration(Option), -} - #[cfg(feature = "std")] -impl serde::Serialize for DigestItem { +impl serde::Serialize for DigestItem { fn serialize(&self, seq: S) -> Result where S: serde::Serializer, @@ -160,7 +130,7 @@ impl serde::Serialize for DigestItem { } #[cfg(feature = "std")] -impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { +impl<'a> serde::Deserialize<'a> for DigestItem { fn deserialize(de: D) -> Result where D: serde::Deserializer<'a>, @@ -171,22 +141,14 @@ impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { } } -impl TypeInfo for DigestItem -where - Hash: TypeInfo + 'static, -{ +impl TypeInfo for DigestItem { type Identity = Self; fn type_info() -> Type { Type::builder() .path(Path::new("DigestItem", module_path!())) - .type_params(vec![TypeParameter::new("Hash", Some(meta_type::()))]) .variant( Variants::new() - .variant("ChangesTrieRoot", |v| { - v.index(DigestItemType::ChangesTrieRoot as u8) - .fields(Fields::unnamed().field(|f| f.ty::().type_name("Hash"))) - }) .variant("PreRuntime", |v| { v.index(DigestItemType::PreRuntime as u8).fields( Fields::unnamed() @@ -214,13 +176,6 @@ where .field(|f| f.ty::>().type_name("Vec")), ) }) - .variant("ChangesTrieSignal", |v| { - v.index(DigestItemType::ChangesTrieSignal as u8).fields( - Fields::unnamed().field(|f| { - f.ty::().type_name("ChangesTrieSignal") - }), - ) - }) .variant("Other", |v| { v.index(DigestItemType::Other as u8).fields( Fields::unnamed().field(|f| f.ty::>().type_name("Vec")), @@ -237,9 +192,7 @@ where /// A 'referencing view' for digest item. Does not own its contents. Used by /// final runtime implementations for encoding/decoding its log items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] -pub enum DigestItemRef<'a, Hash: 'a> { - /// Reference to `DigestItem::ChangesTrieRoot`. - ChangesTrieRoot(&'a Hash), +pub enum DigestItemRef<'a> { /// A pre-runtime digest. /// /// These are messages from the consensus engine to the runtime, although @@ -254,9 +207,6 @@ pub enum DigestItemRef<'a, Hash: 'a> { /// Put a Seal on it. This is only used by native code, and is never seen /// by runtimes. Seal(&'a ConsensusEngineId, &'a Vec), - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(&'a ChangesTrieSignal), /// Any 'non-system' digest item, opaque to the native code. Other(&'a Vec), /// Runtime code or heap pages updated. @@ -271,11 +221,9 @@ pub enum DigestItemRef<'a, Hash: 'a> { #[derive(Encode, Decode)] pub enum DigestItemType { Other = 0, - ChangesTrieRoot = 2, Consensus = 4, Seal = 5, PreRuntime = 6, - ChangesTrieSignal = 7, RuntimeEnvironmentUpdated = 8, } @@ -293,25 +241,18 @@ pub enum OpaqueDigestItemId<'a> { Other, } -impl DigestItem { +impl DigestItem { /// Returns a 'referencing view' for this digest item. - pub fn dref(&self) -> DigestItemRef { + pub fn dref(&self) -> DigestItemRef { match *self { - Self::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), Self::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), Self::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), Self::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - Self::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), Self::Other(ref v) => DigestItemRef::Other(v), Self::RuntimeEnvironmentUpdated => DigestItemRef::RuntimeEnvironmentUpdated, } } - /// Returns `Some` if the entry is the `ChangesTrieRoot` entry. 
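With the two changes-trie variants gone, an exhaustive match over a digest item covers exactly the five cases that remain; a sketch assuming the post-patch `sp-runtime`:

use sp_runtime::generic::DigestItem;

fn classify(item: &DigestItem) -> &'static str {
	match item {
		DigestItem::PreRuntime(_, _) => "pre-runtime",
		DigestItem::Consensus(_, _) => "consensus",
		DigestItem::Seal(_, _) => "seal",
		DigestItem::Other(_) => "other",
		DigestItem::RuntimeEnvironmentUpdated => "runtime environment updated",
	}
}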
- pub fn as_changes_trie_root(&self) -> Option<&Hash> { - self.dref().as_changes_trie_root() - } - /// Returns `Some` if this entry is the `PreRuntime` entry. pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &[u8])> { self.dref().as_pre_runtime() @@ -327,11 +268,6 @@ impl DigestItem { self.dref().as_seal() } - /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. - pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { - self.dref().as_changes_trie_signal() - } - /// Returns Some if `self` is a `DigestItem::Other`. pub fn as_other(&self) -> Option<&[u8]> { self.dref().as_other() @@ -372,20 +308,19 @@ impl DigestItem { } } -impl Encode for DigestItem { +impl Encode for DigestItem { fn encode(&self) -> Vec { self.dref().encode() } } -impl codec::EncodeLike for DigestItem {} +impl codec::EncodeLike for DigestItem {} -impl Decode for DigestItem { +impl Decode for DigestItem { #[allow(deprecated)] fn decode(input: &mut I) -> Result { let item_type: DigestItemType = Decode::decode(input)?; match item_type { - DigestItemType::ChangesTrieRoot => Ok(Self::ChangesTrieRoot(Decode::decode(input)?)), DigestItemType::PreRuntime => { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::PreRuntime(vals.0, vals.1)) @@ -398,23 +333,13 @@ impl Decode for DigestItem { let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; Ok(Self::Seal(vals.0, vals.1)) }, - DigestItemType::ChangesTrieSignal => - Ok(Self::ChangesTrieSignal(Decode::decode(input)?)), DigestItemType::Other => Ok(Self::Other(Decode::decode(input)?)), DigestItemType::RuntimeEnvironmentUpdated => Ok(Self::RuntimeEnvironmentUpdated), } } } -impl<'a, Hash> DigestItemRef<'a, Hash> { - /// Cast this digest item into `ChangesTrieRoot`. - pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { - match *self { - Self::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), - _ => None, - } - } - +impl<'a> DigestItemRef<'a> { /// Cast this digest item into `PreRuntime` pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { match *self { @@ -439,14 +364,6 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } - /// Cast this digest item into `ChangesTrieSignal`. 
- pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { - match *self { - Self::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), - _ => None, - } - } - /// Cast this digest item into `PreRuntime` pub fn as_other(&self) -> Option<&'a [u8]> { match *self { @@ -508,15 +425,11 @@ impl<'a, Hash> DigestItemRef<'a, Hash> { } } -impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { +impl<'a> Encode for DigestItemRef<'a> { fn encode(&self) -> Vec { let mut v = Vec::new(); match *self { - Self::ChangesTrieRoot(changes_trie_root) => { - DigestItemType::ChangesTrieRoot.encode_to(&mut v); - changes_trie_root.encode_to(&mut v); - }, Self::Consensus(val, data) => { DigestItemType::Consensus.encode_to(&mut v); (val, data).encode_to(&mut v); @@ -529,10 +442,6 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { DigestItemType::PreRuntime.encode_to(&mut v); (val, data).encode_to(&mut v); }, - Self::ChangesTrieSignal(changes_trie_signal) => { - DigestItemType::ChangesTrieSignal.encode_to(&mut v); - changes_trie_signal.encode_to(&mut v); - }, Self::Other(val) => { DigestItemType::Other.encode_to(&mut v); val.encode_to(&mut v); @@ -546,16 +455,7 @@ impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { } } -impl ChangesTrieSignal { - /// Try to cast this signal to NewConfiguration. - pub fn as_new_configuration(&self) -> Option<&Option> { - match self { - Self::NewConfiguration(config) => Some(config), - } - } -} - -impl<'a, Hash: Encode> codec::EncodeLike for DigestItemRef<'a, Hash> {} +impl<'a> codec::EncodeLike for DigestItemRef<'a> {} #[cfg(test)] mod tests { @@ -565,7 +465,6 @@ mod tests { fn should_serialize_digest() { let digest = Digest { logs: vec![ - DigestItem::ChangesTrieRoot(4), DigestItem::Other(vec![1, 2, 3]), DigestItem::Seal(*b"test", vec![1, 2, 3]), ], @@ -579,7 +478,7 @@ mod tests { #[test] fn digest_item_type_info() { - let type_info = DigestItem::::type_info(); + let type_info = DigestItem::type_info(); let variants = if let scale_info::TypeDef::Variant(variant) = type_info.type_def() { variant.variants() } else { @@ -589,21 +488,13 @@ mod tests { // ensure that all variants are covered by manual TypeInfo impl let check = |digest_item_type: DigestItemType| { let (variant_name, digest_item) = match digest_item_type { - DigestItemType::Other => ("Other", DigestItem::::Other(Default::default())), - DigestItemType::ChangesTrieRoot => - ("ChangesTrieRoot", DigestItem::ChangesTrieRoot(Default::default())), + DigestItemType::Other => ("Other", DigestItem::Other(Default::default())), DigestItemType::Consensus => ("Consensus", DigestItem::Consensus(Default::default(), Default::default())), DigestItemType::Seal => ("Seal", DigestItem::Seal(Default::default(), Default::default())), DigestItemType::PreRuntime => ("PreRuntime", DigestItem::PreRuntime(Default::default(), Default::default())), - DigestItemType::ChangesTrieSignal => ( - "ChangesTrieSignal", - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration( - Default::default(), - )), - ), DigestItemType::RuntimeEnvironmentUpdated => ("RuntimeEnvironmentUpdated", DigestItem::RuntimeEnvironmentUpdated), }; @@ -617,11 +508,9 @@ mod tests { }; check(DigestItemType::Other); - check(DigestItemType::ChangesTrieRoot); check(DigestItemType::Consensus); check(DigestItemType::Seal); check(DigestItemType::PreRuntime); - check(DigestItemType::ChangesTrieSignal); check(DigestItemType::RuntimeEnvironmentUpdated); } } diff --git a/primitives/runtime/src/generic/header.rs 
b/primitives/runtime/src/generic/header.rs index 82f081c0d70b0..0ad53b8e83b79 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -51,7 +51,7 @@ pub struct Header + TryFrom, Hash: HashT> { /// The merkle root of the extrinsics. pub extrinsics_root: Hash::Output, /// A chain-specific digest of data useful for light clients or referencing auxiliary data. - pub digest: Digest, + pub digest: Digest, } #[cfg(feature = "std")] @@ -150,11 +150,11 @@ where self.parent_hash = hash } - fn digest(&self) -> &Digest { + fn digest(&self) -> &Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Digest { + fn digest_mut(&mut self) -> &mut Digest { #[cfg(feature = "std")] log::debug!(target: "header", "Retrieving mutable reference to digest"); &mut self.digest @@ -165,7 +165,7 @@ where extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Digest, + digest: Digest, ) -> Self { Self { number, extrinsics_root, state_root, parent_hash, digest } } @@ -236,7 +236,6 @@ mod tests { extrinsics_root: BlakeTwo256::hash(b"4"), digest: crate::generic::Digest { logs: vec![ - crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"5")), crate::generic::DigestItem::Other(b"6".to_vec()), ], }, @@ -266,7 +265,6 @@ mod tests { digest: crate::generic::Digest { logs: vec![ crate::generic::DigestItem::Other(b"5000".to_vec()), - crate::generic::DigestItem::ChangesTrieRoot(BlakeTwo256::hash(b"6000")), ], }, }; diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index 71127e88ec32c..3d8e8a0ce7faa 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -31,7 +31,7 @@ mod unchecked_extrinsic; pub use self::{ block::{Block, BlockId, SignedBlock}, checked_extrinsic::CheckedExtrinsic, - digest::{ChangesTrieSignal, Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, + digest::{Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}, era::{Era, Phase}, header::Header, unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}, diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index 095bcb717bb11..ef2d3f0f4b5a2 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -19,29 +19,28 @@ use super::DigestItem; use crate::codec::{Decode, Encode}; -use sp_core::H256; #[test] fn system_digest_item_encoding() { - let item = DigestItem::ChangesTrieRoot::(H256::default()); + let item = DigestItem::Consensus([1, 2, 3, 4], vec![5, 6, 7, 8]); let encoded = item.encode(); assert_eq!( encoded, vec![ - // type = DigestItemType::ChangesTrieRoot + // type = DigestItemType::Consensus 2, // trie root 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ] ); - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); } #[test] fn non_system_digest_item_encoding() { - let item = DigestItem::Other::(vec![10, 20, 30]); + let item = DigestItem::Other(vec![10, 20, 30]); let encoded = item.encode(); assert_eq!( encoded, @@ -53,6 +52,6 @@ fn non_system_digest_item_encoding() { ] ); - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); assert_eq!(item, decoded); } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 
fe9ba588adb87..4573bc84473a3 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -182,10 +182,10 @@ impl traits::Verify for TestSignature { } /// Digest item -pub type DigestItem = generic::DigestItem; +pub type DigestItem = generic::DigestItem; /// Header Digest -pub type Digest = generic::Digest; +pub type Digest = generic::Digest; /// Block Header pub type Header = generic::Header; diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 6d79d740dc4e1..f61de70e35197 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -19,7 +19,7 @@ use crate::{ codec::{Codec, Decode, Encode, MaxEncodedLen}, - generic::{Digest, DigestItem}, + generic::Digest, scale_info::{MetaType, StaticTypeInfo, TypeInfo}, transaction_validity::{ TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -548,10 +548,7 @@ impl CheckEqual for sp_core::H256 { } } -impl CheckEqual for super::generic::DigestItem -where - H: Encode, -{ +impl CheckEqual for super::generic::DigestItem { #[cfg(feature = "std")] fn check_equal(&self, other: &Self) { if self != other { @@ -642,7 +639,7 @@ pub trait Header: extrinsics_root: Self::Hash, state_root: Self::Hash, parent_hash: Self::Hash, - digest: Digest, + digest: Digest, ) -> Self; /// Returns a reference to the header number. @@ -666,9 +663,9 @@ pub trait Header: fn set_parent_hash(&mut self, hash: Self::Hash); /// Returns a reference to the digest. - fn digest(&self) -> &Digest; + fn digest(&self) -> &Digest; /// Get a mutable reference to the digest. - fn digest_mut(&mut self) -> &mut Digest; + fn digest_mut(&mut self) -> &mut Digest; /// Returns the hash of the header. fn hash(&self) -> Self::Hash { @@ -763,9 +760,6 @@ pub type HashFor = <::Header as Header>::Hashing; /// Extract the number type for a block. pub type NumberFor = <::Header as Header>::Number; /// Extract the digest type for a block. -pub type DigestFor = Digest<<::Header as Header>::Hash>; -/// Extract the digest item type for a block. -pub type DigestItemFor = DigestItem<<::Header as Header>::Hash>; /// A "checkable" piece of information, used by the standard Substrate Executive in order to /// check the validity of a piece of extrinsic information, usually by verifying the signature. diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 7dcf92b06de06..eb6e2939b83fc 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -292,32 +292,6 @@ impl> Consolidate for sp_trie::GenericMem } } -/// Insert input pairs into memory db. -#[cfg(test)] -pub(crate) fn insert_into_memory_db( - mdb: &mut sp_trie::MemoryDB, - input: I, -) -> Option -where - H: Hasher, - I: IntoIterator, -{ - use sp_trie::{trie_types::TrieDBMut, TrieMut}; - - let mut root = ::Out::default(); - { - let mut trie = TrieDBMut::::new(mdb, &mut root); - for (key, value) in input { - if let Err(e) = trie.insert(&key, &value) { - log::warn!(target: "trie", "Failed to write to trie: {}", e); - return None - } - } - } - - Some(root) -} - /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. 
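Since the `DigestFor` alias is removed above, `Header::new` now takes the plain `Digest` directly; a sketch using the `sp_runtime::testing` types (the values are placeholders):

use sp_runtime::{generic::Digest, testing::Header, traits::Header as HeaderT};

fn build_test_header() -> Header {
	// Argument order per the trait: number, extrinsics_root, state_root,
	// parent_hash, digest.
	HeaderT::new(1u64, Default::default(), Default::default(), Default::default(), Digest::default())
}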
#[cfg(feature = "std")] pub struct BackendRuntimeCode<'a, B, H> { diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 0bbd2d0a8e8e6..3774adc5b0368 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -309,10 +309,6 @@ impl Externalities for BasicExternalities { .encode() } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - Ok(None) - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by BasicExternalities"); } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs deleted file mode 100644 index d3c6c12122c4f..0000000000000 --- a/primitives/state-machine/src/changes_trie/build.rs +++ /dev/null @@ -1,1083 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Structures and functions required to build changes trie for given block. - -use crate::{ - backend::Backend, - changes_trie::{ - build_iterator::digest_build_iterator, - input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, - AnchorBlockId, BlockNumber, ConfigurationRange, Storage, - }, - overlayed_changes::{OverlayedChanges, OverlayedValue}, - trie_backend_essence::TrieBackendEssence, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::Hasher; -use num_traits::One; -use sp_core::storage::{ChildInfo, PrefixedStorageKey}; -use std::collections::{btree_map::Entry, BTreeMap}; - -/// Prepare input pairs for building a changes trie of given block. -/// -/// Returns Err if storage error has occurred OR if storage haven't returned -/// required data. 
-pub(crate) fn prepare_input<'a, B, H, Number>( - backend: &'a B, - storage: &'a dyn Storage, - config: ConfigurationRange<'a, Number>, - overlay: &'a OverlayedChanges, - parent: &'a AnchorBlockId, -) -> Result< - ( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, - Vec, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, -{ - let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = - prepare_extrinsics_input(backend, &number, overlay)?; - let (digest_input, mut children_digest_input, digest_input_blocks) = - prepare_digest_input::(parent, config, number, storage)?; - - let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); - for (child_index, ext_iter) in children_extrinsics_input.into_iter() { - let dig_iter = children_digest_input.remove(&child_index); - children_digest.push(( - child_index, - Some(ext_iter).into_iter().flatten().chain(dig_iter.into_iter().flatten()), - )); - } - for (child_index, dig_iter) in children_digest_input.into_iter() { - children_digest.push(( - child_index, - None.into_iter().flatten().chain(Some(dig_iter).into_iter().flatten()), - )); - } - - Ok((extrinsics_input.chain(digest_input), children_digest, digest_input_blocks)) -} -/// Prepare ExtrinsicIndex input pairs. -fn prepare_extrinsics_input<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), - String, -> -where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, -{ - let mut children_result = BTreeMap::new(); - - for (child_changes, child_info) in overlay.children() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key: child_info.prefixed_storage_key(), - }; - - let iter = prepare_extrinsics_input_inner( - backend, - block, - overlay, - Some(child_info.clone()), - child_changes, - )?; - children_result.insert(child_index, iter); - } - - let top = prepare_extrinsics_input_inner(backend, block, overlay, None, overlay.changes())?; - - Ok((top, children_result)) -} - -fn prepare_extrinsics_input_inner<'a, B, H, Number>( - backend: &'a B, - block: &Number, - overlay: &'a OverlayedChanges, - child_info: Option, - changes: impl Iterator, -) -> Result> + 'a, String> -where - B: Backend, - H: Hasher, - Number: BlockNumber, -{ - changes - .filter_map(|(k, v)| { - let extrinsics = v.extrinsics(); - if !extrinsics.is_empty() { - Some((k, extrinsics)) - } else { - None - } - }) - .try_fold( - BTreeMap::new(), - |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, extrinsics)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of - // operation AND are not in storage at the beginning of operation - if let Some(child_info) = child_info.as_ref() { - if !overlay - .child_storage(child_info, k) - .map(|v| v.is_some()) - .unwrap_or_default() - { - if !backend - .exists_child_storage(&child_info, k) - .map_err(|e| format!("{}", e))? - { - return Ok(map) - } - } - } else { - if !overlay.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? 
{ - return Ok(map) - } - } - }; - - let extrinsics = extrinsics.into_iter().collect(); - entry.insert(( - ExtrinsicIndex { block: block.clone(), key: k.to_vec() }, - extrinsics, - )); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is - // Occupied AND we are checking it before insertion - let entry_extrinsics = &mut entry.get_mut().1; - entry_extrinsics.extend(extrinsics.into_iter()); - entry_extrinsics.sort(); - }, - } - - Ok(map) - }, - ) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) -} - -/// Prepare DigestIndex input pairs. -fn prepare_digest_input<'a, H, Number>( - parent: &'a AnchorBlockId, - config: ConfigurationRange, - block: Number, - storage: &'a dyn Storage, -) -> Result< - ( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - Vec, - ), - String, -> -where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, -{ - let build_skewed_digest = config.end.as_ref() == Some(&block); - let block_for_digest = if build_skewed_digest { - config - .config - .next_max_level_digest_range(config.zero.clone(), block.clone()) - .map(|(_, end)| end) - .unwrap_or_else(|| block.clone()) - } else { - block.clone() - }; - - let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks - .clone() - .into_iter() - .try_fold( - (BTreeMap::new(), BTreeMap::new()), - move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = - ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| { - format!("No changes trie root for block {}", digest_build_block.clone()) - })?; - - let insert_to_map = |map: &mut BTreeMap<_, _>, key: StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert(( - DigestIndex { block: block.clone(), key }, - vec![digest_build_block.clone()], - )); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. 
Here we are relying on the fact that - // digest_build_iterator() returns blocks in ascending order => we only - // need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest - // block AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = - storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } - } - }); - if populated_from_cache { - return Ok((map, child_map)) - } - - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| { - if let Ok(InputKey::ChildIndex::(trie_key)) = - Decode::decode(&mut key) - { - if let Ok(value) = >::decode(&mut value) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); - } - } - }); - - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { block: block.clone(), storage_key }; - - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |mut key| { - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |mut key| { - if let Ok(InputKey::DigestIndex::(trie_key)) = - Decode::decode(&mut key) - { - insert_to_map(&mut map, trie_key.key); - } - }); - } - Ok((map, child_map)) - }, - ) - .map(|(pairs, child_pairs)| { - ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs - .into_iter() - .map(|(sk, pairs)| { - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v))) - }) - .collect(), - digest_input_blocks, - ) - }) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - changes_trie::{ - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - storage::InMemoryStorage, - Configuration, RootsStorage, - }, - InMemoryBackend, - }; - use sp_core::Blake2Hasher; - - fn prepare_for_build( - zero: u64, - ) -> ( - InMemoryBackend, - InMemoryStorage, - OverlayedChanges, - Configuration, - ) { - let child_info_1 = ChildInfo::new_default(b"storage_key1"); - let child_info_2 = ChildInfo::new_default(b"storage_key2"); - let backend: InMemoryBackend<_> = vec![ - 
(vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - ] - .into_iter() - .collect::>() - .into(); - let prefixed_child_trie_key1 = child_info_1.prefixed_storage_key(); - let storage = InMemoryStorage::with_inputs( - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 3, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![100] }, - vec![0], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 3, key: vec![105] }, - vec![1], - ), - ], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3], - ), - ], - ), - (zero + 5, Vec::new()), - ( - zero + 6, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 6, key: vec![105] }, - vec![2], - )], - ), - (zero + 7, Vec::new()), - ( - zero + 8, - vec![InputPair::DigestIndex( - DigestIndex { block: zero + 8, key: vec![105] }, - vec![zero + 6], - )], - ), - (zero + 9, Vec::new()), - (zero + 10, Vec::new()), - (zero + 11, Vec::new()), - (zero + 12, Vec::new()), - (zero + 13, Vec::new()), - (zero + 14, Vec::new()), - (zero + 15, Vec::new()), - ], - vec![( - prefixed_child_trie_key1.clone(), - vec![ - ( - zero + 1, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![100] }, - vec![1, 3], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![101] }, - vec![0, 2], - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 1, key: vec![105] }, - vec![0, 2, 4], - ), - ], - ), - ( - zero + 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0], - )], - ), - ( - zero + 4, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 2, key: vec![102] }, - vec![0, 3], - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2], - ), - ], - ), - ], - )], - ); - - let mut changes = OverlayedChanges::default(); - changes.set_collect_extrinsics(true); - - changes.start_transaction(); - - changes.set_extrinsic_index(1); - changes.set_storage(vec![101], Some(vec![203])); - - changes.set_extrinsic_index(3); - changes.set_storage(vec![100], Some(vec![202])); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![202])); - - changes.commit_transaction().unwrap(); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![100], Some(vec![0])); 
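// [editor's note] The overlay writes around this point touch the same keys
// from several extrinsic indices. A minimal runnable model of how such writes
// are folded into one ExtrinsicIndex value per key -- a sorted, de-duplicated
// list of extrinsic indices -- matching what the assertions further down
// expect. Names are illustrative; this is not the deleted API.
use std::collections::BTreeMap;

fn fold_extrinsics(writes: &[(Vec<u8>, u32)]) -> BTreeMap<Vec<u8>, Vec<u32>> {
    let mut map: BTreeMap<Vec<u8>, Vec<u32>> = BTreeMap::new();
    for (key, idx) in writes {
        let indices = map.entry(key.clone()).or_default();
        if !indices.contains(idx) {
            indices.push(*idx);
        }
    }
    // index values must be sorted (see the sort() call in
    // prepare_extrinsics_input above)
    for indices in map.values_mut() {
        indices.sort_unstable();
    }
    map
}

fn main() {
    // key 100 is written at extrinsic indices 3 (inside the committed
    // transaction), 0, and -- just below -- 2, so its ExtrinsicIndex value
    // for the block is [0, 2, 3]
    let folded = fold_extrinsics(&[(vec![100], 3), (vec![100], 0), (vec![100], 2)]);
    assert_eq!(folded[&vec![100]], vec![0, 2, 3]);
}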
- changes.set_extrinsic_index(2); - changes.set_storage(vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_storage(vec![103], Some(vec![0])); - changes.set_extrinsic_index(1); - changes.set_storage(vec![103], None); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_child_storage(&child_info_1, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(0); - changes.set_child_storage(&child_info_2, vec![100], Some(vec![0])); - changes.set_extrinsic_index(2); - changes.set_child_storage(&child_info_2, vec![100], Some(vec![200])); - - changes.set_extrinsic_index(1); - - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - (backend, storage, changes, config) - } - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn build_changes_trie_nodes_on_non_digest_block() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![103] }, - vec![0, 1] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 5u64, storage_key: child_trie_key1 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, - vec![0, 2, 3] - ),] - ), - ( - ChildIndex { block: zero + 5, storage_key: child_trie_key2 }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 5, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l1() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( 
- DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l2() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 16u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 16, key: vec![102] }, - vec![zero + 4] - ), - ] - ), - ( - ChildIndex { block: zero + 16, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 16, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_skewed_digest_block() { - fn test_with_zero(zero: u64) { - let (backend, storage, 
changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; - - let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range.clone(), &changes, &parent) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - ] - ); - - configuration_range.end = Some(zero + 11); - let changes_trie_nodes = - prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 11, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![100] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![101] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![102] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![103] }, - vec![zero + 4] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 11, key: vec![105] }, - vec![zero + 4, zero + 8] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_ignores_temporary_storage_values() { - fn test_with_zero(zero: u64) { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, storage, mut changes, config) = prepare_for_build(zero); - - // 110: missing from backend, set to None in overlay - changes.set_storage(vec![110], None); - - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ) - .unwrap(); - assert_eq!( - changes_trie_nodes.0.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![101] }, - vec![1] - ), - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![103] }, - vec![0, 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1, zero + 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1, zero + 3] - ), - ] - ); - assert_eq!( - changes_trie_nodes - .1 - .into_iter() - .map(|(k, v)| (k, v.collect::>())) - .collect::>(), - vec![ - ( - ChildIndex { block: zero + 4u64, storage_key: child_trie_key1.clone() }, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4u64, 
key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![100] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![101] }, - vec![zero + 1] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![102] }, - vec![zero + 2] - ), - InputPair::DigestIndex( - DigestIndex { block: zero + 4, key: vec![105] }, - vec![zero + 1] - ), - ] - ), - ( - ChildIndex { block: zero + 4, storage_key: child_trie_key2.clone() }, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: zero + 4, key: vec![100] }, - vec![0, 2] - ),] - ), - ] - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn cache_is_used_when_changes_trie_is_built() { - let child_trie_key1 = ChildInfo::new_default(b"storage_key1").prefixed_storage_key(); - let child_trie_key2 = ChildInfo::new_default(b"storage_key2").prefixed_storage_key(); - let (backend, mut storage, changes, config) = prepare_for_build(0); - let parent = AnchorBlockId { hash: Default::default(), number: 15 }; - - // override some actual values from storage with values from the cache - // - // top-level storage: - // (keys 100, 101, 103, 105 are now missing from block#4 => they do not appear - // in l2 digest at block 16) - // - // "1" child storage: - // key 102 is now missing from block#4 => it doesn't appear in l2 digest at block 16 - // (keys 103, 104) are now added to block#4 => they appear in l2 digest at block 16 - // - // "2" child storage: - // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 - let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); - let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) - .set_digest_input_blocks(vec![1, 2, 3]) - .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(child_trie_key1.clone()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(child_trie_key2.clone()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, &trie_root4); - storage.cache_mut().perform(cached_data4); - - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = - prepare_input(&backend, &storage, configuration_range(&config, 0), &changes, &parent) - .unwrap(); - assert_eq!( - root_changes_trie_nodes.collect::>>(), - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ] - ); - - let child_changes_tries_nodes = child_changes_tries_nodes - .into_iter() - .map(|(k, i)| (k, i.collect::>())) - .collect::>(); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key1.clone() }) - .unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2, 3] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), - ], - ); - assert_eq!( - child_changes_tries_nodes - .get(&ChildIndex { block: 16u64, storage_key: child_trie_key2.clone() }) - 
.unwrap(), - &vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16u64, key: vec![100] }, - vec![0, 2] - ), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), - ], - ); - } -} diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs deleted file mode 100644 index 04820242d9d08..0000000000000 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ /dev/null @@ -1,278 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes tries build cache. - -use std::collections::{HashMap, HashSet}; - -use crate::StorageKey; -use sp_core::storage::PrefixedStorageKey; - -/// Changes trie build cache. -/// -/// Helps to avoid read of changes tries from the database when digest trie -/// is built. It holds changed keys for every block (indexed by changes trie -/// root) that could be referenced by future digest items. For digest entries -/// it also holds keys covered by this digest. Entries for top level digests -/// are never created, because they'll never be used to build other digests. -/// -/// Entries are pruned from the cache once digest block that is using this entry -/// is inserted (because digest block will includes all keys from this entry). -/// When there's a fork, entries are pruned when first changes trie is inserted. -pub struct BuildCache { - /// Map of block (implies changes trie) number => changes trie root. - roots_by_number: HashMap, - /// Map of changes trie root => set of storage keys that are in this trie. - /// The `Option>` in inner `HashMap` stands for the child storage key. - /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. - /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by - /// the key. - changed_keys: HashMap, HashSet>>, -} - -/// The action to perform when block-with-changes-trie is imported. -#[derive(Debug, PartialEq)] -pub enum CacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(CachedBuildData), - /// Clear cache from all existing entries. - Clear, -} - -/// The data that has been cached during changes trie building. -#[derive(Debug, PartialEq)] -pub struct CachedBuildData { - block: N, - trie_root: H, - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, -} - -/// The action to perform when block-with-changes-trie is imported. -#[derive(Debug, PartialEq)] -pub(crate) enum IncompleteCacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(IncompleteCachedBuildData), - /// Clear cache from all existing entries. - Clear, -} - -/// The data (without changes trie root) that has been cached during changes trie building. 
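// [editor's note] A runnable toy of the pruning rule documented above:
// committing a digest entry evicts the per-block entries it covers, since the
// digest now subsumes their keys. It mirrors the
// `obsolete_entries_are_purged_when_new_ct_is_built` test further down; the
// names and the u64 "roots" are illustrative, not the deleted types.
use std::collections::HashMap;

struct ToyCache {
    roots_by_number: HashMap<u64, u64>,       // block number => changes trie root
    changed_keys: HashMap<u64, Vec<Vec<u8>>>, // trie root => keys changed in that trie
}

impl ToyCache {
    fn commit(&mut self, block: u64, root: u64, keys: Vec<Vec<u8>>, digest_input_blocks: &[u64]) {
        self.roots_by_number.insert(block, root);
        self.changed_keys.insert(root, keys);
        // prune entries that this (digest) entry supersedes
        for covered in digest_input_blocks {
            if let Some(covered_root) = self.roots_by_number.remove(covered) {
                self.changed_keys.remove(&covered_root);
            }
        }
    }
}

fn main() {
    let mut cache = ToyCache { roots_by_number: HashMap::new(), changed_keys: HashMap::new() };
    cache.commit(1, 0x1, vec![vec![1]], &[]);
    cache.commit(2, 0x2, vec![vec![2]], &[]);
    cache.commit(3, 0x3, vec![vec![3]], &[]);
    // block 4 is an L1 digest covering blocks 1..=3: their entries are evicted
    cache.commit(4, 0x4, vec![vec![1], vec![2], vec![3]], &[1, 2, 3]);
    assert_eq!(cache.changed_keys.len(), 1);
}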
-#[derive(Debug, PartialEq)] -pub(crate) struct IncompleteCachedBuildData { - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, -} - -impl BuildCache -where - N: Eq + ::std::hash::Hash, - H: Eq + ::std::hash::Hash + Clone, -{ - /// Create new changes trie build cache. - pub fn new() -> Self { - BuildCache { roots_by_number: HashMap::new(), changed_keys: HashMap::new() } - } - - /// Get cached changed keys for changes trie with given root. - pub fn get( - &self, - root: &H, - ) -> Option<&HashMap, HashSet>> { - self.changed_keys.get(&root) - } - - /// Execute given functor with cached entry for given block. - /// Returns true if the functor has been called and false otherwise. - pub fn with_changed_keys( - &self, - root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - match self.changed_keys.get(&root) { - Some(changed_keys) => { - functor(changed_keys); - true - }, - None => false, - } - } - - /// Insert data into cache. - pub fn perform(&mut self, action: CacheAction) { - match action { - CacheAction::CacheBuildData(data) => { - self.roots_by_number.insert(data.block, data.trie_root.clone()); - self.changed_keys.insert(data.trie_root, data.changed_keys); - - for digest_input_block in data.digest_input_blocks { - let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); - if let Some(digest_input_block_hash) = digest_input_block_hash { - self.changed_keys.remove(&digest_input_block_hash); - } - } - }, - CacheAction::Clear => { - self.roots_by_number.clear(); - self.changed_keys.clear(); - }, - } - } -} - -impl IncompleteCacheAction { - /// Returns true if we need to collect changed keys for this action. - pub fn collects_changed_keys(&self) -> bool { - match *self { - IncompleteCacheAction::CacheBuildData(_) => true, - IncompleteCacheAction::Clear => false, - } - } - - /// Complete cache action with computed changes trie root. - pub(crate) fn complete(self, block: N, trie_root: &H) -> CacheAction { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - CacheAction::CacheBuildData(build_data.complete(block, trie_root.clone())), - IncompleteCacheAction::Clear => CacheAction::Clear, - } - } - - /// Set numbers of blocks that are superseded by this new entry. - /// - /// If/when this build data is committed to the cache, entries for these blocks - /// will be removed from the cache. - pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData( - build_data.set_digest_input_blocks(digest_input_blocks), - ), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } - - /// Insert changed keys of given storage into cached data. - pub(crate) fn insert( - self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.insert(storage_key, changed_keys)), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } -} - -impl IncompleteCachedBuildData { - /// Create new cached data. 
- pub(crate) fn new() -> Self { - IncompleteCachedBuildData { digest_input_blocks: Vec::new(), changed_keys: HashMap::new() } - } - - fn complete(self, block: N, trie_root: H) -> CachedBuildData { - CachedBuildData { - block, - trie_root, - digest_input_blocks: self.digest_input_blocks, - changed_keys: self.changed_keys, - } - } - - fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { - self.digest_input_blocks = digest_input_blocks; - self - } - - fn insert( - mut self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - self.changed_keys.insert(storage_key, changed_keys); - self - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn updated_keys_are_stored_when_non_top_level_digest_is_built() { - let mut data = IncompleteCachedBuildData::::new(); - data = data.insert(None, vec![vec![1]].into_iter().collect()); - assert_eq!(data.changed_keys.len(), 1); - - let mut cache = BuildCache::new(); - cache.perform(CacheAction::CacheBuildData(data.complete(1, 1))); - assert_eq!(cache.changed_keys.len(), 1); - assert_eq!( - cache.get(&1).unwrap().clone(), - vec![(None, vec![vec![1]].into_iter().collect())].into_iter().collect(), - ); - } - - #[test] - fn obsolete_entries_are_purged_when_new_ct_is_built() { - let mut cache = BuildCache::::new(); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![1]].into_iter().collect()) - .complete(1, 1), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![2]].into_iter().collect()) - .complete(2, 2), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![3]].into_iter().collect()) - .complete(3, 3), - )); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .set_digest_input_blocks(vec![1, 2, 3]) - .complete(4, 4), - )); - - assert_eq!(cache.changed_keys.len(), 1); - - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![8]].into_iter().collect()) - .complete(8, 8), - )); - cache.perform(CacheAction::CacheBuildData( - IncompleteCachedBuildData::new() - .insert(None, vec![vec![12]].into_iter().collect()) - .complete(12, 12), - )); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::Clear); - - assert_eq!(cache.changed_keys.len(), 0); - } -} diff --git a/primitives/state-machine/src/changes_trie/build_iterator.rs b/primitives/state-machine/src/changes_trie/build_iterator.rs deleted file mode 100644 index 62bb00a2f8829..0000000000000 --- a/primitives/state-machine/src/changes_trie/build_iterator.rs +++ /dev/null @@ -1,487 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Structures and functions to return blocks whose changes are to be included -//! 
in given block's changes trie.
-
-use crate::changes_trie::{BlockNumber, ConfigurationRange};
-use num_traits::Zero;
-
-/// Returns iterator of OTHER blocks that are required for inclusion into
-/// changes trie of given block. Blocks are guaranteed to be returned in
-/// ascending order.
-///
-/// Skewed digest is built IF block >= config.end.
-pub fn digest_build_iterator<'a, Number: BlockNumber>(
-	config: ConfigurationRange<'a, Number>,
-	block: Number,
-) -> DigestBuildIterator<Number> {
-	// prepare digest build parameters
-	let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone())
-	{
-		Some((current_level, digest_interval, digest_step)) =>
-			(current_level, digest_interval, digest_step),
-		None => return DigestBuildIterator::empty(),
-	};
-
-	DigestBuildIterator::new(
-		block.clone(),
-		config.end.unwrap_or(block),
-		config.config.digest_interval,
-		digest_step,
-	)
-}
-
-/// Changes trie build iterator that returns numbers of OTHER blocks that are
-/// required for inclusion into changes trie of given block.
-#[derive(Debug)]
-pub struct DigestBuildIterator<Number: BlockNumber> {
-	/// Block we're building changes trie for. It could (logically) be a post-end block if we are
-	/// creating skewed digest.
-	block: Number,
-	/// Block that is a last block where current configuration is active. We have never yet created
-	/// anything after this block => digest that we're creating can't reference any blocks that are
-	/// >= end.
-	end: Number,
-	/// Interval of L1 digest blocks.
-	digest_interval: u32,
-	/// Max step that could be used when digest is created.
-	max_step: u32,
-
-	// Mutable data below:
-	/// Step of current blocks range.
-	current_step: u32,
-	/// Reverse step of current blocks range.
-	current_step_reverse: u32,
-	/// Current blocks range.
-	current_range: Option<BlocksRange<Number>>,
-	/// Last block that we have returned.
-	last_block: Option<Number>,
-}
-
-impl<Number: BlockNumber> DigestBuildIterator<Number> {
-	/// Create new digest build iterator.
-	pub fn new(block: Number, end: Number, digest_interval: u32, max_step: u32) -> Self {
-		DigestBuildIterator {
-			block,
-			end,
-			digest_interval,
-			max_step,
-			current_step: max_step,
-			current_step_reverse: 0,
-			current_range: None,
-			last_block: None,
-		}
-	}
-
-	/// Create empty digest build iterator.
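// [editor's note] A standalone sketch of the block set a (non-skewed) digest
// references, assuming the behaviour pinned down by the tests below: a digest
// built with `max_step` references, for each step (max_step, max_step /
// interval, .., 1), the previous (interval - 1) items at that step, in
// ascending order. Inputs must be digest-aligned, as the real iterator
// guarantees via digest_level_at_block(); the skewed case (`config.end`) is
// deliberately left out here.
fn digest_input_blocks(block: u64, digest_interval: u64, max_step: u64) -> Vec<u64> {
    let mut blocks = Vec::new();
    let mut step = max_step;
    loop {
        // the (interval - 1) items at this step that precede `block`
        let mut b = block - (digest_interval - 1) * step;
        while b < block {
            blocks.push(b);
            b += step;
        }
        if step == 1 {
            break
        }
        step /= digest_interval;
    }
    blocks
}

fn main() {
    // an L1 digest at block 16 (interval 16) references blocks 1..=15
    assert_eq!(digest_input_blocks(16, 16, 1), (1..16).collect::<Vec<_>>());
    // an L2 digest at block 256 references the 15 previous L1 digests
    // (16, 32, .., 240) and then the plain blocks 241..=255
    let l2 = digest_input_blocks(256, 16, 16);
    assert_eq!(l2.len(), 30);
    assert_eq!((l2[0], l2[14], l2[15], l2[29]), (16, 240, 241, 255));
}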
- pub fn empty() -> Self { - Self::new(Zero::zero(), Zero::zero(), 0, 0) - } -} - -impl Iterator for DigestBuildIterator { - type Item = Number; - - fn next(&mut self) -> Option { - // when we're building skewed digest, we might want to skip some blocks if - // they're not covered by current configuration - loop { - if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { - if next < self.end { - self.last_block = Some(next.clone()); - return Some(next) - } - } - - // we are safe to use non-checking mul/sub versions here because: - // DigestBuildIterator is created only by internal function that is checking - // that all multiplications/subtractions are safe within max_step limit - - let next_step_reverse = if self.current_step_reverse == 0 { - 1 - } else { - self.current_step_reverse * self.digest_interval - }; - if next_step_reverse > self.max_step { - return None - } - - self.current_step_reverse = next_step_reverse; - self.current_range = Some(BlocksRange::new( - match self.last_block.clone() { - Some(last_block) => last_block + self.current_step.into(), - None => - self.block.clone() - - (self.current_step * self.digest_interval - self.current_step).into(), - }, - self.block.clone(), - self.current_step.into(), - )); - - self.current_step = self.current_step / self.digest_interval; - if self.current_step == 0 { - self.current_step = 1; - } - } - } -} - -/// Blocks range iterator with builtin step_by support. -#[derive(Debug)] -struct BlocksRange { - current: Number, - end: Number, - step: Number, -} - -impl BlocksRange { - pub fn new(begin: Number, end: Number, step: Number) -> Self { - BlocksRange { current: begin, end, step } - } -} - -impl Iterator for BlocksRange { - type Item = Number; - - fn next(&mut self) -> Option { - if self.current >= self.end { - return None - } - - let current = Some(self.current.clone()); - self.current += self.step.clone(); - current - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::Configuration; - - fn digest_build_iterator( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> DigestBuildIterator { - super::digest_build_iterator( - ConfigurationRange { - config: &Configuration { digest_interval, digest_levels }, - zero, - end, - }, - block, - ) - } - - fn digest_build_iterator_basic( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - ) -> (u64, u32, u32) { - let iter = digest_build_iterator(digest_interval, digest_levels, zero, block, None); - (iter.block, iter.digest_interval, iter.max_step) - } - - fn digest_build_iterator_blocks( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> Vec { - digest_build_iterator(digest_interval, digest_levels, zero, block, end).collect() - } - - #[test] - fn suggest_digest_inclusion_returns_empty_iterator() { - fn test_with_zero(zero: u64) { - let empty = (0, 0, 0); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); - assert_eq!( - digest_build_iterator_basic(0, 16, zero, zero + 64), - empty, - "digest_interval is 0" - ); - assert_eq!( - digest_build_iterator_basic(1, 16, zero, zero + 64), - empty, - "digest_interval is 1" - ); - assert_eq!( - digest_build_iterator_basic(4, 0, zero, zero + 64), - empty, - "digest_levels is 0" - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 1), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 2), - 
empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 15), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 17), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(::std::u32::MAX / 2 + 1, 16, zero, ::std::u64::MAX,), - empty, - "digest_interval * 2 is greater than u64::MAX" - ); - } - - test_with_zero(0); - test_with_zero(1); - test_with_zero(2); - test_with_zero(4); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level1_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 16), - (zero + 16, 16, 1), - "!(block % interval) && first digest level == block", - ); - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 256), - (zero + 256, 16, 1), - "!(block % interval^2), but there's only 1 digest level", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 32), - (zero + 32, 16, 1), - "second level digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4080), - (zero + 4080, 16, 1), - "second && third level digest are not required for this block", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level2_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 256), - (zero + 256, 16, 16), - "second level digest", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 4096), - (zero + 4096, 16, 16), - "!(block % interval^3), but there's only 2 digest levels", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level3_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4096), - (zero + 4096, 16, 256), - "third level digest: beginning", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 8192), - (zero + 8192, 16, 256), - "third level digest: next", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 1, zero, zero + 16, None), - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 1, zero, zero + 256, None), - [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 32, None), - [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - .iter() - .map(|item| zero + item) - .collect::>() - ); - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [ - 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, - 4078, 4079 - ] - .iter() - .map(|item| zero + item) - .collect::>() - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 256, None), - [ - // level2 points to previous 16-1 level1 digests: - 16, 32, 48, 
64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, - // level2 is a level1 digest of 16-1 previous blocks: - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - assert_eq!( - digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), - [ - // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, - 4064, 4080, // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, - 4094, 4095, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), - [ - // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, - 3840, // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, - 4064, 4080, // level3 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, - 4094, 4095, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: - 1296, 1312, 1328, - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only - // 9: - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY - // L1-digests: level3 MUST be a level1 digest of 16-1 previous blocks, BUT - // there are only 3: - 1281, 1282, 1283, - ] - .iter() - .map(|item| zero + item) - .collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } -} diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs deleted file mode 100644 index 9343a226a3aa8..0000000000000 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ /dev/null @@ -1,748 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Functions + iterator that traverses changes tries and returns all -//! (block, extrinsic) pairs where given key has been changed. - -use crate::{ - changes_trie::{ - input::{ChildIndex, DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue}, - storage::{InMemoryStorage, TrieBackendAdapter}, - surface_iterator::{surface_iterator, SurfaceIterator}, - AnchorBlockId, BlockNumber, ConfigurationRange, RootsStorage, Storage, - }, - proving_backend::ProvingBackendRecorder, - trie_backend_essence::TrieBackendEssence, -}; -use codec::{Codec, Decode, Encode}; -use hash_db::Hasher; -use num_traits::Zero; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::Recorder; -use std::{cell::RefCell, collections::VecDeque}; - -/// Return changes of given key at given blocks range. -/// `max` is the number of best known block. -/// Changes are returned in descending order (i.e. last block comes first). -pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - max: Number, - storage_key: Option<&'a PrefixedStorageKey>, - key: &'a [u8], -) -> Result, String> { - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - Ok(DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - }) -} - -/// Returns proof of changes of given key at given blocks range. -/// `max` is the number of best known block. -pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &dyn Storage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result>, String> -where - H::Out: Codec, -{ - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - let mut iter = ProvingDrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - proof_recorder: Default::default(), - }; - - // iterate to collect proof - while let Some(item) = iter.next() { - item?; - } - - Ok(iter.extract_proof()) -} - -/// Check key changes proof and return changes of the key at given blocks range. -/// `max` is the number of best known block. -/// Changes are returned in descending order (i.e. last block comes first). 
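// [editor's note] The intended prover/verifier split, reconstructed from the
// `proving_drilldown_iterator_works` test near the end of this file; a call
// sketch against the API deleted here (variable names like `end_anchor` are
// placeholders), not new surface:
//
// full node -- record every trie node touched while drilling down for `key`:
//
//     let proof = key_changes_proof::<BlakeTwo256, u64>(
//         config_range, &storage, begin, &end_anchor, max, None, &key)?;
//
// light node -- knows only the changes trie roots; re-runs the same drilldown
// over the proof-backed storage and obtains the identical (block, extrinsic)
// pairs, in descending block order:
//
//     let changes = key_changes_proof_check::<BlakeTwo256, u64>(
//         config_range, &roots_storage, proof, begin, &end_anchor, max, None, &key)?;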
-pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof: Vec>, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result, String> -where - H::Out: Encode, -{ - key_changes_proof_check_with_db( - config, - roots_storage, - &InMemoryStorage::with_proof(proof), - begin, - end, - max, - storage_key, - key, - ) -} - -/// Similar to the `key_changes_proof_check` function, but works with prepared proof storage. -pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof_db: &InMemoryStorage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&PrefixedStorageKey>, - key: &[u8], -) -> Result, String> -where - H::Out: Encode, -{ - // we can't query any roots before root - let max = std::cmp::min(max, end.number.clone()); - - DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage, - storage: proof_db, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator(config, max, begin, end.number.clone())?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - } - .collect() -} - -/// Drilldown iterator - receives 'digest points' from surface iterator and explores -/// every point until extrinsic is found. -pub struct DrilldownIteratorEssence<'a, H, Number> -where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, -{ - storage_key: Option<&'a PrefixedStorageKey>, - key: &'a [u8], - roots_storage: &'a dyn RootsStorage, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - config: ConfigurationRange<'a, Number>, - surface: SurfaceIterator<'a, Number>, - - extrinsics: VecDeque<(Number, u32)>, - blocks: VecDeque<(Number, Option)>, - - _hasher: ::std::marker::PhantomData, -} - -impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> -where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, -{ - pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - match self.do_next(trie_reader) { - Ok(Some(res)) => Some(Ok(res)), - Ok(None) => None, - Err(err) => Some(Err(err)), - } - } - - fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - loop { - if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))) - } - - if let Some((block, level)) = self.blocks.pop_front() { - // not having a changes trie root is an error because: - // we never query roots for future blocks - // AND trie roots for old blocks are known (both on full + light node) - let trie_root = - self.roots_storage.root(&self.end, block.clone())?.ok_or_else(|| { - format!("Changes trie root for block {} is not found", block.clone()) - })?; - let trie_root = if let Some(storage_key) = self.storage_key { - let child_key = - ChildIndex { block: block.clone(), storage_key: storage_key.clone() } - .encode(); - if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? 
- .and_then(|v| >::decode(&mut &v[..]).ok()) - .map(|v| { - let mut hash = H::Out::default(); - hash.as_mut().copy_from_slice(&v[..]); - hash - }) { - trie_root - } else { - continue - } - } else { - trie_root - }; - - // only return extrinsics for blocks before self.max - // most of blocks will be filtered out before pushing to `self.blocks` - // here we just throwing away changes at digest blocks we're processing - debug_assert!( - block >= self.begin, - "We shall not touch digests earlier than a range' begin" - ); - if block <= self.end.number { - let extrinsics_key = - ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); - if let Some(extrinsics) = extrinsics? { - if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { - self.extrinsics - .extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); - } - } - } - - let blocks_key = - DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let blocks = trie_reader(self.storage, trie_root, &blocks_key); - if let Some(blocks) = blocks? { - if let Ok(blocks) = >::decode(&mut &blocks[..]) { - // filter level0 blocks here because we tend to use digest blocks, - // AND digest block changes could also include changes for out-of-range - // blocks - let begin = self.begin.clone(); - let end = self.end.number.clone(); - let config = self.config.clone(); - self.blocks.extend( - blocks - .into_iter() - .rev() - .filter(|b| { - level.map(|level| level > 1).unwrap_or(true) || - (*b >= begin && *b <= end) - }) - .map(|b| { - let prev_level = - level.map(|level| Some(level - 1)).unwrap_or_else(|| { - Some( - config - .config - .digest_level_at_block( - config.zero.clone(), - b.clone(), - ) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()), - ) - }); - (b, prev_level) - }), - ); - } - } - - continue - } - - match self.surface.next() { - Some(Ok(block)) => self.blocks.push_back(block), - Some(Err(err)) => return Err(err), - None => return Ok(None), - } - } - } -} - -/// Exploring drilldown operator. -pub struct DrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - essence: DrilldownIteratorEssence<'a, H, Number>, -} - -impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> -where - H::Out: Encode, -{ - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| { - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) - }) - } -} - -/// Proving drilldown iterator. -struct ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - essence: DrilldownIteratorEssence<'a, H, Number>, - proof_recorder: RefCell>, -} - -impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, -{ - /// Consume the iterator, extracting the gathered proof in lexicographical order - /// by value. 
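// [editor's note] A runnable toy of the drilldown loop above: digest entries
// fan out into lower blocks, extrinsic entries terminate the search, and the
// queue discipline yields (block, extrinsic) pairs in descending order. The
// data and the expected result mirror `prepare_for_drilldown` /
// `drilldown_iterator_works` below; the types are illustrative, not the
// deleted ones.
use std::collections::{HashMap, VecDeque};

enum Node {
    Digest(Vec<u64>),     // DigestIndex value: lower blocks to descend into
    Extrinsics(Vec<u32>), // ExtrinsicIndex value: extrinsics that changed the key
}

fn drilldown(tries: &HashMap<u64, Vec<Node>>, start: u64) -> Vec<(u64, u32)> {
    let mut out = Vec::new();
    let mut blocks = VecDeque::from([start]);
    while let Some(block) = blocks.pop_front() {
        for node in tries.get(&block).into_iter().flatten() {
            match node {
                Node::Digest(covered) => blocks.extend(covered.iter().rev()),
                Node::Extrinsics(xs) => out.extend(xs.iter().rev().map(|x| (block, *x))),
            }
        }
    }
    out
}

fn main() {
    // digest 1..4 => block 3 changed the key; digest 5..8 => blocks 6 and 8;
    // the top-level digest at block 16 points at digest blocks 4 and 8
    let mut tries = HashMap::new();
    tries.insert(16, vec![Node::Digest(vec![4, 8])]);
    tries.insert(4, vec![Node::Digest(vec![3])]);
    tries.insert(3, vec![Node::Extrinsics(vec![0])]);
    tries.insert(8, vec![Node::Extrinsics(vec![1, 2]), Node::Digest(vec![6])]);
    tries.insert(6, vec![Node::Extrinsics(vec![3])]);
    assert_eq!(drilldown(&tries, 16), vec![(8, 2), (8, 1), (6, 3), (3, 0)]);
}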
- pub fn extract_proof(self) -> Vec> { - self.proof_recorder - .into_inner() - .drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect() - } -} - -impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, - H::Out: 'a + Codec, -{ - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - let proof_recorder = &mut *self - .proof_recorder - .try_borrow_mut() - .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| { - ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder, - } - .storage(key) - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::{input::InputPair, storage::InMemoryStorage, Configuration}; - use sp_runtime::traits::BlakeTwo256; - use std::iter::FromIterator; - - fn child_key() -> PrefixedStorageKey { - let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); - child_info.prefixed_storage_key() - } - - fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs( - vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![]), - (2, vec![]), - ( - 3, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 3, key: vec![42] }, - vec![0], - )], - ), - (4, vec![InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3])]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - ( - 6, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 6, key: vec![42] }, - vec![3], - )], - ), - (7, vec![]), - ( - 8, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 8, key: vec![42] }, - vec![1, 2], - ), - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), - ], - ), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - ( - 16, - vec![InputPair::DigestIndex( - DigestIndex { block: 16, key: vec![42] }, - vec![4, 8], - )], - ), - ], - vec![( - child_key(), - vec![ - ( - 1, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 1, key: vec![42] }, - vec![0], - )], - ), - ( - 2, - vec![InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 2, key: vec![42] }, - vec![3], - )], - ), - ( - 16, - vec![ - InputPair::ExtrinsicIndex( - ExtrinsicIndex { block: 16, key: vec![42] }, - vec![5], - ), - InputPair::DigestIndex( - DigestIndex { block: 16, key: vec![42] }, - vec![2], - ), - ], - ), - ], - )], - ); - - (config, backend) - } - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn drilldown_iterator_works() { - let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 2 }, - 4, - None, - &[42], - ) - .and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![])); - - let drilldown_result = key_changes::( - 
-			configuration_range(&config, 0),
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 3 },
-			4,
-			None,
-			&[42],
-		)
-		.and_then(Result::from_iter);
-		assert_eq!(drilldown_result, Ok(vec![(3, 0)]));
-
-		let drilldown_result = key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 7 },
-			7,
-			None,
-			&[42],
-		)
-		.and_then(Result::from_iter);
-		assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)]));
-
-		let drilldown_result = key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			7,
-			&AnchorBlockId { hash: Default::default(), number: 8 },
-			8,
-			None,
-			&[42],
-		)
-		.and_then(Result::from_iter);
-		assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)]));
-
-		let drilldown_result = key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			5,
-			&AnchorBlockId { hash: Default::default(), number: 7 },
-			8,
-			None,
-			&[42],
-		)
-		.and_then(Result::from_iter);
-		assert_eq!(drilldown_result, Ok(vec![(6, 3)]));
-	}
-
-	#[test]
-	fn drilldown_iterator_fails_when_storage_fails() {
-		let (config, storage) = prepare_for_drilldown();
-		storage.clear_storage();
-
-		assert!(key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 100 },
-			1000,
-			None,
-			&[42],
-		)
-		.and_then(|i| i.collect::<Result<Vec<_>, _>>())
-		.is_err());
-
-		assert!(key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 100 },
-			1000,
-			Some(&child_key()),
-			&[42],
-		)
-		.and_then(|i| i.collect::<Result<Vec<_>, _>>())
-		.is_err());
-	}
-
-	#[test]
-	fn drilldown_iterator_fails_when_range_is_invalid() {
-		let (config, storage) = prepare_for_drilldown();
-		assert!(key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 100 },
-			50,
-			None,
-			&[42],
-		)
-		.is_err());
-		assert!(key_changes::<BlakeTwo256, u64>(
-			configuration_range(&config, 0),
-			&storage,
-			20,
-			&AnchorBlockId { hash: Default::default(), number: 10 },
-			100,
-			None,
-			&[42],
-		)
-		.is_err());
-	}
-
-	#[test]
-	fn proving_drilldown_iterator_works() {
-		// happens on remote full node:
-
-		// create drilldown iterator that records all trie nodes during drilldown
-		let (remote_config, remote_storage) = prepare_for_drilldown();
-		let remote_proof = key_changes_proof::<BlakeTwo256, u64>(
-			configuration_range(&remote_config, 0),
-			&remote_storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 16 },
-			16,
-			None,
-			&[42],
-		)
-		.unwrap();
-
-		let (remote_config, remote_storage) = prepare_for_drilldown();
-		let remote_proof_child = key_changes_proof::<BlakeTwo256, u64>(
-			configuration_range(&remote_config, 0),
-			&remote_storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 16 },
-			16,
-			Some(&child_key()),
-			&[42],
-		)
-		.unwrap();
-
-		// happens on local light node:
-
-		// create drilldown iterator that works the same, but only depends on trie
-		let (local_config, local_storage) = prepare_for_drilldown();
-		local_storage.clear_storage();
-		let local_result = key_changes_proof_check::<BlakeTwo256, u64>(
-			configuration_range(&local_config, 0),
-			&local_storage,
-			remote_proof,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 16 },
-			16,
-			None,
-			&[42],
-		);
-
-		let (local_config, local_storage) = prepare_for_drilldown();
-		local_storage.clear_storage();
-		let local_result_child = key_changes_proof_check::<BlakeTwo256, u64>(
-			configuration_range(&local_config, 0),
-			&local_storage,
-			remote_proof_child,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 16 },
-			16,
-			Some(&child_key()),
-			&[42],
-		);
-
-		// check that drilldown result is the same as if it was happening at the full node
-		assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)]));
-		assert_eq!(local_result_child, Ok(vec![(16, 5), (2, 3)]));
-	}
-
-	#[test]
-	fn drilldown_iterator_works_with_skewed_digest() {
-		let config = Configuration { digest_interval: 4, digest_levels: 3 };
-		let mut config_range = configuration_range(&config, 0);
-		config_range.end = Some(91);
-
-		// when 4^3 deactivates at block 91:
-		// last L3 digest has been created at block#64
-		// skewed digest covers:
-		// L2 digests at blocks: 80
-		// L1 digests at blocks: 84, 88
-		// regular blocks: 89, 90, 91
-		let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::<Vec<_>>();
-		// changed at block#63 and covered by L3 digest at block#64
-		input[63 - 1]
-			.1
-			.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0]));
-		input[64 - 1]
-			.1
-			.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63]));
-		// changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91
-		input[79 - 1]
-			.1
-			.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1]));
-		input[80 - 1]
-			.1
-			.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79]));
-		input[91 - 1]
-			.1
-			.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80]));
-		let storage = InMemoryStorage::with_inputs(input, vec![]);
-
-		let drilldown_result = key_changes::<BlakeTwo256, u64>(
-			config_range,
-			&storage,
-			1,
-			&AnchorBlockId { hash: Default::default(), number: 91 },
-			100_000u64,
-			None,
-			&[42],
-		)
-		.and_then(Result::from_iter);
-		assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)]));
-	}
-}
diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs
deleted file mode 100644
index af0a423e57267..0000000000000
--- a/primitives/state-machine/src/changes_trie/input.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Different types of changes trie input pairs.
-
-use crate::{changes_trie::BlockNumber, StorageKey, StorageValue};
-use codec::{Decode, Encode, Error, Input, Output};
-use sp_core::storage::PrefixedStorageKey;
-
-/// Key of { changed key => set of extrinsic indices } mapping.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct ExtrinsicIndex<Number> {
-	/// Block at which this key has been inserted in the trie.
-	pub block: Number,
-	/// Storage key this node is responsible for.
-	pub key: StorageKey,
-}
-
-/// Value of { changed key => set of extrinsic indices } mapping.
-pub type ExtrinsicIndexValue = Vec<u32>;
-
-/// Key of { changed key => block/digest block numbers } mapping.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub struct DigestIndex<Number> {
-	/// Block at which this key has been inserted in the trie.
-	pub block: Number,
-	/// Storage key this node is responsible for.
-	pub key: StorageKey,
-}
-
-/// Key of { childtrie key => Childchange trie } mapping.
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
-pub struct ChildIndex<Number> {
-	/// Block at which this key has been inserted in the trie.
-	pub block: Number,
-	/// Storage key this node is responsible for.
-	pub storage_key: PrefixedStorageKey,
-}
-
-/// Value of { changed key => block/digest block numbers } mapping.
-pub type DigestIndexValue<Number> = Vec<Number>;
-
-/// Value of { changed key => block/digest block numbers } mapping.
-/// That is the root of the child change trie.
-pub type ChildIndexValue = Vec<u8>;
-
-/// Single input pair of changes trie.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum InputPair<Number> {
-	/// Element of { key => set of extrinsics where key has been changed } element mapping.
-	ExtrinsicIndex(ExtrinsicIndex<Number>, ExtrinsicIndexValue),
-	/// Element of { key => set of blocks/digest blocks where key has been changed } element
-	/// mapping.
-	DigestIndex(DigestIndex<Number>, DigestIndexValue<Number>),
-	/// Element of { childtrie key => Childchange trie } where key has been changed } element
-	/// mapping.
-	ChildIndex(ChildIndex<Number>, ChildIndexValue),
-}
-
-/// Single input key of changes trie.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum InputKey<Number> {
-	/// Key of { key => set of extrinsics where key has been changed } element mapping.
-	ExtrinsicIndex(ExtrinsicIndex<Number>),
-	/// Key of { key => set of blocks/digest blocks where key has been changed } element mapping.
-	DigestIndex(DigestIndex<Number>),
-	/// Key of { childtrie key => Childchange trie } where key has been changed } element mapping.
-	ChildIndex(ChildIndex<Number>),
-}
-
-impl<Number> InputPair<Number> {
-	/// Extract storage key that this pair corresponds to.
-	pub fn key(&self) -> Option<&[u8]> {
-		match *self {
-			InputPair::ExtrinsicIndex(ref key, _) => Some(&key.key),
-			InputPair::DigestIndex(ref key, _) => Some(&key.key),
-			InputPair::ChildIndex(_, _) => None,
-		}
-	}
-}
-
-impl<Number: BlockNumber> Into<(StorageKey, StorageValue)> for InputPair<Number> {
-	fn into(self) -> (StorageKey, StorageValue) {
-		match self {
-			InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()),
-			InputPair::DigestIndex(key, value) => (key.encode(), value.encode()),
-			InputPair::ChildIndex(key, value) => (key.encode(), value.encode()),
-		}
-	}
-}
-
-impl<Number> Into<InputKey<Number>> for InputPair<Number> {
-	fn into(self) -> InputKey<Number> {
-		match self {
-			InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key),
-			InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key),
-			InputPair::ChildIndex(key, _) => InputKey::ChildIndex(key),
-		}
-	}
-}
-
-impl<Number: BlockNumber> ExtrinsicIndex<Number> {
-	pub fn key_neutral_prefix(block: Number) -> Vec<u8> {
-		let mut prefix = vec![1];
-		prefix.extend(block.encode());
-		prefix
-	}
-}
-
-impl<Number: BlockNumber> Encode for ExtrinsicIndex<Number> {
-	fn encode_to<W: Output + ?Sized>(&self, dest: &mut W) {
-		dest.push_byte(1);
-		self.block.encode_to(dest);
-		self.key.encode_to(dest);
-	}
-}
-
-impl<Number: BlockNumber> codec::EncodeLike for ExtrinsicIndex<Number> {}
-
-impl<Number: BlockNumber> DigestIndex<Number> {
-	pub fn key_neutral_prefix(block: Number) -> Vec<u8> {
-		let mut prefix = vec![2];
-		prefix.extend(block.encode());
-		prefix
-	}
-}
-
-impl<Number: BlockNumber> Encode for DigestIndex<Number> {
-	fn encode_to<W: Output + ?Sized>(&self, dest: &mut W) {
-		dest.push_byte(2);
-		self.block.encode_to(dest);
-		self.key.encode_to(dest);
-	}
-}
-
-impl<Number: BlockNumber> ChildIndex<Number> {
-	pub fn key_neutral_prefix(block: Number) -> Vec<u8> {
-		let mut prefix = vec![3];
-		prefix.extend(block.encode());
-		prefix
-	}
-}
-
-impl<Number: BlockNumber> Encode for ChildIndex<Number> {
-	fn encode_to<W: Output + ?Sized>(&self, dest: &mut W) {
-		dest.push_byte(3);
-		self.block.encode_to(dest);
-		self.storage_key.encode_to(dest);
-	}
-}
-
-impl<Number: BlockNumber> codec::EncodeLike for DigestIndex<Number> {}
-
-impl<Number: BlockNumber> Decode for InputKey<Number> {
-	fn decode<I: Input>(input: &mut I) -> Result<Self, Error> {
-		match input.read_byte()? {
-			1 => Ok(InputKey::ExtrinsicIndex(ExtrinsicIndex {
-				block: Decode::decode(input)?,
-				key: Decode::decode(input)?,
-			})),
-			2 => Ok(InputKey::DigestIndex(DigestIndex {
-				block: Decode::decode(input)?,
-				key: Decode::decode(input)?,
-			})),
-			3 => Ok(InputKey::ChildIndex(ChildIndex {
-				block: Decode::decode(input)?,
-				storage_key: PrefixedStorageKey::new(Decode::decode(input)?),
-			})),
-			_ => Err("Invalid input key variant".into()),
-		}
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	#[test]
-	fn extrinsic_index_serialized_and_deserialized() {
-		let original = ExtrinsicIndex { block: 777u64, key: vec![42] };
-		let serialized = original.encode();
-		let deserialized: InputKey<u64> = Decode::decode(&mut &serialized[..]).unwrap();
-		assert_eq!(InputKey::ExtrinsicIndex(original), deserialized);
-	}
-
-	#[test]
-	fn digest_index_serialized_and_deserialized() {
-		let original = DigestIndex { block: 777u64, key: vec![42] };
-		let serialized = original.encode();
-		let deserialized: InputKey<u64> = Decode::decode(&mut &serialized[..]).unwrap();
-		assert_eq!(InputKey::DigestIndex(original), deserialized);
-	}
-}
diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs
deleted file mode 100644
index 40148095247dd..0000000000000
--- a/primitives/state-machine/src/changes_trie/mod.rs
+++ /dev/null
@@ -1,428 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Changes trie related structures and functions.
-//!
-//! The changes trie is a trie built of { storage key => extrinsics } pairs
-//! at the end of each block. For every changed storage key it contains
-//! a pair, mapping the key to the set of extrinsics where it has been changed.
-//!
-//! Optionally, every N blocks, additional level-1 digest nodes are appended
-//! to the changes trie, containing pairs { storage key => blocks }. For every
-//! storage key that has been changed in the PREVIOUS N-1 blocks (except for the
-//! genesis block) it contains a pair, mapping this key to the set of blocks where
-//! it has been changed.
-//!
-//! Optionally, every N^digest_level (where digest_level > 1) blocks, an additional
-//! level-digest_level digest is created. It is built out of pairs { storage key =>
-//! digest block }, containing entries for every storage key that has been changed in
-//! the last N^digest_level-1 blocks (except for the genesis block), mapping these keys
-//! to the set of lower-level digest blocks.
-//!
-//! The changes trie configuration can change over time. The range of blocks where a
-//! configuration has been active is given by two blocks: zero and end. The zero block
-//! is the block where the configuration has been set; the first changes trie that uses
-//! this configuration is built at block zero+1. If the configuration deactivates
-//! at some block, this will be the end block of the configuration. It is also the
-//! zero block of the next configuration.
-//!
-//! If a configuration has an end block, it also means that a 'skewed digest' has been
-//! (or should have been) built at that block. If this is the block where the max-level
-//! digest should have been created, then it is simply the max-level digest of this
-//! configuration. Otherwise, it is the digest that covers all blocks since the last
-//! max-level digest block was created.
-//!
-//! The changes trie only contains the top-level storage changes. Sub-level changes
-//! are propagated through their storage roots on the top-level storage.
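The digest schedule described in the doc comment above can be sanity-checked with a small standalone sketch. It is illustrative only and assumes a configuration that activates at block 0 with no end block (so no skewed digest); `digest_level_at` is a hypothetical helper, not part of the removed API:

// Returns the digest level built at `block`: 0 for a regular block, k > 0 when
// a level-k digest is created there (block divisible by digest_interval^k).
fn digest_level_at(block: u64, digest_interval: u64, digest_levels: u32) -> u32 {
	let mut level = 0;
	let mut step = digest_interval;
	// `step > 1` guards against digest_interval 0/1, where digests are disabled.
	while level < digest_levels && step > 1 && block % step == 0 {
		level += 1;
		step *= digest_interval;
	}
	level
}

fn main() {
	// digest_interval = 4, digest_levels = 2:
	// level-1 digests at blocks 4, 8, 12, ...; level-2 digests at 16, 32, ...
	assert_eq!(digest_level_at(6, 4, 2), 0); // regular block
	assert_eq!(digest_level_at(8, 4, 2), 1); // level-1 digest block
	assert_eq!(digest_level_at(16, 4, 2), 2); // level-2 digest block
}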
- -mod build; -mod build_cache; -mod build_iterator; -mod changes_iterator; -mod input; -mod prune; -mod storage; -mod surface_iterator; - -pub use self::{ - build_cache::{BuildCache, CacheAction, CachedBuildData}, - changes_iterator::{ - key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, - }, - prune::prune, - storage::InMemoryStorage, -}; - -use crate::{ - backend::Backend, - changes_trie::{ - build::prepare_input, - build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, - }, - overlayed_changes::OverlayedChanges, - StorageKey, -}; -use codec::{Decode, Encode}; -use hash_db::{Hasher, Prefix}; -use num_traits::{One, Zero}; -use sp_core::{self, storage::PrefixedStorageKey}; -use sp_trie::{trie_types::TrieDBMut, DBValue, MemoryDB, TrieMut}; -use std::{ - collections::{HashMap, HashSet}, - convert::TryInto, -}; - -/// Requirements for block number that can be used with changes tries. -pub trait BlockNumber: - Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -impl BlockNumber for T where - T: Send - + Sync - + 'static - + std::fmt::Display - + Clone - + From - + TryInto - + One - + Zero - + PartialEq - + Ord - + std::hash::Hash - + std::ops::Add - + ::std::ops::Sub - + std::ops::Mul - + ::std::ops::Div - + std::ops::Rem - + std::ops::AddAssign - + num_traits::CheckedMul - + num_traits::CheckedSub - + Decode - + Encode -{ -} - -/// Block identifier that could be used to determine fork of this block. -#[derive(Debug)] -pub struct AnchorBlockId { - /// Hash of this block. - pub hash: Hash, - /// Number of this block. - pub number: Number, -} - -/// Changes tries state at some block. -pub struct State<'a, H, Number> { - /// Configuration that is active at given block. - pub config: Configuration, - /// Configuration activation block number. Zero if it is the first configuration on the chain, - /// or number of the block that have emit NewConfiguration signal (thus activating - /// configuration starting from the **next** block). - pub zero: Number, - /// Underlying changes tries storage reference. - pub storage: &'a dyn Storage, -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait RootsStorage: Send + Sync { - /// Resolve hash of the block into anchor. - fn build_anchor(&self, hash: H::Out) -> Result, String>; - /// Get changes trie root for the block with given number which is an ancestor (or the block - /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root( - &self, - anchor: &AnchorBlockId, - block: Number, - ) -> Result, String>; -} - -/// Changes trie storage. Provides access to trie roots and trie nodes. -pub trait Storage: RootsStorage { - /// Casts from self reference to RootsStorage reference. - fn as_roots_storage(&self) -> &dyn RootsStorage; - /// Execute given functor with cached entry for given trie root. - /// Returns true if the functor has been called (cache entry exists) and false otherwise. - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool; - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; -} - -/// Changes trie storage -> trie backend essence adapter. 
-pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( - pub &'a dyn Storage, -); - -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage - for TrieBackendStorageAdapter<'a, H, N> -{ - type Overlay = sp_trie::MemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.0.get(key, prefix) - } -} - -/// Changes trie configuration. -pub type Configuration = sp_core::ChangesTrieConfiguration; - -/// Blocks range where configuration has been constant. -#[derive(Clone)] -pub struct ConfigurationRange<'a, N> { - /// Active configuration. - pub config: &'a Configuration, - /// Zero block of this configuration. The configuration is active starting from the next block. - pub zero: N, - /// End block of this configuration. It is the last block where configuration has been active. - pub end: Option, -} - -impl<'a, H, Number> State<'a, H, Number> { - /// Create state with given config and storage. - pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { - Self { config, zero, storage } - } -} - -impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { - fn clone(&self) -> Self { - State { config: self.config.clone(), zero: self.zero.clone(), storage: self.storage } - } -} - -/// Create state where changes tries are disabled. -pub fn disabled_state<'a, H, Number>() -> Option> { - None -} - -/// Compute the changes trie root and transaction for given block. -/// Returns Err(()) if unknown `parent_hash` has been passed. -/// Returns Ok(None) if there's no data to perform computation. -/// Panics if background storage returns an error OR if insert to MemoryDB fails. -pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( - backend: &B, - state: Option<&'a State<'a, H, Number>>, - changes: &OverlayedChanges, - parent_hash: H::Out, - panic_on_storage_error: bool, -) -> Result, H::Out, CacheAction)>, ()> -where - H::Out: Ord + 'static + Encode, -{ - /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. - fn maybe_panic( - res: std::result::Result, - panic: bool, - ) -> std::result::Result { - res.map(Ok).unwrap_or_else(|e| { - if panic { - panic!( - "changes trie: storage access is not allowed to fail within runtime: {:?}", - e - ) - } else { - Err(()) - } - }) - } - - // when storage isn't provided, changes tries aren't created - let state = match state { - Some(state) => state, - None => return Ok(None), - }; - - // build_anchor error should not be considered fatal - let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; - let block = parent.number.clone() + One::one(); - - // prepare configuration range - we already know zero block. 
Current block may be the end block - // if configuration has been changed in this block - let is_config_changed = - match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; - let config_range = ConfigurationRange { - config: &state.config, - zero: state.zero.clone(), - end: if is_config_changed { Some(block.clone()) } else { None }, - }; - - // storage errors are considered fatal (similar to situations when runtime fetches values from - // storage) - let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( - prepare_input::( - backend, - state.storage, - config_range.clone(), - changes, - &parent, - ), - panic_on_storage_error, - )?; - - // prepare cached data - let mut cache_action = prepare_cached_build_data(config_range, block.clone()); - let needs_changed_keys = cache_action.collects_changed_keys(); - cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); - - let mut mdb = MemoryDB::default(); - let mut child_roots = Vec::with_capacity(child_input_pairs.len()); - for (child_index, input_pairs) in child_input_pairs { - let mut not_empty = false; - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - not_empty = true; - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = - cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); - } - if not_empty { - child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); - } - } - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - for (key, value) in child_roots.into_iter().map(Into::into) { - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = cache_action.insert(None, storage_changed_keys); - } - - let cache_action = cache_action.complete(block, &root); - Ok(Some((mdb, root, cache_action))) -} - -/// Prepare empty cached build data for given block. 
-fn prepare_cached_build_data<Number: BlockNumber>(
-	config: ConfigurationRange<Number>,
-	block: Number,
-) -> IncompleteCacheAction<Number> {
-	// when digests are not enabled in configuration, we do not need to cache anything
-	// because it'll never be used again for building other tries
-	// => let's clear the cache
-	if !config.config.is_digest_build_enabled() {
-		return IncompleteCacheAction::Clear
-	}
-
-	// when this is the last block where current configuration is active
-	// => let's clear the cache
-	if config.end.as_ref() == Some(&block) {
-		return IncompleteCacheAction::Clear
-	}
-
-	// we do not need to cache anything when top-level digest trie is created, because
-	// it'll never be used again for building other tries
-	// => let's clear the cache
-	match config.config.digest_level_at_block(config.zero.clone(), block) {
-		Some((digest_level, _, _)) if digest_level == config.config.digest_levels =>
-			IncompleteCacheAction::Clear,
-		_ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()),
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	#[test]
-	fn cache_is_cleared_when_digests_are_disabled() {
-		let config = Configuration { digest_interval: 0, digest_levels: 0 };
-		let config_range = ConfigurationRange { zero: 0, end: None, config: &config };
-		assert_eq!(prepare_cached_build_data(config_range, 8u32), IncompleteCacheAction::Clear);
-	}
-
-	#[test]
-	fn build_data_is_cached_when_digests_are_enabled() {
-		let config = Configuration { digest_interval: 8, digest_levels: 2 };
-		let config_range = ConfigurationRange { zero: 0, end: None, config: &config };
-		assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys());
-		assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys());
-		assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys());
-	}
-
-	#[test]
-	fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() {
-		let config = Configuration { digest_interval: 8, digest_levels: 2 };
-		let config_range = ConfigurationRange { zero: 0, end: None, config: &config };
-		assert_eq!(prepare_cached_build_data(config_range, 64u32), IncompleteCacheAction::Clear);
-	}
-
-	#[test]
-	fn cache_is_cleared_when_end_block_of_configuration_is_built() {
-		let config = Configuration { digest_interval: 8, digest_levels: 2 };
-		let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config };
-		assert_eq!(
-			prepare_cached_build_data(config_range.clone(), 4u32),
-			IncompleteCacheAction::Clear
-		);
-	}
-}
diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs
deleted file mode 100644
index 2ca540562b47f..0000000000000
--- a/primitives/state-machine/src/changes_trie/prune.rs
+++ /dev/null
@@ -1,204 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//! Changes trie pruning-related functions.
-
-use crate::{
-	changes_trie::{
-		input::{ChildIndex, InputKey},
-		storage::TrieBackendAdapter,
-		AnchorBlockId, BlockNumber, Storage,
-	},
-	proving_backend::ProvingBackendRecorder,
-	trie_backend_essence::TrieBackendEssence,
-};
-use codec::{Codec, Decode};
-use hash_db::Hasher;
-use log::warn;
-use num_traits::One;
-use sp_trie::Recorder;
-
-/// Prune obsolete changes tries. Pruning happens at the same block, where highest
-/// level digest is created. Pruning guarantees to save changes tries for last
-/// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval`
-/// ranges.
-pub fn prune<H: Hasher, Number: BlockNumber, F: FnMut(H::Out)>(
-	storage: &dyn Storage<H, Number>,
-	first: Number,
-	last: Number,
-	current_block: &AnchorBlockId<H::Out, Number>,
-	mut remove_trie_node: F,
-) where
-	H::Out: Codec,
-{
-	// delete changes trie for every block in range
-	let mut block = first;
-	loop {
-		if block >= last.clone() + One::one() {
-			break
-		}
-
-		let prev_block = block.clone();
-		block += One::one();
-
-		let block = prev_block;
-		let root = match storage.root(current_block, block.clone()) {
-			Ok(Some(root)) => root,
-			Ok(None) => continue,
-			Err(error) => {
-				// try to delete other tries
-				warn!(target: "trie", "Failed to read changes trie root from DB: {}", error);
-				continue
-			},
-		};
-		let children_roots = {
-			let trie_storage = TrieBackendEssence::<_, H>::new(
-				crate::changes_trie::TrieBackendStorageAdapter(storage),
-				root,
-			);
-			let child_prefix = ChildIndex::key_neutral_prefix(block.clone());
-			let mut children_roots = Vec::new();
-			trie_storage.for_key_values_with_prefix(&child_prefix, |mut key, mut value| {
-				if let Ok(InputKey::ChildIndex::<Number>(_trie_key)) = Decode::decode(&mut key) {
-					if let Ok(value) = <Vec<u8>>::decode(&mut value) {
-						let mut trie_root = <H as Hasher>::Out::default();
-						trie_root.as_mut().copy_from_slice(&value[..]);
-						children_roots.push(trie_root);
-					}
-				}
-			});
-
-			children_roots
-		};
-		for root in children_roots.into_iter() {
-			prune_trie(storage, root, &mut remove_trie_node);
-		}
-
-		prune_trie(storage, root, &mut remove_trie_node);
-	}
-}
-
-// Prune a trie.
-fn prune_trie( - storage: &dyn Storage, - root: H::Out, - remove_trie_node: &mut F, -) where - H::Out: Codec, -{ - // enumerate all changes trie' keys, recording all nodes that have been 'touched' - // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); - { - let mut trie = ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder: &mut proof_recorder, - }; - trie.record_all_keys(); - } - - // all nodes of this changes trie should be pruned - remove_trie_node(root); - for node in proof_recorder.drain().into_iter().map(|n| n.hash) { - remove_trie_node(node); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{backend::insert_into_memory_db, changes_trie::storage::InMemoryStorage}; - use codec::Encode; - use sp_core::H256; - use sp_runtime::traits::BlakeTwo256; - use sp_trie::MemoryDB; - use std::collections::HashSet; - - fn prune_by_collect( - storage: &dyn Storage, - first: u64, - last: u64, - current_block: u64, - ) -> HashSet { - let mut pruned_trie_nodes = HashSet::new(); - let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(storage, first, last, &anchor, |node| { - pruned_trie_nodes.insert(node); - }); - pruned_trie_nodes - } - - #[test] - fn prune_works() { - fn prepare_storage() -> InMemoryStorage { - let child_info = sp_core::storage::ChildInfo::new_default(&b"1"[..]); - let child_key = - ChildIndex { block: 67u64, storage_key: child_info.prefixed_storage_key() } - .encode(); - let mut mdb1 = MemoryDB::::default(); - let root1 = - insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]) - .unwrap(); - let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::( - &mut mdb2, - vec![(vec![11], vec![21]), (vec![12], vec![22])], - ) - .unwrap(); - let mut mdb3 = MemoryDB::::default(); - let ch_root3 = - insert_into_memory_db::(&mut mdb3, vec![(vec![110], vec![120])]) - .unwrap(); - let root3 = insert_into_memory_db::( - &mut mdb3, - vec![ - (vec![13], vec![23]), - (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), - ], - ) - .unwrap(); - let mut mdb4 = MemoryDB::::default(); - let root4 = - insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]) - .unwrap(); - let storage = InMemoryStorage::new(); - storage.insert(65, root1, mdb1); - storage.insert(66, root2, mdb2); - storage.insert(67, root3, mdb3); - storage.insert(68, root4, mdb4); - - storage - } - - let storage = prepare_storage(); - assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_65 = prune_by_collect(&storage, 60, 65, 90); - assert!(!prune60_65.is_empty()); - storage.remove_from_storage(&prune60_65); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_70 = prune_by_collect(&storage, 60, 70, 90); - assert!(!prune60_70.is_empty()); - storage.remove_from_storage(&prune60_70); - assert!(storage.into_mdb().drain().is_empty()); - } -} diff --git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs deleted file mode 100644 index bd5e3a32b5657..0000000000000 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ /dev/null @@ -1,214 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
-// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Changes trie storage utilities. - -use crate::{ - changes_trie::{AnchorBlockId, BlockNumber, BuildCache, RootsStorage, Storage}, - trie_backend_essence::TrieBackendStorage, - StorageKey, -}; -use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; -use parking_lot::RwLock; -use sp_core::storage::PrefixedStorageKey; -use sp_trie::{DBValue, MemoryDB}; -use std::collections::{BTreeMap, HashMap, HashSet}; - -#[cfg(test)] -use crate::backend::insert_into_memory_db; -#[cfg(test)] -use crate::changes_trie::input::{ChildIndex, InputPair}; - -/// In-memory implementation of changes trie storage. -pub struct InMemoryStorage { - data: RwLock>, - cache: BuildCache, -} - -/// Adapter for using changes trie storage as a TrieBackendEssence' storage. -pub struct TrieBackendAdapter<'a, H: Hasher, Number: BlockNumber> { - storage: &'a dyn Storage, - _hasher: std::marker::PhantomData<(H, Number)>, -} - -struct InMemoryStorageData { - roots: BTreeMap, - mdb: MemoryDB, -} - -impl InMemoryStorage { - /// Creates storage from given in-memory database. - pub fn with_db(mdb: MemoryDB) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { roots: BTreeMap::new(), mdb }), - cache: BuildCache::new(), - } - } - - /// Creates storage with empty database. - pub fn new() -> Self { - Self::with_db(Default::default()) - } - - /// Creates storage with given proof. - pub fn with_proof(proof: Vec>) -> Self { - use hash_db::HashDB; - - let mut proof_db = MemoryDB::::default(); - for item in proof { - proof_db.insert(EMPTY_PREFIX, &item); - } - Self::with_db(proof_db) - } - - /// Get mutable cache reference. - pub fn cache_mut(&mut self) -> &mut BuildCache { - &mut self.cache - } - - /// Create the storage with given blocks. 
- pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { - roots: blocks.into_iter().collect(), - mdb: MemoryDB::default(), - }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn with_inputs( - mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(PrefixedStorageKey, Vec<(Number, Vec>)>)>, - ) -> Self { - let mut mdb = MemoryDB::default(); - let mut roots = BTreeMap::new(); - for (storage_key, child_input) in children_inputs { - for (block, pairs) in child_input { - let root = - insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - - if let Some(root) = root { - let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { - ix - } else { - top_inputs.push((block.clone(), Default::default())); - top_inputs.len() - 1 - }; - top_inputs[ix].1.push(InputPair::ChildIndex( - ChildIndex { block: block.clone(), storage_key: storage_key.clone() }, - root.as_ref().to_vec(), - )); - } - } - } - - for (block, pairs) in top_inputs { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - if let Some(root) = root { - roots.insert(block, root); - } - } - - InMemoryStorage { - data: RwLock::new(InMemoryStorageData { roots, mdb }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct - } - - #[cfg(test)] - pub fn remove_from_storage(&self, keys: &HashSet) { - let mut data = self.data.write(); - for key in keys { - data.mdb.remove_and_purge(key, hash_db::EMPTY_PREFIX); - } - } - - #[cfg(test)] - pub fn into_mdb(self) -> MemoryDB { - self.data.into_inner().mdb - } - - /// Insert changes trie for given block. - pub fn insert(&self, block: Number, changes_trie_root: H::Out, trie: MemoryDB) { - let mut data = self.data.write(); - data.roots.insert(block, changes_trie_root); - data.mdb.consolidate(trie); - } -} - -impl RootsStorage for InMemoryStorage { - fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { - self.data - .read() - .roots - .iter() - .find(|(_, v)| **v == parent_hash) - .map(|(k, _)| AnchorBlockId { hash: parent_hash, number: k.clone() }) - .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) - } - - fn root( - &self, - _anchor_block: &AnchorBlockId, - block: Number, - ) -> Result, String> { - Ok(self.data.read().roots.get(&block).cloned()) - } -} - -impl Storage for InMemoryStorage { - fn as_roots_storage(&self) -> &dyn RootsStorage { - self - } - - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - self.cache.with_changed_keys(root, functor) - } - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) - } -} - -impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { - pub fn new(storage: &'a dyn Storage) -> Self { - Self { storage, _hasher: Default::default() } - } -} - -impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> -where - Number: BlockNumber, - H: Hasher, -{ - type Overlay = MemoryDB; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) - } -} diff --git a/primitives/state-machine/src/changes_trie/surface_iterator.rs b/primitives/state-machine/src/changes_trie/surface_iterator.rs deleted file mode 100644 index b3e5a490cd184..0000000000000 --- 
a/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ /dev/null @@ -1,326 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! The best way to understand how this iterator works is to imagine some 2D terrain that have some -//! mountains (digest changes tries) and valleys (changes tries for regular blocks). There are gems -//! (blocks) beneath the terrain. Given the request to find all gems in the range [X1; X2] this -//! iterator will return **minimal set** of points at the terrain (mountains and valleys) inside -//! this range that have to be drilled down to search for gems. - -use crate::changes_trie::{BlockNumber, ConfigurationRange}; -use num_traits::One; - -/// Returns surface iterator for given range of blocks. -/// -/// `max` is the number of best block, known to caller. We can't access any changes tries -/// that are built after this block, even though we may have them built already. -pub fn surface_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = - lower_bound_max_digest(config.clone(), max.clone(), begin.clone(), end)?; - Ok(SurfaceIterator { - config, - begin, - max, - current: Some(current), - current_begin, - digest_step, - digest_level, - }) -} - -/// Surface iterator - only traverses top-level digests from given range and tries to find -/// all valid digest changes. -/// -/// Iterator item is the tuple of (last block of the current point + digest level of the current -/// point). Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest -/// block and None if it is skewed digest block. 
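A hand-derived check of the `lower_bound_max_digest` expectation exercised in the deleted tests below (begin = 20, end = 180, digest_interval = 4, digest_levels = 2, configuration active from block 0). This is an illustrative sketch only; `top_level_digest_bounds` is a hypothetical helper, not the removed API:

// With digest_interval = 4 and digest_levels = 2, each top-level digest covers
// 4^2 = 16 blocks; the level-2 digest covering block 180 spans (176; 192] and
// is created at block 192, matching the expected (192, 176, 16, Some(2)).
fn top_level_digest_bounds(end: u64, digest_interval: u64, digest_levels: u32) -> (u64, u64, u64) {
	let step = digest_interval.pow(digest_levels); // 16: blocks per top-level digest
	let begin = ((end - 1) / step) * step; // 176: last block before the covered range
	(begin + step, begin, step) // (digest block, range begin, digest step)
}

fn main() {
	assert_eq!(top_level_digest_bounds(180, 4, 2), (192, 176, 16));
}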
-pub struct SurfaceIterator<'a, Number: BlockNumber> { - config: ConfigurationRange<'a, Number>, - begin: Number, - max: Number, - current: Option, - current_begin: Number, - digest_step: u32, - digest_level: Option, -} - -impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { - type Item = Result<(Number, Option), String>; - - fn next(&mut self) -> Option { - let current = self.current.clone()?; - let digest_level = self.digest_level; - - if current < self.digest_step.into() { - self.current = None; - } else { - let next = current.clone() - self.digest_step.into(); - if next.is_zero() || next < self.begin { - self.current = None; - } else if next > self.current_begin { - self.current = Some(next); - } else { - let max_digest_interval = lower_bound_max_digest( - self.config.clone(), - self.max.clone(), - self.begin.clone(), - next, - ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval - { - Err(err) => return Some(Err(err)), - Ok(range) => range, - }; - - self.current = Some(current); - self.current_begin = current_begin; - self.digest_step = digest_step; - self.digest_level = digest_level; - } - } - - Some(Ok((current, digest_level))) - } -} - -/// Returns parameters of highest level digest block that includes the end of given range -/// and tends to include the whole range. -fn lower_bound_max_digest<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, -) -> Result<(Number, Number, u32, Option), String> { - if end > max || begin > end { - return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)) - } - if begin <= config.zero || - config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) - { - return Err(format!( - "changes trie range is not covered by configuration: {}..{}/{}..{}", - begin, - end, - config.zero, - match config.end.as_ref() { - Some(config_end) => format!("{}", config_end), - None => "None".into(), - } - )) - } - - let mut digest_level = 0u32; - let mut digest_step = 1u32; - let mut digest_interval = 0u32; - let mut current = end.clone(); - let mut current_begin = begin.clone(); - if current_begin != current { - while digest_level != config.config.digest_levels { - // try to use next level digest - let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * config.config.digest_interval; - let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { - 1 - } else { - digest_interval - } - }; - let new_digest_begin = config.zero.clone() + - ((current.clone() - One::one() - config.zero.clone()) / - new_digest_interval.into()) * - new_digest_interval.into(); - let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); - let new_current = new_digest_begin.clone() + new_digest_interval.into(); - - // check if we met skewed digest - if let Some(skewed_digest_end) = config.end.as_ref() { - if new_digest_end > *skewed_digest_end { - let skewed_digest_start = config.config.prev_max_level_digest_block( - config.zero.clone(), - skewed_digest_end.clone(), - ); - if let Some(skewed_digest_start) = skewed_digest_start { - let skewed_digest_range = (skewed_digest_end.clone() - - skewed_digest_start.clone()) - .try_into() - .ok() - .expect( - "skewed digest range is always <= max level digest range;\ - max level digest range always fits u32; qed", - ); - return Ok(( - skewed_digest_end.clone(), - skewed_digest_start, - skewed_digest_range, - None, - )) - } - } - } - - // we can't 
use next level digest if it touches any unknown (> max) blocks - if new_digest_end > max { - if begin < new_digest_begin { - current_begin = new_digest_begin; - } - break - } - - // we can (and will) use this digest - digest_level = new_digest_level; - digest_step = new_digest_step; - digest_interval = new_digest_interval; - current = new_current; - current_begin = new_digest_begin; - - // if current digest covers the whole range => no need to use next level digest - if current_begin <= begin && new_digest_end >= end { - break - } - } - } - - Ok((current, current_begin, digest_step, Some(digest_level))) -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::changes_trie::Configuration; - - fn configuration_range<'a>( - config: &'a Configuration, - zero: u64, - ) -> ConfigurationRange<'a, u64> { - ConfigurationRange { config, zero, end: None } - } - - #[test] - fn lower_bound_max_digest_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64) - .unwrap(), - (192, 176, 16, Some(2)), - ); - - // when config activates at 30 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64) - .unwrap(), - (222, 206, 16, Some(2)), - ); - } - - #[test] - fn surface_iterator_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - // when config activates at 0 - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 100_000u64, 40u64, 180u64,) - .unwrap() - .collect::>(), - vec![ - Ok((192, Some(2))), - Ok((176, Some(2))), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - - // when config activates at 30 - assert_eq!( - surface_iterator(configuration_range(&config, 30u64), 100_000u64, 40u64, 180u64,) - .unwrap() - .collect::>(), - vec![ - Ok((190, Some(2))), - Ok((174, Some(2))), - Ok((158, Some(2))), - Ok((142, Some(2))), - Ok((126, Some(2))), - Ok((110, Some(2))), - Ok((94, Some(2))), - Ok((78, Some(2))), - Ok((62, Some(2))), - Ok((46, Some(2))), - ], - ); - - // when config activates at 0 AND max block is before next digest - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64) - .unwrap() - .collect::>(), - vec![ - Ok((183, Some(0))), - Ok((182, Some(0))), - Ok((181, Some(0))), - Ok((180, Some(1))), - Ok((176, Some(2))), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - } - - #[test] - fn surface_iterator_works_with_skewed_digest() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let mut config_range = configuration_range(&config, 0u64); - - // when config activates at 0 AND ends at 170 - config_range.end = Some(170); - assert_eq!( - surface_iterator(config_range, 100_000u64, 40u64, 170u64) - .unwrap() - .collect::>(), - vec![ - Ok((170, None)), - Ok((160, Some(2))), - Ok((144, Some(2))), - Ok((128, Some(2))), - Ok((112, Some(2))), - Ok((96, Some(2))), - Ok((80, Some(2))), - Ok((64, Some(2))), - Ok((48, Some(2))), - ], - ); - } -} diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c20d8492fb1f3..ed93cc8676d99 100644 --- a/primitives/state-machine/src/ext.rs +++ 
b/primitives/state-machine/src/ext.rs @@ -28,8 +28,6 @@ use sp_core::storage::{well_known_keys::is_child_storage_key, ChildInfo, Tracked use sp_externalities::{Extension, ExtensionStore, Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; -#[cfg(feature = "std")] -use crate::changes_trie::State as ChangesTrieState; use crate::{log_error, trace, warn, StorageTransactionCache}; use sp_std::{ any::{Any, TypeId}, @@ -90,62 +88,52 @@ impl error::Error for Error { } /// Wraps a read-only backend, call executor, and current overlayed changes. -pub struct Ext<'a, H, N, B> +pub struct Ext<'a, H, B> where H: Hasher, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { /// The overlayed changes to write to. overlay: &'a mut OverlayedChanges, /// The storage backend to read from. backend: &'a B, /// The cache for the storage transactions. - storage_transaction_cache: &'a mut StorageTransactionCache, - /// Changes trie state to read from. - #[cfg(feature = "std")] - changes_trie_state: Option>, + storage_transaction_cache: &'a mut StorageTransactionCache, /// Pseudo-unique id used for tracing. pub id: u16, - /// Dummy usage of N arg. - _phantom: sp_std::marker::PhantomData, /// Extensions registered with this instance. #[cfg(feature = "std")] extensions: Option>, } -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, B: Backend, - N: crate::changes_trie::BlockNumber, { /// Create a new `Ext`. #[cfg(not(feature = "std"))] pub fn new( overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, + storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, ) -> Self { - Ext { overlay, backend, id: 0, storage_transaction_cache, _phantom: Default::default() } + Ext { overlay, backend, id: 0, storage_transaction_cache } } /// Create a new `Ext` from overlayed changes and read-only backend #[cfg(feature = "std")] pub fn new( overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, + storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, - changes_trie_state: Option>, extensions: Option<&'a mut sp_externalities::Extensions>, ) -> Self { Self { overlay, backend, - changes_trie_state, storage_transaction_cache, id: rand::random(), - _phantom: Default::default(), extensions: extensions.map(OverlayedExtensions::new), } } @@ -159,12 +147,11 @@ where } #[cfg(test)] -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { use std::collections::HashMap; @@ -181,12 +168,11 @@ where } } -impl<'a, H, N, B> Externalities for Ext<'a, H, N, B> +impl<'a, H, B> Externalities for Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn set_offchain_storage(&mut self, key: &[u8], value: Option<&[u8]>) { self.overlay.set_offchain_storage(key, value) @@ -644,54 +630,6 @@ where .add_transaction_index(IndexOperation::Renew { extrinsic: index, hash: hash.to_vec() }); } - #[cfg(not(feature = "std"))] - fn storage_changes_root(&mut self, _parent_hash: &[u8]) -> Result>, ()> { - Ok(None) - } - - #[cfg(feature = "std")] - fn storage_changes_root(&mut self, mut parent_hash: &[u8]) -> Result>, ()> { - let _guard = guard(); - if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root - { - trace!( - 
target: "state", - method = "ChangesRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - parent_hash = %HexDisplay::from(&parent_hash), - ?root, - cached = true, - ); - - Ok(Some(root.encode())) - } else { - let root = self.overlay.changes_trie_root( - self.backend, - self.changes_trie_state.as_ref(), - Decode::decode(&mut parent_hash).map_err(|e| { - trace!( - target: "state", - error = %e, - "Failed to decode changes root parent hash", - ) - })?, - true, - self.storage_transaction_cache, - ); - - trace!( - target: "state", - method = "ChangesRoot", - ext_id = %HexDisplay::from(&self.id.to_le_bytes()), - parent_hash = %HexDisplay::from(&parent_hash), - ?root, - cached = false, - ); - - root.map(|r| r.map(|o| o.encode())) - } - } - fn storage_start_transaction(&mut self) { self.overlay.start_transaction() } @@ -712,8 +650,6 @@ where self.overlay .drain_storage_changes( self.backend, - #[cfg(feature = "std")] - None, Default::default(), self.storage_transaction_cache, ) @@ -733,8 +669,6 @@ where .overlay .drain_storage_changes( self.backend, - #[cfg(feature = "std")] - None, Default::default(), self.storage_transaction_cache, ) @@ -778,12 +712,11 @@ where } } -impl<'a, H, N, B> Ext<'a, H, N, B> +impl<'a, H, B> Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn limit_remove_from_backend( &mut self, @@ -869,12 +802,11 @@ impl<'a> StorageAppend<'a> { } #[cfg(not(feature = "std"))] -impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> +impl<'a, H, B> ExtensionStore for Ext<'a, H, B> where H: Hasher, H::Out: Ord + 'static + codec::Codec, B: Backend, - N: crate::changes_trie::BlockNumber, { fn extension_by_type_id(&mut self, _type_id: TypeId) -> Option<&mut dyn Any> { None @@ -897,11 +829,10 @@ where } #[cfg(feature = "std")] -impl<'a, H, N, B> ExtensionStore for Ext<'a, H, N, B> +impl<'a, H, B> ExtensionStore for Ext<'a, H, B> where H: Hasher, B: 'a + Backend, - N: crate::changes_trie::BlockNumber, { fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { self.extensions.as_mut().and_then(|exts| exts.get_mut(type_id)) @@ -939,85 +870,17 @@ where mod tests { use super::*; use crate::{ - changes_trie::{ - Configuration as ChangesTrieConfiguration, InMemoryStorage as TestChangesTrieStorage, - }, InMemoryBackend, }; use codec::Encode; - use hex_literal::hex; - use num_traits::Zero; use sp_core::{ map, - storage::{well_known_keys::EXTRINSIC_INDEX, Storage, StorageChild}, - Blake2Hasher, H256, + storage::{Storage, StorageChild}, + Blake2Hasher, }; type TestBackend = InMemoryBackend; - type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - - fn prepare_overlay_with_changes() -> OverlayedChanges { - let mut changes = OverlayedChanges::default(); - changes.set_collect_extrinsics(true); - changes.set_extrinsic_index(1); - changes.set_storage(vec![1], Some(vec![100])); - changes.set_storage(EXTRINSIC_INDEX.to_vec(), Some(3u32.encode())); - changes.set_offchain_storage(b"k1", Some(b"v1")); - changes.set_offchain_storage(b"k2", Some(b"v2")); - changes - } - - fn changes_trie_config() -> ChangesTrieConfiguration { - ChangesTrieConfiguration { digest_interval: 0, digest_levels: 0 } - } - - #[test] - fn storage_changes_root_is_none_when_storage_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - 
assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_none_when_state_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), - ); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - overlay.set_collect_extrinsics(false); - overlay.set_storage(vec![1], None); - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), - ); - } + type TestExt<'a> = Ext<'a, Blake2Hasher, TestBackend>; #[test] fn next_storage_key_works() { @@ -1035,7 +898,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); @@ -1051,7 +914,7 @@ mod tests { drop(ext); overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); @@ -1079,7 +942,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.next_storage_key(&[5]), Some(vec![30])); @@ -1110,7 +973,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_backend < next_overlay assert_eq!(ext.next_child_storage_key(child_info, &[5]), Some(vec![10])); @@ -1126,7 +989,7 @@ mod tests { drop(ext); overlay.set_child_storage(child_info, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); // next_overlay exist but next_backend doesn't exist assert_eq!(ext.next_child_storage_key(child_info, &[40]), Some(vec![50])); @@ -1155,7 +1018,7 @@ mod tests { } .into(); - let ext 
= TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.child_storage(child_info, &[10]), Some(vec![10])); assert_eq!( @@ -1192,7 +1055,7 @@ mod tests { } .into(); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None); use sp_core::storage::well_known_keys; let mut ext = ext; diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 7bd0c645f3c00..6126e48e85333 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -23,8 +23,6 @@ pub mod backend; #[cfg(feature = "std")] mod basic; -#[cfg(feature = "std")] -mod changes_trie; mod error; mod ext; #[cfg(feature = "std")] @@ -140,28 +138,10 @@ pub use crate::{ }; pub use error::{Error, ExecutionError}; -#[cfg(not(feature = "std"))] -mod changes_trie { - /// Stub for change trie block number until - /// change trie move to no_std. - pub trait BlockNumber {} - - impl BlockNumber for N {} -} - #[cfg(feature = "std")] mod std_reexport { pub use crate::{ basic::BasicExternalities, - changes_trie::{ - disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, - AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, - BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, - State as ChangesTrieState, Storage as ChangesTrieStorage, - }, error::{Error, ExecutionError}, in_memory_backend::new_in_mem, proving_backend::{ @@ -200,10 +180,6 @@ mod execution { /// Default handler of the execution manager. pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; - /// Type of changes trie transaction. - pub type ChangesTrieTransaction = - (MemoryDB, ChangesTrieCacheAction<::Out, N>); - /// Trie backend with in-memory storage. pub type InMemoryBackend = TrieBackend, H>; @@ -303,11 +279,10 @@ mod execution { } /// The substrate state machine. - pub struct StateMachine<'a, B, H, N, Exec> + pub struct StateMachine<'a, B, H, Exec> where H: Hasher, B: Backend, - N: ChangesTrieBlockNumber, { backend: &'a B, exec: &'a Exec, @@ -315,8 +290,7 @@ mod execution { call_data: &'a [u8], overlay: &'a mut OverlayedChanges, extensions: Extensions, - changes_trie_state: Option>, - storage_transaction_cache: Option<&'a mut StorageTransactionCache>, + storage_transaction_cache: Option<&'a mut StorageTransactionCache>, runtime_code: &'a RuntimeCode<'a>, stats: StateMachineStats, /// The hash of the block the state machine will be executed on. @@ -325,29 +299,26 @@ mod execution { parent_hash: Option, } - impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> + impl<'a, B, H, Exec> Drop for StateMachine<'a, B, H, Exec> where H: Hasher, B: Backend, - N: ChangesTrieBlockNumber, { fn drop(&mut self) { self.backend.register_overlay_stats(&self.stats); } } - impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> + impl<'a, B, H, Exec> StateMachine<'a, B, H, Exec> where H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, B: Backend, - N: crate::changes_trie::BlockNumber, { /// Creates new substrate state machine. 
pub fn new( backend: &'a B, - changes_trie_state: Option>, overlay: &'a mut OverlayedChanges, exec: &'a Exec, method: &'a str, @@ -366,7 +337,6 @@ mod execution { call_data, extensions, overlay, - changes_trie_state, storage_transaction_cache: None, runtime_code, stats: StateMachineStats::default(), @@ -381,7 +351,7 @@ mod execution { /// build that will be cached. pub fn with_storage_transaction_cache( mut self, - cache: Option<&'a mut StorageTransactionCache>, + cache: Option<&'a mut StorageTransactionCache>, ) -> Self { self.storage_transaction_cache = cache; self @@ -438,7 +408,6 @@ mod execution { self.overlay, cache, self.backend, - self.changes_trie_state.clone(), Some(&mut self.extensions), ); @@ -557,9 +526,6 @@ mod execution { CallResult, ) -> CallResult, { - let changes_tries_enabled = self.changes_trie_state.is_some(); - self.overlay.set_collect_extrinsics(changes_tries_enabled); - let result = { match manager { ExecutionManager::Both(on_consensus_failure) => self @@ -583,7 +549,7 @@ mod execution { } /// Prove execution using the given state backend, overlayed changes, and call executor. - pub fn prove_execution( + pub fn prove_execution( backend: &mut B, overlay: &mut OverlayedChanges, exec: &Exec, @@ -597,13 +563,12 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let trie_backend = backend .as_trie_backend() .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _, _>( + prove_execution_on_trie_backend::<_, _, _, _>( trie_backend, overlay, exec, @@ -623,7 +588,7 @@ mod execution { /// /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. - pub fn prove_execution_on_trie_backend( + pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, @@ -637,13 +602,11 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + 'static + Clone, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, N, Exec>::new( + let mut sm = StateMachine::<_, H, Exec>::new( &proving_backend, - None, overlay, exec, method, @@ -662,7 +625,7 @@ mod execution { } /// Check execution proof, generated by `prove_execution` call. - pub fn execution_proof_check( + pub fn execution_proof_check( root: H::Out, proof: StorageProof, overlay: &mut OverlayedChanges, @@ -676,11 +639,10 @@ mod execution { H: Hasher, Exec: CodeExecutor + Clone + 'static, H::Out: Ord + 'static + codec::Codec, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _, _>( + execution_proof_check_on_trie_backend::<_, _, _>( &trie_backend, overlay, exec, @@ -692,7 +654,7 @@ mod execution { } /// Check execution proof on proving backend, generated by `prove_execution` call. 
- pub fn execution_proof_check_on_trie_backend( + pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, @@ -705,12 +667,10 @@ mod execution { H: Hasher, H::Out: Ord + 'static + codec::Codec, Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, Spawn: SpawnNamed + Send + 'static, { - let mut sm = StateMachine::<_, H, N, Exec>::new( + let mut sm = StateMachine::<_, H, Exec>::new( trie_backend, - None, overlay, exec, method, @@ -995,7 +955,7 @@ mod execution { #[cfg(test)] mod tests { - use super::{changes_trie::Configuration as ChangesTrieConfig, ext::Ext, *}; + use super::{ext::Ext, *}; use crate::execution::CallResult; use codec::{Decode, Encode}; use sp_core::{ @@ -1035,13 +995,6 @@ mod tests { use_native: bool, native_call: Option, ) -> (CallResult, bool) { - if self.change_changes_trie_config { - ext.place_storage( - sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some(ChangesTrieConfig { digest_interval: 777, digest_levels: 333 }.encode()), - ); - } - let using_native = use_native && self.native_available; match (using_native, self.native_succeeds, self.fallback_succeeds, native_call) { (true, true, _, Some(call)) => { @@ -1077,7 +1030,6 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -1103,7 +1055,6 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -1130,7 +1081,6 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -1169,7 +1119,7 @@ mod tests { // fetch execution proof from 'remote' full node let mut remote_backend = trie_backend::tests::test_trie(); let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution::<_, _, u64, _, _>( + let (remote_result, remote_proof) = prove_execution( &mut remote_backend, &mut Default::default(), &executor, @@ -1181,7 +1131,7 @@ mod tests { .unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), @@ -1223,7 +1173,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); ext.clear_prefix(b"ab", None); @@ -1253,7 +1202,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); @@ -1301,7 +1249,6 @@ mod tests { &mut overlay, &mut cache, &backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); @@ -1342,7 +1289,6 @@ mod tests { &mut overlay, &mut cache, &backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); @@ -1367,7 +1313,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); @@ -1390,7 +1335,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); @@ -1403,7 +1347,6 @@ mod tests { &mut overlay, &mut cache, backend, - 
changes_trie::disabled_state::<_, u64>(), None, ); @@ -1418,7 +1361,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); @@ -1446,7 +1388,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); ext.clear_storage(key.as_slice()); @@ -1460,7 +1401,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); @@ -1481,7 +1421,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); @@ -1502,7 +1441,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!( @@ -1784,7 +1722,6 @@ mod tests { &mut overlay, &mut cache, &backend, - changes_trie::disabled_state::<_, u64>(), None, ); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); @@ -1827,7 +1764,6 @@ mod tests { &mut overlay, &mut cache, backend, - changes_trie::disabled_state::<_, u64>(), None, ); assert_eq!(ext.storage(b"bbb"), Some(vec![])); @@ -1852,7 +1788,6 @@ mod tests { let mut state_machine = StateMachine::new( &backend, - changes_trie::disabled_state::<_, u64>(), &mut overlayed_changes, &DummyCodeExecutor { change_changes_trie_config: false, @@ -1867,7 +1802,7 @@ mod tests { TaskExecutor::new(), ); - let run_state_machine = |state_machine: &mut StateMachine<_, _, _, _>| { + let run_state_machine = |state_machine: &mut StateMachine<_, _, _>| { state_machine .execute_using_consensus_failure_handler:: _, _, _>( ExecutionManager::NativeWhenPossible, diff --git a/primitives/state-machine/src/overlayed_changes/changeset.rs b/primitives/state-machine/src/overlayed_changes/changeset.rs index 1ffd569e2828b..818b7be99bc6e 100644 --- a/primitives/state-machine/src/overlayed_changes/changeset.rs +++ b/primitives/state-machine/src/overlayed_changes/changeset.rs @@ -69,7 +69,6 @@ struct InnerValue { /// Current value. None if value has been deleted. value: V, /// The set of extrinsic indices where the value has been changed. - /// Is filled only if runtime has announced changes trie support. extrinsics: Extrinsics, } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index cf7af1c9a6f3a..57ac6937cf50e 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -21,12 +21,7 @@ mod changeset; mod offchain; use self::changeset::OverlayedChangeSet; -use crate::{backend::Backend, changes_trie::BlockNumber, stats::StateMachineStats, DefaultError}; -#[cfg(feature = "std")] -use crate::{ - changes_trie::{build_changes_trie, State as ChangesTrieState}, - ChangesTrieTransaction, -}; +use crate::{backend::Backend, stats::StateMachineStats, DefaultError}; use codec::{Decode, Encode}; use hash_db::Hasher; pub use offchain::OffchainOverlayedChanges; @@ -134,7 +129,7 @@ pub enum IndexOperation { /// /// This contains all the changes to the storage and transactions to apply these changes to the /// backend. -pub struct StorageChanges { +pub struct StorageChanges { /// All changes to the main storage. /// /// A value of `None` means that it was deleted. @@ -150,22 +145,13 @@ pub struct StorageChanges { pub transaction: Transaction, /// The storage root after applying the transaction.
pub transaction_storage_root: H::Out, - /// Contains the transaction for the backend for the changes trie. - /// - /// If changes trie is disabled the value is set to `None`. - #[cfg(feature = "std")] - pub changes_trie_transaction: Option>, - /// Phantom data for block number until change trie support no_std. - #[cfg(not(feature = "std"))] - pub _ph: sp_std::marker::PhantomData, - /// Changes to the transaction index. #[cfg(feature = "std")] pub transaction_index_changes: Vec, } #[cfg(feature = "std")] -impl StorageChanges { +impl StorageChanges { /// Deconstruct into the inner values pub fn into_inner( self, @@ -175,7 +161,6 @@ impl StorageChanges { OffchainChangesCollection, Transaction, H::Out, - Option>, Vec, ) { ( @@ -184,57 +169,41 @@ impl StorageChanges { self.offchain_storage_changes, self.transaction, self.transaction_storage_root, - self.changes_trie_transaction, self.transaction_index_changes, ) } } -/// The storage transaction are calculated as part of the `storage_root` and -/// `changes_trie_storage_root`. These transactions can be reused for importing the block into the +/// Storage transactions are calculated as part of the `storage_root`. +/// These transactions can be reused for importing the block into the /// storage. So, we cache them to avoid recomputing them. -pub struct StorageTransactionCache { +pub struct StorageTransactionCache { /// Contains the changes for the main and the child storages as one transaction. pub(crate) transaction: Option, /// The storage root after applying the transaction. pub(crate) transaction_storage_root: Option, - /// Contains the changes trie transaction. - #[cfg(feature = "std")] - pub(crate) changes_trie_transaction: Option>>, - /// The storage root after applying the changes trie transaction. - #[cfg(feature = "std")] - pub(crate) changes_trie_transaction_storage_root: Option>, - /// Phantom data for block number until change trie support no_std. - #[cfg(not(feature = "std"))] - pub(crate) _ph: sp_std::marker::PhantomData, } -impl StorageTransactionCache { +impl StorageTransactionCache { /// Reset the cached transactions. pub fn reset(&mut self) { *self = Self::default(); } } -impl Default - for StorageTransactionCache +impl Default + for StorageTransactionCache { fn default() -> Self { Self { transaction: None, transaction_storage_root: None, - #[cfg(feature = "std")] - changes_trie_transaction: None, - #[cfg(feature = "std")] - changes_trie_transaction_storage_root: None, - #[cfg(not(feature = "std"))] - _ph: Default::default(), } } } -impl Default - for StorageChanges +impl Default + for StorageChanges { fn default() -> Self { Self { @@ -244,10 +213,6 @@ impl Default transaction: Default::default(), transaction_storage_root: Default::default(), #[cfg(feature = "std")] - changes_trie_transaction: None, - #[cfg(not(feature = "std"))] - _ph: Default::default(), - #[cfg(feature = "std")] transaction_index_changes: Default::default(), } } @@ -539,27 +504,25 @@ impl OverlayedChanges { /// Convert this instance with all changes into a [`StorageChanges`] instance.
#[cfg(feature = "std")] - pub fn into_storage_changes, H: Hasher, N: BlockNumber>( + pub fn into_storage_changes, H: Hasher>( mut self, backend: &B, - changes_trie_state: Option<&ChangesTrieState>, parent_hash: H::Out, - mut cache: StorageTransactionCache, - ) -> Result, DefaultError> + mut cache: StorageTransactionCache, + ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { - self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) + self.drain_storage_changes(backend, parent_hash, &mut cache) } /// Drain all changes into a [`StorageChanges`] instance. Leaves an empty overlay in place. - pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( + pub fn drain_storage_changes, H: Hasher>( &mut self, backend: &B, - #[cfg(feature = "std")] changes_trie_state: Option<&ChangesTrieState>, - parent_hash: H::Out, - mut cache: &mut StorageTransactionCache, - ) -> Result, DefaultError> + _parent_hash: H::Out, + mut cache: &mut StorageTransactionCache, + ) -> Result, DefaultError> where H::Out: Ord + Encode + 'static, { @@ -574,21 +537,6 @@ impl OverlayedChanges { .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) .expect("Transaction was generated as part of `storage_root`; qed"); - // If the transaction does not exist, we generate it. - #[cfg(feature = "std")] - if cache.changes_trie_transaction.is_none() { - self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) - .map_err(|_| "Failed to generate changes trie transaction")?; - } - #[cfg(not(feature = "std"))] - let _ = parent_hash; - - #[cfg(feature = "std")] - let changes_trie_transaction = cache - .changes_trie_transaction - .take() - .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); - let (main_storage_changes, child_storage_changes) = self.drain_committed(); let offchain_storage_changes = self.offchain_drain_committed().collect(); @@ -604,11 +552,7 @@ impl OverlayedChanges { transaction, transaction_storage_root, #[cfg(feature = "std")] - changes_trie_transaction, - #[cfg(feature = "std")] transaction_index_changes, - #[cfg(not(feature = "std"))] - _ph: Default::default(), }) } @@ -639,10 +583,10 @@ impl OverlayedChanges { /// as seen by the current transaction. /// /// Returns the storage root and caches storage transaction in the given `cache`. - pub fn storage_root>( + pub fn storage_root>( &self, backend: &B, - cache: &mut StorageTransactionCache, + cache: &mut StorageTransactionCache, ) -> H::Out where H::Out: Ord + Encode, @@ -660,40 +604,6 @@ impl OverlayedChanges { root } - /// Generate the changes trie root. - /// - /// Returns the changes trie root and caches the storage transaction into the given `cache`. - /// - /// # Panics - /// - /// Panics on storage error, when `panic_on_storage_error` is set.
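A minimal sketch of the resulting call shape (assuming `sp_core::Blake2Hasher`, an empty overlay, and the re-exported `new_in_mem` in-memory backend constructor; it mirrors the `testing.rs` call site further down): the changes-trie state argument and the `N: BlockNumber` type parameter are simply gone.

use sp_core::Blake2Hasher;
use sp_state_machine::{new_in_mem, OverlayedChanges, StorageTransactionCache};

fn drain_changes_sketch() {
    let backend = new_in_mem::<Blake2Hasher>();
    let mut overlay = OverlayedChanges::default();
    let mut cache = StorageTransactionCache::default();
    // Only the backend, the parent hash and the transaction cache are passed now.
    let changes = overlay
        .drain_storage_changes(&backend, Default::default(), &mut cache)
        .expect("an empty overlay drains without error");
    assert!(changes.main_storage_changes.is_empty());
}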
- #[cfg(feature = "std")] - pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( - &self, - backend: &B, - changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, - parent_hash: H::Out, - panic_on_storage_error: bool, - cache: &mut StorageTransactionCache, - ) -> Result, ()> - where - H::Out: Ord + Encode + 'static, - { - build_changes_trie::<_, H, N>( - backend, - changes_trie_state, - self, - parent_hash, - panic_on_storage_error, - ) - .map(|r| { - let root = r.as_ref().map(|r| r.1).clone(); - cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); - cache.changes_trie_transaction_storage_root = Some(root); - root - }) - } - /// Returns an iterator over the keys (in lexicographic order) following `key` (excluding `key`) /// alongside its value. pub fn iter_after(&self, key: &[u8]) -> impl Iterator { @@ -937,7 +847,6 @@ mod tests { .collect(); let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); - overlay.set_collect_extrinsics(false); overlay.start_transaction(); overlay.set_storage(b"dog".to_vec(), Some(b"puppy".to_vec())); @@ -954,7 +863,6 @@ mod tests { &mut overlay, &mut cache, &backend, - crate::changes_trie::disabled_state::<_, u64>(), None, ); const ROOT: [u8; 32] = diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index 5b7d568b0311e..b3e43d4c46e7f 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -153,10 +153,6 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< unimplemented!("child_storage_root is not supported in ReadOnlyExternalities") } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - unimplemented!("storage_changes_root is not supported in ReadOnlyExternalities") - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by ReadOnlyExternalities"); } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 23f66ee14d87e..5904558701d2b 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -24,20 +24,15 @@ use std::{ use crate::{ backend::Backend, - changes_trie::{ - BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, - InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, - }, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, }; -use codec::Decode; use hash_db::Hasher; use sp_core::{ offchain::testing::TestPersistentOffchainDB, storage::{ - well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE}, + well_known_keys::{is_child_storage_key, CODE}, Storage, }, testing::TaskExecutor, @@ -46,7 +41,7 @@ use sp_core::{ use sp_externalities::{Extension, ExtensionStore, Extensions}; /// Simple HashMap-based Externalities impl. -pub struct TestExternalities +pub struct TestExternalities where H::Out: codec::Codec + Ord, { @@ -54,33 +49,23 @@ where overlay: OverlayedChanges, offchain_db: TestPersistentOffchainDB, storage_transaction_cache: - StorageTransactionCache< as Backend>::Transaction, H, N>, + StorageTransactionCache< as Backend>::Transaction, H>, /// Storage backend. pub backend: InMemoryBackend, - changes_trie_config: Option, - changes_trie_storage: ChangesTrieInMemoryStorage, /// Extensions. 
pub extensions: Extensions, } -impl TestExternalities +impl TestExternalities where H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. - pub fn ext(&mut self) -> Ext> { + pub fn ext(&mut self) -> Ext> { Ext::new( &mut self.overlay, &mut self.storage_transaction_cache, &self.backend, - match self.changes_trie_config.clone() { - Some(config) => Some(ChangesTrieState { - config, - zero: 0.into(), - storage: &self.changes_trie_storage, - }), - None => None, - }, Some(&mut self.extensions), ) } @@ -97,12 +82,7 @@ where /// Create a new instance of `TestExternalities` with code and storage. pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { - let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage - .top - .get(CHANGES_TRIE_CONFIG) - .and_then(|v| Decode::decode(&mut &v[..]).ok()); - overlay.set_collect_extrinsics(changes_trie_config.is_some()); + let overlay = OverlayedChanges::default(); assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); assert!(storage.children_default.keys().all(|key| is_child_storage_key(key))); @@ -117,9 +97,7 @@ where TestExternalities { overlay, offchain_db, - changes_trie_config, extensions, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), backend: storage.into(), storage_transaction_cache: Default::default(), } @@ -150,11 +128,6 @@ where self.extensions.register(ext); } - /// Get mutable reference to changes trie storage. - pub fn changes_trie_storage(&mut self) -> &mut ChangesTrieInMemoryStorage { - &mut self.changes_trie_storage - } - /// Return a new backend with all pending changes. /// /// In contrast to [`commit_all`](Self::commit_all) this will not panic if there are open @@ -180,9 +153,8 @@ where /// /// This will panic if there are still open transactions. 
pub fn commit_all(&mut self) -> Result<(), String> { - let changes = self.overlay.drain_storage_changes::<_, _, N>( + let changes = self.overlay.drain_storage_changes::<_, _>( &self.backend, - None, Default::default(), &mut Default::default(), )?; @@ -216,7 +188,7 @@ where } } -impl std::fmt::Debug for TestExternalities +impl std::fmt::Debug for TestExternalities where H::Out: Ord + codec::Codec, { @@ -225,18 +197,18 @@ where } } -impl PartialEq for TestExternalities +impl PartialEq for TestExternalities where H::Out: Ord + 'static + codec::Codec, { /// This doesn't test if they are in the same state, only if they contain the /// same data at this state. - fn eq(&self, other: &TestExternalities) -> bool { + fn eq(&self, other: &TestExternalities) -> bool { self.as_backend().eq(&other.as_backend()) } } -impl Default for TestExternalities +impl Default for TestExternalities where H::Out: Ord + 'static + codec::Codec, { @@ -245,7 +217,7 @@ where } } -impl From for TestExternalities +impl From for TestExternalities where H::Out: Ord + 'static + codec::Codec, { @@ -254,11 +226,10 @@ where } } -impl sp_externalities::ExtensionStore for TestExternalities +impl sp_externalities::ExtensionStore for TestExternalities where H: Hasher, H::Out: Ord + codec::Codec, - N: ChangesTrieBlockNumber, { fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { self.extensions.get_mut(type_id) @@ -284,11 +255,10 @@ where } } -impl sp_externalities::ExternalitiesExt for TestExternalities +impl sp_externalities::ExternalitiesExt for TestExternalities where H: Hasher, H::Out: Ord + codec::Codec, - N: ChangesTrieBlockNumber, { fn extension(&mut self) -> Option<&mut T> { self.extension_by_type_id(TypeId::of::()).and_then(::downcast_mut) } @@ -312,7 +282,7 @@ mod tests { #[test] fn commit_should_work() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); @@ -324,7 +294,7 @@ mod tests { #[test] fn set_and_retrieve_code() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); let code = vec![1, 2, 3]; @@ -336,12 +306,12 @@ mod tests { #[test] fn check_send() { fn assert_send() {} - assert_send::>(); + assert_send::>(); } #[test] fn commit_all_and_kill_child_storage() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let child_info = ChildInfo::new_default(&b"test_child"[..]); { @@ -366,7 +336,7 @@ mod tests { #[test] fn as_backend_generates_same_backend_as_commit_all() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); { let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 45474a44693ab..1144e258e0e28 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -204,9 +204,6 @@ pub mod well_known_keys { /// Current extrinsic index (u32) is stored under this key. pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; - /// Changes trie configuration is stored under this key. - pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; - /// Prefix of child storage keys.
pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 975a81af4f53d..59733490a18ff 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -134,10 +134,6 @@ impl Externalities for AsyncExternalities { panic!("`child_storage_root`: should not be used in async externalities!") } - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - panic!("`storage_changes_root`: should not be used in async externalities!") - } - fn storage_start_transaction(&mut self) { unimplemented!("Transactions are not supported by AsyncExternalities"); } diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index d988160b1dc7b..5dfc8e7900832 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -73,15 +73,11 @@ pub type BlockNumber = u64; /// Index of a transaction. pub type Index = u64; /// The item of a block digest. -pub type DigestItem = sp_runtime::generic::DigestItem; +pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. -pub type Digest = sp_runtime::generic::Digest; +pub type Digest = sp_runtime::generic::Digest; /// A test block. pub type Block = sp_runtime::generic::Block; /// A test block's header. pub type Header = sp_runtime::generic::Header; -/// Changes trie configuration (optionally) used in tests. -pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { digest_interval: 4, digest_levels: 2 } -} diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 204b6ac435e07..3882ad08b91ec 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -23,7 +23,6 @@ sc-client-db = { version = "0.10.0-dev", features = [ ], path = "../../client/db" } sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } -sc-light = { version = "4.0.0-dev", path = "../../client/light" } sc-offchain = { version = "4.0.0-dev", path = "../../client/offchain" } sc-service = { version = "0.10.0-dev", default-features = false, features = [ "test-helpers", diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 9bc411af5d3ed..1aaca472d8e36 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -47,7 +47,7 @@ use serde::Deserialize; use sp_core::storage::ChildInfo; use sp_runtime::{ codec::Encode, - traits::{BlakeTwo256, Block as BlockT}, + traits::Block as BlockT, OpaqueExtrinsic, }; use std::{ @@ -56,10 +56,6 @@ use std::{ sync::Arc, }; -/// Test client light database backend. -pub type LightBackend = - sc_light::Backend, BlakeTwo256>; - /// A genesis storage initialization trait. pub trait GenesisInit: Default { /// Construct genesis storage. 
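For orientation, a minimal sketch of test-client construction after this change (assuming the `TestClientBuilder` re-export shown in the prelude further down; the light flavour has no replacement):

use substrate_test_runtime_client::prelude::*;

fn client_sketch() {
    // Tests build the full client only; `new_light()` and `LightFetcher` are removed below.
    let client = TestClientBuilder::new().build();
    assert_eq!(client.chain_info().best_number, 0);
}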
diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 75ebb8f23326c..b4f27ee1cce0d 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -12,7 +12,6 @@ publish = false targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sc-light = { version = "4.0.0-dev", path = "../../../client/light" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" } diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index e8c1d2ac5cd48..4519dce65960d 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -19,7 +19,6 @@ use sc_client_api::backend; use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_core::ChangesTrieConfiguration; use sc_block_builder::BlockBuilderApi; @@ -36,11 +35,6 @@ pub trait BlockBuilderExt { key: Vec, value: Option>, ) -> Result<(), sp_blockchain::Error>; - /// Add changes trie configuration update extrinsic to the block. - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error>; } impl<'a, A, B> BlockBuilderExt @@ -68,11 +62,4 @@ where ) -> Result<(), sp_blockchain::Error> { self.push(substrate_test_runtime::Extrinsic::StorageChange(key, value)) } - - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error> { - self.push(substrate_test_runtime::Extrinsic::ChangesTrieConfigUpdate(new_config)) - } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index bcfe93b6f7975..2eac1c018cfa8 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -24,22 +24,17 @@ pub mod trait_tests; mod block_builder_ext; pub use sc_consensus::LongestChain; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; pub use self::block_builder_ext::BlockBuilderExt; -use sc_client_api::light::{ - Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, - RemoteReadChildRequest, RemoteReadRequest, -}; use sp_core::{ - sr25519, + sr25519, Pair, storage::{ChildInfo, Storage, StorageChild}, - ChangesTrieConfiguration, Pair, }; -use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. @@ -51,7 +46,7 @@ pub mod prelude { }; // Client structs pub use super::{ - Backend, ExecutorDispatch, LightBackend, LightExecutor, LocalExecutorDispatch, + Backend, ExecutorDispatch, LocalExecutorDispatch, NativeElseWasmExecutor, TestClient, TestClientBuilder, WasmExecutionMethod, }; // Keyring @@ -84,26 +79,9 @@ pub type ExecutorDispatch = client::LocalCallExecutor< NativeElseWasmExecutor, >; -/// Test client light database backend. -pub type LightBackend = substrate_test_client::LightBackend; - -/// Test client light executor. 
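The block-builder test extension keeps only the plain storage-change method; a small sketch of the remaining surface (the generic signature here is illustrative, not from the hunk above):

use substrate_test_runtime_client::BlockBuilderExt;

// The `push_changes_trie_configuration_update` variant no longer exists;
// plain storage changes are still pushed as extrinsics.
fn set_marker(builder: &mut impl BlockBuilderExt) -> Result<(), sp_blockchain::Error> {
    builder.push_storage_change(b"marker".to_vec(), Some(vec![1]))
}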
-pub type LightExecutor = sc_light::GenesisCallExecutor< - LightBackend, - client::LocalCallExecutor< - substrate_test_runtime::Block, - sc_light::Backend< - sc_client_db::light::LightStorage, - HashFor, - >, - NativeElseWasmExecutor, - >, ->; - /// Parameters of test-client builder with test-runtime. #[derive(Default)] pub struct GenesisParameters { - changes_trie_config: Option, heap_pages_override: Option, extra_storage: Storage, wasm_code: Option>, @@ -112,7 +90,6 @@ pub struct GenesisParameters { impl GenesisParameters { fn genesis_config(&self) -> GenesisConfig { GenesisConfig::new( - self.changes_trie_config.clone(), vec![ sr25519::Public::from(Sr25519Keyring::Alice).into(), sr25519::Public::from(Sr25519Keyring::Bob).into(), @@ -210,12 +187,6 @@ pub trait TestClientBuilderExt: Sized { /// Returns a mutable reference to the genesis parameters. fn genesis_init_mut(&mut self) -> &mut GenesisParameters; - /// Set changes trie configuration for genesis. - fn changes_trie_config(mut self, config: Option) -> Self { - self.genesis_init_mut().changes_trie_config = config; - self - } - /// Override the default value for Wasm heap pages. fn set_heap_pages(mut self, heap_pages: u64) -> Self { self.genesis_init_mut().heap_pages_override = Some(heap_pages); @@ -303,142 +274,11 @@ impl TestClientBuilderExt } } -/// Type of optional fetch callback. -type MaybeFetcherCallback = - Option Result + Send + Sync>>; - -/// Type of fetcher future result. -type FetcherFutureResult = futures::future::Ready>; - -/// Implementation of light client fetcher used in tests. -#[derive(Default)] -pub struct LightFetcher { - call: MaybeFetcherCallback, Vec>, - body: MaybeFetcherCallback< - RemoteBodyRequest, - Vec, - >, -} - -impl LightFetcher { - /// Sets remote call callback. - pub fn with_remote_call( - self, - call: MaybeFetcherCallback, Vec>, - ) -> Self { - LightFetcher { call, body: self.body } - } - - /// Sets remote body callback. - pub fn with_remote_body( - self, - body: MaybeFetcherCallback< - RemoteBodyRequest, - Vec, - >, - ) -> Self { - LightFetcher { call: self.call, body } - } -} - -impl Fetcher for LightFetcher { - type RemoteHeaderResult = FetcherFutureResult; - type RemoteReadResult = FetcherFutureResult, Option>>>; - type RemoteCallResult = FetcherFutureResult>; - type RemoteChangesResult = - FetcherFutureResult, u32)>>; - type RemoteBodyResult = FetcherFutureResult>; - - fn remote_header( - &self, - _: RemoteHeaderRequest, - ) -> Self::RemoteHeaderResult { - unimplemented!() - } - - fn remote_read( - &self, - _: RemoteReadRequest, - ) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_read_child( - &self, - _: RemoteReadChildRequest, - ) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_call( - &self, - req: RemoteCallRequest, - ) -> Self::RemoteCallResult { - match self.call { - Some(ref call) => futures::future::ready(call(req)), - None => unimplemented!(), - } - } - - fn remote_changes( - &self, - _: RemoteChangesRequest, - ) -> Self::RemoteChangesResult { - unimplemented!() - } - - fn remote_body( - &self, - req: RemoteBodyRequest, - ) -> Self::RemoteBodyResult { - match self.body { - Some(ref body) => futures::future::ready(body(req)), - None => unimplemented!(), - } - } -} - /// Creates new client instance used for tests. pub fn new() -> Client { TestClientBuilder::new().build() } -/// Creates new light client instance used for tests. 
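A sketch of the trimmed genesis construction (values are illustrative; the five-argument shape matches the `genesismap` hunk further down, with the leading changes-trie configuration dropped):

use sp_core::sr25519;
use sp_keyring::Sr25519Keyring;
use substrate_test_runtime::genesismap::GenesisConfig;

fn genesis_sketch() -> GenesisConfig {
    GenesisConfig::new(
        vec![sr25519::Public::from(Sr25519Keyring::Alice).into()], // authorities
        vec![Sr25519Keyring::Alice.into()],                        // endowed accounts
        1_000_000,                                                 // balance per account
        None,                                                      // heap pages override
        Default::default(),                                        // extra storage
    )
}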
-pub fn new_light() -> ( - client::Client< - LightBackend, - LightExecutor, - substrate_test_runtime::Block, - substrate_test_runtime::RuntimeApi, - >, - Arc, -) { - let storage = sc_client_db::light::LightStorage::new_test(); - let blockchain = Arc::new(sc_light::Blockchain::new(storage)); - let backend = Arc::new(LightBackend::new(blockchain)); - let executor = new_native_executor(); - let local_call_executor = client::LocalCallExecutor::new( - backend.clone(), - executor, - Box::new(sp_core::testing::TaskExecutor::new()), - Default::default(), - ) - .expect("Creates LocalCallExecutor"); - let call_executor = LightExecutor::new(backend.clone(), local_call_executor); - - ( - TestClientBuilder::with_backend(backend.clone()) - .build_with_executor(call_executor) - .0, - backend, - ) -} - -/// Creates new light client fetcher used for tests. -pub fn new_light_fetcher() -> LightFetcher { - LightFetcher::default() -} - /// Create a new native executor. pub fn new_native_executor() -> sc_executor::NativeElseWasmExecutor { sc_executor::NativeElseWasmExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index a8801b8519dfe..a06d9f310fb04 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -23,7 +23,6 @@ use sc_service::client::genesis; use sp_core::{ map, storage::{well_known_keys, Storage}, - ChangesTrieConfiguration, }; use sp_io::hashing::{blake2_256, twox_128}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; @@ -31,7 +30,6 @@ use std::collections::BTreeMap; /// Configuration of a general Substrate test genesis block. pub struct GenesisConfig { - changes_trie_config: Option, authorities: Vec, balances: Vec<(AccountId, u64)>, heap_pages_override: Option, @@ -41,7 +39,6 @@ pub struct GenesisConfig { impl GenesisConfig { pub fn new( - changes_trie_config: Option, authorities: Vec, endowed_accounts: Vec, balance: u64, @@ -49,7 +46,6 @@ impl GenesisConfig { extra_storage: Storage, ) -> Self { GenesisConfig { - changes_trie_config, authorities, balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), heap_pages_override, @@ -77,9 +73,6 @@ impl GenesisConfig { .into_iter(), ) .collect(); - if let Some(ref changes_trie_config) = self.changes_trie_config { - map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); - } map.insert(twox_128(&b"sys:auth"[..])[..].to_vec(), self.authorities.encode()); // Add the extra storage entries. 
map.extend(self.extra_storage.top.clone().into_iter()); diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 943c41c247f75..08863de510d09 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -28,7 +28,7 @@ use scale_info::TypeInfo; use sp_std::{marker::PhantomData, prelude::*}; use sp_application_crypto::{ecdsa, ed25519, sr25519, RuntimeAppPublic}; -use sp_core::{offchain::KeyTypeId, ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; +use sp_core::{offchain::KeyTypeId, OpaqueMetadata, RuntimeDebug}; use sp_trie::{ trie_types::{TrieDB, TrieDBMut}, PrefixedMemoryDB, StorageProof, @@ -161,7 +161,6 @@ pub enum Extrinsic { }, IncludeData(Vec), StorageChange(Vec, Option>), - ChangesTrieConfigUpdate(Option), OffchainIndexSet(Vec, Vec), OffchainIndexClear(Vec), Store(Vec), @@ -197,8 +196,6 @@ impl BlindCheckable for Extrinsic { }, Extrinsic::IncludeData(v) => Ok(Extrinsic::IncludeData(v)), Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), - Extrinsic::ChangesTrieConfigUpdate(new_config) => - Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), Extrinsic::OffchainIndexSet(key, value) => Ok(Extrinsic::OffchainIndexSet(key, value)), Extrinsic::OffchainIndexClear(key) => Ok(Extrinsic::OffchainIndexClear(key)), Extrinsic::Store(data) => Ok(Extrinsic::Store(data)), @@ -265,9 +262,9 @@ pub type BlockNumber = u64; /// Index of a transaction. pub type Index = u64; /// The item of a block digest. -pub type DigestItem = sp_runtime::generic::DigestItem; +pub type DigestItem = sp_runtime::generic::DigestItem; /// The digest of a block. -pub type Digest = sp_runtime::generic::Digest; +pub type Digest = sp_runtime::generic::Digest; /// A test block. pub type Block = sp_runtime::generic::Block; /// A test block's header. @@ -1264,15 +1261,13 @@ fn test_witness(proof: StorageProof, root: crate::Hash) { let db: sp_trie::MemoryDB = proof.into_memory_db(); let backend = sp_state_machine::TrieBackend::<_, crate::Hashing>::new(db, root); let mut overlay = sp_state_machine::OverlayedChanges::default(); - let mut cache = sp_state_machine::StorageTransactionCache::<_, _, BlockNumber>::default(); + let mut cache = sp_state_machine::StorageTransactionCache::<_, _>::default(); let mut ext = sp_state_machine::Ext::new( &mut overlay, &mut cache, &backend, #[cfg(feature = "std")] None, - #[cfg(feature = "std")] - None, ); assert!(ext.storage(b"value3").is_some()); assert!(ext.storage_root().as_slice() == &root[..]); diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 334569d055a0c..070e807601188 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -24,10 +24,10 @@ use crate::{ use codec::{Decode, Encode, KeyedVec}; use frame_support::{decl_module, decl_storage, storage}; use frame_system::Config; -use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use sp_core::storage::well_known_keys; use sp_io::{ hashing::blake2_256, - storage::{changes_root as storage_changes_root, root as storage_root}, + storage::root as storage_root, trie, }; use sp_runtime::{ @@ -54,7 +54,6 @@ decl_storage! 
{ Number get(fn number): Option; ParentHash get(fn parent_hash): Hash; NewAuthorities get(fn new_authorities): Option>; - NewChangesTrieConfig get(fn new_changes_trie_config): Option>; StorageDigest get(fn storage_digest): Option; Authorities get(fn authorities) config(): Vec; } @@ -207,30 +206,17 @@ pub fn finalize_block() -> Header { let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); let o_new_authorities = ::take(); - let new_changes_trie_config = ::take(); // This MUST come after all changes to storage are done. Otherwise we will fail the // “Storage root does not match that calculated” assertion. let storage_root = Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); - let storage_changes_root = storage_changes_root(&parent_hash.encode()) - .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); - - if let Some(storage_changes_root) = storage_changes_root { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); - } if let Some(new_authorities) = o_new_authorities { digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); } - if let Some(new_config) = new_changes_trie_config { - digest.push(generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(new_config), - )); - } - Header { number, extrinsics_root, state_root: storage_root, parent_hash, digest } } @@ -251,8 +237,6 @@ fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyEx Extrinsic::IncludeData(_) => Ok(Ok(())), Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), - Extrinsic::ChangesTrieConfigUpdate(ref new_config) => - execute_changes_trie_config_update(new_config.clone()), Extrinsic::OffchainIndexSet(key, value) => { sp_io::offchain_index::set(&key, &value); Ok(Ok(())) @@ -311,18 +295,6 @@ fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicRes Ok(Ok(())) } -fn execute_changes_trie_config_update( - new_config: Option, -) -> ApplyExtrinsicResult { - match new_config.clone() { - Some(new_config) => - storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()), - None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), - } - ::put(new_config); - Ok(Ok(())) -} - #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { use sp_core::hexdisplay::HexDisplay; diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs index 58c4cf6503a93..6f21ddfb2819f 100644 --- a/test-utils/test-runner/src/client.rs +++ b/test-utils/test-runner/src/client.rs @@ -159,7 +159,6 @@ where transaction_pool: transaction_pool.clone(), spawn_handle: task_manager.spawn_handle(), import_queue, - on_demand: None, block_announce_validator_builder: None, warp_sync: None, }; @@ -195,14 +194,12 @@ where backend: backend.clone(), task_manager: &mut task_manager, keystore: keystore.sync_keystore(), - on_demand: None, transaction_pool: transaction_pool.clone(), rpc_extensions_builder: Box::new(move |_, _| { let mut io = jsonrpc_core::IoHandler::default(); io.extend_with(ManualSealApi::to_delegate(ManualSeal::new(rpc_sink.clone()))); Ok(io) }), - remote_blockchain: None, network, system_rpc_tx, telemetry: None, diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 9114013b747f7..52e40fffc7dec 100644 --- 
a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -26,7 +26,7 @@ use futures::{ use jsonrpc_core::MetaIoHandler; use manual_seal::EngineCommand; use sc_client_api::{ - backend::{self, Backend}, + backend::Backend, CallExecutor, ExecutorProvider, }; use sc_executor::NativeElseWasmExecutor; @@ -160,9 +160,6 @@ where { let id = BlockId::Hash(self.client.info().best_hash); let mut overlay = OverlayedChanges::default(); - let changes_trie = - backend::changes_tries_state_at_block(&id, self.backend.changes_trie_storage()) - .unwrap(); let mut cache = StorageTransactionCache::< T::Block, as Backend>::State, @@ -180,7 +177,6 @@ where &mut overlay, &mut cache, &state_backend, - changes_trie.clone(), Some(&mut extensions), ); sp_externalities::set_and_run_with_externalities(&mut ext, closure) diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 5efa970d93580..48d05dd8dda9c 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -33,7 +33,7 @@ use sp_core::offchain::{ }; use sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use sp_state_machine::StateMachine; use std::{fmt::Debug, sync::Arc, time}; @@ -152,9 +152,8 @@ impl BenchmarkCmd { // Get Benchmark List let state = &state_without_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, - None, &mut changes, &executor, "Benchmark_benchmark_metadata", @@ -243,9 +242,8 @@ impl BenchmarkCmd { if !self.no_verify { // Don't use these results since verification code will add overhead let state = &state_without_tracking; - let _results = StateMachine::<_, _, NumberFor, _>::new( + let _results = StateMachine::new( state, - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", @@ -270,9 +268,8 @@ impl BenchmarkCmd { // Do one loop of DB tracking. { let state = &state_with_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, // todo remove tracking - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", @@ -303,9 +300,8 @@ impl BenchmarkCmd { // Finally run a bunch of loops to get extrinsic timing information.
for r in 0..self.external_repeat { let state = &state_without_tracking; - let result = StateMachine::<_, _, NumberFor, _>::new( + let result = StateMachine::new( state, // todo remove tracking - None, &mut changes, &executor, "Benchmark_dispatch_benchmark", diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index f0f37f0b20675..41a5f3ba0eb3e 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -20,14 +20,13 @@ use std::sync::Arc; use codec::{Codec, Decode, Encode}; -use futures::{future::ready, FutureExt, TryFutureExt}; +use futures::FutureExt; use jsonrpc_core::{Error as RpcError, ErrorCode}; use jsonrpc_derive::rpc; -use sc_client_api::light::{future_header, Fetcher, RemoteBlockchain, RemoteCallRequest}; use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool}; use sp_block_builder::BlockBuilder; -use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_blockchain::HeaderBackend; use sp_core::{hexdisplay::HexDisplay, Bytes}; use sp_runtime::{generic::BlockId, traits}; @@ -154,90 +153,6 @@ where } } -/// An implementation of System-specific RPC methods on light client. -pub struct LightSystem { - client: Arc, - remote_blockchain: Arc>, - fetcher: Arc, - pool: Arc
<P>, -} - -impl<P: TransactionPool, C, F, Block> LightSystem<P, C, F, Block> { - /// Create new `LightSystem`. - pub fn new( - client: Arc<C>, - remote_blockchain: Arc<dyn RemoteBlockchain<Block>>, - fetcher: Arc<F>, - pool: Arc<P>
, - ) -> Self { - LightSystem { client, remote_blockchain, fetcher, pool } - } -} - -impl SystemApi<::Hash, AccountId, Index> - for LightSystem -where - P: TransactionPool + 'static, - C: HeaderBackend, - C: Send + Sync + 'static, - F: Fetcher + 'static, - Block: traits::Block, - AccountId: Clone + std::fmt::Display + Codec + Send + 'static, - Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, -{ - fn nonce(&self, account: AccountId) -> FutureResult { - let best_hash = self.client.info().best_hash; - let best_id = BlockId::hash(best_hash); - let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); - let fetcher = self.fetcher.clone(); - let call_data = account.encode(); - let future_best_header = future_best_header.and_then(move |maybe_best_header| { - ready( - maybe_best_header - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", best_hash))), - ) - }); - - let future_nonce = future_best_header.and_then(move |best_header| { - fetcher.remote_call(RemoteCallRequest { - block: best_hash, - header: best_header, - method: "AccountNonceApi_account_nonce".into(), - call_data, - retry_count: None, - }) - }); - - let future_nonce = future_nonce.and_then(|nonce| async move { - Index::decode(&mut &nonce[..]) - .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e)) - }); - let future_nonce = future_nonce.map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query nonce.".into(), - data: Some(format!("{:?}", e).into()), - }); - - let pool = self.pool.clone(); - future_nonce.map_ok(move |nonce| adjust_nonce(&*pool, account, nonce)).boxed() - } - - fn dry_run( - &self, - _extrinsic: Bytes, - _at: Option<::Hash>, - ) -> FutureResult { - async { - Err(RpcError { - code: ErrorCode::MethodNotFound, - message: "Unable to dry run extrinsic.".into(), - data: None, - }) - } - .boxed() - } -} - /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
fn adjust_nonce(pool: &P, account: AccountId, nonce: Index) -> Index diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 9125db13c78f9..794661254a584 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -152,9 +152,8 @@ where .map_err(|e| format!("failed to decode output: {:?}", e))?; let storage_changes = changes - .drain_storage_changes::<_, _, NumberFor>( + .drain_storage_changes( &state_ext.backend, - None, Default::default(), &mut Default::default(), ) diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index 28e51b38f2ace..f6ceac701f782 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -677,9 +677,8 @@ pub(crate) fn state_machine_call sc_cli::Result<(OverlayedChanges, Vec)> { let mut changes = Default::default(); - let encoded_results = StateMachine::<_, _, NumberFor, _>::new( + let encoded_results = StateMachine::new( &ext.backend, - None, &mut changes, executor, method, From 1e4b322143d6006a8416da893c683d6c6d25de79 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Oct 2021 19:29:33 +0200 Subject: [PATCH 2/8] Update tests --- client/service/test/src/client/mod.rs | 8 ++------ primitives/runtime/src/generic/digest.rs | 2 +- primitives/runtime/src/generic/header.rs | 18 ++---------------- primitives/runtime/src/generic/tests.rs | 6 ++---- 4 files changed, 7 insertions(+), 27 deletions(-) diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 8f028c3691768..1c1a48990b617 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1124,12 +1124,8 @@ fn doesnt_import_blocks_that_revert_finality() { ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); let import_err = block_on(client.import(BlockOrigin::Own, b3)).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::RuntimeApiError(sp_api::ApiError::Application(Box::new( - sp_blockchain::Error::NotInFinalizedChain, - ))) - .to_string(), - ); + let expected_err = + ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); assert_eq!(import_err.to_string(), expected_err.to_string()); diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index fc284d7ff9ab7..b0c0b7abbb3d3 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -472,7 +472,7 @@ mod tests { assert_eq!( serde_json::to_string(&digest).unwrap(), - r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# + r#"{"logs":["0x000c010203","0x05746573740c010203"]}"# ); } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 0ad53b8e83b79..db51251df7784 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -245,14 +245,7 @@ mod tests { assert_eq!( header_encoded, vec![ - 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, - 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 8, 88, 19, - 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, - 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, - 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, 
12, 73, - 104, 49, 200, 204, 31, 143, 13, 8, 2, 112, 178, 1, 53, 47, 36, 191, 28, 151, 112, - 185, 159, 143, 113, 32, 24, 33, 65, 28, 244, 20, 55, 124, 155, 140, 45, 188, 238, - 97, 219, 135, 214, 0, 4, 54 + 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 8, 88, 19, 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, 12, 73, 104, 49, 200, 204, 31, 143, 13, 4, 0, 4, 54 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); @@ -273,14 +266,7 @@ mod tests { assert_eq!( header_encoded, vec![ - 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, - 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 65, 31, 219, 225, - 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, - 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, - 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, - 111, 126, 54, 34, 155, 220, 253, 124, 8, 0, 16, 53, 48, 48, 48, 2, 42, 105, 109, - 150, 206, 223, 24, 44, 164, 77, 27, 137, 177, 220, 25, 170, 140, 35, 156, 246, 233, - 112, 26, 23, 192, 61, 226, 14, 84, 219, 144, 252 + 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 65, 31, 219, 225, 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, 111, 126, 54, 34, 155, 220, 253, 124, 4, 0, 16, 53, 48, 48, 48 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index ef2d3f0f4b5a2..e777eabca2a59 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -26,11 +26,9 @@ fn system_digest_item_encoding() { let encoded = item.encode(); assert_eq!( encoded, - vec![ + vec![4, // type = DigestItemType::Consensus - 2, // trie root - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, + 1, 2, 3, 4, 16, 5, 6, 7, 8, ] ); From 17fb89990e8fdcdf366e42d914e22b061a5718db Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Oct 2021 19:30:26 +0200 Subject: [PATCH 3/8] fmt --- bin/node/cli/src/chain_spec.rs | 4 +- bin/node/executor/tests/common.rs | 4 +- bin/node/rpc/src/lib.rs | 1 - bin/node/testing/src/genesis.rs | 5 +- client/api/src/backend.rs | 1 - client/api/src/in_mem.rs | 4 +- .../basic-authorship/src/basic_authorship.rs | 6 +- client/block-builder/src/lib.rs | 2 +- client/consensus/aura/src/import_queue.rs | 8 +- client/consensus/aura/src/lib.rs | 13 +-- client/consensus/babe/src/lib.rs | 19 +--- client/consensus/babe/src/tests.rs | 2 +- client/consensus/babe/src/verification.rs | 2 +- client/consensus/common/src/block_import.rs | 3 +- client/consensus/manual-seal/src/consensus.rs | 8 +- .../manual-seal/src/consensus/babe.rs | 8 +- client/consensus/pow/src/lib.rs | 10 +- client/consensus/slots/src/lib.rs | 6 +- client/db/src/lib.rs | 17 +-- client/finality-grandpa/src/tests.rs | 6 +- 
client/network/src/behaviour.rs | 13 +-- client/network/src/light_client_requests.rs | 1 - .../src/light_client_requests/handler.rs | 5 +- client/network/test/src/lib.rs | 4 +- client/rpc/src/chain/mod.rs | 4 +- client/rpc/src/state/state_full.rs | 14 +-- client/rpc/src/state/tests.rs | 7 +- client/service/src/builder.rs | 5 +- client/service/src/client/call_executor.rs | 5 +- client/service/src/client/client.rs | 21 ++-- client/service/test/src/client/mod.rs | 5 +- client/transaction-pool/src/api.rs | 5 +- frame/support/src/storage/mod.rs | 4 +- primitives/consensus/common/src/lib.rs | 5 +- primitives/runtime/src/generic/digest.rs | 87 ++++++--------- primitives/runtime/src/generic/header.rs | 22 ++-- primitives/runtime/src/generic/tests.rs | 4 +- primitives/state-machine/src/ext.rs | 16 +-- primitives/state-machine/src/lib.rs | 105 +++--------------- .../src/overlayed_changes/mod.rs | 20 +--- primitives/state-machine/src/testing.rs | 5 +- primitives/test-primitives/src/lib.rs | 1 - test-utils/client/src/lib.rs | 6 +- test-utils/runtime/client/src/lib.rs | 7 +- test-utils/runtime/src/system.rs | 6 +- test-utils/test-runner/src/node.rs | 12 +- .../cli/src/commands/follow_chain.rs | 6 +- 47 files changed, 148 insertions(+), 376 deletions(-) diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 0e1e0aa2e677d..6d11722081e30 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -295,9 +295,7 @@ pub fn testnet_genesis( const STASH: Balance = ENDOWMENT / 1000; GenesisConfig { - system: SystemConfig { - code: wasm_binary_unwrap().to_vec(), - }, + system: SystemConfig { code: wasm_binary_unwrap().to_vec() }, balances: BalancesConfig { balances: endowed_accounts.iter().cloned().map(|x| (x, ENDOWMENT)).collect(), }, diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 18da5af1b0fac..c81d6b7c14e02 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -126,9 +126,7 @@ pub fn executor_call< pub fn new_test_ext(code: &[u8]) -> TestExternalities { let ext = TestExternalities::new_with_code( code, - node_testing::genesis::config(Some(code)) - .build_storage() - .unwrap(), + node_testing::genesis::config(Some(code)).build_storage().unwrap(), ); ext } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index c0c79fbe4f5fa..51ab191d5e9a5 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -171,4 +171,3 @@ where Ok(io) } - diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 2afefd9bd3f20..fa0dd22c9c995 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -34,10 +34,7 @@ pub fn config(code: Option<&[u8]>) -> GenesisConfig { /// Create genesis runtime configuration for tests with some extra /// endowed accounts. -pub fn config_endowed( - code: Option<&[u8]>, - extra_endowed: Vec, -) -> GenesisConfig { +pub fn config_endowed(code: Option<&[u8]>, extra_endowed: Vec) -> GenesisConfig { let mut endowed = vec![ (alice(), 111 * DOLLARS), (bob(), 100 * DOLLARS), diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index ee5ede6fdeed6..ebc2bd8e7e722 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -527,4 +527,3 @@ pub trait Backend: AuxStore + Send + Sync { /// Mark for all Backend implementations, that are making use of state data, stored locally. 
pub trait LocalBackend: Backend {} - diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index 454920df0e0c3..39fe9e063d20b 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -29,8 +29,8 @@ use sp_runtime::{ Justification, Justifications, Storage, }; use sp_state_machine::{ - Backend as StateBackend, ChildStorageCollection, InMemoryBackend, - IndexOperation, StorageCollection, + Backend as StateBackend, ChildStorageCollection, InMemoryBackend, IndexOperation, + StorageCollection, }; use std::{ collections::{HashMap, HashSet}, diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 5bcfeebc6bad0..90ae4a22e1319 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -40,9 +40,9 @@ use sp_consensus::{ use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ - Digest, generic::BlockId, traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, + Digest, }; use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; @@ -690,9 +690,7 @@ mod tests { let state = backend.state_at(block_id).unwrap(); - let storage_changes = api - .into_storage_changes(&state, genesis_hash) - .unwrap(); + let storage_changes = api.into_storage_changes(&state, genesis_hash).unwrap(); assert_eq!( proposal.storage_changes.transaction_storage_root, diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index c86e849f7eaa2..01afaca0cacf4 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -34,9 +34,9 @@ use sp_api::{ use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; use sp_runtime::{ - Digest, generic::BlockId, traits::{Block as BlockT, Hash, HashFor, Header as HeaderT, NumberFor, One}, + Digest, }; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 2311aeed4af22..d2c4ad4498d5e 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -44,9 +44,9 @@ use sp_consensus_slots::Slot; use sp_core::{crypto::Pair, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; use sp_runtime::{ - DigestItem, generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, Header}, + DigestItem, }; use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; @@ -189,11 +189,7 @@ where #[async_trait::async_trait] impl Verifier for AuraVerifier where - C: ProvideRuntimeApi - + Send - + Sync - + sc_client_api::backend::AuxStore - + BlockOf, + C: ProvideRuntimeApi + Send + Sync + sc_client_api::backend::AuxStore + BlockOf, C::Api: BlockBuilderApi + AuraApi> + ApiExt, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index d5664b9b18bef..16880ae188ad6 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -61,9 +61,9 @@ use sp_core::crypto::{Pair, Public}; use sp_inherents::CreateInherentDataProviders; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - DigestItem, generic::BlockId, traits::{Block as BlockT, Header, Member, NumberFor, Zero}, + DigestItem, }; mod import_queue; @@ -378,11 +378,7 @@ where }) } - fn pre_digest_data( - &self, - slot: Slot, - _claim: 
&Self::Claim, - ) -> Vec { + fn pre_digest_data(&self, slot: Slot, _claim: &Self::Claim) -> Vec { vec![>::aura_pre_digest(slot)] } @@ -575,7 +571,10 @@ mod tests { use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; - use sp_runtime::{Digest, traits::{Block as BlockT, Header as _}}; + use sp_runtime::{ + traits::{Block as BlockT, Header as _}, + Digest, + }; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{ task::Poll, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 71f45b67134ae..4fb9f750004c5 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -103,9 +103,7 @@ use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE} use sp_api::{ApiExt, NumberFor, ProvideRuntimeApi}; use sp_application_crypto::AppKey; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_blockchain::{ - Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult, -}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata, Result as ClientResult}; use sp_consensus::{ BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SlotData, @@ -116,9 +114,9 @@ use sp_core::{crypto::Public, ExecutionContext}; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ - DigestItem, generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, Header, Zero}, + DigestItem, }; pub use sc_consensus_slots::SlotProportion; @@ -676,9 +674,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeSlotWorker where B: BlockT, - C: ProvideRuntimeApi - + HeaderBackend - + HeaderMetadata, + C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata, C::Api: BabeApi, E: Environment + Sync, E::Proposer: Proposer>, @@ -772,11 +768,7 @@ where }); } - fn pre_digest_data( - &self, - _slot: Slot, - claim: &Self::Claim, - ) -> Vec { + fn pre_digest_data(&self, _slot: Slot, claim: &Self::Claim) -> Vec { vec![::babe_pre_digest(claim.0.clone())] } @@ -818,8 +810,7 @@ where .clone() .try_into() .map_err(|_| sp_consensus::Error::InvalidSignature(signature, public))?; - let digest_item = - ::babe_seal(signature.into()); + let digest_item = ::babe_seal(signature.into()); let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); import_block.post_digests.push(digest_item); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 82203148f99a8..73cc453812eae 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -44,7 +44,7 @@ use sp_core::crypto::Pair; use sp_keystore::{vrf::make_transcript as transcript_from_data, SyncCryptoStore}; use sp_runtime::{ generic::{Digest, DigestItem}, - traits::{Block as BlockT}, + traits::Block as BlockT, }; use sp_timestamp::InherentDataProvider as TimestampInherentDataProvider; use std::{cell::RefCell, task::Poll, time::Duration}; diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index fe23fe81c231f..174b2d03c6ef0 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -32,7 +32,7 @@ use sp_consensus_babe::{ }; use sp_consensus_slots::Slot; use sp_core::{Pair, Public}; -use sp_runtime::{ DigestItem, traits::Header }; +use sp_runtime::{traits::Header, DigestItem}; /// BABE 
verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index b7f33f4a8a46e..68131f629256b 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -20,9 +20,8 @@ use serde::{Deserialize, Serialize}; use sp_runtime::{ - DigestItem, traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor}, - Justification, Justifications, + DigestItem, Justification, Justifications, }; use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc}; diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index bdc6120398dfc..4284d40179d2f 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -21,7 +21,7 @@ use super::Error; use sc_consensus::BlockImportParams; use sp_inherents::InherentData; -use sp_runtime::{Digest, traits::Block as BlockT}; +use sp_runtime::{traits::Block as BlockT, Digest}; pub mod babe; @@ -32,11 +32,7 @@ pub trait ConsensusDataProvider: Send + Sync { type Transaction; /// Attempt to create a consensus digest. - fn create_digest( - &self, - parent: &B::Header, - inherents: &InherentData, - ) -> Result; + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; /// set up the neccessary import params. fn append_block_import( diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 711a22245e552..e06c544aaedc3 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -48,9 +48,9 @@ use sp_consensus_babe::{ use sp_consensus_slots::Slot; use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ - DigestItem, generic::{BlockId, Digest}, traits::{Block as BlockT, Header, Zero}, + DigestItem, }; use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER}; @@ -194,11 +194,7 @@ where { type Transaction = TransactionFor; - fn create_digest( - &self, - parent: &B::Header, - inherents: &InherentData, - ) -> Result { + fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents .babe_inherent_data()? .ok_or_else(|| Error::StringError("No babe inherent data".into()))?; diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index bc2b772c8e0f2..3ab0b977255ee 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -425,10 +425,7 @@ impl PowVerifier { Self { algorithm, _marker: PhantomData } } - fn check_header( - &self, - mut header: B::Header, - ) -> Result<(B::Header, DigestItem), Error> + fn check_header(&self, mut header: B::Header) -> Result<(B::Header, DigestItem), Error> where Algorithm: PowAlgorithm, { @@ -702,10 +699,7 @@ fn find_pre_digest(header: &B::Header) -> Result>, Err } /// Fetch PoW seal. 
-fn fetch_seal( - digest: Option<&DigestItem>, - hash: B::Hash, -) -> Result, Error> { +fn fetch_seal(digest: Option<&DigestItem>, hash: B::Hash) -> Result, Error> { match digest { Some(DigestItem::Seal(id, seal)) => if id == &POW_ENGINE_ID { diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index a319fcdb5b2c4..ead209ef86a65 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -141,11 +141,7 @@ pub trait SimpleSlotWorker { fn notify_slot(&self, _header: &B::Header, _slot: Slot, _epoch_data: &Self::EpochData) {} /// Return the pre digest data to include in a block authored with the given claim. - fn pre_digest_data( - &self, - slot: Slot, - claim: &Self::Claim, - ) -> Vec; + fn pre_digest_data(&self, slot: Slot, claim: &Self::Claim) -> Vec; /// Returns a function which produces a `BlockImportParams`. fn block_import_params( diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index d30c3e5df9330..7d46b63da5bb4 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -85,9 +85,8 @@ use sp_runtime::{ Justification, Justifications, Storage, }; use sp_state_machine::{ - backend::Backend as StateBackend, - ChildStorageCollection, DBValue, IndexOperation, OffchainChangesCollection, StateMachineStats, - StorageCollection, UsageInfo as StateUsageInfo, + backend::Backend as StateBackend, ChildStorageCollection, DBValue, IndexOperation, + OffchainChangesCollection, StateMachineStats, StorageCollection, UsageInfo as StateUsageInfo, }; use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; @@ -774,9 +773,7 @@ impl BlockImportOperation { }); let (root, transaction) = self.old_state.full_storage_root( - storage.top.iter().map(|(k, v)| { - (&k[..], Some(&v[..])) - }), + storage.top.iter().map(|(k, v)| (&k[..], Some(&v[..]))), child_delta, ); @@ -1201,13 +1198,7 @@ impl Backend { self.ensure_sequential_finalization(header, last_finalized)?; let with_state = sc_client_api::Backend::have_state_at(self, &hash, number); - self.note_finalized( - transaction, - header, - *hash, - finalization_displaced, - with_state, - )?; + self.note_finalized(transaction, header, *hash, finalization_displaced, with_state)?; if let Some(justification) = justification { transaction.set_from_vec( diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 77318244eefa7..8439bf6963d01 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -149,11 +149,7 @@ impl TestNetFactory for GrandpaTestNet { ) .expect("Could not create block import for fresh peer."); let justification_import = Box::new(import.clone()); - ( - BlockImportAdapter::new(import), - Some(justification_import), - Mutex::new(Some(link)), - ) + (BlockImportAdapter::new(import), Some(justification_import), Mutex::new(Some(link))) } fn peer(&mut self, i: usize) -> &mut GrandpaPeer { diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index a11cec623328e..e2b950cf67e8c 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -20,8 +20,9 @@ use crate::{ bitswap::Bitswap, config::ProtocolId, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + peer_info, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, - peer_info, request_responses, DhtEvent, ObservedRole, + request_responses, DhtEvent, ObservedRole, }; use bytes::Bytes; @@ -422,12 +423,10 @@ impl NetworkBehaviourEventProcess> for Behavi 
self.events.push_back(BehaviourOut::NotificationsReceived { remote, messages }); }, CustomMessageOutcome::PeerNewBest(_peer_id, _number) => {}, - CustomMessageOutcome::SyncConnected(peer_id) => { - self.events.push_back(BehaviourOut::SyncConnected(peer_id)) - }, - CustomMessageOutcome::SyncDisconnected(peer_id) => { - self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)) - }, + CustomMessageOutcome::SyncConnected(peer_id) => + self.events.push_back(BehaviourOut::SyncConnected(peer_id)), + CustomMessageOutcome::SyncDisconnected(peer_id) => + self.events.push_back(BehaviourOut::SyncDisconnected(peer_id)), CustomMessageOutcome::None => {}, } } diff --git a/client/network/src/light_client_requests.rs b/client/network/src/light_client_requests.rs index 58d5575cbfc74..b1793ce9384ed 100644 --- a/client/network/src/light_client_requests.rs +++ b/client/network/src/light_client_requests.rs @@ -45,4 +45,3 @@ pub fn generate_protocol_config(protocol_id: &ProtocolId) -> ProtocolConfig { inbound_queue: None, } } - diff --git a/client/network/src/light_client_requests/handler.rs b/client/network/src/light_client_requests/handler.rs index f1dd5f1f2cb74..a04c5e310a67e 100644 --- a/client/network/src/light_client_requests/handler.rs +++ b/client/network/src/light_client_requests/handler.rs @@ -38,10 +38,7 @@ use sp_core::{ hexdisplay::HexDisplay, storage::{ChildInfo, ChildType, PrefixedStorageKey}, }; -use sp_runtime::{ - generic::BlockId, - traits::Block, -}; +use sp_runtime::{generic::BlockId, traits::Block}; use std::sync::Arc; const LOG_TARGET: &str = "light-client-request-handler"; diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 7f8cc0de3208d..dde13fdead13c 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -730,8 +730,8 @@ where let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); - let (block_import, justification_import, data) = - self.make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); + let (block_import, justification_import, data) = self + .make_block_import(PeersClient { client: client.clone(), backend: backend.clone() }); let verifier = self.make_verifier( PeersClient { client: client.clone(), backend: backend.clone() }, diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index 80dab3ec5d4e5..4f47742323687 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -32,9 +32,7 @@ use rpc::{ use std::sync::Arc; use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; -use sc_client_api::{ - BlockchainEvents, -}; +use sc_client_api::BlockchainEvents; use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ generic::{BlockId, SignedBlock}, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index bd72343583361..d3b41d2f18279 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -26,10 +26,7 @@ use futures::{ use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; use log::warn; use rpc::Result as RpcResult; -use std::{ - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use sc_rpc_api::state::ReadProof; use sp_blockchain::{ @@ -42,10 +39,7 @@ use sp_core::{ }, Bytes, }; -use sp_runtime::{ - generic::BlockId, - traits::Block as BlockT, -}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use 
sp_version::RuntimeVersion; use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; @@ -145,9 +139,7 @@ where hashes }; - Ok(QueryStorageRange { - hashes, - }) + Ok(QueryStorageRange { hashes }) } /// Iterates through range.unfiltered_range and check each block for changes of keys' values. diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 9d5919ec27f4e..d360701c88b2a 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -512,12 +512,7 @@ fn should_query_storage() { } run_tests(Arc::new(substrate_test_runtime_client::new())); - run_tests( - Arc::new( - TestClientBuilder::new() - .build(), - ), - ); + run_tests(Arc::new(TestClientBuilder::new().build())); } #[test] diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index d7f8a5bc09169..570139b11fdea 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -30,9 +30,8 @@ use log::info; use prometheus_endpoint::Registry; use sc_chain_spec::get_extension; use sc_client_api::{ - execution_extensions::ExecutionExtensions, - proof_provider::ProofProvider, BadBlocks, BlockBackend, BlockchainEvents, ExecutorProvider, - ForkBlocks, StorageProvider, UsageProvider, + execution_extensions::ExecutionExtensions, proof_provider::ProofProvider, BadBlocks, + BlockBackend, BlockchainEvents, ExecutorProvider, ForkBlocks, StorageProvider, UsageProvider, }; use sc_client_db::{Backend, DatabaseSettings}; use sc_consensus::import_queue::ImportQueue; diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index d2c474fc2b877..be871cc371ed5 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -26,10 +26,7 @@ use sp_core::{ NativeOrEncoded, NeverNativeValue, }; use sp_externalities::Extensions; -use sp_runtime::{ - generic::BlockId, - traits::Block as BlockT, -}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_state_machine::{ self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, StateMachine, StorageProof, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 598203e5c2677..617097fd2972c 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -30,9 +30,8 @@ use rand::Rng; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof}; use sc_client_api::{ backend::{ - self, apply_aux, BlockImportOperation, ClientImportOperation, - Finalizer, ImportSummary, LockImportRun, NewBlockState, - StorageProvider, + self, apply_aux, BlockImportOperation, ClientImportOperation, Finalizer, ImportSummary, + LockImportRun, NewBlockState, StorageProvider, }, client::{ BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo, @@ -68,23 +67,17 @@ use sp_keystore::SyncCryptoStorePtr; use sp_runtime::{ generic::{BlockId, SignedBlock}, traits::{ - Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, - SaturatedConversion, Zero, + Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, Zero, }, - Digest, BuildStorage, Justification, Justifications, + BuildStorage, Digest, Justification, Justifications, }; use sp_state_machine::{ - prove_child_read, prove_range_read_with_size, prove_read, - read_range_proof_check, Backend as StateBackend, + prove_child_read, prove_range_read_with_size, prove_read, read_range_proof_check, + Backend as StateBackend, }; use sp_trie::StorageProof; 
use std::{ - collections::HashMap, - marker::PhantomData, - panic::UnwindSafe, - path::PathBuf, - result, - sync::Arc, + collections::HashMap, marker::PhantomData, panic::UnwindSafe, path::PathBuf, result, sync::Arc, }; #[cfg(feature = "test-helpers")] diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 1c1a48990b617..d5e23d319e83e 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -41,10 +41,7 @@ use sp_state_machine::{ }; use sp_storage::{ChildInfo, StorageKey}; use sp_trie::{trie_types::Layout, TrieConfiguration}; -use std::{ - collections::HashSet, - sync::Arc, -}; +use std::{collections::HashSet, sync::Arc}; use substrate_test_runtime::TestAPI; use substrate_test_runtime_client::{ prelude::*, diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 188475ec07239..5555e4f64fcd2 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -28,10 +28,7 @@ use futures::{ use std::{marker::PhantomData, pin::Pin, sync::Arc}; use prometheus_endpoint::Registry as PrometheusRegistry; -use sc_client_api::{ - blockchain::HeaderBackend, - BlockBackend, -}; +use sc_client_api::{blockchain::HeaderBackend, BlockBackend}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index dc2c056eefa1a..3bb799b708fb5 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -1509,9 +1509,7 @@ mod test { let value = unhashed::get_raw(&Storage::storage_value_final_key()).unwrap(); - let expected = Digest { - logs: vec![DigestItem::Other(Vec::new())], - }; + let expected = Digest { logs: vec![DigestItem::Other(Vec::new())] }; assert_eq!(Digest::decode(&mut &value[..]).unwrap(), expected); }); } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 93ad387ff35e9..ce834fd0a47f4 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -25,9 +25,9 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; use sp_runtime::{ - Digest, generic::BlockId, traits::{Block as BlockT, HashFor}, + Digest, }; use sp_state_machine::StorageProof; @@ -112,8 +112,7 @@ pub struct Proposal { /// Proof that was recorded while building the block. pub proof: Proof, /// The storage changes while building this block. - pub storage_changes: - sp_state_machine::StorageChanges>, + pub storage_changes: sp_state_machine::StorageChanges>, } /// Error that is returned when [`ProofRecording`] requested to record a proof, diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index b0c0b7abbb3d3..978653efb93d8 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -63,18 +63,12 @@ impl Digest { } /// Get reference to the first digest item that matches the passed predicate. - pub fn log Option<&T>>( - &self, - predicate: F, - ) -> Option<&T> { + pub fn log Option<&T>>(&self, predicate: F) -> Option<&T> { self.logs().iter().find_map(predicate) } /// Get a conversion of the first digest item that successfully converts using the function. 
- pub fn convert_first Option>( - &self, - predicate: F, - ) -> Option { + pub fn convert_first Option>(&self, predicate: F) -> Option { self.logs().iter().find_map(predicate) } } @@ -145,47 +139,37 @@ impl TypeInfo for DigestItem { type Identity = Self; fn type_info() -> Type { - Type::builder() - .path(Path::new("DigestItem", module_path!())) - .variant( - Variants::new() - .variant("PreRuntime", |v| { - v.index(DigestItemType::PreRuntime as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("Consensus", |v| { - v.index(DigestItemType::Consensus as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("Seal", |v| { - v.index(DigestItemType::Seal as u8).fields( - Fields::unnamed() - .field(|f| { - f.ty::().type_name("ConsensusEngineId") - }) - .field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("Other", |v| { - v.index(DigestItemType::Other as u8).fields( - Fields::unnamed().field(|f| f.ty::>().type_name("Vec")), - ) - }) - .variant("RuntimeEnvironmentUpdated", |v| { - v.index(DigestItemType::RuntimeEnvironmentUpdated as u8) - .fields(Fields::unit()) - }), - ) + Type::builder().path(Path::new("DigestItem", module_path!())).variant( + Variants::new() + .variant("PreRuntime", |v| { + v.index(DigestItemType::PreRuntime as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Consensus", |v| { + v.index(DigestItemType::Consensus as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Seal", |v| { + v.index(DigestItemType::Seal as u8).fields( + Fields::unnamed() + .field(|f| f.ty::().type_name("ConsensusEngineId")) + .field(|f| f.ty::>().type_name("Vec")), + ) + }) + .variant("Other", |v| { + v.index(DigestItemType::Other as u8) + .fields(Fields::unnamed().field(|f| f.ty::>().type_name("Vec"))) + }) + .variant("RuntimeEnvironmentUpdated", |v| { + v.index(DigestItemType::RuntimeEnvironmentUpdated as u8).fields(Fields::unit()) + }), + ) } } @@ -464,10 +448,7 @@ mod tests { #[test] fn should_serialize_digest() { let digest = Digest { - logs: vec![ - DigestItem::Other(vec![1, 2, 3]), - DigestItem::Seal(*b"test", vec![1, 2, 3]), - ], + logs: vec![DigestItem::Other(vec![1, 2, 3]), DigestItem::Seal(*b"test", vec![1, 2, 3])], }; assert_eq!( diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index db51251df7784..21c2a6eef73af 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -235,9 +235,7 @@ mod tests { state_root: BlakeTwo256::hash(b"3"), extrinsics_root: BlakeTwo256::hash(b"4"), digest: crate::generic::Digest { - logs: vec![ - crate::generic::DigestItem::Other(b"6".to_vec()), - ], + logs: vec![crate::generic::DigestItem::Other(b"6".to_vec())], }, }; @@ -245,7 +243,12 @@ mod tests { assert_eq!( header_encoded, vec![ - 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 8, 88, 19, 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 
184, 12, 73, 104, 49, 200, 204, 31, 143, 13, 4, 0, 4, 54 + 146, 205, 245, 120, 196, 112, 133, 165, 153, 34, 86, 240, 220, 249, 125, 11, 25, + 241, 241, 201, 222, 77, 95, 227, 12, 58, 206, 97, 145, 182, 229, 219, 8, 88, 19, + 72, 51, 123, 15, 62, 20, 134, 32, 23, 61, 170, 165, 249, 77, 0, 216, 129, 112, 93, + 203, 240, 170, 131, 239, 218, 186, 97, 210, 237, 225, 235, 134, 73, 33, 73, 151, + 87, 78, 32, 196, 100, 56, 138, 23, 36, 32, 210, 84, 3, 104, 43, 187, 184, 12, 73, + 104, 49, 200, 204, 31, 143, 13, 4, 0, 4, 54 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); @@ -256,9 +259,7 @@ mod tests { state_root: BlakeTwo256::hash(b"3000"), extrinsics_root: BlakeTwo256::hash(b"4000"), digest: crate::generic::Digest { - logs: vec![ - crate::generic::DigestItem::Other(b"5000".to_vec()), - ], + logs: vec![crate::generic::DigestItem::Other(b"5000".to_vec())], }, }; @@ -266,7 +267,12 @@ mod tests { assert_eq!( header_encoded, vec![ - 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 65, 31, 219, 225, 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, 111, 126, 54, 34, 155, 220, 253, 124, 4, 0, 16, 53, 48, 48, 48 + 197, 243, 254, 225, 31, 117, 21, 218, 179, 213, 92, 6, 247, 164, 230, 25, 47, 166, + 140, 117, 142, 159, 195, 202, 67, 196, 238, 26, 44, 18, 33, 92, 65, 31, 219, 225, + 47, 12, 107, 88, 153, 146, 55, 21, 226, 186, 110, 48, 167, 187, 67, 183, 228, 232, + 118, 136, 30, 254, 11, 87, 48, 112, 7, 97, 31, 82, 146, 110, 96, 87, 152, 68, 98, + 162, 227, 222, 78, 14, 244, 194, 120, 154, 112, 97, 222, 144, 174, 101, 220, 44, + 111, 126, 54, 34, 155, 220, 253, 124, 4, 0, 16, 53, 48, 48, 48 ], ); assert_eq!(header, Header::::decode(&mut &header_encoded[..]).unwrap()); diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index e777eabca2a59..a65e212bf07ec 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -26,8 +26,8 @@ fn system_digest_item_encoding() { let encoded = item.encode(); assert_eq!( encoded, - vec![4, - // type = DigestItemType::Consensus + vec![ + 4, // type = DigestItemType::Consensus 1, 2, 3, 4, 16, 5, 6, 7, 8, ] ); diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index ed93cc8676d99..8f914ab3eee64 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -648,11 +648,7 @@ where self.overlay.rollback_transaction().expect(BENCHMARKING_FN); } self.overlay - .drain_storage_changes( - self.backend, - Default::default(), - self.storage_transaction_cache, - ) + .drain_storage_changes(self.backend, Default::default(), self.storage_transaction_cache) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL); self.mark_dirty(); @@ -667,11 +663,7 @@ where } let changes = self .overlay - .drain_storage_changes( - self.backend, - Default::default(), - self.storage_transaction_cache, - ) + .drain_storage_changes(self.backend, Default::default(), self.storage_transaction_cache) .expect(EXT_NOT_ALLOWED_TO_FAIL); self.backend .commit( @@ -869,9 +861,7 @@ where #[cfg(test)] mod tests { use super::*; - use crate::{ - InMemoryBackend, - }; + use crate::InMemoryBackend; use codec::Encode; use 
sp_core::{ map, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 6126e48e85333..69cc11e02d4f9 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -404,12 +404,7 @@ mod execution { .enter_runtime() .expect("StateMachine is never called from the runtime; qed"); - let mut ext = Ext::new( - self.overlay, - cache, - self.backend, - Some(&mut self.extensions), - ); + let mut ext = Ext::new(self.overlay, cache, self.backend, Some(&mut self.extensions)); let ext_id = ext.id; @@ -1169,12 +1164,7 @@ mod tests { let overlay_limit = overlay.clone(); { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.clear_prefix(b"ab", None); } overlay.commit_transaction().unwrap(); @@ -1198,12 +1188,7 @@ mod tests { let mut overlay = overlay_limit; { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!((false, 1), ext.clear_prefix(b"ab", Some(1))); } overlay.commit_transaction().unwrap(); @@ -1245,12 +1230,7 @@ mod tests { { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); } @@ -1285,12 +1265,7 @@ mod tests { let backend = InMemoryBackend::::from(initial); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1)); assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); @@ -1309,12 +1284,7 @@ mod tests { let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.set_child_storage(child_info, b"abc".to_vec(), b"def".to_vec()); assert_eq!(ext.child_storage(child_info, b"abc"), Some(b"def".to_vec())); @@ -1331,24 +1301,14 @@ mod tests { let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.storage_append(key.clone(), reference_data[0].encode()); assert_eq!(ext.storage(key.as_slice()), Some(vec![reference_data[0].clone()].encode())); } overlay.start_transaction(); { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); for i in reference_data.iter().skip(1) { ext.storage_append(key.clone(), i.encode()); @@ -1357,12 +1317,7 @@ mod tests { } overlay.rollback_transaction().unwrap(); { - let ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(key.as_slice()), 
Some(vec![reference_data[0].clone()].encode())); } } @@ -1384,12 +1339,7 @@ mod tests { // For example, block initialization with event. { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); ext.clear_storage(key.as_slice()); ext.storage_append(key.clone(), Item::InitializationItem.encode()); } @@ -1397,12 +1347,7 @@ mod tests { // For example, first transaction resulted in panic during block building { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1417,12 +1362,7 @@ mod tests { // Then we apply next transaction which is valid this time. { - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(key.as_slice()), Some(vec![Item::InitializationItem].encode())); @@ -1437,12 +1377,7 @@ mod tests { // Then only initlaization item and second (commited) item should persist. { - let ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!( ext.storage(key.as_slice()), Some(vec![Item::InitializationItem, Item::CommitedItem].encode()), @@ -1718,12 +1653,7 @@ mod tests { let mut transaction = { let backend = test_trie(); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); ext.set_child_storage(&child_info_1, b"abc".to_vec(), b"def".to_vec()); ext.set_child_storage(&child_info_2, b"abc".to_vec(), b"def".to_vec()); ext.storage_root(); @@ -1760,12 +1690,7 @@ mod tests { { let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, backend, None); assert_eq!(ext.storage(b"bbb"), Some(vec![])); assert_eq!(ext.storage(b"ccc"), Some(vec![])); ext.clear_storage(b"ccc"); diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index 57ac6937cf50e..b7a535792aae6 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -191,20 +191,13 @@ impl StorageTransactionCache { } } -impl Default - for StorageTransactionCache -{ +impl Default for StorageTransactionCache { fn default() -> Self { - Self { - transaction: None, - transaction_storage_root: None, - } + Self { transaction: None, transaction_storage_root: None } } } -impl Default - for StorageChanges -{ +impl Default for StorageChanges { fn default() -> Self { Self { main_storage_changes: Default::default(), @@ -859,12 +852,7 @@ mod tests { overlay.set_storage(b"doug".to_vec(), None); let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - None, - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &backend, None); const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 5904558701d2b..59a0a5a6837ec 100644 --- a/primitives/state-machine/src/testing.rs +++ 
b/primitives/state-machine/src/testing.rs @@ -23,9 +23,8 @@ use std::{ }; use crate::{ - backend::Backend, - ext::Ext, - InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, + backend::Backend, ext::Ext, InMemoryBackend, OverlayedChanges, StorageKey, + StorageTransactionCache, StorageValue, }; use hash_db::Hasher; diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 5dfc8e7900832..341839a1deb20 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -80,4 +80,3 @@ pub type Digest = sp_runtime::generic::Digest; pub type Block = sp_runtime::generic::Block; /// A test block's header. pub type Header = sp_runtime::generic::Header; - diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 1aaca472d8e36..2d1cb4bbc66a2 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -45,11 +45,7 @@ use sc_client_api::BlockchainEvents; use sc_service::client::{ClientConfig, LocalCallExecutor}; use serde::Deserialize; use sp_core::storage::ChildInfo; -use sp_runtime::{ - codec::Encode, - traits::Block as BlockT, - OpaqueExtrinsic, -}; +use sp_runtime::{codec::Encode, traits::Block as BlockT, OpaqueExtrinsic}; use std::{ collections::{HashMap, HashSet}, pin::Pin, diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 2eac1c018cfa8..acd49f23327c5 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -31,8 +31,9 @@ pub use substrate_test_runtime as runtime; pub use self::block_builder_ext::BlockBuilderExt; use sp_core::{ - sr25519, Pair, + sr25519, storage::{ChildInfo, Storage, StorageChild}, + Pair, }; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; @@ -46,8 +47,8 @@ pub mod prelude { }; // Client structs pub use super::{ - Backend, ExecutorDispatch, LocalExecutorDispatch, - NativeElseWasmExecutor, TestClient, TestClientBuilder, WasmExecutionMethod, + Backend, ExecutorDispatch, LocalExecutorDispatch, NativeElseWasmExecutor, TestClient, + TestClientBuilder, WasmExecutionMethod, }; // Keyring pub use super::{AccountKeyring, Sr25519Keyring}; diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index 070e807601188..165fe0355628e 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -25,11 +25,7 @@ use codec::{Decode, Encode, KeyedVec}; use frame_support::{decl_module, decl_storage, storage}; use frame_system::Config; use sp_core::storage::well_known_keys; -use sp_io::{ - hashing::blake2_256, - storage::root as storage_root, - trie, -}; +use sp_io::{hashing::blake2_256, storage::root as storage_root, trie}; use sp_runtime::{ generic, traits::Header as _, diff --git a/test-utils/test-runner/src/node.rs b/test-utils/test-runner/src/node.rs index 52e40fffc7dec..07259263c5e4d 100644 --- a/test-utils/test-runner/src/node.rs +++ b/test-utils/test-runner/src/node.rs @@ -25,10 +25,7 @@ use futures::{ }; use jsonrpc_core::MetaIoHandler; use manual_seal::EngineCommand; -use sc_client_api::{ - backend::Backend, - CallExecutor, ExecutorProvider, -}; +use sc_client_api::{backend::Backend, CallExecutor, ExecutorProvider}; use sc_executor::NativeElseWasmExecutor; use sc_service::{TFullBackend, TFullCallExecutor, TFullClient, TaskManager}; use sc_transaction_pool_api::TransactionPool; @@ -173,12 +170,7 @@ 
where .state_at(id.clone()) .expect(&format!("State at block {} not found", id)); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &state_backend, - Some(&mut extensions), - ); + let mut ext = Ext::new(&mut overlay, &mut cache, &state_backend, Some(&mut extensions)); sp_externalities::set_and_run_with_externalities(&mut ext, closure) }
diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
index 794661254a584..6a23c05998cb1 100644
--- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
+++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs
@@ -152,11 +152,7 @@ where .map_err(|e| format!("failed to decode output: {:?}", e))?; let storage_changes = changes - .drain_storage_changes( - &state_ext.backend, - Default::default(), - &mut Default::default(), - ) + .drain_storage_changes(&state_ext.backend, Default::default(), &mut Default::default()) .unwrap(); state_ext.backend.apply_transaction( storage_changes.transaction_storage_root,
From 8596e3405466a5e0f58f07a5955eb96e9b4d0515 Mon Sep 17 00:00:00 2001
From: arkpar
Date: Mon, 1 Nov 2021 07:42:30 +0100
Subject: [PATCH 4/8] Restore changes_root
---
 primitives/io/src/lib.rs | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs
index d93076b1d1cfe..e4f52fd4e0e21 100644
--- a/primitives/io/src/lib.rs
+++ b/primitives/io/src/lib.rs
@@ -195,6 +195,11 @@ pub trait Storage { self.storage_root() } + /// Always returns `None`. This function exists for compatibility reasons. + fn changes_root(&mut self, _parent_hash: &[u8]) -> Option<Vec<u8>> { + None + } + /// Get the next key in storage after the given one in lexicographic order. fn next_key(&mut self, key: &[u8]) -> Option<Vec<u8>> { self.next_storage_key(&key)
From 120545d3b99d2d494ec36df00a78ed7c16aafdc2 Mon Sep 17 00:00:00 2001
From: arkpar
Date: Mon, 1 Nov 2021 08:06:17 +0100
Subject: [PATCH 5/8] Fixed benches
---
 bin/node/executor/benches/bench.rs | 4 ++--
 primitives/state-machine/src/lib.rs | 6 ------
 2 files changed, 2 insertions(+), 8 deletions(-)
diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs
index 1a39c9decb321..03c3eb53e23f9 100644
--- a/bin/node/executor/benches/bench.rs
+++ b/bin/node/executor/benches/bench.rs
@@ -53,7 +53,7 @@ const SPEC_VERSION: u32 = node_runtime::VERSION.spec_version; const HEAP_PAGES: u64 = 20; -type TestExternalities = CoreTestExternalities<BlakeTwo256, u64>; +type TestExternalities = CoreTestExternalities<BlakeTwo256>; #[derive(Debug)] enum ExecutionMethod { @@ -188,7 +188,7 @@ fn bench_execute_block(c: &mut Criterion) { for strategy in execution_methods { group.bench_function(format!("{:?}", strategy), |b| { - let genesis_config = node_testing::genesis::config(false, Some(compact_code_unwrap())); + let genesis_config = node_testing::genesis::config(Some(compact_code_unwrap())); let (use_native, wasm_method) = match strategy { ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), ExecutionMethod::Wasm(wasm_method) => (false, wasm_method),
diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs
index 69cc11e02d4f9..8aba088287539 100644
--- a/primitives/state-machine/src/lib.rs
+++ b/primitives/state-machine/src/lib.rs
@@ -969,7 +969,6 @@ mod tests { #[derive(Clone)] struct DummyCodeExecutor { - change_changes_trie_config: bool, native_available: bool, native_succeeds: bool, fallback_succeeds: bool, @@ -1027,7 +1026,6 @@ mod tests { &backend, &mut overlayed_changes,
&DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1052,7 +1050,6 @@ mod tests { &backend, &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1078,7 +1075,6 @@ mod tests { &backend, &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: false, @@ -1105,7 +1101,6 @@ mod tests { #[test] fn prove_execution_and_proof_check_works() { let executor = DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: true, @@ -1715,7 +1710,6 @@ mod tests { &backend, &mut overlayed_changes, &DummyCodeExecutor { - change_changes_trie_config: false, native_available: true, native_succeeds: true, fallback_succeeds: false, From 3efd582c15b3252dfff0808613a698966e53ce68 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 1 Nov 2021 08:33:33 +0100 Subject: [PATCH 6/8] Cargo fmt --- client/consensus/babe/src/verification.rs | 4 ++-- client/network/src/protocol/notifications/behaviour.rs | 4 ++-- client/network/src/protocol/sync/blocks.rs | 2 +- client/network/src/service/tests.rs | 2 +- client/network/src/transactions.rs | 4 ++-- client/service/test/src/client/db.rs | 1 - frame/election-provider-multi-phase/src/lib.rs | 2 +- frame/support/procedural/src/pallet/parse/pallet_struct.rs | 4 ++-- 8 files changed, 11 insertions(+), 12 deletions(-) diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 174b2d03c6ef0..2322a96262161 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -111,7 +111,7 @@ pub(super) fn check_header( ); check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; - } + }, PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { @@ -122,7 +122,7 @@ pub(super) fn check_header( ); check_secondary_vrf_header::(pre_hash, secondary, sig, &epoch)?; - } + }, _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index 01138e3207570..f66f1fbe9e95a 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -712,7 +712,7 @@ impl Notifications { timer: delay_id, timer_deadline: *backoff, }; - } + }, // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { @@ -2085,7 +2085,7 @@ impl NetworkBehaviour for Notifications { .boxed(), ); } - } + }, // We intentionally never remove elements from `delays`, and it may // thus contain obsolete entries. This is a normal situation. diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 30ba7ffafeffc..ce4535dc0b45f 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -203,7 +203,7 @@ impl BlockCollection { { *downloading -= 1; false - } + }, Some(&mut BlockRangeState::Downloading { .. 
}) => true, _ => false, }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 1c66986e422fc..8271da886fca7 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -529,7 +529,7 @@ fn fallback_name_working() { { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break - } + }, _ => {}, }; } diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 99350f603a375..6d190651160f0 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -336,13 +336,13 @@ impl TransactionsHandler { }, ); debug_assert!(_was_in.is_none()); - } + }, Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { let _peer = self.peers.remove(&remote); debug_assert!(_peer.is_some()); - } + }, Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { diff --git a/client/service/test/src/client/db.rs b/client/service/test/src/client/db.rs index 772fdcada72ef..5278c9a13a4d7 100644 --- a/client/service/test/src/client/db.rs +++ b/client/service/test/src/client/db.rs @@ -21,7 +21,6 @@ use std::sync::Arc; type TestBackend = sc_client_api::in_mem::Backend; - #[test] fn test_leaves_with_complex_block_tree() { let backend = Arc::new(TestBackend::new()); diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index a7863fafa7747..ab8b1523f0509 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -772,7 +772,7 @@ pub mod pallet { Self::on_initialize_open_unsigned(enabled, now); T::WeightInfo::on_initialize_open_unsigned() } - } + }, _ => T::WeightInfo::on_initialize_nothing(), } } diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index 278f46e13818e..c528faf669ee3 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -130,12 +130,12 @@ impl PalletStructDef { if generate_storage_info.is_none() => { generate_storage_info = Some(span); - } + }, PalletStructAttr::StorageVersion { storage_version, .. 
} if storage_version_found.is_none() => { storage_version_found = Some(storage_version); - } + }, attr => { let msg = "Unexpected duplicated attribute"; return Err(syn::Error::new(attr.span(), msg)) From 637ca4ed30e78db53a070ad16c0c12495b9e4847 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 8 Nov 2021 17:53:53 +0100 Subject: [PATCH 7/8] fmt --- client/service/src/client/client.rs | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index dd8519d011c57..b46c6b99b9a9c 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -75,13 +75,18 @@ use sp_runtime::{ BuildStorage, Digest, Justification, Justifications, }; use sp_state_machine::{ - prove_child_read, prove_range_read_with_child_with_size, - prove_read, read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend, - KeyValueStates, KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH, + prove_child_read, prove_range_read_with_child_with_size, prove_read, + read_range_proof_check_with_child_on_proving_backend, Backend as StateBackend, KeyValueStates, + KeyValueStorageLevel, MAX_NESTED_TRIE_DEPTH, }; use sp_trie::{CompactProof, StorageProof}; use std::{ - collections::{HashMap, HashSet}, marker::PhantomData, panic::UnwindSafe, path::PathBuf, result, sync::Arc, + collections::{HashMap, HashSet}, + marker::PhantomData, + panic::UnwindSafe, + path::PathBuf, + result, + sync::Arc, }; #[cfg(feature = "test-helpers")] From 47a889648c510fe032adee6c5646e3c8b4dab9fe Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 8 Nov 2021 18:14:28 +0100 Subject: [PATCH 8/8] fmt --- client/consensus/babe/src/verification.rs | 4 ++-- client/network/src/protocol/notifications/behaviour.rs | 4 ++-- client/network/src/protocol/sync/blocks.rs | 2 +- client/network/src/service/tests.rs | 2 +- client/network/src/transactions.rs | 4 ++-- frame/election-provider-multi-phase/src/lib.rs | 2 +- frame/support/procedural/src/pallet/parse/pallet_struct.rs | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 2322a96262161..174b2d03c6ef0 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -111,7 +111,7 @@ pub(super) fn check_header( ); check_secondary_plain_header::(pre_hash, secondary, sig, &epoch)?; - }, + } PreDigest::SecondaryVRF(secondary) if epoch.config.allowed_slots.is_secondary_vrf_slots_allowed() => { @@ -122,7 +122,7 @@ pub(super) fn check_header( ); check_secondary_vrf_header::(pre_hash, secondary, sig, &epoch)?; - }, + } _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index f66f1fbe9e95a..01138e3207570 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -712,7 +712,7 @@ impl Notifications { timer: delay_id, timer_deadline: *backoff, }; - }, + } // Disabled => Enabled PeerState::Disabled { mut connections, backoff_until } => { @@ -2085,7 +2085,7 @@ impl NetworkBehaviour for Notifications { .boxed(), ); } - }, + } // We intentionally never remove elements from `delays`, and it may // thus contain obsolete entries. This is a normal situation. 
diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index ce4535dc0b45f..30ba7ffafeffc 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -203,7 +203,7 @@ impl BlockCollection { { *downloading -= 1; false - }, + } Some(&mut BlockRangeState::Downloading { .. }) => true, _ => false, }; diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 8271da886fca7..1c66986e422fc 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -529,7 +529,7 @@ fn fallback_name_working() { { assert_eq!(negotiated_fallback, Some(PROTOCOL_NAME)); break - }, + } _ => {}, }; } diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 6d190651160f0..99350f603a375 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -336,13 +336,13 @@ impl TransactionsHandler { }, ); debug_assert!(_was_in.is_none()); - }, + } Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { let _peer = self.peers.remove(&remote); debug_assert!(_peer.is_some()); - }, + } Event::NotificationsReceived { remote, messages } => { for (protocol, message) in messages { diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index 4c4de82af592f..80a13aa99fb70 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -772,7 +772,7 @@ pub mod pallet { Self::on_initialize_open_unsigned(enabled, now); T::WeightInfo::on_initialize_open_unsigned() } - }, + } _ => T::WeightInfo::on_initialize_nothing(), } } diff --git a/frame/support/procedural/src/pallet/parse/pallet_struct.rs b/frame/support/procedural/src/pallet/parse/pallet_struct.rs index c528faf669ee3..278f46e13818e 100644 --- a/frame/support/procedural/src/pallet/parse/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/parse/pallet_struct.rs @@ -130,12 +130,12 @@ impl PalletStructDef { if generate_storage_info.is_none() => { generate_storage_info = Some(span); - }, + } PalletStructAttr::StorageVersion { storage_version, .. } if storage_version_found.is_none() => { storage_version_found = Some(storage_version); - }, + } attr => { let msg = "Unexpected duplicated attribute"; return Err(syn::Error::new(attr.span(), msg))
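For downstream code migrating across this series, the recurring call-site change is the same everywhere: the changes-trie type parameter and the `None` changes-trie state argument disappear, and the restored `changes_root` host function (patch 4/8) is a stub that always returns `None`. The following is a minimal illustrative sketch, not part of the series itself; it assumes the post-series `sp-io` and `sp-state-machine` crates, and the zeroed parent hash, the empty in-memory backend, and the `main` wrapper are placeholders, not code from these patches:

	// Illustrative sketch mirroring the call shapes in the hunks above.
	use sp_runtime::traits::BlakeTwo256;
	use sp_state_machine::{InMemoryBackend, OverlayedChanges, StorageTransactionCache};

	fn main() {
		// `changes_root` is kept only for compatibility: it always returns `None`,
		// so no ChangesTrieRoot digest item can be produced any more.
		let mut ext = sp_io::TestExternalities::default();
		ext.execute_with(|| {
			let parent_hash = [0u8; 32]; // placeholder; the value is ignored
			assert!(sp_io::storage::changes_root(&parent_hash).is_none());
		});

		// `drain_storage_changes` loses the `::<_, _, NumberFor<Block>>` turbofish
		// and the `None` changes-trie argument; per the follow_chain.rs hunks, what
		// remains is the backend, the parent hash, and the transaction cache.
		let backend = InMemoryBackend::<BlakeTwo256>::default();
		let mut overlay = OverlayedChanges::default();
		let mut cache = StorageTransactionCache::default();
		let _changes = overlay
			.drain_storage_changes(&backend, Default::default(), &mut cache)
			.expect("draining an empty overlay succeeds");
	}

`StateMachine::new` follows the same pattern: the `None` argument between the backend and the overlayed changes is simply deleted, as in the `follow_chain.rs` and `lib.rs` hunks of patch 1/8.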