From 711d91aaabb9687cd26904443c23706cd4dbd1ee Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Wed, 7 Aug 2024 18:03:26 +0100 Subject: [PATCH 01/74] frame-omni-bencher short checks (#5268) - Part of https://github.com/paritytech/ci_cd/issues/1006 - Closes: https://github.com/paritytech/ci_cd/issues/1010 - Related: https://github.com/paritytech/polkadot-sdk/pull/4405 - Possibly affecting how frame-omni-bencher works on different runtimes: https://github.com/paritytech/polkadot-sdk/pull/5083 Currently this works in parallel with the GitLab short benchmarks. It is triggered only by adding the `GHA-migration` label, to ensure a smooth transition (a kind of feature flag). Later, once tested on random PRs, we'll remove the GitLab job and turn these tests on by default. --------- Co-authored-by: Oliver Tale-Yazdi --- .github/workflows/check-changed-files.yml | 57 ------------- .../workflows/check-frame-omni-bencher.yml | 85 +++++++++++++++++++ .../reusable-check-changed-files.yml | 59 +++++++++++++ .github/workflows/tests-linux-stable.yml | 8 +- .github/workflows/tests.yml | 10 +-- 5 files changed, 153 insertions(+), 66 deletions(-) delete mode 100644 .github/workflows/check-changed-files.yml create mode 100644 .github/workflows/check-frame-omni-bencher.yml create mode 100644 .github/workflows/reusable-check-changed-files.yml diff --git a/.github/workflows/check-changed-files.yml b/.github/workflows/check-changed-files.yml deleted file mode 100644 index 657c05cd047d..000000000000 --- a/.github/workflows/check-changed-files.yml +++ /dev/null @@ -1,57 +0,0 @@ -# Reusable workflow to perform checks and generate conditions for other workflows. -# Currently it checks if any Rust (build-related) file is changed -# and if the current (caller) workflow file is changed. -# Example: -# -# jobs: -# changes: -# permissions: -# pull-requests: read -# uses: ./.github/workflows/check-changed-files.yml -# some-job: -# needs: changes -# if: ${{ needs.changes.outputs.rust }} -# .......
- -name: Check changes files - -on: - workflow_call: - # Map the workflow outputs to job outputs - outputs: - rust: - value: ${{ jobs.changes.outputs.rust }} - description: 'true if any of the build-related OR current (caller) workflow files have changed' - current-workflow: - value: ${{ jobs.changes.outputs.current-workflow }} - description: 'true if current (caller) workflow file has changed' - -jobs: - changes: - runs-on: ubuntu-latest - permissions: - pull-requests: read - outputs: - # true if current workflow (caller) file is changed - rust: ${{ steps.filter.outputs.rust == 'true' || steps.filter.outputs.current-workflow == 'true' }} - current-workflow: ${{ steps.filter.outputs.current-workflow }} - steps: - - id: current-file - run: echo "current-workflow-file=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT - - run: echo "${{ steps.current-file.outputs.current-workflow-file }}" - # For pull requests it's not necessary to checkout the code - - id: filter - uses: dorny/paths-filter@v3 - with: - predicate-quantifier: 'every' - # current-workflow - check if the current (caller) workflow file is changed - # rust - check if any Rust (build-related) file is changed - filters: | - current-workflow: - - '${{ steps.current-file.outputs.current-workflow-file }}' - rust: - - '**/*' - - '!.github/**/*' - - '!prdoc/**/*' - - '!docs/**/*' - # \ No newline at end of file diff --git a/.github/workflows/check-frame-omni-bencher.yml b/.github/workflows/check-frame-omni-bencher.yml new file mode 100644 index 000000000000..e9db2d912979 --- /dev/null +++ b/.github/workflows/check-frame-omni-bencher.yml @@ -0,0 +1,85 @@ +name: Short benchmarks (frame-omni-bencher) + +on: + push: + branches: + - master + pull_request: + types: [ opened, synchronize, reopened, ready_for_review, labeled ] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +env: + ARTIFACTS_NAME: frame-omni-bencher-artifacts + +jobs: + changes: + # TODO: remove once migration is complete or this workflow is fully stable + if: contains(github.event.label.name, 'GHA-migration') + permissions: + pull-requests: read + uses: ./.github/workflows/reusable-check-changed-files.yml + + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. 
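# A sketch of that mechanism, assuming .github/env holds a single KEY=VALUE line
# (something like IMAGE=docker.io/paritytech/ci-unified:<tag>; the concrete value
# lives in the repo). Lines appended to the file behind $GITHUB_OUTPUT become step
# outputs, which this job then re-exports for the jobs that need them:
#
#   $ cat .github/env >> "$GITHUB_OUTPUT"
#   # -> readable as steps.set_image.outputs.IMAGE inside this job,
#   #    and as needs.set-image.outputs.IMAGE from the container: stanzas below.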
+ runs-on: ubuntu-latest + needs: changes + if: ${{ needs.changes.outputs.rust }} + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + + run-frame-omni-bencher: + runs-on: arc-runners-polkadot-sdk-beefy + needs: [ set-image, changes ] # , build-frame-omni-bencher ] + if: ${{ needs.changes.outputs.rust }} + timeout-minutes: 30 + strategy: + fail-fast: false # keep running other workflows even if one fails, to see the logs of all possible failures + matrix: + runtime: + [ + westend-runtime, + rococo-runtime, + asset-hub-rococo-runtime, + asset-hub-westend-runtime, + bridge-hub-rococo-runtime, + bridge-hub-westend-runtime, + collectives-westend-runtime, + coretime-rococo-runtime, + coretime-westend-runtime, + people-rococo-runtime, + people-westend-runtime, + glutton-westend-runtime, + ] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + PACKAGE_NAME: ${{ matrix.runtime }} + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: script + run: | + RUNTIME_BLOB_NAME=$(echo $PACKAGE_NAME | sed 's/-/_/g').compact.compressed.wasm + RUNTIME_BLOB_PATH=./target/release/wbuild/$PACKAGE_NAME/$RUNTIME_BLOB_NAME + forklift cargo build --release --locked -p $PACKAGE_NAME -p frame-omni-bencher --features runtime-benchmarks + echo "Running short benchmarking for PACKAGE_NAME=$PACKAGE_NAME and RUNTIME_BLOB_PATH=$RUNTIME_BLOB_PATH" + ls -lrt $RUNTIME_BLOB_PATH + ./target/release/frame-omni-bencher v1 benchmark pallet --runtime $RUNTIME_BLOB_PATH --all --steps 2 --repeat 1 + confirm-frame-omni-benchers-passed: + runs-on: ubuntu-latest + name: All benchmarks passed + needs: run-frame-omni-bencher + steps: + - run: echo '### Good job! All the benchmarks passed 🚀' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/reusable-check-changed-files.yml b/.github/workflows/reusable-check-changed-files.yml new file mode 100644 index 000000000000..47f0620439c7 --- /dev/null +++ b/.github/workflows/reusable-check-changed-files.yml @@ -0,0 +1,59 @@ +# Reusable workflow to perform checks and generate conditions for other workflows. +# Currently it checks if any Rust (build-related) file is changed +# and if the current (caller) workflow file is changed. +# Example: +# +# jobs: +# changes: +# permissions: +# pull-requests: read +# uses: ./.github/workflows/reusable-check-changed-files.yml +# some-job: +# needs: changes +# if: ${{ needs.changes.outputs.rust }} +# ....... 
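# For reference, the current-file step below recovers the caller workflow's path
# from github.workflow_ref, which has the shape
# <owner>/<repo>/.github/workflows/<file>.yml@<ref>. A quick sketch of that sed
# invocation (the ref value here is a made-up example):
#
#   $ ref='paritytech/polkadot-sdk/.github/workflows/tests.yml@refs/pull/1234/merge'
#   $ echo "$ref" | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p"
#   .github/workflows/tests.yml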
+ +name: Check changes files + +on: + workflow_call: + # Map the workflow outputs to job outputs + outputs: + rust: + value: ${{ jobs.changes.outputs.rust }} + description: "true if any of the build-related OR current (caller) workflow files have changed" + current-workflow: + value: ${{ jobs.changes.outputs.current-workflow }} + description: "true if current (caller) workflow file has changed" + +jobs: + changes: + runs-on: ubuntu-latest + permissions: + pull-requests: read + outputs: + # true if current workflow (caller) file is changed + rust: ${{ steps.filter.outputs.rust == 'true' || steps.filter.outputs.current-workflow == 'true' }} + current-workflow: ${{ steps.filter.outputs.current-workflow }} + steps: + - id: current-file + run: echo "current-workflow-file=$(echo ${{ github.workflow_ref }} | sed -nE "s/.*(\.github\/workflows\/[a-zA-Z0-9_-]*\.y[a]?ml)@refs.*/\1/p")" >> $GITHUB_OUTPUT + - run: echo "${{ steps.current-file.outputs.current-workflow-file }}" + # For pull requests it's not necessary to checkout the code + - name: Checkout + if: github.event_name != 'pull_request' + uses: actions/checkout@v4 + - id: filter + uses: dorny/paths-filter@v3 + with: + predicate-quantifier: "every" + # current-workflow - check if the current (caller) workflow file is changed + # rust - check if any Rust (build-related) file is changed + filters: | + current-workflow: + - '${{ steps.current-file.outputs.current-workflow-file }}' + rust: + - '**/*' + - '!.github/**/*' + - '!prdoc/**/*' + - '!docs/**/*' diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index 55addf11de06..da4bb40a2e78 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -6,7 +6,7 @@ on: branches: - master pull_request: - types: [opened, synchronize, reopened, ready_for_review] + types: [ opened, synchronize, reopened, ready_for_review ] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -16,7 +16,7 @@ jobs: changes: permissions: pull-requests: read - uses: ./.github/workflows/check-changed-files.yml + uses: ./.github/workflows/reusable-check-changed-files.yml set-image: # GitHub Actions allows using 'env' in a container context. @@ -34,7 +34,7 @@ jobs: run: cat .github/env >> $GITHUB_OUTPUT test-linux-stable-int: - needs: [set-image, changes] + needs: [ set-image, changes ] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 @@ -55,7 +55,7 @@ jobs: # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: - needs: [set-image, changes] + needs: [ set-image, changes ] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a413d3306159..1be2dd7921e0 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,7 +5,7 @@ on: branches: - master pull_request: - types: [opened, synchronize, reopened, ready_for_review] + types: [ opened, synchronize, reopened, ready_for_review ] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -15,7 +15,7 @@ jobs: changes: permissions: pull-requests: read - uses: ./.github/workflows/check-changed-files.yml + uses: ./.github/workflows/reusable-check-changed-files.yml set-image: # GitHub Actions allows using 'env' in a container context. 
@@ -31,7 +31,7 @@ jobs: run: cat .github/env >> $GITHUB_OUTPUT quick-benchmarks: - needs: [set-image, changes] + needs: [ set-image, changes ] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 @@ -50,7 +50,7 @@ jobs: # cf https://github.com/paritytech/polkadot-sdk/issues/1652 test-syscalls: - needs: [set-image, changes] + needs: [ set-image, changes ] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 @@ -75,7 +75,7 @@ jobs: # fi cargo-check-all-benches: - needs: [set-image, changes] + needs: [ set-image, changes ] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 From eb0a9e593fb6a0f2bbfdb75602a51f4923995529 Mon Sep 17 00:00:00 2001 From: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Date: Thu, 8 Aug 2024 10:47:24 +0200 Subject: [PATCH 02/74] Fix Weight Annotation (#5275) https://github.com/paritytech/polkadot-sdk/pull/4527/files#r1706673828 --- substrate/frame/assets/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index e9b9a7b1e3cc..6f8ad0c29393 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -1775,7 +1775,7 @@ pub mod pallet { /// (false), or transfer everything except at least the minimum balance, which will /// guarantee to keep the sender asset account alive (true). #[pallet::call_index(32)] - #[pallet::weight(T::WeightInfo::refund_other())] + #[pallet::weight(T::WeightInfo::transfer_all())] pub fn transfer_all( origin: OriginFor, id: T::AssetIdParameter, From e82f223f7590736b7f51d9b27e06ca7182af7c90 Mon Sep 17 00:00:00 2001 From: georgepisaltu Date: Thu, 8 Aug 2024 14:39:09 +0300 Subject: [PATCH 03/74] Fix bridges compilation Signed-off-by: georgepisaltu --- .../runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs | 4 ++-- .../bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs | 2 +- .../runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs | 4 ++-- .../bridge-hubs/bridge-hub-westend/tests/snowbridge.rs | 6 +++--- .../runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index d90b695dd5af..0124920a2967 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -138,13 +138,13 @@ pub type TxExtension = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - frame_metadata_hash_extension::CheckMetadataHash, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages, bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages, ), + frame_metadata_hash_extension::CheckMetadataHash, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); @@ -1613,13 +1613,13 @@ mod tests { frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(10), frame_system::CheckWeight::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, ( 
bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ).into(); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs index 013234a8e7da..c4544bffa129 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/snowbridge.rs @@ -187,8 +187,8 @@ fn construct_extrinsic( OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), tx_ext.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs index d3c583d37f3c..3428b578fe95 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/tests/tests.rs @@ -58,13 +58,13 @@ fn construct_extrinsic( frame_system::Pallet::::account(&account_id).nonce, ), frame_system::CheckWeight::::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), ( bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ) .into(); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index fd995857d155..0c0d5bb588eb 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -125,10 +125,10 @@ pub type TxExtension = ( frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - frame_metadata_hash_extension::CheckMetadataHash, pallet_transaction_payment::ChargeTransactionPayment, BridgeRejectObsoleteHeadersAndMessages, (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages,), + frame_metadata_hash_extension::CheckMetadataHash, cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim, ); @@ -1295,12 +1295,12 @@ mod tests { frame_system::CheckEra::from(Era::Immortal), frame_system::CheckNonce::from(10), frame_system::CheckWeight::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), pallet_transaction_payment::ChargeTransactionPayment::from(10), BridgeRejectObsoleteHeadersAndMessages, ( bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), ), + frame_metadata_hash_extension::CheckMetadataHash::new(false), 
cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new() ).into(); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs index 46a0fa7a664b..98f2615fbfff 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/snowbridge.rs @@ -22,7 +22,7 @@ use bp_polkadot_core::Signature; use bridge_hub_westend_runtime::{ bridge_to_rococo_config, xcm_config::XcmConfig, AllPalletsWithoutSystem, BridgeRejectObsoleteHeadersAndMessages, Executive, MessageQueueServiceWeight, Runtime, - RuntimeCall, RuntimeEvent, SessionKeys, SignedExtra, UncheckedExtrinsic, + RuntimeCall, RuntimeEvent, SessionKeys, TxExtension, UncheckedExtrinsic, }; use codec::{Decode, Encode}; use cumulus_primitives_core::XcmError::{FailedToTransactAsset, NotHoldingFees}; @@ -171,7 +171,7 @@ fn construct_extrinsic( call: RuntimeCall, ) -> UncheckedExtrinsic { let account_id = AccountId32::from(sender.public()); - let extra: SignedExtra = ( + let extra: TxExtension = ( frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), frame_system::CheckTxVersion::::new(), @@ -184,8 +184,8 @@ fn construct_extrinsic( pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), frame_metadata_hash_extension::CheckMetadataHash::::new(false), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ); let payload = SignedPayload::new(call.clone(), extra.clone()).unwrap(); let signature = payload.using_encoded(|e| sender.sign(e)); diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs index f2e44656e238..4b05e9a5a7b4 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/tests/tests.rs @@ -73,10 +73,10 @@ fn construct_extrinsic( frame_system::Pallet::::account(&account_id).nonce, ), frame_system::CheckWeight::::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), pallet_transaction_payment::ChargeTransactionPayment::::from(0), BridgeRejectObsoleteHeadersAndMessages::default(), (bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(),), + frame_metadata_hash_extension::CheckMetadataHash::new(false), cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), ) .into(); From 9a4ffccc09acd72b8b5c1466d5408be739682303 Mon Sep 17 00:00:00 2001 From: georgepisaltu Date: Thu, 8 Aug 2024 14:48:40 +0300 Subject: [PATCH 04/74] Fix polkadot pvf dev dependency in Cargo.toml Signed-off-by: georgepisaltu --- Cargo.lock | 1 + polkadot/node/core/pvf/common/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index c6bd6ddf0126..814311c84368 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13578,6 +13578,7 @@ dependencies = [ "sp-externalities 0.25.0", "sp-io", "sp-tracing 16.0.0", + "tempfile", "thiserror", "tracing-gum", ] diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml index f3aca6f1867c..18b3f959c955 100644 --- 
a/polkadot/node/core/pvf/common/Cargo.toml +++ b/polkadot/node/core/pvf/common/Cargo.toml @@ -42,6 +42,7 @@ seccompiler = "0.4.0" [dev-dependencies] assert_matches = { workspace = true } +tempfile = { workspace = true } [features] # This feature is used to export test code to other crates without putting it in the production build. From 12539e7a931e82a040e74c84e413baa712ecd638 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Thu, 8 Aug 2024 17:20:20 +0200 Subject: [PATCH 05/74] [ci] Add test-linux-stable jobs GHA (#4897) This PR adds GitHub Actions jobs for test-linux-stable, including an oldkernel (oldlinux) runner variant. The PR waits for the latest release of forklift. cc https://github.com/paritytech/ci_cd/issues/939 cc https://github.com/paritytech/ci_cd/issues/1006 --------- Co-authored-by: Maksym H <1177472+mordamax@users.noreply.github.com> --- .github/workflows/tests-linux-stable.yml | 63 ++++++++++++++++++++++-- .gitlab/pipeline/test.yml | 2 - 2 files changed, 58 insertions(+), 7 deletions(-) diff --git a/.github/workflows/tests-linux-stable.yml b/.github/workflows/tests-linux-stable.yml index da4bb40a2e78..4a13f5318f7d 100644 --- a/.github/workflows/tests-linux-stable.yml +++ b/.github/workflows/tests-linux-stable.yml @@ -6,7 +6,7 @@ on: branches: - master pull_request: - types: [ opened, synchronize, reopened, ready_for_review ] + types: [opened, synchronize, reopened, ready_for_review, labeled] merge_group: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} @@ -14,6 +14,8 @@ jobs: changes: + # TODO: remove once migration is complete or this workflow is fully stable + if: contains(github.event.label.name, 'GHA-migration') permissions: pull-requests: read uses: ./.github/workflows/reusable-check-changed-files.yml @@ -34,7 +36,7 @@ jobs: test-linux-stable-int: - needs: [ set-image, changes ] + needs: [set-image, changes] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 @@ -51,11 +53,11 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: WASM_BUILD_NO_COLOR=1 time forklift cargo test -p staging-node-cli --release --locked -- --ignored + run: WASM_BUILD_NO_COLOR=1 forklift cargo test -p staging-node-cli --release --locked -- --ignored # https://github.com/paritytech/ci_cd/issues/864 test-linux-stable-runtime-benchmarks: - needs: [ set-image, changes ] + needs: [set-image, changes] if: ${{ needs.changes.outputs.rust }} runs-on: arc-runners-polkadot-sdk-beefy timeout-minutes: 60 @@ -70,4 +72,55 @@ jobs: - name: Checkout uses: actions/checkout@v4 - name: script - run: time forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + run: forklift cargo nextest run --workspace --features runtime-benchmarks benchmark --locked --cargo-profile testnet + + test-linux-stable: + needs: [set-image, changes] + if: ${{ needs.changes.outputs.rust }} + runs-on: ${{ matrix.runners }} + timeout-minutes: 60 + strategy: + fail-fast: false + matrix: + partition: [1/3, 2/3, 3/3] + runners: [arc-runners-polkadot-sdk-beefy, oldlinux] + container: + image: ${{ needs.set-image.outputs.IMAGE }} + # needed for tests that use unshare syscall + options: --security-opt seccomp=unconfined + env: + RUST_TOOLCHAIN: stable + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions.
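# In short: debug_assert!/debug_assert_eq! compile to no-ops in --release builds
# unless debug assertions are re-enabled, and -Dwarnings turns every warning into
# an error. Equivalent to building locally with:
#   $ RUSTFLAGS="-Cdebug-assertions=y -Dwarnings" cargo build --release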
+ RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: script + run: | + # Fixes "detected dubious ownership" error in the ci + git config --global --add safe.directory '*' + forklift cargo nextest run \ + --workspace \ + --locked \ + --release \ + --no-fail-fast \ + --features try-runtime,experimental,riscv,ci-only-tests \ + --partition count:${{ matrix.partition }} + # run runtime-api tests with `enable-staging-api` feature on the 1st node + - name: runtime-api tests + if: ${{ matrix.partition == '1/3' }} + run: forklift cargo nextest run -p sp-api-test --features enable-staging-api + + confirm-required-jobs-passed: + runs-on: ubuntu-latest + name: All tests passed + # If any new job gets added, be sure to add it to this array + needs: + [ + test-linux-stable-int, + test-linux-stable-runtime-benchmarks, + test-linux-stable, + ] + steps: + - run: echo '### Good job! All the tests passed 🚀' >> $GITHUB_STEP_SUMMARY diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml index 0103c6b76a2d..319c95ad6112 100644 --- a/.gitlab/pipeline/test.yml +++ b/.gitlab/pipeline/test.yml @@ -110,8 +110,6 @@ test-linux-stable-codecov: codecovcli -v do-upload -f target/coverage/result/report-${CI_NODE_INDEX}.lcov --disable-search -t ${CODECOV_TOKEN} -r paritytech/polkadot-sdk --commit-sha ${CI_COMMIT_SHA} --fail-on-error --git-service github; fi - # - test-linux-stable: stage: test extends: From 74da65922bb2875bcedf66c993dbabdcebfb271e Mon Sep 17 00:00:00 2001 From: georgepisaltu Date: Fri, 9 Aug 2024 01:23:39 +0300 Subject: [PATCH 06/74] Fix weight tests Signed-off-by: georgepisaltu --- substrate/bin/node/bench/src/import.rs | 7 +- substrate/bin/node/cli/tests/basic.rs | 102 ++++++++++++++++++------- substrate/bin/node/cli/tests/fees.rs | 4 +- 3 files changed, 80 insertions(+), 33 deletions(-) diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs index 0b972650ef5a..8334d230a8d2 100644 --- a/substrate/bin/node/bench/src/import.rs +++ b/substrate/bin/node/bench/src/import.rs @@ -121,10 +121,10 @@ impl core::Benchmark for ImportBenchmark { .inspect_state(|| { match self.block_type { BlockType::RandomTransfersKeepAlive => { - // should be 9 per signed extrinsic + 1 per unsigned + // should be 10 per signed extrinsic + 1 per unsigned // we have 2 unsigned (timestamp and glutton bloat) while the rest are // signed in the block. 
- // those 9 events per signed are: + // those 10 events per signed are: // - transaction paid for the transaction payment // - withdraw (Balances::Withdraw) for charging the transaction fee // - new account (System::NewAccount) as we always transfer fund to @@ -132,12 +132,13 @@ impl core::Benchmark for ImportBenchmark { // - endowed (Balances::Endowed) for this new account // - issued (Balances::Issued) as total issuance is increased // - successful transfer (Event::Transfer) for this transfer operation + // - deposit (Balances::Deposit) for depositing the tx fee refund // - 2x deposit (Balances::Deposit and Treasury::Deposit) for depositing // the transaction fee into the treasury // - extrinsic success assert_eq!( kitchensink_runtime::System::events().len(), - (self.block.extrinsics.len() - 2) * 9 + 2, + (self.block.extrinsics.len() - 2) * 10 + 2, ); }, BlockType::Noop => { diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 8735b84288c2..16ee2e073773 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -67,6 +67,14 @@ fn transfer_fee(extrinsic: &E) -> Balance { TransactionPayment::compute_fee(extrinsic.encode().len() as u32, &info, 0) } +/// Default transfer fee, same as `transfer_fee`, but with a weight refund factored in. +fn transfer_fee_with_refund(extrinsic: &E, weight_refund: Weight) -> Balance { + let mut info = default_transfer_call().get_dispatch_info(); + info.extension_weight = TxExtension::weight(); + let post_info = (Some(info.total_weight().saturating_sub(weight_refund)), info.pays_fee).into(); + TransactionPayment::compute_actual_fee(extrinsic.encode().len() as u32, &info, &post_info, 0) +} + fn xt() -> UncheckedExtrinsic { sign(CheckedExtrinsic { format: sp_runtime::generic::ExtrinsicFormat::Signed(alice(), tx_ext(0, 0)), @@ -257,13 +265,14 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -297,13 +306,14 @@ fn successful_execution_with_foreign_code_gives_ok() { let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0; assert!(r.is_ok()); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0; assert!(r.is_ok()); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 
42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } @@ -316,6 +326,8 @@ fn full_native_block_import_works() { let mut alice_last_known_balance: Balance = Default::default(); let mut fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let transfer_weight = default_transfer_call().get_dispatch_info().call_weight.saturating_add( ::BlockWeights::get() @@ -334,7 +346,7 @@ fn full_native_block_import_works() { executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); let events = vec![ @@ -364,25 +376,33 @@ fn full_native_block_import_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + who: alice().into(), + amount: fees - fees_after_refund, + }), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees * 2 / 10, + amount: fees_after_refund * 2 / 10, }), topics: vec![], }, @@ -391,7 +411,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -400,7 +420,8 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - weight: transfer_weight.saturating_add(TxExtension::weight()), + weight: transfer_weight + .saturating_add(TxExtension::weight().saturating_sub(weight_refund)), class: Default::default(), pays: Default::default(), }), @@ -412,15 +433,17 @@ fn full_native_block_import_works() { fees = t.execute_with(|| transfer_fee(&xt())); let pot = t.execute_with(|| Treasury::pot()); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees_after_refund); let 
events = vec![ EventRecord { phase: Phase::Initialization, @@ -456,25 +479,33 @@ fn full_native_block_import_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + who: bob().into(), + amount: fees - fees_after_refund, + }), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -483,7 +514,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: bob().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -492,7 +523,8 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(1), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - weight: transfer_weight.saturating_add(TxExtension::weight()), + weight: transfer_weight + .saturating_add(TxExtension::weight().saturating_sub(weight_refund)), class: Default::default(), pays: Default::default(), }), @@ -515,25 +547,33 @@ fn full_native_block_import_works() { }), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { + who: alice().into(), + amount: fees - fees_after_refund, + }), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Deposit { who: pallet_treasury::Pallet::::account_id(), - amount: fees * 8 / 10, + amount: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Treasury(pallet_treasury::Event::Deposit { - value: fees * 8 / 10, + value: fees_after_refund * 8 / 10, }), topics: vec![], }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::Balances(pallet_balances::Event::Rescinded { - amount: fees - fees * 8 / 10, + amount: fees_after_refund - fees_after_refund * 8 / 10, }), topics: vec![], }, @@ -542,7 +582,7 @@ fn full_native_block_import_works() { event: RuntimeEvent::TransactionPayment( pallet_transaction_payment::Event::TransactionFeePaid { who: alice().into(), - actual_fee: fees, + actual_fee: fees_after_refund, tip: 0, }, ), @@ -551,7 +591,8 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(2), event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess { - weight: transfer_weight.saturating_add(TxExtension::weight()), + weight: transfer_weight + .saturating_add(TxExtension::weight().saturating_sub(weight_refund)), class: Default::default(), pays: Default::default(), }), @@ -569,26 +610,28 @@ fn full_wasm_block_import_works() { let (block1, block2) = blocks(); let mut alice_last_known_balance: Balance = Default::default(); - let mut fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = 
pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap(); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); alice_last_known_balance = Balances::total_balance(&alice()); }); - fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap(); t.execute_with(|| { assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, + alice_last_known_balance - 10 * DOLLARS - fees_after_refund, ); - assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - 1 * fees_after_refund); }); } @@ -830,7 +873,8 @@ fn successful_execution_gives_ok() { assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS); }); - let fees = t.execute_with(|| transfer_fee(&xt())); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund)); let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())) .0 @@ -841,7 +885,7 @@ fn successful_execution_gives_ok() { .expect("Extrinsic failed"); t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees_after_refund); assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); }); } diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index d27c9c2ffd5c..906f35b94afb 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -179,7 +179,9 @@ fn transaction_fee_is_correct() { let mut info = default_transfer_call().get_dispatch_info(); info.extension_weight = TxExtension::weight(); - let weight = info.total_weight(); + let mut weight = info.total_weight(); + let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + weight.saturating_reduce(weight_refund); let weight_fee = IdentityFee::::weight_to_fee(&weight); // we know that weight to fee multiplier is effect-less in block 1. 
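For reference, the refund accounting that the tests above now assert can be summarized as follows (a sketch using our own symbols: w_call is the transfer call weight, w_ext the declared weight of the full TxExtension, w_charge the declared weight of the ChargeAssetTxPayment extension alone, and w_native the benchmarked weight of its native-asset path):

\[ w_{\text{refund}} = w_{\text{charge}} - w_{\text{native}}, \qquad \text{fee}_{\text{actual}} = \text{compute\_actual\_fee}\bigl(\text{len},\; w_{\text{call}} + w_{\text{ext}} - w_{\text{refund}}\bigr) \]

The difference fees - fees_after_refund is deposited back to the payer, which is the extra Balances::Deposit event that raises the per-signed-extrinsic event count from 9 to 10.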
From 2993b0008e2ec4040be91868bf5f48a892508c3a Mon Sep 17 00:00:00 2001 From: Egor_P Date: Fri, 9 Aug 2024 10:01:55 +0200 Subject: [PATCH 07/74] Add stable release tag as an input parameter (#5282) This PR adds the possibility to set the docker stable release tag as an input parameter to the produced docker images, so that it matches with the release version --- .github/scripts/common/lib.sh | 13 ++++++ .../workflows/release-50_publish-docker.yml | 42 +++++++++++++++---- 2 files changed, 46 insertions(+), 9 deletions(-) diff --git a/.github/scripts/common/lib.sh b/.github/scripts/common/lib.sh index 33ef2d3e7eda..bfb3120ad9bb 100755 --- a/.github/scripts/common/lib.sh +++ b/.github/scripts/common/lib.sh @@ -315,6 +315,7 @@ function import_gpg_keys() { ) & done wait + gpg -k $SEC } # Check the GPG signature for a given binary @@ -457,3 +458,15 @@ function get_polkadot_node_version_from_code() { # Remove the semicolon sed 's/;//g' } + +validate_stable_tag() { + tag="$1" + pattern='^stable[0-9]+(-[0-9]+)?$' + + if [[ $tag =~ $pattern ]]; then + echo $tag + else + echo "The input '$tag' does not match the pattern." + exit 1 + fi +} diff --git a/.github/workflows/release-50_publish-docker.yml b/.github/workflows/release-50_publish-docker.yml index cda10f2ebf15..f09ecf1c7998 100644 --- a/.github/workflows/release-50_publish-docker.yml +++ b/.github/workflows/release-50_publish-docker.yml @@ -45,7 +45,7 @@ on: type: string default: docker.io - # The owner is often the same than the Docker Hub username but does ont have to be. + # The owner is often the same as the Docker Hub username but does ont have to be. # In our case, it is not. owner: description: Owner of the container image repo @@ -58,6 +58,10 @@ on: default: v0.9.18 required: true + stable_tag: + description: Tag matching the actual stable release version in the format stableYYMM or stableYYMM-X for patch releases + required: true + permissions: contents: write @@ -74,6 +78,29 @@ env: VERSION: ${{ inputs.version }} jobs: + validate-inputs: + runs-on: ubuntu-latest + outputs: + stable_tag: ${{ steps.validate_inputs.outputs.stable_tag }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Validate inputs + id: validate_inputs + run: | + . ./.github/scripts/common/lib.sh + + VERSION=$(filter_version_from_input "${{ inputs.version }}") + echo "VERSION=${VERSION}" >> $GITHUB_ENV + + RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + echo "RELEASE_ID=${RELEASE_ID}" >> $GITHUB_ENV + + STABLE_TAG=$(validate_stable_tag ${{ inputs.stable_tag }}) + echo "stable_tag=${STABLE_TAG}" >> $GITHUB_OUTPUT + fetch-artifacts: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest @@ -102,9 +129,6 @@ jobs: run: | . ./.github/scripts/common/lib.sh - VERSION=$(filter_version_from_input "${{ inputs.version }}") - echo "VERSION=${VERSION}" >> $GITHUB_ENV - fetch_release_artifacts_from_s3 - name: Fetch chain-spec-builder rc artifacts or release artifacts based on release id @@ -112,7 +136,7 @@ jobs: if: ${{ env.EVENT_NAME == 'workflow_dispatch' && inputs.binary == 'chain-spec-builder' }} run: | . 
./.github/scripts/common/lib.sh - RELEASE_ID=$(check_release_id "${{ inputs.release_id }}") + fetch_release_artifacts - name: Upload artifacts @@ -124,7 +148,7 @@ jobs: build-container: # this job will be triggered for the polkadot-parachain rc and release or polkadot rc image build if: ${{ inputs.binary == 'polkadot-parachain' || inputs.binary == 'chain-spec-builder' || inputs.image_type == 'rc' }} runs-on: ubuntu-latest - needs: fetch-artifacts + needs: [fetch-artifacts, validate-inputs] environment: release steps: @@ -179,7 +203,7 @@ jobs: release=$( echo $VERSION | cut -f1 -d- ) echo "tag=latest" >> $GITHUB_OUTPUT echo "release=${release}" >> $GITHUB_OUTPUT - echo "stable=stable" >> $GITHUB_OUTPUT + echo "stable=${{ needs.validate-inputs.outputs.stable_tag }}" >> $GITHUB_OUTPUT - name: Build Injected Container image for polkadot rc or chain-spec-builder if: ${{ env.BINARY == 'polkadot' || env.BINARY == 'chain-spec-builder' }} @@ -257,7 +281,7 @@ jobs: build-polkadot-release-container: # this job will be triggered for polkadot release build if: ${{ inputs.binary == 'polkadot' && inputs.image_type == 'release' }} runs-on: ubuntu-latest - needs: fetch-latest-debian-package-version + needs: [fetch-latest-debian-package-version, validate-inputs] environment: release steps: - name: Checkout sources @@ -295,7 +319,7 @@ jobs: # TODO: The owner should be used below but buildx does not resolve the VARs # TODO: It would be good to get rid of this GHA that we don't really need. tags: | - parity/polkadot:stable + parity/polkadot:${{ needs.validate-inputs.outputs.stable_tag }} parity/polkadot:latest parity/polkadot:${{ needs.fetch-latest-debian-package-version.outputs.polkadot_container_tag }} build-args: | From 87280eb52537a355c67c9b9cee8ff1f97d02d67a Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Fri, 9 Aug 2024 13:03:07 +0200 Subject: [PATCH 08/74] Synchronize templates through PRs, instead of pushes (#5291) Despite what we had in the [original request](https://github.com/paritytech/polkadot-sdk/issues/3155#issuecomment-1979037109), I'm proposing a change to open a PR to the destination template repositories instead of pushing the code. This will give it a chance to run through the destination CI before making changes, and to set stricter branch protection in the destination repos. --- .github/workflows/misc-sync-templates.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/misc-sync-templates.yml b/.github/workflows/misc-sync-templates.yml index d22dc8724f37..c06beb5e98eb 100644 --- a/.github/workflows/misc-sync-templates.yml +++ b/.github/workflows/misc-sync-templates.yml @@ -166,9 +166,13 @@ jobs: title: "[Don't merge] Update the ${{ matrix.template }} template to ${{ github.event.inputs.stable_release_branch }}" body: "The template has NOT been successfully built and needs to be inspected." branch: "update-template/${{ github.event.inputs.stable_release_branch }}" - - name: Push changes - run: | - git add -A . 
- git commit --allow-empty -m "Update to ${{ github.event.inputs.stable_release_branch }} triggered by ${{ github.event_name }}" - git push - working-directory: "${{ env.template-path }}" + - name: Create PR on success + uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v5 + with: + path: "${{ env.template-path }}" + token: ${{ steps.app_token.outputs.token }} + add-paths: | + ./* + title: "Update the ${{ matrix.template }} template to ${{ github.event.inputs.stable_release_branch }}" + body: "This synchronizes the template to the ${{ github.event.inputs.stable_release_branch }} branch." + branch: "update-template/${{ github.event.inputs.stable_release_branch }}" From b49509baf07a8938318e6e0b46cd266f935a936b Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Fri, 9 Aug 2024 15:45:35 +0300 Subject: [PATCH 09/74] Update BHR and BHW runtime versions (#5300) Updating the BHR and BHW runtime versions as a result of the changes in https://github.com/paritytech/polkadot-sdk/pull/5074/ --- .../bridge-hubs/bridge-hub-rococo/src/lib.rs | 76 +++++++++---------- .../bridge-hubs/bridge-hub-westend/src/lib.rs | 72 +++++++++--------- 2 files changed, 74 insertions(+), 74 deletions(-) diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index c8face15627f..c65880771e08 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -219,7 +219,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-rococo"), impl_name: create_runtime_str!("bridge-hub-rococo"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_016_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -1597,42 +1597,42 @@ mod tests { use bp_polkadot_core::SuffixedCommonSignedExtensionExt; sp_io::TestExternalities::default().execute_with(|| { - frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( - frame_system::CheckNonZeroSender::new(), - frame_system::CheckSpecVersion::new(), - frame_system::CheckTxVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(10), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(10), - BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), - bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), - ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - - // for BridgeHubRococo - { - let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bhr_indirect_payload.additional_signed().unwrap().encode() - ) - } - }); + frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); + let payload: SignedExtra = ( + frame_system::CheckNonZeroSender::new(), + 
frame_system::CheckSpecVersion::new(), + frame_system::CheckTxVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(10), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(10), + BridgeRejectObsoleteHeadersAndMessages, + ( + bridge_to_westend_config::OnBridgeHubRococoRefundBridgeHubWestendMessages::default(), + bridge_to_bulletin_config::OnBridgeHubRococoRefundRococoBulletinMessages::default(), + ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + frame_metadata_hash_extension::CheckMetadataHash::new(false), + ); + + // for BridgeHubRococo + { + let bhr_indirect_payload = bp_bridge_hub_rococo::SignedExtension::from_params( + VERSION.spec_version, + VERSION.transaction_version, + bp_runtime::TransactionEra::Immortal, + System::block_hash(BlockNumber::zero()), + 10, + 10, + (((), ()), ((), ())), + ); + assert_eq!(payload.encode().split_last().unwrap().1, bhr_indirect_payload.encode()); + assert_eq!( + payload.additional_signed().unwrap().encode().split_last().unwrap().1, + bhr_indirect_payload.additional_signed().unwrap().encode() + ) + } + }); } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index 7384bf2850fe..1c26742fd67f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -198,7 +198,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("bridge-hub-westend"), impl_name: create_runtime_str!("bridge-hub-westend"), authoring_version: 1, - spec_version: 1_015_000, + spec_version: 1_016_000, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 5, @@ -1281,40 +1281,40 @@ mod tests { use bp_polkadot_core::SuffixedCommonSignedExtensionExt; sp_io::TestExternalities::default().execute_with(|| { - frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); - let payload: SignedExtra = ( - frame_system::CheckNonZeroSender::new(), - frame_system::CheckSpecVersion::new(), - frame_system::CheckTxVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(10), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(10), - BridgeRejectObsoleteHeadersAndMessages, - ( - bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), - ), - cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), - frame_metadata_hash_extension::CheckMetadataHash::new(false), - ); - - { - let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( - VERSION.spec_version, - VERSION.transaction_version, - bp_runtime::TransactionEra::Immortal, - System::block_hash(BlockNumber::zero()), - 10, - 10, - (((), ()), ((), ())), - ); - assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode()); - assert_eq!( - payload.additional_signed().unwrap().encode().split_last().unwrap().1, - bh_indirect_payload.additional_signed().unwrap().encode() - ) - } - }); + frame_system::BlockHash::::insert(BlockNumber::zero(), Hash::default()); + let payload: SignedExtra = ( + frame_system::CheckNonZeroSender::new(), + frame_system::CheckSpecVersion::new(), + frame_system::CheckTxVersion::new(), + frame_system::CheckGenesis::new(), + 
frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(10), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(10), + BridgeRejectObsoleteHeadersAndMessages, + ( + bridge_to_rococo_config::OnBridgeHubWestendRefundBridgeHubRococoMessages::default(), + ), + cumulus_primitives_storage_weight_reclaim::StorageWeightReclaim::new(), + frame_metadata_hash_extension::CheckMetadataHash::new(false), + ); + + { + let bh_indirect_payload = bp_bridge_hub_westend::SignedExtension::from_params( + VERSION.spec_version, + VERSION.transaction_version, + bp_runtime::TransactionEra::Immortal, + System::block_hash(BlockNumber::zero()), + 10, + 10, + (((), ()), ((), ())), + ); + assert_eq!(payload.encode().split_last().unwrap().1, bh_indirect_payload.encode()); + assert_eq!( + payload.additional_signed().unwrap().encode().split_last().unwrap().1, + bh_indirect_payload.additional_signed().unwrap().encode() + ) + } + }); } } From 32b87605182aabdc3e95aa1882ef7bca1ce34e46 Mon Sep 17 00:00:00 2001 From: Adrian Catangiu Date: Fri, 9 Aug 2024 15:53:27 +0300 Subject: [PATCH 10/74] [tests] make emulated setup closer to ecosystem reality (#5301) In the real world, not all assets are sufficient. This aligns our emulated networks to that reality. Only DOT and USDT are sufficient "by default". --- .../assets/asset-hub-rococo/src/genesis.rs | 4 +-- .../assets/asset-hub-westend/src/genesis.rs | 9 ++++-- .../parachains/testing/penpal/src/genesis.rs | 4 ++- .../src/tests/reserve_transfer.rs | 2 +- .../asset-hub-rococo/src/tests/treasury.rs | 16 ++++++----- .../src/tests/fellowship_treasury.rs | 28 ++++++++----------- .../src/tests/reserve_transfer.rs | 2 +- .../asset-hub-westend/src/tests/treasury.rs | 28 ++++++++----------- .../runtimes/testing/penpal/src/xcm_config.rs | 4 +++ 9 files changed, 50 insertions(+), 47 deletions(-) diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs index 82f86e2b32ef..5b70ed490c63 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/genesis.rs @@ -70,7 +70,7 @@ pub fn genesis() -> Storage { }, assets: asset_hub_rococo_runtime::AssetsConfig { assets: vec![ - (RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), true, ED), + (RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), false, ED), (USDT_ID, AssetHubRococoAssetOwner::get(), true, ED), ], ..Default::default() @@ -81,7 +81,7 @@ pub fn genesis() -> Storage { ( PenpalTeleportableAssetLocation::get(), PenpalSiblingSovereignAccount::get(), - true, + false, ED, ), ], diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs index fd84030ed13b..d20e059f9fea 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/genesis.rs @@ -21,7 +21,7 @@ use sp_core::{sr25519, storage::Storage}; use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, PenpalSiblingSovereignAccount, 
PenpalTeleportableAssetLocation, RESERVABLE_ASSET_ID, - SAFE_XCM_VERSION, + SAFE_XCM_VERSION, USDT_ID, }; use parachains_common::{AccountId, Balance}; @@ -65,7 +65,10 @@ pub fn genesis() -> Storage { ..Default::default() }, assets: asset_hub_westend_runtime::AssetsConfig { - assets: vec![(RESERVABLE_ASSET_ID, AssetHubWestendAssetOwner::get(), true, ED)], + assets: vec![ + (RESERVABLE_ASSET_ID, AssetHubWestendAssetOwner::get(), false, ED), + (USDT_ID, AssetHubWestendAssetOwner::get(), true, ED), + ], ..Default::default() }, foreign_assets: asset_hub_westend_runtime::ForeignAssetsConfig { @@ -74,7 +77,7 @@ pub fn genesis() -> Storage { ( PenpalTeleportableAssetLocation::get(), PenpalSiblingSovereignAccount::get(), - true, + false, ED, ), ], diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs index 260875088bbc..38c94b34aa2e 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/testing/penpal/src/genesis.rs @@ -22,7 +22,7 @@ use emulated_integration_tests_common::{ accounts, build_genesis_storage, collators, get_account_id_from_seed, SAFE_XCM_VERSION, }; use parachains_common::{AccountId, Balance}; -use penpal_runtime::xcm_config::{LocalReservableFromAssetHub, RelayLocation}; +use penpal_runtime::xcm_config::{LocalReservableFromAssetHub, RelayLocation, UsdtFromAssetHub}; // Penpal pub const PARA_ID_A: u32 = 2000; pub const PARA_ID_B: u32 = 2001; @@ -81,6 +81,8 @@ pub fn genesis(para_id: u32) -> Storage { (RelayLocation::get(), PenpalAssetOwner::get(), true, ED), // Sufficient AssetHub asset representation (LocalReservableFromAssetHub::get(), PenpalAssetOwner::get(), true, ED), + // USDT from Asset Hub + (UsdtFromAssetHub::get(), PenpalAssetOwner::get(), true, ED), ], ..Default::default() }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 8b9fedcd4947..313fa953dd05 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -939,7 +939,7 @@ fn reserve_transfer_assets_from_system_para_to_para() { ); } -/// Reserve Transfers of a foreign asset and native asset from Parachain to System Para should +/// Reserve Transfers of a random asset and native asset from Parachain to System Para should /// work #[test] fn reserve_transfer_assets_from_para_to_system_para() { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index f8190e11c51c..3320392b495d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -14,7 +14,10 @@ // limitations under the License. 
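For readers tracking the genesis hunks above: each entry in the `assets` vector of `pallet_assets::GenesisConfig` is a `(asset_id, owner, is_sufficient, min_balance)` tuple, so flipping the third field is what toggles sufficiency. A minimal sketch, with values copied from the Rococo hunk above:

```rust
// (asset_id, owner, is_sufficient, min_balance)
assets: vec![
	// Not sufficient: an account holding only this asset also needs a
	// native-token provider reference (existential deposit) to stay alive.
	(RESERVABLE_ASSET_ID, AssetHubRococoAssetOwner::get(), false, ED),
	// Sufficient: holding at least `ED` of USDT alone keeps the account alive.
	(USDT_ID, AssetHubRococoAssetOwner::get(), true, ED),
],
```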
use crate::imports::*; -use emulated_integration_tests_common::accounts::{ALICE, BOB}; +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + USDT_ID, +}; use frame_support::{ dispatch::RawOrigin, sp_runtime::traits::Dispatchable, @@ -161,7 +164,6 @@ fn spend_roc_on_asset_hub() { #[test] fn create_and_claim_treasury_spend_in_usdt() { - const ASSET_ID: u32 = 1984; const SPEND_AMOUNT: u128 = 10_000_000; // treasury location from a sibling parachain. let treasury_location: Location = Location::new(1, PalletInstance(18)); @@ -175,7 +177,7 @@ fn create_and_claim_treasury_spend_in_usdt() { let asset_kind = VersionedLocatableAsset::V3 { location: asset_hub_location, asset_id: v3::AssetId::Concrete( - (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(ASSET_ID.into())).into(), + (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(USDT_ID.into())).into(), ), }; // treasury spend beneficiary. @@ -187,9 +189,9 @@ fn create_and_claim_treasury_spend_in_usdt() { type Assets = ::Assets; // USDT created at genesis, mint some assets to the treasury account. - assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); + assert_ok!(>::mint_into(USDT_ID, &treasury_account, SPEND_AMOUNT * 4)); // beneficiary has zero balance. - assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + assert_eq!(>::balance(USDT_ID, &alice,), 0u128,); }); Rococo::execute_with(|| { @@ -231,7 +233,7 @@ fn create_and_claim_treasury_spend_in_usdt() { AssetHubRococo, vec![ RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { - id: id == &ASSET_ID, + id: id == &USDT_ID, from: from == &treasury_account, to: to == &alice, amount: amount == &SPEND_AMOUNT, @@ -241,7 +243,7 @@ fn create_and_claim_treasury_spend_in_usdt() { ] ); // beneficiary received the assets from the treasury. - assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + assert_eq!(>::balance(USDT_ID, &alice,), SPEND_AMOUNT,); }); Rococo::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs index 15f4fe33bddc..9520659712fc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -14,15 +14,17 @@ // limitations under the License. use crate::imports::*; -use emulated_integration_tests_common::accounts::{ALICE, BOB}; -use frame_support::traits::fungibles::{Create, Inspect, Mutate}; +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + USDT_ID, +}; +use frame_support::traits::fungibles::{Inspect, Mutate}; use polkadot_runtime_common::impls::VersionedLocatableAsset; use xcm_executor::traits::ConvertLocation; #[test] fn create_and_claim_treasury_spend() { - const ASSET_ID: u32 = 1984; - const SPEND_AMOUNT: u128 = 1_000_000; + const SPEND_AMOUNT: u128 = 1_000_000_000; // treasury location from a sibling parachain. let treasury_location: Location = Location::new(1, [Parachain(CollectivesWestend::para_id().into()), PalletInstance(65)]); @@ -34,7 +36,7 @@ fn create_and_claim_treasury_spend() { // asset kind to be spent from the treasury. 
let asset_kind = VersionedLocatableAsset::V4 { location: asset_hub_location, - asset_id: AssetId((PalletInstance(50), GeneralIndex(ASSET_ID.into())).into()), + asset_id: AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into()), }; // treasury spend beneficiary. let alice: AccountId = Westend::account_id_of(ALICE); @@ -44,16 +46,10 @@ fn create_and_claim_treasury_spend() { AssetHubWestend::execute_with(|| { type Assets = ::Assets; - // create an asset class and mint some assets to the treasury account. - assert_ok!(>::create( - ASSET_ID, - treasury_account.clone(), - true, - SPEND_AMOUNT / 2 - )); - assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); + // USDT created at genesis, mint some assets to the fellowship treasury account. + assert_ok!(>::mint_into(USDT_ID, &treasury_account, SPEND_AMOUNT * 4)); // beneficiary has zero balance. - assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + assert_eq!(>::balance(USDT_ID, &alice,), 0u128,); }); CollectivesWestend::execute_with(|| { @@ -96,7 +92,7 @@ fn create_and_claim_treasury_spend() { AssetHubWestend, vec![ RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { - id: id == &ASSET_ID, + id: id == &USDT_ID, from: from == &treasury_account, to: to == &alice, amount: amount == &SPEND_AMOUNT, @@ -106,7 +102,7 @@ fn create_and_claim_treasury_spend() { ] ); // beneficiary received the assets from the treasury. - assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + assert_eq!(>::balance(USDT_ID, &alice,), SPEND_AMOUNT,); }); CollectivesWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 65d013a0eec4..82ef74fdab10 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -940,7 +940,7 @@ fn reserve_transfer_assets_from_system_para_to_para() { ); } -/// Reserve Transfers of a foreign asset and native asset from Parachain to System Para should +/// Reserve Transfers of a random asset and native asset from Parachain to System Para should /// work #[test] fn reserve_transfer_assets_from_para_to_system_para() { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index 8cbce3e0d223..b70967184387 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -14,15 +14,17 @@ // limitations under the License. 
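A note on the ids used throughout these treasury tests: `USDT_ID` is 1984, and the Assets pallet sits at instance 50 on Asset Hub, so the locatable asset the treasuries spend resolves roughly as follows (a sketch assembled from the surrounding hunks):

```rust
// "USDT on Asset Hub", addressed from a sibling chain's point of view:
// `location` points at the Asset Hub parachain, `asset_id` selects asset
// 1984 inside the Assets pallet (pallet instance 50).
let asset_kind = VersionedLocatableAsset::V4 {
	location: asset_hub_location,
	asset_id: AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into()),
};
```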
use crate::imports::*; -use emulated_integration_tests_common::accounts::{ALICE, BOB}; -use frame_support::traits::fungibles::{Create, Inspect, Mutate}; +use emulated_integration_tests_common::{ + accounts::{ALICE, BOB}, + USDT_ID, +}; +use frame_support::traits::fungibles::{Inspect, Mutate}; use polkadot_runtime_common::impls::VersionedLocatableAsset; use xcm_executor::traits::ConvertLocation; #[test] fn create_and_claim_treasury_spend() { - const ASSET_ID: u32 = 1984; - const SPEND_AMOUNT: u128 = 1_000_000; + const SPEND_AMOUNT: u128 = 1_000_000_000; // treasury location from a sibling parachain. let treasury_location: Location = Location::new(1, PalletInstance(37)); // treasury account on a sibling parachain. @@ -33,7 +35,7 @@ fn create_and_claim_treasury_spend() { // asset kind to be spend from the treasury. let asset_kind = VersionedLocatableAsset::V4 { location: asset_hub_location, - asset_id: AssetId([PalletInstance(50), GeneralIndex(ASSET_ID.into())].into()), + asset_id: AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into()), }; // treasury spend beneficiary. let alice: AccountId = Westend::account_id_of(ALICE); @@ -43,16 +45,10 @@ fn create_and_claim_treasury_spend() { AssetHubWestend::execute_with(|| { type Assets = ::Assets; - // create an asset class and mint some assets to the treasury account. - assert_ok!(>::create( - ASSET_ID, - treasury_account.clone(), - true, - SPEND_AMOUNT / 2 - )); - assert_ok!(>::mint_into(ASSET_ID, &treasury_account, SPEND_AMOUNT * 4)); + // USDT created at genesis, mint some assets to the treasury account. + assert_ok!(>::mint_into(USDT_ID, &treasury_account, SPEND_AMOUNT * 4)); // beneficiary has zero balance. - assert_eq!(>::balance(ASSET_ID, &alice,), 0u128,); + assert_eq!(>::balance(USDT_ID, &alice,), 0u128,); }); Westend::execute_with(|| { @@ -94,7 +90,7 @@ fn create_and_claim_treasury_spend() { AssetHubWestend, vec![ RuntimeEvent::Assets(pallet_assets::Event::Transferred { asset_id: id, from, to, amount }) => { - id: id == &ASSET_ID, + id: id == &USDT_ID, from: from == &treasury_account, to: to == &alice, amount: amount == &SPEND_AMOUNT, @@ -104,7 +100,7 @@ fn create_and_claim_treasury_spend() { ] ); // beneficiary received the assets from the treasury. - assert_eq!(>::balance(ASSET_ID, &alice,), SPEND_AMOUNT,); + assert_eq!(>::balance(USDT_ID, &alice,), SPEND_AMOUNT,); }); Westend::execute_with(|| { diff --git a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs index eca7c7bbc3c2..d0c421bccaf8 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/xcm_config.rs @@ -282,6 +282,10 @@ parameter_types! { 1, [Parachain(ASSET_HUB_ID), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(RESERVABLE_ASSET_ID.into())] ); + pub UsdtFromAssetHub: Location = Location::new( + 1, + [Parachain(ASSET_HUB_ID), PalletInstance(ASSETS_PALLET_ID), GeneralIndex(1984)] + ); /// The Penpal runtime is utilized for testing with various environment setups. 
/// This storage item provides the opportunity to customize testing scenarios From b1a9ad4d387e8e75932e4bc8950c4535f4c82119 Mon Sep 17 00:00:00 2001 From: Alexander Samusev <41779041+alvicsam@users.noreply.github.com> Date: Fri, 9 Aug 2024 15:05:18 +0200 Subject: [PATCH 11/74] [ci] Move checks to GHA (#5289) Closes https://github.com/paritytech/ci_cd/issues/1012 --- .github/workflows/checks.yml | 90 ++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 .github/workflows/checks.yml diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml new file mode 100644 index 000000000000..054c7d786ca9 --- /dev/null +++ b/.github/workflows/checks.yml @@ -0,0 +1,90 @@ +name: checks + +on: + push: + branches: + - master + pull_request: + types: [opened, synchronize, reopened, ready_for_review, labeled] + merge_group: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: {} + +jobs: + changes: + # TODO: remove once migration is complete or this workflow is fully stable + if: contains(github.event.label.name, 'GHA-migration') + permissions: + pull-requests: read + uses: ./.github/workflows/reusable-check-changed-files.yml + set-image: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. + runs-on: ubuntu-latest + timeout-minutes: 20 + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + cargo-clippy: + runs-on: arc-runners-polkadot-sdk-beefy + needs: [set-image, changes] # , build-frame-omni-bencher ] + if: ${{ needs.changes.outputs.rust }} + timeout-minutes: 40 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + RUSTFLAGS: "-D warnings" + SKIP_WASM_BUILD: 1 + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: script + run: | + forklift cargo clippy --all-targets --locked --workspace + forklift cargo clippy --all-targets --all-features --locked --workspace + check-try-runtime: + runs-on: arc-runners-polkadot-sdk-beefy + needs: [set-image, changes] # , build-frame-omni-bencher ] + if: ${{ needs.changes.outputs.rust }} + timeout-minutes: 40 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - name: script + run: | + forklift cargo check --locked --all --features try-runtime + # this is taken from cumulus + # Check that parachain-template will compile with `try-runtime` feature flag. 
+          forklift cargo check --locked -p parachain-template-node --features try-runtime
+          # add after https://github.com/paritytech/substrate/pull/14502 is merged
+          # experimental code may rely on try-runtime and vice-versa
+          forklift cargo check --locked --all --features try-runtime,experimental
+  # check-core-crypto-features works fast without forklift
+  check-core-crypto-features:
+    runs-on: arc-runners-polkadot-sdk-beefy
+    needs: [set-image, changes] # , build-frame-omni-bencher ]
+    if: ${{ needs.changes.outputs.rust }}
+    timeout-minutes: 30
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
+    steps:
+      - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+      - name: script
+        run: |
+          cd substrate/primitives/core
+          ./check-features-variants.sh
+          cd -
+          cd substrate/primitives/application-crypto
+          ./check-features-variants.sh
+          cd -
+          cd substrate/primitives/keyring
+          ./check-features-variants.sh
+          cd -

From 862860ecc89e077331b72eb66cd7305b931fb9e8 Mon Sep 17 00:00:00 2001
From: Guillaume Thiolliere
Date: Sat, 10 Aug 2024 00:06:59 +0900
Subject: [PATCH 12/74] StorageWeightReclaim: Fix issue when underestimating
 refund. (#5273)

The code reduces or increases the recorded weight by comparing
`benchmarked_weight` and `consumed_weight`. But `benchmarked_weight` was the
pre-dispatch weight, not the post-dispatch weight that is actually written
into the block weight by `CheckWeight`. So whenever
`pre dispatch weight > consumed weight > post dispatch weight`, the reclaim
code was reducing the block weight instead of increasing it.

This might explain https://github.com/paritytech/polkadot-sdk/issues/5229
even better.

@skunert @s0me0ne-unkn0wn

---
 .../storage-weight-reclaim/src/lib.rs         | 48 +++++++++++++++++--
 prdoc/pr_5273.prdoc                           | 10 ++++
 2 files changed, 53 insertions(+), 5 deletions(-)
 create mode 100644 prdoc/pr_5273.prdoc

diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
index f48dd927ee96..5984fa77a2c5 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
@@ -165,15 +165,14 @@ where
 			);
 			return Ok(())
 		};
-		let benchmarked_weight = info.weight.proof_size();
-		let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size);
-
 		// Unspent weight according to the `actual_weight` from `PostDispatchInfo`
 		// This unspent weight will be refunded by the `CheckWeight` extension, so we need to
 		// account for that.
 		let unspent = post_info.calc_unspent(info).proof_size();
-		let storage_size_diff =
-			benchmarked_weight.saturating_sub(unspent).abs_diff(consumed_weight as u64);
+		let benchmarked_weight = info.weight.proof_size().saturating_sub(unspent);
+		let consumed_weight = post_dispatch_proof_size.saturating_sub(pre_dispatch_proof_size);
+
+		let storage_size_diff = benchmarked_weight.abs_diff(consumed_weight as u64);
 
 		// This value will be reclaimed by [`frame_system::CheckWeight`], so we need to calculate
 		// that in.
@@ -294,6 +293,45 @@ mod tests {
 		})
 	}
 
+	#[test]
+	fn underestimating_refund() {
+		// We fixed a bug where `pre dispatch info weight > consumed weight > post info weight`
+		// resulted in an error.
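+		// Worked through with this test's numbers: the benchmarked (pre-dispatch)
+		// proof size is 101, the post-dispatch `actual_weight` reports 99, and the
+		// proof size actually consumed is 100. `CheckWeight` refunds the unspent
+		// 2 (101 - 99), so the reclaim logic must compare 100 against 99, not
+		// against the raw 101: the old code reduced the block weight by 1 here,
+		// while the fixed code accrues the missing 1.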
+
+		// The real cost will be 100 bytes of storage size
+		let mut test_ext = setup_test_externalities(&[0, 100]);
+
+		test_ext.execute_with(|| {
+			set_current_storage_weight(1000);
+
+			// Benchmarked storage weight: 101
+			let info = DispatchInfo { weight: Weight::from_parts(0, 101), ..Default::default() };
+			let post_info = PostDispatchInfo {
+				actual_weight: Some(Weight::from_parts(0, 99)),
+				pays_fee: Default::default(),
+			};
+
+			assert_ok!(CheckWeight::<Test>::do_pre_dispatch(&info, LEN));
+
+			let pre = StorageWeightReclaim::<Test>(PhantomData)
+				.pre_dispatch(&ALICE, CALL, &info, LEN)
+				.unwrap();
+			assert_eq!(pre, Some(0));
+
+			assert_ok!(CheckWeight::<Test>::post_dispatch(None, &info, &post_info, 0, &Ok(())));
+			// We expect an accrual of 1
+			assert_ok!(StorageWeightReclaim::<Test>::post_dispatch(
+				Some(pre),
+				&info,
+				&post_info,
+				LEN,
+				&Ok(())
+			));
+
+			assert_eq!(get_storage_weight().total().proof_size(), 1250);
+		})
+	}
+
 	#[test]
 	fn does_nothing_without_extension() {
 		let mut test_ext = new_test_ext();
diff --git a/prdoc/pr_5273.prdoc b/prdoc/pr_5273.prdoc
new file mode 100644
index 000000000000..981172c6c13f
--- /dev/null
+++ b/prdoc/pr_5273.prdoc
@@ -0,0 +1,10 @@
+title: Fix storage weight reclaim bug.
+
+doc:
+  - audience: Runtime Dev
+    description: |
+      A bug in the storage weight reclaim signed extension is fixed. The bug caused the proof size to be underestimated when the post-dispatch info underestimated the proof size while the pre-dispatch info overestimated it at the same time.
+
+crates:
+  - name: cumulus-primitives-storage-weight-reclaim
+    bump: patch

From 380cd21877843d7668139725c98762c6f617fada Mon Sep 17 00:00:00 2001
From: Serban Iorga
Date: Fri, 9 Aug 2024 18:25:22 +0300
Subject: [PATCH 13/74] Fix bridge zombienet tests (#5306)

Fixes https://github.com/paritytech/polkadot-sdk/issues/5296

---
 docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
index 60698de1d6ad..55b9156e6a0a 100644
--- a/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
+++ b/docker/dockerfiles/bridges_zombienet_tests_injected.Dockerfile
@@ -1,7 +1,7 @@
 # this image is built on top of existing Zombienet image
 ARG ZOMBIENET_IMAGE
 # this image uses substrate-relay image built elsewhere
-ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.6
+ARG SUBSTRATE_RELAY_IMAGE=docker.io/paritytech/substrate-relay:v1.6.8
 # metadata
 ARG VCS_REF

From 47c1b4cd8dd673d25c37b802f44da30a23ec9b9e Mon Sep 17 00:00:00 2001
From: s0me0ne-unkn0wn <48632512+s0me0ne-unkn0wn@users.noreply.github.com>
Date: Fri, 9 Aug 2024 17:59:56 +0200
Subject: [PATCH 14/74] Move PVF code and PoV decompression to PVF host
 workers (#5142)

Closes #5071

This PR aims to
* Move all the blocking decompression from the candidate validation subsystem
  to the PVF host workers;
* Run the candidate validation subsystem on the non-blocking pool again.

Upsides: no blocking operations in the subsystem's main loop. PVF throughput
is not limited by the ability of the subsystem to decompress a lot of stuff.
Correctness and homogeneity improve: artifacts used to be identified by the
hash of the decompressed code, and now they are identified by the hash of the
compressed code, which coincides with the on-chain `ValidationCodeHash`.
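To make the identity change concrete, here is a sketch distilled from the `PvfPrepData` and candidate-validation changes in this patch:

```rust
// The PVF is now built from the code blob exactly as stored on chain, so the
// artifact key coincides with the on-chain `ValidationCodeHash`.
let pvf = PvfPrepData::from_code(
	validation_code.0, // maybe-compressed blob, hashed as-is
	executor_params,
	prep_timeout,
	PrepareJobKind::Compilation,
);
// Decompression happens later, inside the preparation worker, bounded by
// VALIDATION_CODE_BOMB_LIMIT and accounted to the preparation timeout.
```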
Downsides: the PVF code decompression is now accounted for in the PVF preparation timeout (be it pre-checking or actual preparation). Taking into account that the decompression duration is on the order of milliseconds, and the preparation timeout is on the order of seconds, I believe it is negligible. --- Cargo.lock | 3 + .../node/core/candidate-validation/Cargo.toml | 2 +- .../node/core/candidate-validation/src/lib.rs | 134 +++++------- .../core/candidate-validation/src/metrics.rs | 44 ---- .../core/candidate-validation/src/tests.rs | 183 +---------------- .../benches/host_prepare_rococo_runtime.rs | 2 +- polkadot/node/core/pvf/common/src/error.rs | 10 +- polkadot/node/core/pvf/common/src/execute.rs | 4 + polkadot/node/core/pvf/common/src/prepare.rs | 2 + polkadot/node/core/pvf/common/src/pvf.rs | 20 +- .../node/core/pvf/execute-worker/Cargo.toml | 3 + .../node/core/pvf/execute-worker/src/lib.rs | 65 +++++- .../node/core/pvf/prepare-worker/Cargo.toml | 2 + .../benches/prepare_rococo_runtime.rs | 6 +- .../node/core/pvf/prepare-worker/src/lib.rs | 50 ++++- polkadot/node/core/pvf/src/error.rs | 3 + polkadot/node/core/pvf/src/execute/queue.rs | 52 ++++- .../core/pvf/src/execute/worker_interface.rs | 36 ++-- polkadot/node/core/pvf/src/host.rs | 106 +++++++--- polkadot/node/core/pvf/src/metrics.rs | 46 +++++ .../core/pvf/src/prepare/worker_interface.rs | 10 +- polkadot/node/core/pvf/tests/it/adder.rs | 84 ++++---- polkadot/node/core/pvf/tests/it/main.rs | 192 ++++++++++++------ polkadot/node/core/pvf/tests/it/process.rs | 80 +++++--- polkadot/node/overseer/src/lib.rs | 2 +- prdoc/pr_5142.prdoc | 26 +++ 26 files changed, 642 insertions(+), 525 deletions(-) create mode 100644 prdoc/pr_5142.prdoc diff --git a/Cargo.lock b/Cargo.lock index 54a01f12f35c..4db38311fe66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13589,8 +13589,10 @@ dependencies = [ "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", + "polkadot-node-primitives", "polkadot-parachain-primitives", "polkadot-primitives", + "sp-maybe-compressed-blob", "tracing-gum", ] @@ -13605,6 +13607,7 @@ dependencies = [ "nix 0.28.0", "parity-scale-codec", "polkadot-node-core-pvf-common", + "polkadot-node-primitives", "polkadot-primitives", "rayon", "rococo-runtime", diff --git a/polkadot/node/core/candidate-validation/Cargo.toml b/polkadot/node/core/candidate-validation/Cargo.toml index 13ab3e3fba50..fcacc38cae65 100644 --- a/polkadot/node/core/candidate-validation/Cargo.toml +++ b/polkadot/node/core/candidate-validation/Cargo.toml @@ -17,7 +17,6 @@ gum = { workspace = true, default-features = true } sp-keystore = { workspace = true } sp-application-crypto = { workspace = true } -sp-maybe-compressed-blob = { workspace = true, default-features = true } codec = { features = ["bit-vec", "derive"], workspace = true } polkadot-primitives = { workspace = true, default-features = true } @@ -36,5 +35,6 @@ sp-keyring = { workspace = true, default-features = true } futures = { features = ["thread-pool"], workspace = true } assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } diff --git a/polkadot/node/core/candidate-validation/src/lib.rs b/polkadot/node/core/candidate-validation/src/lib.rs index 1985964ebc51..103d29e8d269 100644 --- a/polkadot/node/core/candidate-validation/src/lib.rs +++ 
b/polkadot/node/core/candidate-validation/src/lib.rs @@ -27,9 +27,7 @@ use polkadot_node_core_pvf::{ InternalValidationError, InvalidCandidate as WasmInvalidCandidate, PossiblyInvalidError, PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, }; -use polkadot_node_primitives::{ - BlockData, InvalidCandidate, PoV, ValidationResult, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT, -}; +use polkadot_node_primitives::{InvalidCandidate, PoV, ValidationResult}; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{ @@ -41,9 +39,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util as util; use polkadot_overseer::ActiveLeavesUpdate; -use polkadot_parachain_primitives::primitives::{ - ValidationParams, ValidationResult as WasmValidationResult, -}; +use polkadot_parachain_primitives::primitives::ValidationResult as WasmValidationResult; use polkadot_primitives::{ executor_params::{ DEFAULT_APPROVAL_EXECUTION_TIMEOUT, DEFAULT_BACKING_EXECUTION_TIMEOUT, @@ -504,21 +500,12 @@ where continue; }; - let pvf = match sp_maybe_compressed_blob::decompress( - &validation_code.0, - VALIDATION_CODE_BOMB_LIMIT, - ) { - Ok(code) => PvfPrepData::from_code( - code.into_owned(), - executor_params.clone(), - timeout, - PrepareJobKind::Prechecking, - ), - Err(e) => { - gum::debug!(target: LOG_TARGET, err=?e, "cannot decompress validation code"); - continue - }, - }; + let pvf = PvfPrepData::from_code( + validation_code.0, + executor_params.clone(), + timeout, + PrepareJobKind::Prechecking, + ); active_pvfs.push(pvf); processed_code_hashes.push(code_hash); @@ -651,21 +638,12 @@ where let timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Precheck); - let pvf = match sp_maybe_compressed_blob::decompress( - &validation_code.0, - VALIDATION_CODE_BOMB_LIMIT, - ) { - Ok(code) => PvfPrepData::from_code( - code.into_owned(), - executor_params, - timeout, - PrepareJobKind::Prechecking, - ), - Err(e) => { - gum::debug!(target: LOG_TARGET, err=?e, "precheck: cannot decompress validation code"); - return PreCheckOutcome::Invalid - }, - }; + let pvf = PvfPrepData::from_code( + validation_code.0, + executor_params, + timeout, + PrepareJobKind::Prechecking, + ); match validation_backend.precheck_pvf(pvf).await { Ok(_) => PreCheckOutcome::Valid, @@ -873,41 +851,7 @@ async fn validate_candidate_exhaustive( return Ok(ValidationResult::Invalid(e)) } - let raw_validation_code = match sp_maybe_compressed_blob::decompress( - &validation_code.0, - VALIDATION_CODE_BOMB_LIMIT, - ) { - Ok(code) => code, - Err(e) => { - gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (validation code)"); - - // Code already passed pre-checking, if decompression fails now this most likely means - // some local corruption happened. - return Err(ValidationFailed("Code decompression failed".to_string())) - }, - }; - metrics.observe_code_size(raw_validation_code.len()); - - metrics.observe_pov_size(pov.block_data.0.len(), true); - let raw_block_data = - match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { - Ok(block_data) => BlockData(block_data.to_vec()), - Err(e) => { - gum::info!(target: LOG_TARGET, ?para_id, err=?e, "Invalid candidate (PoV code)"); - - // If the PoV is invalid, the candidate certainly is. 
- return Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure)) - }, - }; - metrics.observe_pov_size(raw_block_data.0.len(), false); - - let params = ValidationParams { - parent_head: persisted_validation_data.parent_head.clone(), - block_data: raw_block_data, - relay_parent_number: persisted_validation_data.relay_parent_number, - relay_parent_storage_root: persisted_validation_data.relay_parent_storage_root, - }; - + let persisted_validation_data = Arc::new(persisted_validation_data); let result = match exec_kind { // Retry is disabled to reduce the chance of nondeterministic blocks getting backed and // honest backers getting slashed. @@ -915,7 +859,7 @@ async fn validate_candidate_exhaustive( let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); let exec_timeout = pvf_exec_timeout(&executor_params, exec_kind); let pvf = PvfPrepData::from_code( - raw_validation_code.to_vec(), + validation_code.0, executor_params, prep_timeout, PrepareJobKind::Compilation, @@ -925,7 +869,8 @@ async fn validate_candidate_exhaustive( .validate_candidate( pvf, exec_timeout, - params.encode(), + persisted_validation_data.clone(), + pov, polkadot_node_core_pvf::Priority::Normal, ) .await @@ -933,9 +878,10 @@ async fn validate_candidate_exhaustive( PvfExecKind::Approval => validation_backend .validate_candidate_with_retry( - raw_validation_code.to_vec(), + validation_code.0, pvf_exec_timeout(&executor_params, exec_kind), - params, + persisted_validation_data.clone(), + pov, executor_params, PVF_APPROVAL_EXECUTION_RETRY_DELAY, polkadot_node_core_pvf::Priority::Critical, @@ -961,6 +907,8 @@ async fn validate_candidate_exhaustive( Ok(ValidationResult::Invalid(InvalidCandidate::Timeout)), Err(ValidationError::Invalid(WasmInvalidCandidate::WorkerReportedInvalid(e))) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError(e))), + Err(ValidationError::Invalid(WasmInvalidCandidate::PoVDecompressionFailure)) => + Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure)), Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)) => Ok(ValidationResult::Invalid(InvalidCandidate::ExecutionError( "ambiguous worker death".to_string(), @@ -1007,7 +955,7 @@ async fn validate_candidate_exhaustive( // invalid. Ok(ValidationResult::Invalid(InvalidCandidate::CommitmentsHashMismatch)) } else { - Ok(ValidationResult::Valid(outputs, persisted_validation_data)) + Ok(ValidationResult::Valid(outputs, (*persisted_validation_data).clone())) } }, } @@ -1020,7 +968,8 @@ trait ValidationBackend { &mut self, pvf: PvfPrepData, exec_timeout: Duration, - encoded_params: Vec, + pvd: Arc, + pov: Arc, // The priority for the preparation job. prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result; @@ -1035,9 +984,10 @@ trait ValidationBackend { /// preparation. async fn validate_candidate_with_retry( &mut self, - raw_validation_code: Vec, + code: Vec, exec_timeout: Duration, - params: ValidationParams, + pvd: Arc, + pov: Arc, executor_params: ExecutorParams, retry_delay: Duration, // The priority for the preparation job. @@ -1046,7 +996,7 @@ trait ValidationBackend { let prep_timeout = pvf_prep_timeout(&executor_params, PvfPrepKind::Prepare); // Construct the PVF a single time, since it is an expensive operation. Cloning it is cheap. 
let pvf = PvfPrepData::from_code( - raw_validation_code, + code, executor_params, prep_timeout, PrepareJobKind::Compilation, @@ -1057,7 +1007,13 @@ trait ValidationBackend { // Use `Priority::Critical` as finality trumps parachain liveliness. let mut validation_result = self - .validate_candidate(pvf.clone(), exec_timeout, params.encode(), prepare_priority) + .validate_candidate( + pvf.clone(), + exec_timeout, + pvd.clone(), + pov.clone(), + prepare_priority, + ) .await; if validation_result.is_ok() { return validation_result @@ -1130,10 +1086,14 @@ trait ValidationBackend { validation_result ); - // Encode the params again when re-trying. We expect the retry case to be relatively - // rare, and we want to avoid unconditionally cloning data. validation_result = self - .validate_candidate(pvf.clone(), new_timeout, params.encode(), prepare_priority) + .validate_candidate( + pvf.clone(), + new_timeout, + pvd.clone(), + pov.clone(), + prepare_priority, + ) .await; } } @@ -1153,13 +1113,13 @@ impl ValidationBackend for ValidationHost { &mut self, pvf: PvfPrepData, exec_timeout: Duration, - encoded_params: Vec, + pvd: Arc, + pov: Arc, // The priority for the preparation job. prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { let (tx, rx) = oneshot::channel(); - if let Err(err) = - self.execute_pvf(pvf, exec_timeout, encoded_params, prepare_priority, tx).await + if let Err(err) = self.execute_pvf(pvf, exec_timeout, pvd, pov, prepare_priority, tx).await { return Err(InternalValidationError::HostCommunication(format!( "cannot send pvf to the validation host, it might have shut down: {:?}", diff --git a/polkadot/node/core/candidate-validation/src/metrics.rs b/polkadot/node/core/candidate-validation/src/metrics.rs index 28fc957ddb1a..1459907aa599 100644 --- a/polkadot/node/core/candidate-validation/src/metrics.rs +++ b/polkadot/node/core/candidate-validation/src/metrics.rs @@ -23,8 +23,6 @@ pub(crate) struct MetricsInner { pub(crate) validate_from_chain_state: prometheus::Histogram, pub(crate) validate_from_exhaustive: prometheus::Histogram, pub(crate) validate_candidate_exhaustive: prometheus::Histogram, - pub(crate) pov_size: prometheus::HistogramVec, - pub(crate) code_size: prometheus::Histogram, } /// Candidate validation metrics. 
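The PoV size observations removed from this subsystem's metrics below do not disappear; they move into the PVF host's execute queue (see the `execute/queue.rs` hunks later in this patch), roughly:

```rust
// The compressed size is recorded when a job is enqueued...
queue.metrics.observe_pov_size(pov.block_data.0.len(), true);
// ...and the decompressed size once the worker reports it back.
queue.metrics.observe_pov_size(pov_size as usize, false);
```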
@@ -70,21 +68,6 @@ impl Metrics { .as_ref() .map(|metrics| metrics.validate_candidate_exhaustive.start_timer()) } - - pub fn observe_code_size(&self, code_size: usize) { - if let Some(metrics) = &self.0 { - metrics.code_size.observe(code_size as f64); - } - } - - pub fn observe_pov_size(&self, pov_size: usize, compressed: bool) { - if let Some(metrics) = &self.0 { - metrics - .pov_size - .with_label_values(&[if compressed { "true" } else { "false" }]) - .observe(pov_size as f64); - } - } } impl metrics::Metrics for Metrics { @@ -121,33 +104,6 @@ impl metrics::Metrics for Metrics { ))?, registry, )?, - pov_size: prometheus::register( - prometheus::HistogramVec::new( - prometheus::HistogramOpts::new( - "polkadot_parachain_candidate_validation_pov_size", - "The compressed and decompressed size of the proof of validity of a candidate", - ) - .buckets( - prometheus::exponential_buckets(16384.0, 2.0, 10) - .expect("arguments are always valid; qed"), - ), - &["compressed"], - )?, - registry, - )?, - code_size: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_candidate_validation_code_size", - "The size of the decompressed WASM validation blob used for checking a candidate", - ) - .buckets( - prometheus::exponential_buckets(16384.0, 2.0, 10) - .expect("arguments are always valid; qed"), - ), - )?, - registry, - )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/core/candidate-validation/src/tests.rs b/polkadot/node/core/candidate-validation/src/tests.rs index 86d855f78b45..55282fdf4ee1 100644 --- a/polkadot/node/core/candidate-validation/src/tests.rs +++ b/polkadot/node/core/candidate-validation/src/tests.rs @@ -20,6 +20,7 @@ use super::*; use assert_matches::assert_matches; use futures::executor; use polkadot_node_core_pvf::PrepareError; +use polkadot_node_primitives::{BlockData, VALIDATION_CODE_BOMB_LIMIT}; use polkadot_node_subsystem::messages::AllMessages; use polkadot_node_subsystem_util::reexports::SubsystemContext; use polkadot_overseer::ActivatedLeaf; @@ -385,7 +386,8 @@ impl ValidationBackend for MockValidateCandidateBackend { &mut self, _pvf: PvfPrepData, _timeout: Duration, - _encoded_params: Vec, + _pvd: Arc, + _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { // This is expected to panic if called more times than expected, indicating an error in the @@ -950,115 +952,6 @@ fn compressed_code_works() { assert_matches!(v, Ok(ValidationResult::Valid(_, _))); } -#[test] -fn code_decompression_failure_is_error() { - let validation_data = PersistedValidationData { max_pov_size: 1024, ..Default::default() }; - let pov = PoV { block_data: BlockData(vec![1; 32]) }; - let head_data = HeadData(vec![1, 1, 1]); - - let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; - let validation_code = - sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1) - .map(ValidationCode) - .unwrap(); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - head_data.hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let validation_result = WasmValidationResult { - head_data, - new_validation_code: None, - upward_messages: Default::default(), - horizontal_messages: Default::default(), - processed_downward_messages: 0, - hrmp_watermark: 0, - }; - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - - let pool = TaskExecutor::new(); - let 
(_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< - AllMessages, - _, - >(pool.clone()); - - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecKind::Backing, - &Default::default(), - )); - - assert_matches!(v, Err(_)); -} - -#[test] -fn pov_decompression_failure_is_invalid() { - let validation_data = - PersistedValidationData { max_pov_size: POV_BOMB_LIMIT as u32, ..Default::default() }; - let head_data = HeadData(vec![1, 1, 1]); - - let raw_block_data = vec![2u8; POV_BOMB_LIMIT + 1]; - let pov = sp_maybe_compressed_blob::compress(&raw_block_data, POV_BOMB_LIMIT + 1) - .map(|raw| PoV { block_data: BlockData(raw) }) - .unwrap(); - - let validation_code = ValidationCode(vec![2; 16]); - - let descriptor = make_valid_candidate_descriptor( - ParaId::from(1_u32), - dummy_hash(), - validation_data.hash(), - pov.hash(), - validation_code.hash(), - head_data.hash(), - dummy_hash(), - Sr25519Keyring::Alice, - ); - - let validation_result = WasmValidationResult { - head_data, - new_validation_code: None, - upward_messages: Default::default(), - horizontal_messages: Default::default(), - processed_downward_messages: 0, - hrmp_watermark: 0, - }; - - let candidate_receipt = CandidateReceipt { descriptor, commitments_hash: Hash::zero() }; - - let pool = TaskExecutor::new(); - let (_ctx, _ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< - AllMessages, - _, - >(pool.clone()); - - let v = executor::block_on(validate_candidate_exhaustive( - MockValidateCandidateBackend::with_hardcoded_result(Ok(validation_result)), - validation_data, - validation_code, - candidate_receipt, - Arc::new(pov), - ExecutorParams::default(), - PvfExecKind::Backing, - &Default::default(), - )); - - assert_matches!(v, Ok(ValidationResult::Invalid(InvalidCandidate::PoVDecompressionFailure))); -} - struct MockPreCheckBackend { result: Result<(), PrepareError>, } @@ -1075,7 +968,8 @@ impl ValidationBackend for MockPreCheckBackend { &mut self, _pvf: PvfPrepData, _timeout: Duration, - _encoded_params: Vec, + _pvd: Arc, + _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { unreachable!() @@ -1149,70 +1043,6 @@ fn precheck_works() { executor::block_on(test_fut); } -#[test] -fn precheck_invalid_pvf_blob_compression() { - let relay_parent = [3; 32].into(); - - let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; - let validation_code = - sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1) - .map(ValidationCode) - .unwrap(); - let validation_code_hash = validation_code.hash(); - - let pool = TaskExecutor::new(); - let (mut ctx, mut ctx_handle) = polkadot_node_subsystem_test_helpers::make_subsystem_context::< - AllMessages, - _, - >(pool.clone()); - - let (check_fut, check_result) = precheck_pvf( - ctx.sender(), - MockPreCheckBackend::with_hardcoded_result(Ok(())), - relay_parent, - validation_code_hash, - ) - .remote_handle(); - - let test_fut = async move { - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - rp, - RuntimeApiRequest::ValidationCodeByHash( - vch, - tx - ), - )) => { - assert_eq!(vch, validation_code_hash); - assert_eq!(rp, relay_parent); - - let _ = tx.send(Ok(Some(validation_code.clone()))); - } - ); - assert_matches!( - ctx_handle.recv().await, - 
AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionIndexForChild(tx)) - ) => { - tx.send(Ok(1u32.into())).unwrap(); - } - ); - assert_matches!( - ctx_handle.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(_, RuntimeApiRequest::SessionExecutorParams(_, tx)) - ) => { - tx.send(Ok(Some(ExecutorParams::default()))).unwrap(); - } - ); - assert_matches!(check_result.await, PreCheckOutcome::Invalid); - }; - - let test_fut = future::join(test_fut, check_fut); - executor::block_on(test_fut); -} - #[test] fn precheck_properly_classifies_outcomes() { let inner = |prepare_result, precheck_outcome| { @@ -1292,7 +1122,8 @@ impl ValidationBackend for MockHeadsUp { &mut self, _pvf: PvfPrepData, _timeout: Duration, - _encoded_params: Vec, + _pvd: Arc, + _pov: Arc, _prepare_priority: polkadot_node_core_pvf::Priority, ) -> Result { unreachable!() diff --git a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs index 97a03e6596d1..342128b7cca2 100644 --- a/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/benches/host_prepare_rococo_runtime.rs @@ -116,7 +116,7 @@ fn host_prepare_rococo_runtime(c: &mut Criterion) { cfg.prepare_workers_hard_max_num = 1; }) .await, - pvf.clone().code(), + pvf.clone().maybe_compressed_code(), ) }, |result| async move { diff --git a/polkadot/node/core/pvf/common/src/error.rs b/polkadot/node/core/pvf/common/src/error.rs index 7ee05448d3c5..b0cdba9501db 100644 --- a/polkadot/node/core/pvf/common/src/error.rs +++ b/polkadot/node/core/pvf/common/src/error.rs @@ -94,6 +94,10 @@ pub enum PrepareError { #[codec(index = 11)] #[error("prepare: error interfacing with the kernel: {0}")] Kernel(String), + /// Code blob failed to decompress + #[codec(index = 12)] + #[error("prepare: could not decompress code blob: {0}")] + CouldNotDecompressCodeBlob(String), } impl PrepareError { @@ -106,7 +110,11 @@ impl PrepareError { pub fn is_deterministic(&self) -> bool { use PrepareError::*; match self { - Prevalidation(_) | Preparation(_) | JobError(_) | OutOfMemory => true, + Prevalidation(_) | + Preparation(_) | + JobError(_) | + OutOfMemory | + CouldNotDecompressCodeBlob(_) => true, IoErr(_) | JobDied { .. } | CreateTmpFile(_) | diff --git a/polkadot/node/core/pvf/common/src/execute.rs b/polkadot/node/core/pvf/common/src/execute.rs index 46862f9f80b6..cff3f3b86e95 100644 --- a/polkadot/node/core/pvf/common/src/execute.rs +++ b/polkadot/node/core/pvf/common/src/execute.rs @@ -35,6 +35,8 @@ pub struct WorkerResponse { pub job_response: JobResponse, /// The amount of CPU time taken by the job. pub duration: Duration, + /// The uncompressed PoV size. + pub pov_size: u32, } /// An error occurred in the worker process. @@ -77,6 +79,8 @@ pub enum JobResponse { RuntimeConstruction(String), /// The candidate is invalid. InvalidCandidate(String), + /// PoV decompression failed + PoVDecompressionFailure, } impl JobResponse { diff --git a/polkadot/node/core/pvf/common/src/prepare.rs b/polkadot/node/core/pvf/common/src/prepare.rs index 81e165a7b8a4..4cd1beb30991 100644 --- a/polkadot/node/core/pvf/common/src/prepare.rs +++ b/polkadot/node/core/pvf/common/src/prepare.rs @@ -44,6 +44,8 @@ pub struct PrepareStats { pub cpu_time_elapsed: std::time::Duration, /// The observed memory statistics for the preparation job. pub memory_stats: MemoryStats, + /// The decompressed Wasm code length observed during the preparation. 
+ pub observed_wasm_code_len: u32, } /// Helper struct to contain all the memory stats, including `MemoryAllocationStats` and, if diff --git a/polkadot/node/core/pvf/common/src/pvf.rs b/polkadot/node/core/pvf/common/src/pvf.rs index e2ac36a2406a..4019a8d8b0d0 100644 --- a/polkadot/node/core/pvf/common/src/pvf.rs +++ b/polkadot/node/core/pvf/common/src/pvf.rs @@ -26,9 +26,9 @@ use std::{fmt, sync::Arc, time::Duration}; /// Should be cheap to clone. #[derive(Clone, Encode, Decode)] pub struct PvfPrepData { - /// Wasm code (uncompressed) - code: Arc>, - /// Wasm code hash + /// Wasm code (maybe compressed) + maybe_compressed_code: Arc>, + /// Wasm code hash. code_hash: ValidationCodeHash, /// Executor environment parameters for the session for which artifact is prepared executor_params: Arc, @@ -46,20 +46,20 @@ impl PvfPrepData { prep_timeout: Duration, prep_kind: PrepareJobKind, ) -> Self { - let code = Arc::new(code); - let code_hash = sp_crypto_hashing::blake2_256(&code).into(); + let maybe_compressed_code = Arc::new(code); + let code_hash = sp_crypto_hashing::blake2_256(&maybe_compressed_code).into(); let executor_params = Arc::new(executor_params); - Self { code, code_hash, executor_params, prep_timeout, prep_kind } + Self { maybe_compressed_code, code_hash, executor_params, prep_timeout, prep_kind } } - /// Returns validation code hash for the PVF + /// Returns validation code hash pub fn code_hash(&self) -> ValidationCodeHash { self.code_hash } - /// Returns PVF code - pub fn code(&self) -> Arc> { - self.code.clone() + /// Returns PVF code blob + pub fn maybe_compressed_code(&self) -> Arc> { + self.maybe_compressed_code.clone() } /// Returns executor params diff --git a/polkadot/node/core/pvf/execute-worker/Cargo.toml b/polkadot/node/core/pvf/execute-worker/Cargo.toml index f24b66dc4a0e..6ad340d25612 100644 --- a/polkadot/node/core/pvf/execute-worker/Cargo.toml +++ b/polkadot/node/core/pvf/execute-worker/Cargo.toml @@ -19,8 +19,11 @@ libc = { workspace = true } codec = { features = ["derive"], workspace = true } polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } + [features] builder = [] diff --git a/polkadot/node/core/pvf/execute-worker/src/lib.rs b/polkadot/node/core/pvf/execute-worker/src/lib.rs index 35858ab36cec..4b7c167cc9ec 100644 --- a/polkadot/node/core/pvf/execute-worker/src/lib.rs +++ b/polkadot/node/core/pvf/execute-worker/src/lib.rs @@ -22,6 +22,7 @@ pub use polkadot_node_core_pvf_common::{ error::ExecuteError, executor_interface::execute_artifact, }; +use polkadot_parachain_primitives::primitives::ValidationParams; // NOTE: Initializing logging in e.g. tests will not have an effect in the workers, as they are // separate spawned processes. Run with e.g. `RUST_LOG=parachain::pvf-execute-worker=trace`. 
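The hunks below hand the execute worker the `PersistedValidationData` and `PoV` directly and make it perform PoV decompression itself. The key pattern, visible further down in this file, is bounded decompression:

```rust
// Refuse to inflate beyond POV_BOMB_LIMIT, so a maliciously compressed PoV
// produces a deterministic invalidity verdict instead of unbounded memory use.
match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) {
	Ok(raw) => { /* use `raw` as the candidate's block data */ },
	Err(_) => { /* report JobResponse::PoVDecompressionFailure */ },
}
```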
@@ -50,8 +51,9 @@ use polkadot_node_core_pvf_common::{ }, worker_dir, }; +use polkadot_node_primitives::{BlockData, PoV, POV_BOMB_LIMIT}; use polkadot_parachain_primitives::primitives::ValidationResult; -use polkadot_primitives::ExecutorParams; +use polkadot_primitives::{ExecutorParams, PersistedValidationData}; use std::{ io::{self, Read}, os::{ @@ -85,8 +87,23 @@ fn recv_execute_handshake(stream: &mut UnixStream) -> io::Result { Ok(handshake) } -fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { - let params = framed_recv_blocking(stream)?; +fn recv_request(stream: &mut UnixStream) -> io::Result<(PersistedValidationData, PoV, Duration)> { + let pvd = framed_recv_blocking(stream)?; + let pvd = PersistedValidationData::decode(&mut &pvd[..]).map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "execute pvf recv_request: failed to decode persisted validation data".to_string(), + ) + })?; + + let pov = framed_recv_blocking(stream)?; + let pov = PoV::decode(&mut &pov[..]).map_err(|_| { + io::Error::new( + io::ErrorKind::Other, + "execute pvf recv_request: failed to decode PoV".to_string(), + ) + })?; + let execution_timeout = framed_recv_blocking(stream)?; let execution_timeout = Duration::decode(&mut &execution_timeout[..]).map_err(|_| { io::Error::new( @@ -94,7 +111,7 @@ fn recv_request(stream: &mut UnixStream) -> io::Result<(Vec, Duration)> { "execute pvf recv_request: failed to decode duration".to_string(), ) })?; - Ok((params, execution_timeout)) + Ok((pvd, pov, execution_timeout)) } /// Sends an error to the host and returns the original error wrapped in `io::Error`. @@ -149,7 +166,7 @@ pub fn worker_entrypoint( let execute_thread_stack_size = max_stack_size(&executor_params); loop { - let (params, execution_timeout) = recv_request(&mut stream).map_err(|e| { + let (pvd, pov, execution_timeout) = recv_request(&mut stream).map_err(|e| { map_and_send_err!( e, InternalValidationError::HostCommunication, @@ -197,7 +214,33 @@ pub fn worker_entrypoint( let stream_fd = stream.as_raw_fd(); let compiled_artifact_blob = Arc::new(compiled_artifact_blob); - let params = Arc::new(params); + + let raw_block_data = + match sp_maybe_compressed_blob::decompress(&pov.block_data.0, POV_BOMB_LIMIT) { + Ok(data) => data, + Err(_) => { + send_result::( + &mut stream, + Ok(WorkerResponse { + job_response: JobResponse::PoVDecompressionFailure, + duration: Duration::ZERO, + pov_size: 0, + }), + worker_info, + )?; + continue; + }, + }; + + let pov_size = raw_block_data.len() as u32; + + let params = ValidationParams { + parent_head: pvd.parent_head.clone(), + block_data: BlockData(raw_block_data.to_vec()), + relay_parent_number: pvd.relay_parent_number, + relay_parent_storage_root: pvd.relay_parent_storage_root, + }; + let params = Arc::new(params.encode()); cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { @@ -214,6 +257,7 @@ pub fn worker_entrypoint( worker_info, security_status.can_unshare_user_namespace_and_change_root, usage_before, + pov_size, )? } else { // Fall back to using fork. @@ -228,6 +272,7 @@ pub fn worker_entrypoint( execute_thread_stack_size, worker_info, usage_before, + pov_size, )? 
}; } else { @@ -242,6 +287,7 @@ pub fn worker_entrypoint( execute_thread_stack_size, worker_info, usage_before, + pov_size, )?; } } @@ -300,6 +346,7 @@ fn handle_clone( worker_info: &WorkerInfo, have_unshare_newuser: bool, usage_before: Usage, + pov_size: u32, ) -> io::Result> { use polkadot_node_core_pvf_common::worker::security; @@ -329,6 +376,7 @@ fn handle_clone( worker_info, child, usage_before, + pov_size, execution_timeout, ), Err(security::clone::Error::Clone(errno)) => @@ -347,6 +395,7 @@ fn handle_fork( execute_worker_stack_size: usize, worker_info: &WorkerInfo, usage_before: Usage, + pov_size: u32, ) -> io::Result> { // SAFETY: new process is spawned within a single threaded process. This invariant // is enforced by tests. @@ -367,6 +416,7 @@ fn handle_fork( worker_info, child, usage_before, + pov_size, execution_timeout, ), Err(errno) => Ok(Err(internal_error_from_errno("fork", errno))), @@ -513,6 +563,7 @@ fn handle_parent_process( worker_info: &WorkerInfo, job_pid: Pid, usage_before: Usage, + pov_size: u32, timeout: Duration, ) -> io::Result> { // the read end will wait until all write ends have been closed, @@ -578,7 +629,7 @@ fn handle_parent_process( )))); } - Ok(Ok(WorkerResponse { job_response, duration: cpu_tv })) + Ok(Ok(WorkerResponse { job_response, pov_size, duration: cpu_tv })) }, Err(job_error) => { gum::warn!( diff --git a/polkadot/node/core/pvf/prepare-worker/Cargo.toml b/polkadot/node/core/pvf/prepare-worker/Cargo.toml index 9e0d01fc438b..56235bd82192 100644 --- a/polkadot/node/core/pvf/prepare-worker/Cargo.toml +++ b/polkadot/node/core/pvf/prepare-worker/Cargo.toml @@ -23,10 +23,12 @@ nix = { features = ["process", "resource", "sched"], workspace = true } codec = { features = ["derive"], workspace = true } polkadot-node-core-pvf-common = { workspace = true, default-features = true } +polkadot-node-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } sc-executor-common = { workspace = true, default-features = true } sc-executor-wasmtime = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } [target.'cfg(target_os = "linux")'.dependencies] tikv-jemallocator = "0.5.0" diff --git a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs index d531c90b64b5..49b30dc33ceb 100644 --- a/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs +++ b/polkadot/node/core/pvf/prepare-worker/benches/prepare_rococo_runtime.rs @@ -24,7 +24,11 @@ use polkadot_primitives::ExecutorParams; use std::time::Duration; fn do_prepare_runtime(pvf: PvfPrepData) { - let blob = match prevalidate(&pvf.code()) { + let maybe_compressed_code = pvf.maybe_compressed_code(); + let raw_validation_code = + sp_maybe_compressed_blob::decompress(&maybe_compressed_code, usize::MAX).unwrap(); + + let blob = match prevalidate(&raw_validation_code) { Err(err) => panic!("{:?}", err), Ok(b) => b, }; diff --git a/polkadot/node/core/pvf/prepare-worker/src/lib.rs b/polkadot/node/core/pvf/prepare-worker/src/lib.rs index ef33d11720eb..f8ebb6effcec 100644 --- a/polkadot/node/core/pvf/prepare-worker/src/lib.rs +++ b/polkadot/node/core/pvf/prepare-worker/src/lib.rs @@ -38,6 +38,7 @@ use polkadot_node_core_pvf_common::{ executor_interface::{prepare, prevalidate}, worker::{pipe2_cloexec, PipeFd, WorkerInfo}, }; +use polkadot_node_primitives::VALIDATION_CODE_BOMB_LIMIT; use 
codec::{Decode, Encode}; use polkadot_node_core_pvf_common::{ @@ -105,6 +106,12 @@ impl AsRef<[u8]> for CompiledArtifact { } } +#[derive(Encode, Decode)] +pub struct PrepareOutcome { + pub compiled_artifact: CompiledArtifact, + pub observed_wasm_code_len: u32, +} + /// Get a worker request. fn recv_request(stream: &mut UnixStream) -> io::Result { let pvf = framed_recv_blocking(stream)?; @@ -294,14 +301,23 @@ pub fn worker_entrypoint( ); } -fn prepare_artifact(pvf: PvfPrepData) -> Result { - let blob = match prevalidate(&pvf.code()) { +fn prepare_artifact(pvf: PvfPrepData) -> Result { + let maybe_compressed_code = pvf.maybe_compressed_code(); + let raw_validation_code = + sp_maybe_compressed_blob::decompress(&maybe_compressed_code, VALIDATION_CODE_BOMB_LIMIT) + .map_err(|e| PrepareError::CouldNotDecompressCodeBlob(e.to_string()))?; + let observed_wasm_code_len = raw_validation_code.len() as u32; + + let blob = match prevalidate(&raw_validation_code) { Err(err) => return Err(PrepareError::Prevalidation(format!("{:?}", err))), Ok(b) => b, }; match prepare(blob, &pvf.executor_params()) { - Ok(compiled_artifact) => Ok(CompiledArtifact::new(compiled_artifact)), + Ok(compiled_artifact) => Ok(PrepareOutcome { + compiled_artifact: CompiledArtifact::new(compiled_artifact), + observed_wasm_code_len, + }), Err(err) => Err(PrepareError::Preparation(format!("{:?}", err))), } } @@ -322,6 +338,7 @@ fn runtime_construction_check( struct JobResponse { artifact: CompiledArtifact, memory_stats: MemoryStats, + observed_wasm_code_len: u32, } #[cfg(target_os = "linux")] @@ -500,11 +517,11 @@ fn handle_child_process( "prepare worker", move || { #[allow(unused_mut)] - let mut result = prepare_artifact(pvf); + let mut result = prepare_artifact(pvf).map(|o| (o,)); // Get the `ru_maxrss` stat. If supported, call getrusage for the thread. #[cfg(target_os = "linux")] - let mut result = result.map(|artifact| (artifact, get_max_rss_thread())); + let mut result = result.map(|outcome| (outcome.0, get_max_rss_thread())); // If we are pre-checking, check for runtime construction errors. // @@ -513,7 +530,10 @@ fn handle_child_process( // anyway. if let PrepareJobKind::Prechecking = prepare_job_kind { result = result.and_then(|output| { - runtime_construction_check(output.0.as_ref(), &executor_params)?; + runtime_construction_check( + output.0.compiled_artifact.as_ref(), + &executor_params, + )?; Ok(output) }); } @@ -553,9 +573,9 @@ fn handle_child_process( Ok(ok) => { cfg_if::cfg_if! { if #[cfg(target_os = "linux")] { - let (artifact, max_rss) = ok; + let (PrepareOutcome { compiled_artifact, observed_wasm_code_len }, max_rss) = ok; } else { - let artifact = ok; + let (PrepareOutcome { compiled_artifact, observed_wasm_code_len },) = ok; } } @@ -574,7 +594,11 @@ fn handle_child_process( peak_tracked_alloc: if peak_alloc > 0 { peak_alloc as u64 } else { 0u64 }, }; - Ok(JobResponse { artifact, memory_stats }) + Ok(JobResponse { + artifact: compiled_artifact, + observed_wasm_code_len, + memory_stats, + }) }, } }, @@ -665,7 +689,7 @@ fn handle_parent_process( match result { Err(err) => Err(err), - Ok(JobResponse { artifact, memory_stats }) => { + Ok(JobResponse { artifact, memory_stats, observed_wasm_code_len }) => { // The exit status should have been zero if no error occurred. 
if exit_status != 0 { return Err(PrepareError::JobError(format!( @@ -696,7 +720,11 @@ fn handle_parent_process( let checksum = blake3::hash(&artifact.as_ref()).to_hex().to_string(); Ok(PrepareWorkerSuccess { checksum, - stats: PrepareStats { memory_stats, cpu_time_elapsed: cpu_tv }, + stats: PrepareStats { + memory_stats, + cpu_time_elapsed: cpu_tv, + observed_wasm_code_len, + }, }) }, } diff --git a/polkadot/node/core/pvf/src/error.rs b/polkadot/node/core/pvf/src/error.rs index 8dc96305eadb..a0634106052d 100644 --- a/polkadot/node/core/pvf/src/error.rs +++ b/polkadot/node/core/pvf/src/error.rs @@ -52,6 +52,9 @@ pub enum InvalidCandidate { /// PVF execution (compilation is not included) took more time than was allotted. #[error("invalid: hard timeout")] HardTimeout, + /// Proof-of-validity failed to decompress correctly + #[error("invalid: PoV failed to decompress")] + PoVDecompressionFailure, } /// Possibly transient issue that may resolve after retries. diff --git a/polkadot/node/core/pvf/src/execute/queue.rs b/polkadot/node/core/pvf/src/execute/queue.rs index bb00a5a652d6..11031bf1074a 100644 --- a/polkadot/node/core/pvf/src/execute/queue.rs +++ b/polkadot/node/core/pvf/src/execute/queue.rs @@ -34,12 +34,14 @@ use polkadot_node_core_pvf_common::{ execute::{JobResponse, WorkerError, WorkerResponse}, SecurityStatus, }; -use polkadot_primitives::{ExecutorParams, ExecutorParamsHash}; +use polkadot_node_primitives::PoV; +use polkadot_primitives::{ExecutorParams, ExecutorParamsHash, PersistedValidationData}; use slotmap::HopSlotMap; use std::{ collections::VecDeque, fmt, path::PathBuf, + sync::Arc, time::{Duration, Instant}, }; @@ -68,7 +70,8 @@ pub enum FromQueue { #[derive(Debug)] pub struct PendingExecutionRequest { pub exec_timeout: Duration, - pub params: Vec, + pub pvd: Arc, + pub pov: Arc, pub executor_params: ExecutorParams, pub result_tx: ResultSender, } @@ -76,7 +79,8 @@ pub struct PendingExecutionRequest { struct ExecuteJob { artifact: ArtifactPathId, exec_timeout: Duration, - params: Vec, + pvd: Arc, + pov: Arc, executor_params: ExecutorParams, result_tx: ResultSender, waiting_since: Instant, @@ -293,18 +297,20 @@ async fn purge_dead(metrics: &Metrics, workers: &mut Workers) { fn handle_to_queue(queue: &mut Queue, to_queue: ToQueue) { let ToQueue::Enqueue { artifact, pending_execution_request } = to_queue; - let PendingExecutionRequest { exec_timeout, params, executor_params, result_tx } = + let PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx } = pending_execution_request; gum::debug!( target: LOG_TARGET, validation_code_hash = ?artifact.id.code_hash, "enqueueing an artifact for execution", ); + queue.metrics.observe_pov_size(pov.block_data.0.len(), true); queue.metrics.execute_enqueued(); let job = ExecuteJob { artifact, exec_timeout, - params, + pvd, + pov, executor_params, result_tx, waiting_since: Instant::now(), @@ -352,15 +358,19 @@ async fn handle_job_finish( artifact_id: ArtifactId, result_tx: ResultSender, ) { - let (idle_worker, result, duration, sync_channel) = match worker_result { + let (idle_worker, result, duration, sync_channel, pov_size) = match worker_result { Ok(WorkerInterfaceResponse { worker_response: - WorkerResponse { job_response: JobResponse::Ok { result_descriptor }, duration }, + WorkerResponse { + job_response: JobResponse::Ok { result_descriptor }, + duration, + pov_size, + }, idle_worker, }) => { // TODO: propagate the soft timeout - (Some(idle_worker), Ok(result_descriptor), Some(duration), None) + (Some(idle_worker), 
Ok(result_descriptor), Some(duration), None, Some(pov_size)) }, Ok(WorkerInterfaceResponse { worker_response: WorkerResponse { job_response: JobResponse::InvalidCandidate(err), .. }, @@ -370,6 +380,18 @@ async fn handle_job_finish( Err(ValidationError::Invalid(InvalidCandidate::WorkerReportedInvalid(err))), None, None, + None, + ), + Ok(WorkerInterfaceResponse { + worker_response: + WorkerResponse { job_response: JobResponse::PoVDecompressionFailure, .. }, + idle_worker, + }) => ( + Some(idle_worker), + Err(ValidationError::Invalid(InvalidCandidate::PoVDecompressionFailure)), + None, + None, + None, ), Ok(WorkerInterfaceResponse { worker_response: @@ -393,39 +415,46 @@ async fn handle_job_finish( ))), None, Some(result_rx), + None, ) }, Err(WorkerInterfaceError::InternalError(err)) | Err(WorkerInterfaceError::WorkerError(WorkerError::InternalError(err))) => - (None, Err(ValidationError::Internal(err)), None, None), + (None, Err(ValidationError::Internal(err)), None, None, None), // Either the worker or the job timed out. Kill the worker in either case. Treated as // definitely-invalid, because if we timed out, there's no time left for a retry. Err(WorkerInterfaceError::HardTimeout) | Err(WorkerInterfaceError::WorkerError(WorkerError::JobTimedOut)) => - (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None), + (None, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout)), None, None, None), // "Maybe invalid" errors (will retry). Err(WorkerInterfaceError::CommunicationErr(_err)) => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousWorkerDeath)), None, None, + None, ), Err(WorkerInterfaceError::WorkerError(WorkerError::JobDied { err, .. })) => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::AmbiguousJobDeath(err))), None, None, + None, ), Err(WorkerInterfaceError::WorkerError(WorkerError::JobError(err))) => ( None, Err(ValidationError::PossiblyInvalid(PossiblyInvalidError::JobError(err.to_string()))), None, None, + None, ), }; queue.metrics.execute_finished(); + if let Some(pov_size) = pov_size { + queue.metrics.observe_pov_size(pov_size as usize, false) + } if let Err(ref err) = result { gum::warn!( target: LOG_TARGET, @@ -573,7 +602,8 @@ fn assign(queue: &mut Queue, worker: Worker, job: ExecuteJob) { idle, job.artifact.clone(), job.exec_timeout, - job.params, + job.pvd, + job.pov, ) .await; QueueEvent::StartWork(worker, result, job.artifact.id, job.result_tx) diff --git a/polkadot/node/core/pvf/src/execute/worker_interface.rs b/polkadot/node/core/pvf/src/execute/worker_interface.rs index d15d7c15426e..77bd6bedd75c 100644 --- a/polkadot/node/core/pvf/src/execute/worker_interface.rs +++ b/polkadot/node/core/pvf/src/execute/worker_interface.rs @@ -32,8 +32,9 @@ use polkadot_node_core_pvf_common::{ execute::{Handshake, WorkerError, WorkerResponse}, worker_dir, SecurityStatus, }; -use polkadot_primitives::ExecutorParams; -use std::{path::Path, time::Duration}; +use polkadot_node_primitives::PoV; +use polkadot_primitives::{ExecutorParams, PersistedValidationData}; +use std::{path::Path, sync::Arc, time::Duration}; use tokio::{io, net::UnixStream}; /// Spawns a new worker with the given program path that acts as the worker and the spawn timeout. 
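To recap the PoV-size metric plumbing in the queue changes above: each PoV is observed twice on a single labelled histogram, once with its compressed size when the job is enqueued and once with the size the worker reports back after decompression (both calls appear verbatim in this diff):

    queue.metrics.observe_pov_size(pov.block_data.0.len(), true);  // on enqueue, compressed
    queue.metrics.observe_pov_size(pov_size as usize, false);      // on job finish, decompressed

Using one metric with a `compressed` label, rather than two separate metrics, matches the histogram registered in the metrics.rs change further below.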
@@ -123,7 +124,8 @@ pub async fn start_work( worker: IdleWorker, artifact: ArtifactPathId, execution_timeout: Duration, - validation_params: Vec, + pvd: Arc, + pov: Arc, ) -> Result { let IdleWorker { mut stream, pid, worker_dir } = worker; @@ -137,18 +139,16 @@ pub async fn start_work( ); with_worker_dir_setup(worker_dir, pid, &artifact.path, |worker_dir| async move { - send_request(&mut stream, &validation_params, execution_timeout).await.map_err( - |error| { - gum::warn!( - target: LOG_TARGET, - worker_pid = %pid, - validation_code_hash = ?artifact.id.code_hash, - "failed to send an execute request: {}", - error, - ); - Error::InternalError(InternalValidationError::HostCommunication(error.to_string())) - }, - )?; + send_request(&mut stream, pvd, pov, execution_timeout).await.map_err(|error| { + gum::warn!( + target: LOG_TARGET, + worker_pid = %pid, + validation_code_hash = ?artifact.id.code_hash, + "failed to send an execute request: {}", + error, + ); + Error::InternalError(InternalValidationError::HostCommunication(error.to_string())) + })?; // We use a generous timeout here. This is in addition to the one in the child process, in // case the child stalls. We have a wall clock timeout here in the host, but a CPU timeout @@ -288,10 +288,12 @@ async fn send_execute_handshake(stream: &mut UnixStream, handshake: Handshake) - async fn send_request( stream: &mut UnixStream, - validation_params: &[u8], + pvd: Arc, + pov: Arc, execution_timeout: Duration, ) -> io::Result<()> { - framed_send(stream, validation_params).await?; + framed_send(stream, &pvd.encode()).await?; + framed_send(stream, &pov.encode()).await?; framed_send(stream, &execution_timeout.encode()).await } diff --git a/polkadot/node/core/pvf/src/host.rs b/polkadot/node/core/pvf/src/host.rs index 462631d33b52..44a4cba2fbf8 100644 --- a/polkadot/node/core/pvf/src/host.rs +++ b/polkadot/node/core/pvf/src/host.rs @@ -36,11 +36,14 @@ use polkadot_node_core_pvf_common::{ prepare::PrepareSuccess, pvf::PvfPrepData, }; +use polkadot_node_primitives::PoV; use polkadot_node_subsystem::{SubsystemError, SubsystemResult}; use polkadot_parachain_primitives::primitives::ValidationResult; +use polkadot_primitives::PersistedValidationData; use std::{ collections::HashMap, path::PathBuf, + sync::Arc, time::{Duration, SystemTime}, }; @@ -108,7 +111,8 @@ impl ValidationHost { &mut self, pvf: PvfPrepData, exec_timeout: Duration, - params: Vec, + pvd: Arc, + pov: Arc, priority: Priority, result_tx: ResultSender, ) -> Result<(), String> { @@ -116,7 +120,8 @@ impl ValidationHost { .send(ToHost::ExecutePvf(ExecutePvfInputs { pvf, exec_timeout, - params, + pvd, + pov, priority, result_tx, })) @@ -147,7 +152,8 @@ enum ToHost { struct ExecutePvfInputs { pvf: PvfPrepData, exec_timeout: Duration, - params: Vec, + pvd: Arc, + pov: Arc, priority: Priority, result_tx: ResultSender, } @@ -539,7 +545,7 @@ async fn handle_execute_pvf( awaiting_prepare: &mut AwaitingPrepare, inputs: ExecutePvfInputs, ) -> Result<(), Fatal> { - let ExecutePvfInputs { pvf, exec_timeout, params, priority, result_tx } = inputs; + let ExecutePvfInputs { pvf, exec_timeout, pvd, pov, priority, result_tx } = inputs; let artifact_id = ArtifactId::from_pvf_prep_data(&pvf); let executor_params = (*pvf.executor_params()).clone(); @@ -558,7 +564,8 @@ async fn handle_execute_pvf( artifact: ArtifactPathId::new(artifact_id, path), pending_execution_request: PendingExecutionRequest { exec_timeout, - params, + pvd, + pov, executor_params, result_tx, }, @@ -587,7 +594,8 @@ async fn handle_execute_pvf( 
artifact_id, PendingExecutionRequest { exec_timeout, - params, + pvd, + pov, executor_params, result_tx, }, @@ -598,7 +606,7 @@ async fn handle_execute_pvf( ArtifactState::Preparing { .. } => { awaiting_prepare.add( artifact_id, - PendingExecutionRequest { exec_timeout, params, executor_params, result_tx }, + PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx }, ); }, ArtifactState::FailedToProcess { last_time_failed, num_failures, error } => { @@ -627,7 +635,8 @@ async fn handle_execute_pvf( artifact_id, PendingExecutionRequest { exec_timeout, - params, + pvd, + pov, executor_params, result_tx, }, @@ -648,7 +657,7 @@ async fn handle_execute_pvf( pvf, priority, artifact_id, - PendingExecutionRequest { exec_timeout, params, executor_params, result_tx }, + PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx }, ) .await?; } @@ -770,7 +779,7 @@ async fn handle_prepare_done( // It's finally time to dispatch all the execution requests that were waiting for this artifact // to be prepared. let pending_requests = awaiting_prepare.take(&artifact_id); - for PendingExecutionRequest { exec_timeout, params, executor_params, result_tx } in + for PendingExecutionRequest { exec_timeout, pvd, pov, executor_params, result_tx } in pending_requests { if result_tx.is_canceled() { @@ -793,7 +802,8 @@ async fn handle_prepare_done( artifact: ArtifactPathId::new(artifact_id.clone(), &path), pending_execution_request: PendingExecutionRequest { exec_timeout, - params, + pvd, + pov, executor_params, result_tx, }, @@ -967,6 +977,8 @@ pub(crate) mod tests { use assert_matches::assert_matches; use futures::future::BoxFuture; use polkadot_node_core_pvf_common::prepare::PrepareStats; + use polkadot_node_primitives::BlockData; + use sp_core::H256; const TEST_EXECUTION_TIMEOUT: Duration = Duration::from_secs(3); pub(crate) const TEST_PREPARATION_TIMEOUT: Duration = Duration::from_secs(30); @@ -1223,12 +1235,21 @@ pub(crate) mod tests { async fn execute_pvf_requests() { let mut test = Builder::default().build(); let mut host = test.host_handle(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov1 = Arc::new(PoV { block_data: BlockData(b"pov1".to_vec()) }); + let pov2 = Arc::new(PoV { block_data: BlockData(b"pov2".to_vec()) }); let (result_tx, result_rx_pvf_1_1) = oneshot::channel(); host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf1".to_vec(), + pvd.clone(), + pov1.clone(), Priority::Normal, result_tx, ) @@ -1239,7 +1260,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf1".to_vec(), + pvd.clone(), + pov1, Priority::Critical, result_tx, ) @@ -1250,7 +1272,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(2), TEST_EXECUTION_TIMEOUT, - b"pvf2".to_vec(), + pvd, + pov2, Priority::Normal, result_tx, ) @@ -1382,6 +1405,13 @@ pub(crate) mod tests { async fn test_prepare_done() { let mut test = Builder::default().build(); let mut host = test.host_handle(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); // Test mixed cases of receiving execute and precheck requests // for the same PVF. 
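The `PersistedValidationData`/`PoV` fixture above is repeated in each of the tests that follow; a small helper along these lines (hypothetical, not part of the patch) would remove the duplication:

    fn test_fixture(data: &[u8]) -> (Arc<PersistedValidationData>, Arc<PoV>) {
        // Same values as the inline fixtures used throughout these tests.
        let pvd = Arc::new(PersistedValidationData {
            parent_head: Default::default(),
            relay_parent_number: 1u32,
            relay_parent_storage_root: H256::default(),
            max_pov_size: 4096 * 1024,
        });
        let pov = Arc::new(PoV { block_data: BlockData(data.to_vec()) });
        (pvd, pov)
    }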
@@ -1391,7 +1421,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf2".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx, ) @@ -1438,7 +1469,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(2), TEST_EXECUTION_TIMEOUT, - b"pvf2".to_vec(), + pvd, + pov, Priority::Critical, result_tx, ) @@ -1534,13 +1566,21 @@ pub(crate) mod tests { async fn test_execute_prepare_retry() { let mut test = Builder::default().build(); let mut host = test.host_handle(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); // Submit a execute request that fails. let (result_tx, result_rx) = oneshot::channel(); host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx, ) @@ -1570,7 +1610,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx_2, ) @@ -1592,7 +1633,8 @@ pub(crate) mod tests { host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx_3, ) @@ -1636,13 +1678,21 @@ pub(crate) mod tests { async fn test_execute_prepare_no_retry() { let mut test = Builder::default().build(); let mut host = test.host_handle(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); // Submit an execute request that fails. 
let (result_tx, result_rx) = oneshot::channel(); host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx, ) @@ -1672,7 +1722,8 @@ host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx_2, ) @@ -1694,7 +1745,8 @@ host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf".to_vec(), + pvd.clone(), + pov.clone(), Priority::Critical, result_tx_3, ) @@ -1755,12 +1807,20 @@ async fn cancellation() { let mut test = Builder::default().build(); let mut host = test.host_handle(); + let pvd = Arc::new(PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }); + let pov = Arc::new(PoV { block_data: BlockData(b"pov".to_vec()) }); let (result_tx, result_rx) = oneshot::channel(); host.execute_pvf( PvfPrepData::from_discriminator(1), TEST_EXECUTION_TIMEOUT, - b"pvf1".to_vec(), + pvd, + pov, Priority::Normal, result_tx, ) diff --git a/polkadot/node/core/pvf/src/metrics.rs b/polkadot/node/core/pvf/src/metrics.rs index bc8d300037fe..c59cab464180 100644 --- a/polkadot/node/core/pvf/src/metrics.rs +++ b/polkadot/node/core/pvf/src/metrics.rs @@ -105,6 +105,21 @@ impl Metrics { .observe((memory_stats.peak_tracked_alloc / 1024) as f64); } } + + pub(crate) fn observe_code_size(&self, code_size: usize) { + if let Some(metrics) = &self.0 { + metrics.code_size.observe(code_size as f64); + } + } + + pub(crate) fn observe_pov_size(&self, pov_size: usize, compressed: bool) { + if let Some(metrics) = &self.0 { + metrics + .pov_size + .with_label_values(&[if compressed { "true" } else { "false" }]) + .observe(pov_size as f64); + } + } } #[derive(Clone)] @@ -129,6 +144,8 @@ struct MetricsInner { preparation_max_resident: prometheus::Histogram, // Peak allocation value, tracked by tracking-allocator preparation_peak_tracked_allocation: prometheus::Histogram, + pov_size: prometheus::HistogramVec, + code_size: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -323,6 +340,35 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + // The following metrics were moved here from the candidate validation subsystem. + // Names are kept to avoid breaking existing dashboards. 
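+ // exponential_buckets(16384.0, 2.0, 10) below spans 16 KiB up to 8 MiB, which brackets both decompressed code blobs and PoVs (the tests in this patch use a 4 MiB max_pov_size).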
+ pov_size: prometheus::register( + prometheus::HistogramVec::new( + prometheus::HistogramOpts::new( + "polkadot_parachain_candidate_validation_pov_size", + "The compressed and decompressed size of the proof of validity of a candidate", + ) + .buckets( + prometheus::exponential_buckets(16384.0, 2.0, 10) + .expect("arguments are always valid; qed"), + ), + &["compressed"], + )?, + registry, + )?, + code_size: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_candidate_validation_code_size", + "The size of the decompressed WASM validation blob used for checking a candidate", + ) + .buckets( + prometheus::exponential_buckets(16384.0, 2.0, 10) + .expect("arguments are always valid; qed"), + ), + )?, + registry, + )?, }; Ok(Metrics(Some(inner))) } diff --git a/polkadot/node/core/pvf/src/prepare/worker_interface.rs b/polkadot/node/core/pvf/src/prepare/worker_interface.rs index 22ee93319d84..d29d2717c4b6 100644 --- a/polkadot/node/core/pvf/src/prepare/worker_interface.rs +++ b/polkadot/node/core/pvf/src/prepare/worker_interface.rs @@ -211,7 +211,7 @@ async fn handle_response( // https://github.com/paritytech/polkadot-sdk/issues/2399 let PrepareWorkerSuccess { checksum: _, - stats: PrepareStats { cpu_time_elapsed, memory_stats }, + stats: PrepareStats { cpu_time_elapsed, memory_stats, observed_wasm_code_len }, } = match result.clone() { Ok(result) => result, // Timed out on the child. This should already be logged by the child. @@ -221,6 +221,8 @@ async fn handle_response( Err(err) => return Outcome::Concluded { worker, result: Err(err) }, }; + metrics.observe_code_size(observed_wasm_code_len as usize); + if cpu_time_elapsed > preparation_timeout { // The job didn't complete within the timeout. 
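// (CPU time is measured and reported by the job itself, so an artifact can come back successfully yet past the preparation deadline; such a result is treated as a timeout rather than accepted.)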
gum::warn!( @@ -267,7 +269,11 @@ async fn handle_response( result: Ok(PrepareSuccess { path: artifact_path, size, - stats: PrepareStats { cpu_time_elapsed, memory_stats: memory_stats.clone() }, + stats: PrepareStats { + cpu_time_elapsed, + memory_stats: memory_stats.clone(), + observed_wasm_code_len, + }, }), }, Err(err) => { diff --git a/polkadot/node/core/pvf/tests/it/adder.rs b/polkadot/node/core/pvf/tests/it/adder.rs index 455e8c36c88d..1a95a28fe077 100644 --- a/polkadot/node/core/pvf/tests/it/adder.rs +++ b/polkadot/node/core/pvf/tests/it/adder.rs @@ -18,29 +18,33 @@ use super::TestHost; use codec::{Decode, Encode}; +use polkadot_node_primitives::PoV; use polkadot_parachain_primitives::primitives::{ - BlockData as GenericBlockData, HeadData as GenericHeadData, RelayChainBlockNumber, - ValidationParams, + BlockData as GenericBlockData, HeadData as GenericHeadData, }; +use polkadot_primitives::PersistedValidationData; +use sp_core::H256; use test_parachain_adder::{hash_state, BlockData, HeadData}; #[tokio::test] async fn execute_good_block_on_parent() { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; - let block_data = BlockData { state: 0, add: 512 }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; let host = TestHost::new().await; let ret = host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await @@ -63,18 +67,20 @@ async fn execute_good_chain_on_parent() { for (number, add) in (0..10).enumerate() { let parent_head = HeadData { number: number as u64, parent_hash, post_state: hash_state(last_state) }; - let block_data = BlockData { state: last_state, add }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; let ret = host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: number as RelayChainBlockNumber + 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await @@ -94,23 +100,25 @@ async fn execute_good_chain_on_parent() { #[tokio::test] async fn execute_bad_block_on_parent() { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; - let block_data = BlockData { state: 256, // start state is wrong. 
add: 256, }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; let host = TestHost::new().await; let _err = host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await @@ -124,15 +132,18 @@ async fn stress_spawn() { async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; let block_data = BlockData { state: 0, add: 512 }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; let ret = host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await @@ -161,15 +172,18 @@ async fn execute_can_run_serially() { async fn execute(host: std::sync::Arc) { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; let block_data = BlockData { state: 0, add: 512 }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; let ret = host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await diff --git a/polkadot/node/core/pvf/tests/it/main.rs b/polkadot/node/core/pvf/tests/it/main.rs index 9ad486657512..a4a085318957 100644 --- a/polkadot/node/core/pvf/tests/it/main.rs +++ b/polkadot/node/core/pvf/tests/it/main.rs @@ -17,7 +17,6 @@ //! General PVF host integration tests checking the functionality of the PVF host itself. 
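//! With this change the test harness hands the workers the raw, possibly compressed validation code together with a `PersistedValidationData`/`PoV` pair; the bounded decompression now happens inside the workers rather than in the test host.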
use assert_matches::assert_matches; -use codec::Encode as _; #[cfg(all(feature = "ci-only-tests", target_os = "linux"))] use polkadot_node_core_pvf::SecurityStatus; use polkadot_node_core_pvf::{ @@ -25,10 +24,14 @@ use polkadot_node_core_pvf::{ PossiblyInvalidError, PrepareError, PrepareJobKind, PvfPrepData, ValidationError, ValidationHost, JOB_TIMEOUT_WALL_CLOCK_FACTOR, }; -use polkadot_parachain_primitives::primitives::{BlockData, ValidationParams, ValidationResult}; -use polkadot_primitives::{ExecutorParam, ExecutorParams, PvfExecKind, PvfPrepKind}; +use polkadot_node_primitives::{PoV, POV_BOMB_LIMIT, VALIDATION_CODE_BOMB_LIMIT}; +use polkadot_parachain_primitives::primitives::{BlockData, ValidationResult}; +use polkadot_primitives::{ + ExecutorParam, ExecutorParams, PersistedValidationData, PvfExecKind, PvfPrepKind, +}; +use sp_core::H256; -use std::{io::Write, time::Duration}; +use std::{io::Write, sync::Arc, time::Duration}; use tokio::sync::Mutex; mod adder; @@ -80,9 +83,6 @@ impl TestHost { ) -> Result<(), PrepareError> { let (result_tx, result_rx) = futures::channel::oneshot::channel(); - let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) - .expect("Compression works"); - self.host .lock() .await @@ -103,14 +103,12 @@ impl TestHost { async fn validate_candidate( &self, code: &[u8], - params: ValidationParams, + pvd: PersistedValidationData, + pov: PoV, executor_params: ExecutorParams, ) -> Result { let (result_tx, result_rx) = futures::channel::oneshot::channel(); - let code = sp_maybe_compressed_blob::decompress(code, 16 * 1024 * 1024) - .expect("Compression works"); - self.host .lock() .await @@ -122,7 +120,8 @@ impl TestHost { PrepareJobKind::Compilation, ), TEST_EXECUTION_TIMEOUT, - params.encode(), + Arc::new(pvd), + Arc::new(pov), polkadot_node_core_pvf::Priority::Normal, result_tx, ) @@ -159,19 +158,17 @@ async fn prepare_job_terminates_on_timeout() { #[tokio::test] async fn execute_job_terminates_on_timeout() { let host = TestHost::new().await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; let start = std::time::Instant::now(); let result = host - .validate_candidate( - test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ) + .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) .await; match result { @@ -189,24 +186,23 @@ async fn execute_job_terminates_on_timeout() { async fn ensure_parallel_execution() { // Run some jobs that do not complete, thus timing out. 
let host = TestHost::new().await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; let execute_pvf_future_1 = host.validate_candidate( test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd.clone(), + pov.clone(), Default::default(), ); let execute_pvf_future_2 = host.validate_candidate( test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ); @@ -237,6 +233,13 @@ async fn execute_queue_doesnt_stall_if_workers_died() { cfg.execute_workers_max_num = 5; }) .await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; // Here we spawn 8 validation jobs for the `halt` PVF and share those between 5 workers. The // first five jobs should timeout and the workers killed. For the next 3 jobs a new batch of @@ -245,12 +248,8 @@ async fn execute_queue_doesnt_stall_if_workers_died() { futures::future::join_all((0u8..=8).map(|_| { host.validate_candidate( test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd.clone(), + pov.clone(), Default::default(), ) })) @@ -275,6 +274,13 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { cfg.execute_workers_max_num = 2; }) .await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; let executor_params_1 = ExecutorParams::default(); let executor_params_2 = ExecutorParams::from(&[ExecutorParam::StackLogicalMax(1024)][..]); @@ -288,12 +294,8 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { futures::future::join_all((0u8..6).map(|i| { host.validate_candidate( test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd.clone(), + pov.clone(), match i % 3 { 0 => executor_params_1.clone(), _ => executor_params_2.clone(), @@ -324,6 +326,13 @@ async fn execute_queue_doesnt_stall_with_varying_executor_params() { async fn deleting_prepared_artifact_does_not_dispute() { let host = TestHost::new().await; let cache_dir = host.cache_dir.path(); + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; let _stats = host .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) @@ -347,16 +356,7 @@ async fn deleting_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get recreated. 
let result = host - .validate_candidate( - test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ) + .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) .await; assert_matches!(result, Err(ValidationError::Invalid(InvalidCandidate::HardTimeout))); @@ -367,6 +367,13 @@ async fn deleting_prepared_artifact_does_not_dispute() { async fn corrupted_prepared_artifact_does_not_dispute() { let host = TestHost::new().await; let cache_dir = host.cache_dir.path(); + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; let _stats = host .precheck_pvf(test_parachain_halt::wasm_binary_unwrap(), Default::default()) @@ -400,16 +407,7 @@ async fn corrupted_prepared_artifact_does_not_dispute() { // Try to validate, artifact should get removed because of the corruption. let result = host - .validate_candidate( - test_parachain_halt::wasm_binary_unwrap(), - ValidationParams { - block_data: BlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, - Default::default(), - ) + .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) .await; assert_matches!( @@ -652,3 +650,65 @@ async fn artifact_does_reprepare_on_meaningful_exec_parameter_change() { assert_eq!(cache_dir_contents.len(), 3); // new artifact has been added } + +// Checks that we cannot prepare oversized compressed code +#[tokio::test] +async fn invalid_compressed_code_fails_prechecking() { + let host = TestHost::new().await; + let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; + let validation_code = + sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1).unwrap(); + + let res = host.precheck_pvf(&validation_code, Default::default()).await; + + assert_matches!(res, Err(PrepareError::CouldNotDecompressCodeBlob(_))); +} + +// Checks that we cannot validate with oversized compressed code +#[tokio::test] +async fn invalid_compressed_code_fails_validation() { + let host = TestHost::new().await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: BlockData(Vec::new()) }; + + let raw_code = vec![2u8; VALIDATION_CODE_BOMB_LIMIT + 1]; + let validation_code = + sp_maybe_compressed_blob::compress(&raw_code, VALIDATION_CODE_BOMB_LIMIT + 1).unwrap(); + + let result = host.validate_candidate(&validation_code, pvd, pov, Default::default()).await; + + assert_matches!( + result, + Err(ValidationError::Preparation(PrepareError::CouldNotDecompressCodeBlob(_))) + ); +} + +// Checks that we cannot validate with an oversized PoV +#[tokio::test] +async fn invalid_compressed_pov_fails_validation() { + let host = TestHost::new().await; + let pvd = PersistedValidationData { + parent_head: Default::default(), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let raw_block_data = vec![1u8; POV_BOMB_LIMIT + 1]; + let block_data = + sp_maybe_compressed_blob::compress(&raw_block_data, POV_BOMB_LIMIT + 1).unwrap(); + 
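+ // The raw payload is one byte over POV_BOMB_LIMIT, so the worker's bounded decompression must fail even though the compressed blob itself is small.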
let pov = PoV { block_data: BlockData(block_data) }; + + let result = host + .validate_candidate(test_parachain_halt::wasm_binary_unwrap(), pvd, pov, Default::default()) + .await; + + assert_matches!( + result, + Err(ValidationError::Invalid(InvalidCandidate::PoVDecompressionFailure)) + ); +} diff --git a/polkadot/node/core/pvf/tests/it/process.rs b/polkadot/node/core/pvf/tests/it/process.rs index b8fd2cdce0ce..b3023c8a45c3 100644 --- a/polkadot/node/core/pvf/tests/it/process.rs +++ b/polkadot/node/core/pvf/tests/it/process.rs @@ -23,11 +23,14 @@ use codec::Encode; use polkadot_node_core_pvf::{ InvalidCandidate, PossiblyInvalidError, PrepareError, ValidationError, }; +use polkadot_node_primitives::PoV; use polkadot_parachain_primitives::primitives::{ - BlockData as GenericBlockData, HeadData as GenericHeadData, ValidationParams, + BlockData as GenericBlockData, HeadData as GenericHeadData, }; +use polkadot_primitives::PersistedValidationData; use procfs::process; use rusty_fork::rusty_fork_test; +use sp_core::H256; use std::{future::Future, sync::Arc, time::Duration}; use test_parachain_adder::{hash_state, BlockData, HeadData}; @@ -125,15 +128,18 @@ rusty_fork_test! { test_wrapper(|host, _sid| async move { let parent_head = HeadData { number: 0, parent_hash: [0; 32], post_state: hash_state(0) }; let block_data = BlockData { state: 0, add: 512 }; + let pvd = PersistedValidationData { + parent_head: GenericHeadData(parent_head.encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(block_data.encode()) }; host .validate_candidate( test_parachain_adder::wasm_binary_unwrap(), - ValidationParams { - parent_head: GenericHeadData(parent_head.encode()), - block_data: GenericBlockData(block_data.encode()), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ) .await @@ -166,17 +172,20 @@ rusty_fork_test! { // Prepare the artifact ahead of time. let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); + let pvd = PersistedValidationData { + parent_head: GenericHeadData(HeadData::default().encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(Vec::new()) }; let (result, _) = futures::join!( // Choose an job that would normally take the entire timeout. host.validate_candidate( binary, - ValidationParams { - block_data: GenericBlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ), // Send a stop signal to pause the worker. @@ -218,17 +227,20 @@ rusty_fork_test! { // Prepare the artifact ahead of time. let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); + let pvd = PersistedValidationData { + parent_head: GenericHeadData(HeadData::default().encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(Vec::new()) }; let (result, _) = futures::join!( // Choose an job that would normally take the entire timeout. 
host.validate_candidate( binary, - ValidationParams { - block_data: GenericBlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ), // Run a future that kills the job while it's running. @@ -274,17 +286,20 @@ rusty_fork_test! { // Prepare the artifact ahead of time. let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); + let pvd = PersistedValidationData { + parent_head: GenericHeadData(HeadData::default().encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(Vec::new()) }; let (result, _) = futures::join!( // Choose a job that would normally take the entire timeout. host.validate_candidate( binary, - ValidationParams { - block_data: GenericBlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ), // Run a future that kills the job while it's running. @@ -342,17 +357,20 @@ rusty_fork_test! { // Prepare the artifact ahead of time. let binary = test_parachain_halt::wasm_binary_unwrap(); host.precheck_pvf(binary, Default::default()).await.unwrap(); + let pvd = PersistedValidationData { + parent_head: GenericHeadData(HeadData::default().encode()), + relay_parent_number: 1u32, + relay_parent_storage_root: H256::default(), + max_pov_size: 4096 * 1024, + }; + let pov = PoV { block_data: GenericBlockData(Vec::new()) }; let _ = futures::join!( // Choose a job that would normally take the entire timeout. host.validate_candidate( binary, - ValidationParams { - block_data: GenericBlockData(Vec::new()), - parent_head: Default::default(), - relay_parent_number: 1, - relay_parent_storage_root: Default::default(), - }, + pvd, + pov, Default::default(), ), // Run a future that tests the thread count while the worker is running. diff --git a/polkadot/node/overseer/src/lib.rs b/polkadot/node/overseer/src/lib.rs index 4e13d5eda76f..baaff9c7c9f6 100644 --- a/polkadot/node/overseer/src/lib.rs +++ b/polkadot/node/overseer/src/lib.rs @@ -466,7 +466,7 @@ pub async fn forward_events<P: BlockchainEvents<Block>>(client: Arc<P>

, mut hand message_capacity=2048, )] pub struct Overseer { - #[subsystem(blocking, CandidateValidationMessage, sends: [ + #[subsystem(CandidateValidationMessage, sends: [ RuntimeApiMessage, ])] candidate_validation: CandidateValidation, diff --git a/prdoc/pr_5142.prdoc b/prdoc/pr_5142.prdoc new file mode 100644 index 000000000000..4083e5bf53cd --- /dev/null +++ b/prdoc/pr_5142.prdoc @@ -0,0 +1,26 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "Move decompression to worker processes" + +doc: + - audience: Node Dev + description: | + Candidate validation subsystem performed the PVF code decompression as well as the PoV + decompression itself which might affect the subsystem main loop performance and required + it to run on the blocking threadpool. This change moves the decompression to PVF host + workers running synchronously in separate processes. + +crates: + - name: polkadot-node-core-candidate-validation + bump: patch + - name: polkadot-overseer + bump: patch + - name: polkadot-node-core-pvf + bump: major + - name: polkadot-node-core-pvf-common + bump: major + - name: polkadot-node-core-pvf-execute-worker + bump: patch + - name: polkadot-node-core-pvf-prepare-worker + bump: patch From 149c70938f2b29f8d92ba1cc952aeb63d4084e27 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Fri, 9 Aug 2024 18:11:50 +0200 Subject: [PATCH 15/74] Add missing features in templates' node packages (#5294) Corrects the issue we had [here](https://github.com/paritytech/polkadot-sdk-parachain-template/pull/10), in which `cargo build --release` worked but `cargo build --package parachain-template-node --release` failed with missing features. The command has been added to CI to make sure it works, but at the same we're changing it in the readme to just `cargo build --release` for simplification. Labeling silent because those packages are un-published as part of the regular release process. 
--------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Shawn Tabrizi Co-authored-by: Oliver Tale-Yazdi --- templates/minimal/README.md | 2 +- templates/minimal/node/Cargo.toml | 5 ++++- templates/parachain/README.md | 2 +- templates/parachain/node/Cargo.toml | 7 ++++++- templates/solochain/README.md | 2 +- templates/solochain/node/Cargo.toml | 5 ++++- 6 files changed, 17 insertions(+), 6 deletions(-) diff --git a/templates/minimal/README.md b/templates/minimal/README.md index b556a4536089..180c229e744e 100644 --- a/templates/minimal/README.md +++ b/templates/minimal/README.md @@ -42,7 +42,7 @@ packages required to compile this template - please take note of the Rust compil 🔨 Use the following command to build the node without launching it: ```sh -cargo build --package minimal-template-node --release +cargo build --release ``` 🐳 Alternatively, build the docker image: diff --git a/templates/minimal/node/Cargo.toml b/templates/minimal/node/Cargo.toml index 70b24c19f8e7..da5073ea687c 100644 --- a/templates/minimal/node/Cargo.toml +++ b/templates/minimal/node/Cargo.toml @@ -57,4 +57,7 @@ minimal-template-runtime = { workspace = true } substrate-build-script-utils = { workspace = true, default-features = true } [features] -default = [] +default = ["std"] +std = [ + "minimal-template-runtime/std", +] diff --git a/templates/parachain/README.md b/templates/parachain/README.md index 802d8586b39e..b7fa2bed0ed9 100644 --- a/templates/parachain/README.md +++ b/templates/parachain/README.md @@ -44,7 +44,7 @@ packages required to compile this template - please take note of the Rust compil 🔨 Use the following command to build the node without launching it: ```sh -cargo build --package parachain-template-node --release +cargo build --release ``` 🐳 Alternatively, build the docker image: diff --git a/templates/parachain/node/Cargo.toml b/templates/parachain/node/Cargo.toml index 7cf1f1fddc7b..c782888a3e89 100644 --- a/templates/parachain/node/Cargo.toml +++ b/templates/parachain/node/Cargo.toml @@ -79,7 +79,12 @@ color-print = { workspace = true } substrate-build-script-utils = { workspace = true, default-features = true } [features] -default = [] +default = ["std"] +std = [ + "log/std", + "parachain-template-runtime/std", + "xcm/std", +] runtime-benchmarks = [ "cumulus-primitives-core/runtime-benchmarks", "frame-benchmarking-cli/runtime-benchmarks", diff --git a/templates/solochain/README.md b/templates/solochain/README.md index c5dc5db7f3b5..6a5a7853f9c0 100644 --- a/templates/solochain/README.md +++ b/templates/solochain/README.md @@ -28,7 +28,7 @@ installation](#alternatives-installations) options. Use the following command to build the node without launching it: ```sh -cargo build --package solochain-template-node --release +cargo build --release ``` ### Embedded Docs diff --git a/templates/solochain/node/Cargo.toml b/templates/solochain/node/Cargo.toml index 6eebf3694e3a..9dd1b144d229 100644 --- a/templates/solochain/node/Cargo.toml +++ b/templates/solochain/node/Cargo.toml @@ -66,7 +66,10 @@ solochain-template-runtime = { workspace = true } substrate-build-script-utils = { workspace = true, default-features = true } [features] -default = [] +default = ["std"] +std = [ + "solochain-template-runtime/std", +] # Dependencies that are only required if runtime benchmarking should be build. 
runtime-benchmarks = [ "frame-benchmarking-cli/runtime-benchmarks", From 8ff07a4e8ad4c5bc228ba61044f43415266a88b8 Mon Sep 17 00:00:00 2001 From: georgepisaltu <52418509+georgepisaltu@users.noreply.github.com> Date: Fri, 9 Aug 2024 19:40:41 +0300 Subject: [PATCH 16/74] Split `trait Extrinsic` into logical components (#3736) Fixes #3571 Built on top of #3685. This PR breaks up `trait Extrinsic` into different self-contained interfaces as follows: - `ExtrinsicLike` for querying the type of extrinsic (signed/unsigned) - `CreateTransaction` for creating a transaction (general) - `CreateSignedTransaction` for creating a signed transaction (old-school) - `CreateInherent` for creating an inherent --------- Signed-off-by: georgepisaltu Co-authored-by: command-bot <> --- .../src/validate_block/implementation.rs | 6 +- .../storage-weight-reclaim/src/tests.rs | 80 ++------------ .../runtime/common/src/assigned_slots/mod.rs | 13 ++- .../runtime/common/src/integration_tests.rs | 13 ++- .../runtime/common/src/paras_registrar/mod.rs | 13 ++- .../parachains/src/disputes/slashing.rs | 7 +- polkadot/runtime/parachains/src/mock.rs | 13 ++- polkadot/runtime/parachains/src/paras/mod.rs | 7 +- polkadot/runtime/rococo/src/lib.rs | 57 +++++++--- polkadot/runtime/test-runtime/src/lib.rs | 42 +++++-- polkadot/runtime/westend/src/lib.rs | 57 +++++++--- polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs | 2 +- .../bin/node/cli/tests/submit_transaction.rs | 7 +- substrate/bin/node/runtime/src/lib.rs | 38 ++++++- substrate/client/transaction-pool/src/lib.rs | 2 +- substrate/frame/babe/src/equivocation.rs | 7 +- substrate/frame/babe/src/mock.rs | 13 ++- substrate/frame/beefy/src/equivocation.rs | 7 +- substrate/frame/beefy/src/mock.rs | 13 ++- .../election-provider-multi-phase/src/lib.rs | 4 +- .../election-provider-multi-phase/src/mock.rs | 13 ++- .../src/unsigned.rs | 10 +- .../test-staking-e2e/src/mock.rs | 13 ++- substrate/frame/examples/basic/src/lib.rs | 4 +- .../frame/examples/offchain-worker/src/lib.rs | 11 +- .../examples/offchain-worker/src/tests.rs | 36 +++++- substrate/frame/examples/tasks/src/lib.rs | 15 ++- substrate/frame/examples/tasks/src/mock.rs | 13 ++- substrate/frame/examples/tasks/src/tests.rs | 2 +- substrate/frame/grandpa/src/equivocation.rs | 7 +- substrate/frame/grandpa/src/mock.rs | 13 ++- substrate/frame/im-online/src/lib.rs | 7 +- substrate/frame/im-online/src/mock.rs | 13 ++- .../metadata-hash-extension/src/tests.rs | 2 +- substrate/frame/mixnet/src/lib.rs | 12 +- .../frame/offences/benchmarking/src/mock.rs | 13 ++- substrate/frame/sassafras/src/lib.rs | 7 +- substrate/frame/sassafras/src/mock.rs | 13 ++- .../src/construct_runtime/expand/inherent.rs | 10 +- .../src/construct_runtime/expand/metadata.rs | 8 +- substrate/frame/support/src/traits.rs | 8 +- substrate/frame/support/src/traits/misc.rs | 60 +++++++++- .../undefined_inherent_part.stderr | 3 +- substrate/frame/support/test/tests/pallet.rs | 69 ++++++++++-- substrate/frame/support/test/tests/runtime.rs | 7 +- .../test/tests/runtime_legacy_ordering.rs | 7 +- .../system/src/extensions/check_genesis.rs | 4 +- .../system/src/extensions/check_mortality.rs | 4 +- .../src/extensions/check_non_zero_sender.rs | 4 +- .../system/src/extensions/check_weight.rs | 3 +- substrate/frame/system/src/offchain.rs | 103 ++++++++++-------- .../frame/transaction-payment/src/lib.rs | 4 +- .../primitives/runtime/src/generic/block.rs | 2 +- .../primitives/runtime/src/generic/mod.rs | 3 +- .../src/generic/unchecked_extrinsic.rs | 36 ++---- 
substrate/primitives/runtime/src/lib.rs | 7 +- substrate/primitives/runtime/src/testing.rs | 11 +- .../primitives/runtime/src/traits/mod.rs | 41 ++++++- .../primitives/test-primitives/src/lib.rs | 15 +-- 59 files changed, 677 insertions(+), 337 deletions(-) diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index e3d80236a40d..e438a50a932a 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -33,7 +33,7 @@ use frame_support::traits::{ExecuteBlock, ExtrinsicCall, Get, IsSubType}; use sp_core::storage::{ChildInfo, StateVersion}; use sp_externalities::{set_and_run_with_externalities, Externalities}; use sp_io::KillStorageResult; -use sp_runtime::traits::{Block as BlockT, Extrinsic, HashingFor, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, ExtrinsicLike, HashingFor, Header as HeaderT}; use sp_trie::{MemoryDB, ProofSizeProvider}; use trie_recorder::SizeOnlyRecorderProvider; @@ -96,7 +96,7 @@ pub fn validate_block< ) -> ValidationResult where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { let block_data = codec::decode_from_bytes::>(block_data) .expect("Invalid parachain block data"); @@ -240,7 +240,7 @@ fn extract_parachain_inherent_data( ) -> &ParachainInherentData where B::Extrinsic: ExtrinsicCall, - ::Call: IsSubType>, + ::Call: IsSubType>, { block .extrinsics() diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs index 947b6014987f..479cef60a1d7 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/tests.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs @@ -94,13 +94,7 @@ fn basic_refund() { .unwrap(); assert_eq!(pre, Some(0)); - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); // We expect a refund of 400 assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, @@ -135,13 +129,7 @@ fn does_nothing_without_extension() { .unwrap(); assert_eq!(pre, None); - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, &info, @@ -174,13 +162,7 @@ fn negative_refund_is_added_to_weight() { assert_eq!(pre, Some(100)); // We expect no refund - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, &info, @@ -212,13 +194,7 @@ fn test_zero_proof_size() { .unwrap(); assert_eq!(pre, Some(0)); - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, &info, @@ -252,13 +228,7 @@ fn test_larger_pre_dispatch_proof_size() { assert_eq!(pre, Some(300)); // Refund 500 unspent weight according to `post_info`, total weight is now 1650 - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + 
assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); // Recorded proof size is negative -200, total weight is now 1450 assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, @@ -299,13 +269,7 @@ fn test_incorporates_check_weight_unspent_weight() { // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, &info, @@ -346,13 +310,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative() { // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. // Refunds unspent 25 weight according to `post_info`, 1175 - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); // Adds 200 - 25 (unspent) == 175 weight, total weight 1350 assert_ok!(StorageWeightReclaim::::post_dispatch_details( pre, @@ -397,13 +355,7 @@ fn test_nothing_relcaimed() { // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. // Nothing to refund, unspent is 0, total weight 250 - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); // `setup_test_externalities` proof recorder value: 200, so this means the extrinsic // actually used 100 proof size. // Nothing to refund or add, weight matches proof recorder @@ -457,13 +409,7 @@ fn test_incorporates_check_weight_unspent_weight_reverse_order() { // `CheckWeight` gets called after `StorageWeightReclaim` this time. // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. - assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); // Above call refunds 50 (unspent), total weight is 1350 now assert_eq!(get_storage_weight().total().proof_size(), 1350); @@ -505,13 +451,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative_reverse_order() { // `CheckWeight` gets called after `StorageWeightReclaim` this time. // The `CheckWeight` extension will refunt `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. 
- assert_ok!(CheckWeight::::post_dispatch_details( - (), - &info, - &post_info, - 0, - &Ok(()), - )); + assert_ok!(CheckWeight::::post_dispatch_details((), &info, &post_info, 0, &Ok(()),)); assert_eq!(get_storage_weight().total().proof_size(), 1350); }) diff --git a/polkadot/runtime/common/src/assigned_slots/mod.rs b/polkadot/runtime/common/src/assigned_slots/mod.rs index dd39789e10cf..7a57595972b0 100644 --- a/polkadot/runtime/common/src/assigned_slots/mod.rs +++ b/polkadot/runtime/common/src/assigned_slots/mod.rs @@ -664,12 +664,21 @@ mod tests { } ); - impl frame_system::offchain::SendTransactionTypes for Test + impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } #[derive_impl(frame_system::config_preludes::TestDefaultConfig)] diff --git a/polkadot/runtime/common/src/integration_tests.rs b/polkadot/runtime/common/src/integration_tests.rs index 7a689a517eaa..bfeed04a919f 100644 --- a/polkadot/runtime/common/src/integration_tests.rs +++ b/polkadot/runtime/common/src/integration_tests.rs @@ -98,12 +98,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } use crate::{auctions::Error as AuctionsError, crowdloan::Error as CrowdloanError}; diff --git a/polkadot/runtime/common/src/paras_registrar/mod.rs b/polkadot/runtime/common/src/paras_registrar/mod.rs index 07f02e926561..2ead621dedf0 100644 --- a/polkadot/runtime/common/src/paras_registrar/mod.rs +++ b/polkadot/runtime/common/src/paras_registrar/mod.rs @@ -752,12 +752,21 @@ mod tests { } ); - impl frame_system::offchain::SendTransactionTypes for Test + impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl frame_system::offchain::CreateInherent for Test + where + RuntimeCall: From, + { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } const NORMAL_RATIO: Perbill = Perbill::from_percent(75); diff --git a/polkadot/runtime/parachains/src/disputes/slashing.rs b/polkadot/runtime/parachains/src/disputes/slashing.rs index 4b76fb47e1f8..2e09ea667f74 100644 --- a/polkadot/runtime/parachains/src/disputes/slashing.rs +++ b/polkadot/runtime/parachains/src/disputes/slashing.rs @@ -653,7 +653,7 @@ impl Default for SlashingReportHandler { impl HandleReports for SlashingReportHandler where - T: Config + frame_system::offchain::SendTransactionTypes>, + T: Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, T::KeyOwnerIdentification, @@ -685,7 +685,7 @@ where dispute_proof: DisputeProof, key_owner_proof: ::KeyOwnerProof, ) -> Result<(), sp_runtime::TryRuntimeError> { - use frame_system::offchain::SubmitTransaction; + use 
frame_system::offchain::{CreateInherent, SubmitTransaction}; let session_index = dispute_proof.time_slot.session_index; let validator_index = dispute_proof.validator_index.0; @@ -696,7 +696,8 @@ where key_owner_proof, }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = >>::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => { log::info!( target: LOG_TARGET, diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index fbe9ebf809b3..a788dbf3c7ad 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -90,12 +90,21 @@ frame_support::construct_runtime!( } ); -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { diff --git a/polkadot/runtime/parachains/src/paras/mod.rs b/polkadot/runtime/parachains/src/paras/mod.rs index a4c404de2a65..3c54a1f7872b 100644 --- a/polkadot/runtime/parachains/src/paras/mod.rs +++ b/polkadot/runtime/parachains/src/paras/mod.rs @@ -615,7 +615,7 @@ pub mod pallet { frame_system::Config + configuration::Config + shared::Config - + frame_system::offchain::SendTransactionTypes> + + frame_system::offchain::CreateInherent> { type RuntimeEvent: From + IsType<::RuntimeEvent>; @@ -2189,9 +2189,8 @@ impl Pallet { ) { use frame_system::offchain::SubmitTransaction; - if let Err(e) = SubmitTransaction::>::submit_unsigned_transaction( - Call::include_pvf_check_statement { stmt, signature }.into(), - ) { + let xt = T::create_inherent(Call::include_pvf_check_statement { stmt, signature }.into()); + if let Err(e) = SubmitTransaction::>::submit_transaction(xt) { log::error!(target: LOG_TARGET, "Error submitting pvf check statement: {:?}", e,); } } diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index d8be6001e540..002bf10a26c6 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -99,9 +99,8 @@ use sp_core::{ConstU128, ConstU8, Get, OpaqueMetadata, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, - Extrinsic as ExtrinsicT, IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, - Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConstU32, ConvertInto, IdentityLookup, + Keccak256, OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, RuntimeDebug, @@ -604,18 +603,35 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } -/// Submits a transaction with the node's public and signature type. Adheres to the signed extension -/// format of the chain. 
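// Editor's note: every unsigned-submission call site in this patch follows
// the same two-step pattern; a sketch with the extraction-dropped generics
// restored (`T` is the runtime, `Call<T>` the pallet-local call type):
//
// Before:
//     SubmitTransaction::<T, Call<T>>::submit_unsigned_transaction(call.into())
//
// After: build the bare extrinsic explicitly, then submit it.
let xt = <T as CreateInherent<Call<T>>>::create_inherent(call.into());
let res = SubmitTransaction::<T, Call<T>>::submit_transaction(xt);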
+impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = UncheckedExtrinsic; + type RuntimeCall = RuntimeCall; +} + +/// Submits a transaction with the node's public and signature type. Adheres to the signed +/// extension format of the chain. impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + type SignaturePayload = UncheckedSignaturePayload; + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -650,21 +666,29 @@ where let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, tx_ext))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, tx_ext: Self::Extension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, tx_ext) + } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -1550,6 +1574,9 @@ pub type TxExtension = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// All migrations that will run on the next runtime upgrade. 
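// Editor's note: a sketch of the two recurring runtime impls shown above,
// with the extraction-dropped generics restored, assuming the rococo names:
impl<LocalCall> frame_system::offchain::CreateTransactionBase<LocalCall> for Runtime
where
    RuntimeCall: From<LocalCall>,
{
    type Extrinsic = UncheckedExtrinsic;
    type RuntimeCall = RuntimeCall;
}

impl<LocalCall> frame_system::offchain::CreateInherent<LocalCall> for Runtime
where
    RuntimeCall: From<LocalCall>,
{
    fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic {
        UncheckedExtrinsic::new_bare(call)
    }
}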
/// diff --git a/polkadot/runtime/test-runtime/src/lib.rs b/polkadot/runtime/test-runtime/src/lib.rs index 34d2ed31eef9..ef16b4a156df 100644 --- a/polkadot/runtime/test-runtime/src/lib.rs +++ b/polkadot/runtime/test-runtime/src/lib.rs @@ -80,8 +80,8 @@ use sp_runtime::{ curve::PiecewiseLinear, generic, impl_opaque_keys, traits::{ - BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, OpaqueKeys, - SaturatedConversion, StaticLookup, Verify, + BlakeTwo256, Block as BlockT, ConvertInto, OpaqueKeys, SaturatedConversion, StaticLookup, + Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, @@ -165,14 +165,34 @@ impl frame_system::Config for Runtime { type MaxConsumers = frame_support::traits::ConstU32<16>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: Self::RuntimeCall, extension: Self::Extension) -> Self::Extrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + parameter_types! { pub storage EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS as u64; pub storage ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -393,12 +413,16 @@ impl frame_system::offchain::CreateSignedTransaction for R where RuntimeCall: From, { - fn create_transaction>( + type SignaturePayload = UncheckedSignaturePayload; + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; @@ -426,7 +450,8 @@ where let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let (call, tx_ext, _) = raw_payload.deconstruct(); let address = Indices::unlookup(account); - Some((call, (address, signature, tx_ext))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } @@ -759,6 +784,9 @@ pub type TxExtension = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// Executive: handles dispatch to the various modules. 
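// Editor's note: the two type aliases added above lost their generic
// arguments in extraction; assuming this runtime's usual `Address`,
// `Signature` and `TxExtension` types, the intended definitions read:
//
//     pub type UncheckedExtrinsic =
//         generic::UncheckedExtrinsic<Address, RuntimeCall, Signature, TxExtension>;
//     pub type UncheckedSignaturePayload =
//         generic::UncheckedSignaturePayload<Address, Signature, TxExtension>;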
pub type Executive = frame_executive::Executive< diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index ac4829a2a93a..56fbc5902cfb 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -95,8 +95,8 @@ use sp_core::{ConstU8, OpaqueMetadata, RuntimeDebug, H256}; use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, traits::{ - AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, Extrinsic as ExtrinsicT, - IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify, + AccountIdConversion, BlakeTwo256, Block as BlockT, ConvertInto, IdentityLookup, Keccak256, + OpaqueKeys, SaturatedConversion, Verify, }, transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}, ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill, @@ -859,18 +859,46 @@ impl pallet_grandpa::Config for Runtime { pallet_grandpa::EquivocationReportSystem; } +impl frame_system::offchain::SigningTypes for Runtime { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + /// Submits a transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + type SignaturePayload = UncheckedSignaturePayload; + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: ::Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { use sp_runtime::traits::StaticLookup; // take the biggest period possible. let period = @@ -905,21 +933,18 @@ where let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let (call, tx_ext, _) = raw_payload.deconstruct(); let address = ::Lookup::unlookup(account); - Some((call, (address, signature, tx_ext))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) } } -impl frame_system::offchain::SigningTypes for Runtime { - type Public = ::Signer; - type Signature = Signature; -} - -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateInherent for Runtime where - RuntimeCall: From, + RuntimeCall: From, { - type OverarchingCall = RuntimeCall; - type Extrinsic = UncheckedExtrinsic; + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) + } } parameter_types! { @@ -1813,6 +1838,10 @@ pub mod migrations { /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; + /// Executive: handles dispatch to the various modules. 
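// Editor's note: the `CreateTransaction` impl above, generics restored
// (sketch): it turns a call plus an already-built `TxExtension` into a
// general, extension-carrying transaction.
impl<LocalCall> frame_system::offchain::CreateTransaction<LocalCall> for Runtime
where
    RuntimeCall: From<LocalCall>,
{
    type Extension = TxExtension;

    fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic {
        UncheckedExtrinsic::new_transaction(call, extension)
    }
}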
pub type Executive = frame_executive::Executive< Runtime, diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs index 4a12bb7f47c6..54f26c33f3f3 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/lib.rs @@ -62,7 +62,7 @@ const SEED: u32 = 0; /// The XCM executor to use for doing stuff. pub type ExecutorOf = xcm_executor::XcmExecutor<::XcmConfig>; /// The overarching call type. -pub type OverArchingCallOf = ::RuntimeCall; +pub type RuntimeCallOf = ::RuntimeCall; /// The asset transactor of our executor pub type AssetTransactorOf = <::XcmConfig as XcmConfig>::AssetTransactor; /// The call type of executor's config. Should eventually resolve to the same overarching call type. diff --git a/substrate/bin/node/cli/tests/submit_transaction.rs b/substrate/bin/node/cli/tests/submit_transaction.rs index b4434f1b6e28..d20baa1e410d 100644 --- a/substrate/bin/node/cli/tests/submit_transaction.rs +++ b/substrate/bin/node/cli/tests/submit_transaction.rs @@ -44,10 +44,9 @@ fn should_submit_unsigned_transaction() { }; let call = pallet_im_online::Call::heartbeat { heartbeat: heartbeat_data, signature }; - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ) - .unwrap(); + let xt = UncheckedExtrinsic::new_bare(call.into()); + SubmitTransaction::>::submit_transaction(xt) + .unwrap(); assert_eq!(state.read().transactions.len(), 1) }); diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index a2c1093b9418..7dcef8d45ee6 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1400,16 +1400,31 @@ parameter_types! { pub const MaxPeerInHeartbeats: u32 = 10_000; } +impl frame_system::offchain::CreateTransaction for Runtime +where + RuntimeCall: From, +{ + type Extension = TxExtension; + + fn create_transaction(call: RuntimeCall, extension: TxExtension) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_transaction(call, extension) + } +} + impl frame_system::offchain::CreateSignedTransaction for Runtime where RuntimeCall: From, { - fn create_transaction>( + type SignaturePayload = UncheckedSignaturePayload; + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, public: ::Signer, account: AccountId, nonce: Nonce, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { + ) -> Option { let tip = 0; // take the biggest period possible. 
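// Editor's note: as in the other runtimes touched by this patch, "biggest
// period possible" means the mortality era is sized from the available
// block-hash history, i.e. (per the test-runtime hunk above):
//
//     let period = BlockHashCount::get()
//         .checked_next_power_of_two()
//         .map(|c| c / 2)
//         .unwrap_or(2) as u64;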
let period = @@ -1447,7 +1462,17 @@ where let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); let (call, tx_ext, _) = raw_payload.deconstruct(); - Some((call, (address, signature, tx_ext))) + let transaction = UncheckedExtrinsic::new_signed(call, address, signature, tx_ext); + Some(transaction) + } +} + +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: RuntimeCall) -> UncheckedExtrinsic { + UncheckedExtrinsic::new_bare(call) } } @@ -1456,12 +1481,12 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { type Extrinsic = UncheckedExtrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; } impl pallet_im_online::Config for Runtime { @@ -2526,6 +2551,9 @@ pub type TxExtension = ( /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Unchecked signature payload type as expected by this runtime. +pub type UncheckedSignaturePayload = + generic::UncheckedSignaturePayload; /// The payload being signed in transactions. pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. diff --git a/substrate/client/transaction-pool/src/lib.rs b/substrate/client/transaction-pool/src/lib.rs index 730cfe367122..33062e7d3ac5 100644 --- a/substrate/client/transaction-pool/src/lib.rs +++ b/substrate/client/transaction-pool/src/lib.rs @@ -58,7 +58,7 @@ use sc_transaction_pool_api::{ use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ generic::BlockId, - traits::{AtLeast32Bit, Block as BlockT, Extrinsic, Header as HeaderT, NumberFor, Zero}, + traits::{AtLeast32Bit, Block as BlockT, ExtrinsicLike, Header as HeaderT, NumberFor, Zero}, }; use std::time::Instant; diff --git a/substrate/frame/babe/src/equivocation.rs b/substrate/frame/babe/src/equivocation.rs index 4be07bdae1f0..524ad23e58ee 100644 --- a/substrate/frame/babe/src/equivocation.rs +++ b/substrate/frame/babe/src/equivocation.rs @@ -100,7 +100,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. 
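// Editor's note: the report-system bound change in the hunk below, with
// generics restored (sketch): publishing the unsigned equivocation report
// now requires `CreateInherent` instead of `SendTransactionTypes`:
//
//     T: Config + pallet_authorship::Config
//         + frame_system::offchain::CreateInherent<Call<T>>,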
@@ -110,7 +110,7 @@ impl OffenceReportSystem, (EquivocationProof>, T::KeyOwnerProof)> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -132,7 +132,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/babe/src/mock.rs b/substrate/frame/babe/src/mock.rs index c436c29f3d7d..980779a93e20 100644 --- a/substrate/frame/babe/src/mock.rs +++ b/substrate/frame/babe/src/mock.rs @@ -69,14 +69,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + impl_opaque_keys! { pub struct MockSessionKeys { pub babe_authority: super::Pallet, diff --git a/substrate/frame/beefy/src/equivocation.rs b/substrate/frame/beefy/src/equivocation.rs index 15345e6ae199..3a49b9e169ce 100644 --- a/substrate/frame/beefy/src/equivocation.rs +++ b/substrate/frame/beefy/src/equivocation.rs @@ -118,7 +118,7 @@ where /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the authorship pallet. 
@@ -262,7 +262,7 @@ impl EquivocationEvidenceFor { impl OffenceReportSystem, EquivocationEvidenceFor> for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -278,7 +278,8 @@ where use frame_system::offchain::SubmitTransaction; let call: Call = evidence.into(); - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report."), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index 0e4b27525da7..01b92d2c9c6b 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -74,14 +74,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + #[derive(Clone, Debug, Decode, Encode, PartialEq, TypeInfo)] pub struct MockAncestryProofContext { pub is_valid: bool, diff --git a/substrate/frame/election-provider-multi-phase/src/lib.rs b/substrate/frame/election-provider-multi-phase/src/lib.rs index 09248e77848b..3efd055ad310 100644 --- a/substrate/frame/election-provider-multi-phase/src/lib.rs +++ b/substrate/frame/election-provider-multi-phase/src/lib.rs @@ -245,7 +245,7 @@ use frame_support::{ weights::Weight, DefaultNoBound, EqNoBound, PartialEqNoBound, }; -use frame_system::{ensure_none, offchain::SendTransactionTypes, pallet_prelude::BlockNumberFor}; +use frame_system::{ensure_none, offchain::CreateInherent, pallet_prelude::BlockNumberFor}; use scale_info::TypeInfo; use sp_arithmetic::{ traits::{CheckedAdd, Zero}, @@ -576,7 +576,7 @@ pub mod pallet { use sp_runtime::traits::Convert; #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { type RuntimeEvent: From> + IsType<::RuntimeEvent> + TryInto>; diff --git a/substrate/frame/election-provider-multi-phase/src/mock.rs b/substrate/frame/election-provider-multi-phase/src/mock.rs index ad968e53e3cc..2b925de4431a 100644 --- a/substrate/frame/election-provider-multi-phase/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/src/mock.rs @@ -421,14 +421,23 @@ impl Convert> for Runtime { } } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub type Extrinsic = sp_runtime::generic::UncheckedExtrinsic; parameter_types! 
{ diff --git a/substrate/frame/election-provider-multi-phase/src/unsigned.rs b/substrate/frame/election-provider-multi-phase/src/unsigned.rs index d9eabc7c4af8..191131ed3acc 100644 --- a/substrate/frame/election-provider-multi-phase/src/unsigned.rs +++ b/substrate/frame/election-provider-multi-phase/src/unsigned.rs @@ -31,7 +31,10 @@ use frame_support::{ traits::{DefensiveResult, Get}, BoundedVec, }; -use frame_system::{offchain::SubmitTransaction, pallet_prelude::BlockNumberFor}; +use frame_system::{ + offchain::{CreateInherent, SubmitTransaction}, + pallet_prelude::BlockNumberFor, +}; use scale_info::TypeInfo; use sp_npos_elections::{ assignment_ratio_to_staked_normalized, assignment_staked_to_ratio_normalized, ElectionResult, @@ -179,7 +182,7 @@ fn ocw_solution_exists() -> bool { matches!(StorageValueRef::persistent(OFFCHAIN_CACHED_CALL).get::>(), Ok(Some(_))) } -impl Pallet { +impl>> Pallet { /// Mine a new npos solution. /// /// The Npos Solver type, `S`, must have the same AccountId and Error type as the @@ -277,7 +280,8 @@ impl Pallet { fn submit_call(call: Call) -> Result<(), MinerError> { log!(debug, "miner submitting a solution as an unsigned transaction"); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| MinerError::PoolSubmissionFailed) } diff --git a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs index 2aeff44519eb..72bbb87fabe9 100644 --- a/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs +++ b/substrate/frame/election-provider-multi-phase/test-staking-e2e/src/mock.rs @@ -308,14 +308,23 @@ impl pallet_staking::Config for Runtime { type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub struct OnChainSeqPhragmen; parameter_types! 
{ diff --git a/substrate/frame/examples/basic/src/lib.rs b/substrate/frame/examples/basic/src/lib.rs index 8c1ac4f41281..db08da7833e7 100644 --- a/substrate/frame/examples/basic/src/lib.rs +++ b/substrate/frame/examples/basic/src/lib.rs @@ -493,8 +493,8 @@ impl TransactionExtensionBase for WatchDummy { const IDENTIFIER: &'static str = "WatchDummy"; type Implicit = (); } -impl - TransactionExtension<::RuntimeCall> for WatchDummy +impl TransactionExtension<::RuntimeCall> + for WatchDummy where ::RuntimeCall: IsSubType>, { diff --git a/substrate/frame/examples/offchain-worker/src/lib.rs b/substrate/frame/examples/offchain-worker/src/lib.rs index add014f6b34a..b3fdb6ea1897 100644 --- a/substrate/frame/examples/offchain-worker/src/lib.rs +++ b/substrate/frame/examples/offchain-worker/src/lib.rs @@ -53,8 +53,8 @@ use frame_support::traits::Get; use frame_system::{ self as system, offchain::{ - AppCrypto, CreateSignedTransaction, SendSignedTransaction, SendUnsignedTransaction, - SignedPayload, Signer, SigningTypes, SubmitTransaction, + AppCrypto, CreateInherent, CreateSignedTransaction, SendSignedTransaction, + SendUnsignedTransaction, SignedPayload, Signer, SigningTypes, SubmitTransaction, }, pallet_prelude::BlockNumberFor, }; @@ -124,7 +124,9 @@ pub mod pallet { /// This pallet's configuration trait #[pallet::config] - pub trait Config: CreateSignedTransaction> + frame_system::Config { + pub trait Config: + CreateSignedTransaction> + CreateInherent> + frame_system::Config + { /// The identifier type for an offchain worker. type AuthorityId: AppCrypto; @@ -501,7 +503,8 @@ impl Pallet { // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam // attack vectors. See validation logic docs for more details. // - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|()| "Unable to submit unsigned transaction.")?; Ok(()) diff --git a/substrate/frame/examples/offchain-worker/src/tests.rs b/substrate/frame/examples/offchain-worker/src/tests.rs index b612dfba7dfc..1f7e8bf9fa3b 100644 --- a/substrate/frame/examples/offchain-worker/src/tests.rs +++ b/substrate/frame/examples/offchain-worker/src/tests.rs @@ -31,7 +31,7 @@ use sp_core::{ use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use sp_runtime::{ generic::UncheckedExtrinsic, - traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify}, + traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify}, RuntimeAppPublic, }; @@ -80,25 +80,49 @@ impl frame_system::offchain::SigningTypes for Test { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateTransaction for Test +where + RuntimeCall: From, +{ + type Extension = (); + + fn create_transaction(call: RuntimeCall, _extension: Self::Extension) -> Extrinsic { + Extrinsic::new_transaction(call, ()) + } +} + impl frame_system::offchain::CreateSignedTransaction for Test where RuntimeCall: From, { - fn create_transaction>( + type SignaturePayload = (u64, (), ()); + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( call: RuntimeCall, _public: ::Signer, _account: AccountId, nonce: u64, - ) -> Option<(RuntimeCall, ::SignaturePayload)> { - 
Some((call, (nonce, (), ()))) + ) -> Option { + Some(Extrinsic::new_signed(call, nonce, (), ())) + } +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) } } diff --git a/substrate/frame/examples/tasks/src/lib.rs b/substrate/frame/examples/tasks/src/lib.rs index 1908a235ba15..7d51617497d6 100644 --- a/substrate/frame/examples/tasks/src/lib.rs +++ b/substrate/frame/examples/tasks/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::dispatch::DispatchResult; -use frame_system::offchain::SendTransactionTypes; +use frame_system::offchain::CreateInherent; #[cfg(feature = "experimental")] use frame_system::offchain::SubmitTransaction; // Re-export pallet items so that they can be accessed from the crate namespace. @@ -77,22 +77,21 @@ pub mod pallet { let call = frame_system::Call::::do_task { task: runtime_task.into() }; // Submit the task as an unsigned transaction - let res = - SubmitTransaction::>::submit_unsigned_transaction( - call.into(), - ); + let xt = >>::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => log::info!(target: LOG_TARGET, "Submitted the task."), Err(e) => log::error!(target: LOG_TARGET, "Error submitting task: {:?}", e), } } } + + #[cfg(not(feature = "experimental"))] + fn offchain_worker(_block_number: BlockNumberFor) {} } #[pallet::config] - pub trait Config: - SendTransactionTypes> + frame_system::Config - { + pub trait Config: CreateInherent> + frame_system::Config { type RuntimeTask: frame_support::traits::Task + IsType<::RuntimeTask> + From>; diff --git a/substrate/frame/examples/tasks/src/mock.rs b/substrate/frame/examples/tasks/src/mock.rs index 97591b287cbd..3c9e6e6d12d5 100644 --- a/substrate/frame/examples/tasks/src/mock.rs +++ b/substrate/frame/examples/tasks/src/mock.rs @@ -40,14 +40,23 @@ impl frame_system::Config for Runtime { type Block = Block; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + impl tasks_example::Config for Runtime { type RuntimeTask = RuntimeTask; type WeightInfo = (); diff --git a/substrate/frame/examples/tasks/src/tests.rs b/substrate/frame/examples/tasks/src/tests.rs index fe5a2d2844a3..4b31849c2ea9 100644 --- a/substrate/frame/examples/tasks/src/tests.rs +++ b/substrate/frame/examples/tasks/src/tests.rs @@ -157,7 +157,7 @@ fn task_with_offchain_worker() { let tx = pool_state.write().transactions.pop().unwrap(); assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); - use sp_runtime::traits::Extrinsic as ExtrinsicT; + use sp_runtime::traits::ExtrinsicLike; assert!(tx.is_bare()); }); } diff --git a/substrate/frame/grandpa/src/equivocation.rs b/substrate/frame/grandpa/src/equivocation.rs index b213c1ceb721..2366c957e9ab 100644 --- a/substrate/frame/grandpa/src/equivocation.rs +++ b/substrate/frame/grandpa/src/equivocation.rs @@ -110,7 +110,7 @@ impl Offence for EquivocationOffence { /// /// This type implements `OffenceReportSystem` such that: /// - Equivocation reports are 
published on-chain as unsigned extrinsic via -/// `offchain::SendTransactionTypes`. +/// `offchain::CreateTransactionBase`. /// - On-chain validity checks and processing are mostly delegated to the user provided generic /// types implementing `KeyOwnerProofSystem` and `ReportOffence` traits. /// - Offence reporter for unsigned transactions is fetched via the the authorship pallet. @@ -122,7 +122,7 @@ impl (EquivocationProof>, T::KeyOwnerProof), > for EquivocationReportSystem where - T: Config + pallet_authorship::Config + frame_system::offchain::SendTransactionTypes>, + T: Config + pallet_authorship::Config + frame_system::offchain::CreateInherent>, R: ReportOffence< T::AccountId, P::IdentificationTuple, @@ -144,7 +144,8 @@ where equivocation_proof: Box::new(equivocation_proof), key_owner_proof, }; - let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); + let xt = T::create_inherent(call.into()); + let res = SubmitTransaction::>::submit_transaction(xt); match res { Ok(_) => info!(target: LOG_TARGET, "Submitted equivocation report"), Err(e) => error!(target: LOG_TARGET, "Error submitting equivocation report: {:?}", e), diff --git a/substrate/frame/grandpa/src/mock.rs b/substrate/frame/grandpa/src/mock.rs index 490cd4ec8451..d3966810031c 100644 --- a/substrate/frame/grandpa/src/mock.rs +++ b/substrate/frame/grandpa/src/mock.rs @@ -69,14 +69,23 @@ impl frame_system::Config for Test { type AccountData = pallet_balances::AccountData; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + parameter_types! { pub const Period: u64 = 1; pub const Offset: u64 = 0; diff --git a/substrate/frame/im-online/src/lib.rs b/substrate/frame/im-online/src/lib.rs index ee2a8451d6fb..74d3bc6484dd 100644 --- a/substrate/frame/im-online/src/lib.rs +++ b/substrate/frame/im-online/src/lib.rs @@ -95,7 +95,7 @@ use frame_support::{ BoundedSlice, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::*, }; pub use pallet::*; @@ -261,7 +261,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: SendTransactionTypes> + frame_system::Config { + pub trait Config: CreateInherent> + frame_system::Config { /// The identifier type for an authority. 
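// Editor's note: the `Config` bound migration above, generics restored
// (sketch): a pallet that submits its own unsigned extrinsics now bounds
// its config on `CreateInherent` for the pallet-local call type:
//
//     pub trait Config: CreateInherent<Call<Self>> + frame_system::Config {
//         /* ... */
//     }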
type AuthorityId: Member + Parameter @@ -642,7 +642,8 @@ impl Pallet { call, ); - SubmitTransaction::>::submit_unsigned_transaction(call.into()) + let xt = T::create_inherent(call.into()); + SubmitTransaction::>::submit_transaction(xt) .map_err(|_| OffchainErr::SubmitTransaction)?; Ok(()) diff --git a/substrate/frame/im-online/src/mock.rs b/substrate/frame/im-online/src/mock.rs index a7e48e2d3563..d912b7c2231d 100644 --- a/substrate/frame/im-online/src/mock.rs +++ b/substrate/frame/im-online/src/mock.rs @@ -187,14 +187,23 @@ impl Config for Runtime { type MaxPeerInHeartbeats = ConstU32<10_000>; } -impl frame_system::offchain::SendTransactionTypes for Runtime +impl frame_system::offchain::CreateTransactionBase for Runtime where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = Extrinsic; } +impl frame_system::offchain::CreateInherent for Runtime +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } +} + pub fn advance_session() { let now = System::block_number().max(1); System::set_block_number(now + 1); diff --git a/substrate/frame/metadata-hash-extension/src/tests.rs b/substrate/frame/metadata-hash-extension/src/tests.rs index 289307e41f31..8dfb8d85b14d 100644 --- a/substrate/frame/metadata-hash-extension/src/tests.rs +++ b/substrate/frame/metadata-hash-extension/src/tests.rs @@ -25,7 +25,7 @@ use frame_support::{ use merkleized_metadata::{generate_metadata_digest, ExtraInfo}; use sp_api::{Metadata, ProvideRuntimeApi}; use sp_runtime::{ - traits::{Extrinsic as _, TransactionExtensionBase}, + traits::{ExtrinsicLike, TransactionExtensionBase}, transaction_validity::{TransactionSource, UnknownTransaction}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; diff --git a/substrate/frame/mixnet/src/lib.rs b/substrate/frame/mixnet/src/lib.rs index c0505a4f0105..5ba14d3884e0 100644 --- a/substrate/frame/mixnet/src/lib.rs +++ b/substrate/frame/mixnet/src/lib.rs @@ -31,7 +31,7 @@ use frame_support::{ BoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; pub use pallet::*; @@ -178,7 +178,7 @@ pub mod pallet { pub struct Pallet(_); #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// The maximum number of authorities per session. #[pallet::constant] type MaxAuthorities: Get; @@ -361,7 +361,10 @@ pub mod pallet { } } -impl Pallet { +impl Pallet +where + T:, +{ /// Returns the phase of the current session. 
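// Editor's note: the `impl Pallet where T:,` fragment in the mixnet hunk
// above appears to be extraction damage; the conventional (assumed)
// restored form is simply:
//
//     impl<T: Config> Pallet<T> {
//         /* ... */
//     }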
fn session_phase() -> SessionPhase { let block_in_phase = frame_system::Pallet::::block_number() @@ -531,7 +534,8 @@ impl Pallet { return false }; let call = Call::register { registration, signature }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(()) => true, Err(()) => { log::debug!( diff --git a/substrate/frame/offences/benchmarking/src/mock.rs b/substrate/frame/offences/benchmarking/src/mock.rs index 23f803feb53c..cf5a5e5d31a7 100644 --- a/substrate/frame/offences/benchmarking/src/mock.rs +++ b/substrate/frame/offences/benchmarking/src/mock.rs @@ -157,12 +157,21 @@ impl pallet_offences::Config for Test { type OnOffenceHandler = Staking; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; +} + +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + Extrinsic::new_bare(call) + } } impl crate::Config for Test {} diff --git a/substrate/frame/sassafras/src/lib.rs b/substrate/frame/sassafras/src/lib.rs index 285758afbe6d..f6c409833e33 100644 --- a/substrate/frame/sassafras/src/lib.rs +++ b/substrate/frame/sassafras/src/lib.rs @@ -61,7 +61,7 @@ use frame_support::{ BoundedVec, WeakBoundedVec, }; use frame_system::{ - offchain::{SendTransactionTypes, SubmitTransaction}, + offchain::{CreateInherent, SubmitTransaction}, pallet_prelude::BlockNumberFor, }; use sp_consensus_sassafras::{ @@ -131,7 +131,7 @@ pub mod pallet { /// Configuration parameters. #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> { + pub trait Config: frame_system::Config + CreateInherent> { /// Amount of slots that each epoch should last. 
#[pallet::constant] type EpochLength: Get; @@ -1020,7 +1020,8 @@ impl Pallet { pub fn submit_tickets_unsigned_extrinsic(tickets: Vec) -> bool { let tickets = BoundedVec::truncate_from(tickets); let call = Call::submit_tickets { tickets }; - match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { + let xt = T::create_inherent(call.into()); + match SubmitTransaction::>::submit_transaction(xt) { Ok(_) => true, Err(e) => { error!(target: LOG_TARGET, "Error submitting tickets {:?}", e); diff --git a/substrate/frame/sassafras/src/mock.rs b/substrate/frame/sassafras/src/mock.rs index 2b44fb992648..5fdd27cb99af 100644 --- a/substrate/frame/sassafras/src/mock.rs +++ b/substrate/frame/sassafras/src/mock.rs @@ -49,14 +49,23 @@ impl frame_system::Config for Test { type Block = frame_system::mocking::MockBlock; } -impl frame_system::offchain::SendTransactionTypes for Test +impl frame_system::offchain::CreateTransactionBase for Test where RuntimeCall: From, { - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; type Extrinsic = UncheckedExtrinsic; } +impl frame_system::offchain::CreateInherent for Test +where + RuntimeCall: From, +{ + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } +} + impl pallet_sassafras::Config for Test { type EpochLength = ConstU32; type MaxAuthorities = ConstU32; diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs index 0bf4e8e76c63..e34c6ac5016a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/inherent.rs @@ -66,14 +66,14 @@ pub fn expand_outer_inherent( fn create_extrinsics(&self) -> #scrate::__private::Vec<<#block as #scrate::sp_runtime::traits::Block>::Extrinsic> { - use #scrate::inherent::ProvideInherent; + use #scrate::{inherent::ProvideInherent, traits::InherentBuilder}; let mut inherents = #scrate::__private::Vec::new(); #( #pallet_attrs if let Some(inherent) = #pallet_names::create_inherent(self) { - let inherent = <#unchecked_extrinsic as #scrate::sp_runtime::traits::Extrinsic>::new_inherent( + let inherent = <#unchecked_extrinsic as InherentBuilder>::new_inherent( inherent.into(), ); @@ -121,7 +121,7 @@ pub fn expand_outer_inherent( for xt in block.extrinsics() { // Inherents are before any other extrinsics. // And signed extrinsics are not inherents. - if !(#scrate::sp_runtime::traits::Extrinsic::is_bare(xt)) { + if !(#scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt)) { break } @@ -159,7 +159,7 @@ pub fn expand_outer_inherent( match #pallet_names::is_inherent_required(self) { Ok(Some(e)) => { let found = block.extrinsics().iter().any(|xt| { - let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(xt); + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(xt); if is_bare { let call = < @@ -206,7 +206,7 @@ pub fn expand_outer_inherent( use #scrate::inherent::ProvideInherent; use #scrate::traits::{IsSubType, ExtrinsicCall}; - let is_bare = #scrate::sp_runtime::traits::Extrinsic::is_bare(ext); + let is_bare = #scrate::sp_runtime::traits::ExtrinsicLike::is_bare(ext); if !is_bare { // Inherents must be bare extrinsics. 
return false diff --git a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs index cc7da70d0545..a9923b5da65a 100644 --- a/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs +++ b/substrate/frame/support/procedural/src/construct_runtime/expand/metadata.rs @@ -98,16 +98,16 @@ pub fn expand_runtime_metadata( let ty = #scrate::__private::scale_info::meta_type::<#extrinsic>(); let address_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureAddress + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Address >(); let call_ty = #scrate::__private::scale_info::meta_type::< - <#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::Call + <#extrinsic as #scrate::traits::ExtrinsicCall>::Call >(); let signature_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::Signature + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Signature >(); let extra_ty = #scrate::__private::scale_info::meta_type::< - <<#extrinsic as #scrate::sp_runtime::traits::Extrinsic>::SignaturePayload as #scrate::sp_runtime::traits::SignaturePayload>::SignatureExtra + <#extrinsic as #scrate::traits::SignedTransactionBuilder>::Extension >(); #scrate::__private::metadata_ir::MetadataIR { diff --git a/substrate/frame/support/src/traits.rs b/substrate/frame/support/src/traits.rs index a423656c394f..eb35a6873ab0 100644 --- a/substrate/frame/support/src/traits.rs +++ b/substrate/frame/support/src/traits.rs @@ -59,10 +59,10 @@ pub use misc::{ AccountTouch, Backing, ConstBool, ConstI128, ConstI16, ConstI32, ConstI64, ConstI8, ConstU128, ConstU16, ConstU32, ConstU64, ConstU8, DefensiveMax, DefensiveMin, DefensiveSaturating, DefensiveTruncateFrom, EnsureInherentsAreFirst, EqualPrivilegeOnly, EstimateCallFee, - ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, IsInherent, - IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, PrivilegeCmp, - SameOrOther, Time, TryCollect, TryDrop, TypedGet, UnixTime, VariantCount, VariantCountOf, - WrapperKeepOpaque, WrapperOpaque, + ExecuteBlock, ExtrinsicCall, Get, GetBacking, GetDefault, HandleLifetime, InherentBuilder, + IsInherent, IsSubType, IsType, Len, OffchainWorker, OnKilledAccount, OnNewAccount, + PrivilegeCmp, SameOrOther, SignedTransactionBuilder, Time, TryCollect, TryDrop, TypedGet, + UnixTime, VariantCount, VariantCountOf, WrapperKeepOpaque, WrapperOpaque, }; #[allow(deprecated)] pub use misc::{PreimageProvider, PreimageRecipient}; diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 261a86106b1a..eba9fce28e54 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -915,7 +915,9 @@ pub trait IsInherent { } /// An extrinsic on which we can get access to call. -pub trait ExtrinsicCall: sp_runtime::traits::Extrinsic { +pub trait ExtrinsicCall: sp_runtime::traits::ExtrinsicLike { + type Call; + /// Get the call of the extrinsic. 
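// Editor's note: generics restored for the builder traits this hunk
// introduces in `frame_support::traits::misc` (extraction dropped the
// angle brackets); a sketch of the intended shapes:
pub trait ExtrinsicCall: sp_runtime::traits::ExtrinsicLike {
    type Call;

    /// Get the call of the extrinsic.
    fn call(&self) -> &Self::Call;
}

pub trait InherentBuilder: ExtrinsicCall {
    fn new_inherent(call: Self::Call) -> Self;
}

pub trait SignedTransactionBuilder: ExtrinsicCall {
    type Address;
    type Signature;
    type Extension;

    fn new_signed_transaction(
        call: Self::Call,
        signed: Self::Address,
        signature: Self::Signature,
        tx_ext: Self::Extension,
    ) -> Self;
}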
fn call(&self) -> &Self::Call; } @@ -928,11 +930,65 @@ where Signature: TypeInfo, Extra: TypeInfo, { - fn call(&self) -> &Self::Call { + type Call = Call; + + fn call(&self) -> &Call { &self.function } } +pub trait InherentBuilder: ExtrinsicCall { + fn new_inherent(call: Self::Call) -> Self; +} + +impl InherentBuilder + for sp_runtime::generic::UncheckedExtrinsic +where + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, + Extra: TypeInfo, +{ + fn new_inherent(call: Self::Call) -> Self { + Self::new_bare(call) + } +} + +pub trait SignedTransactionBuilder: ExtrinsicCall { + type Address; + type Signature; + type Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Self::Address, + signature: Self::Signature, + tx_ext: Self::Extension, + ) -> Self; +} + +impl SignedTransactionBuilder + for sp_runtime::generic::UncheckedExtrinsic +where + Address: TypeInfo, + Call: TypeInfo, + Signature: TypeInfo, + Extension: TypeInfo, +{ + type Address = Address; + type Signature = Signature; + type Extension = Extension; + + fn new_signed_transaction( + call: Self::Call, + signed: Address, + signature: Signature, + tx_ext: Extension, + ) -> Self { + Self::new_signed(call, signed, signature, tx_ext) + } +} + /// Something that can estimate the fee of a (frame-based) call. /// /// Typically, the same pallet that will charge transaction fees will implement this. diff --git a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 30005c07cb63..12ecfe303a2b 100644 --- a/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/substrate/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -32,8 +32,9 @@ error[E0599]: no function or associated item named `create_inherent` found for s | |_^ function or associated item not found in `Pallet` | = help: items from traits can only be used if the trait is implemented and in scope - = note: the following trait defines an item `create_inherent`, perhaps you need to implement it: + = note: the following traits define an item `create_inherent`, perhaps you need to implement one of them: candidate #1: `ProvideInherent` + candidate #2: `CreateInherent` = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0599]: no function or associated item named `is_inherent` found for struct `pallet::Pallet` in the current scope diff --git a/substrate/frame/support/test/tests/pallet.rs b/substrate/frame/support/test/tests/pallet.rs index 80a1d9945d99..4ca87ba7f06d 100644 --- a/substrate/frame/support/test/tests/pallet.rs +++ b/substrate/frame/support/test/tests/pallet.rs @@ -30,13 +30,14 @@ use frame_support::{ weights::{RuntimeDbWeight, Weight}, OrdNoBound, PartialOrdNoBound, }; +use frame_system::offchain::{CreateSignedTransaction, CreateTransactionBase, SigningTypes}; use scale_info::{meta_type, TypeInfo}; use sp_io::{ hashing::{blake2_128, twox_128, twox_64}, TestExternalities, }; use sp_runtime::{ - traits::{Dispatchable, Extrinsic as ExtrinsicT, SignaturePayload as SignaturePayloadT}, + traits::{Dispatchable, SignaturePayload as SignaturePayloadT}, DispatchError, ModuleError, }; @@ -742,7 +743,9 @@ impl pallet5::Config for Runtime { type RuntimeOrigin = RuntimeOrigin; } -#[derive(Clone, Debug, codec::Encode, codec::Decode, PartialEq, Eq, scale_info::TypeInfo)] +#[derive( + Clone, Debug, 
codec::Encode, codec::Decode, PartialEq, Eq, PartialOrd, Ord, scale_info::TypeInfo, +)] pub struct AccountU64(u64); impl sp_runtime::traits::IdentifyAccount for AccountU64 { type AccountId = u64; @@ -776,6 +779,47 @@ pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic< AccountU64, frame_system::CheckNonZeroSender, >; +pub type UncheckedSignaturePayload = sp_runtime::generic::UncheckedSignaturePayload< + u64, + AccountU64, + frame_system::CheckNonZeroSender, +>; + +impl SigningTypes for Runtime { + type Public = AccountU64; + type Signature = AccountU64; +} + +impl CreateTransactionBase for Runtime +where + RuntimeCall: From, +{ + type RuntimeCall = RuntimeCall; + type Extrinsic = UncheckedExtrinsic; +} + +impl CreateSignedTransaction for Runtime +where + RuntimeCall: From, +{ + type SignaturePayload = UncheckedSignaturePayload; + + fn create_signed_transaction< + C: frame_system::offchain::AppCrypto, + >( + call: RuntimeCall, + _public: AccountU64, + account: u64, + nonce: u64, + ) -> Option { + Some(UncheckedExtrinsic::new_signed( + call, + nonce, + account.into(), + frame_system::CheckNonZeroSender::new(), + )) + } +} frame_support::construct_runtime!( pub struct Runtime { @@ -1855,12 +1899,12 @@ fn metadata() { ty: meta_type::<()>(), additional_signed: meta_type::<()>(), }], - address_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), - call_ty: meta_type::<::Call>(), + address_ty: meta_type::<<>::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), + call_ty: meta_type::<>::RuntimeCall>(), signature_ty: meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature + <>::SignaturePayload as SignaturePayloadT>::Signature >(), - extra_ty: meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), + extra_ty: meta_type::<<>::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), }; let outer_enums = OuterEnums { @@ -1940,21 +1984,24 @@ fn metadata_ir_pallet_runtime_docs() { fn extrinsic_metadata_ir_types() { let ir = Runtime::metadata_ir().extrinsic; - assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), ir.address_ty); + assert_eq!(meta_type::<<>::SignaturePayload as SignaturePayloadT>::SignatureAddress>(), ir.address_ty); assert_eq!(meta_type::(), ir.address_ty); - assert_eq!(meta_type::<::Call>(), ir.call_ty); + assert_eq!( + meta_type::<>::RuntimeCall>(), + ir.call_ty + ); assert_eq!(meta_type::(), ir.call_ty); assert_eq!( meta_type::< - <::SignaturePayload as SignaturePayloadT>::Signature, - >(), + <>::SignaturePayload as SignaturePayloadT>::Signature + >(), ir.signature_ty ); assert_eq!(meta_type::(), ir.signature_ty); - assert_eq!(meta_type::<<::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); + assert_eq!(meta_type::<<>::SignaturePayload as SignaturePayloadT>::SignatureExtra>(), ir.extra_ty); assert_eq!(meta_type::>(), ir.extra_ty); } diff --git a/substrate/frame/support/test/tests/runtime.rs b/substrate/frame/support/test/tests/runtime.rs index d611138e6612..6b7f770b794a 100644 --- a/substrate/frame/support/test/tests/runtime.rs +++ b/substrate/frame/support/test/tests/runtime.rs @@ -533,8 +533,11 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = frame_system::Event::::ExtrinsicSuccess { weight: Default::default(), class: Default::default(), - pays: Default::default(), }; + let event = frame_system::Event::::ExtrinsicSuccess { + weight: Default::default(), + class: Default::default(), + pays: Default::default(), + }; 
assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); diff --git a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs index 06afe333c4ac..276830d8680c 100644 --- a/substrate/frame/support/test/tests/runtime_legacy_ordering.rs +++ b/substrate/frame/support/test/tests/runtime_legacy_ordering.rs @@ -533,8 +533,11 @@ fn origin_codec() { fn event_codec() { use codec::Encode; - let event = frame_system::Event::::ExtrinsicSuccess { weight: Default::default(), class: Default::default(), - pays: Default::default(), }; + let event = frame_system::Event::::ExtrinsicSuccess { + weight: Default::default(), + class: Default::default(), + pays: Default::default(), + }; assert_eq!(RuntimeEvent::from(event).encode()[0], 30); let event = module1::Event::::A(test_pub()); diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index 9c99ef32b6f8..d5baac157315 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -63,9 +63,7 @@ impl TransactionExtensionBase for CheckGenesis { ::check_genesis() } } -impl TransactionExtension - for CheckGenesis -{ +impl TransactionExtension for CheckGenesis { type Val = (); type Pre = (); impl_tx_ext_default!(T::RuntimeCall; validate prepare); diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 58823e57c22e..88d4fe1e6b2a 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -76,9 +76,7 @@ impl TransactionExtensionBase for CheckMortality { ::check_mortality() } } -impl TransactionExtension - for CheckMortality -{ +impl TransactionExtension for CheckMortality { type Pre = (); type Val = (); diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index 00177c50c3a2..ceecdc532dae 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -59,9 +59,7 @@ impl TransactionExtensionBase for CheckNonZeroSender ::check_non_zero_sender() } } -impl TransactionExtension - for CheckNonZeroSender -{ +impl TransactionExtension for CheckNonZeroSender { type Val = (); type Pre = (); fn validate( diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 4cfc7378df6e..0d64268d32ab 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -216,8 +216,7 @@ impl TransactionExtensionBase for CheckWeight { ::check_weight() } } -impl TransactionExtension - for CheckWeight +impl TransactionExtension for CheckWeight where T::RuntimeCall: Dispatchable, { diff --git a/substrate/frame/system/src/offchain.rs b/substrate/frame/system/src/offchain.rs index b58ea134a7ec..ea7ae8b40f1b 100644 --- a/substrate/frame/system/src/offchain.rs +++ b/substrate/frame/system/src/offchain.rs @@ -58,9 +58,10 @@ use alloc::{boxed::Box, collections::btree_set::BTreeSet, vec::Vec}; use codec::Encode; +use scale_info::TypeInfo; use sp_runtime::{ app_crypto::RuntimeAppPublic, - traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}, + traits::{ExtrinsicLike, IdentifyAccount, One, 
SignaturePayload}, RuntimeDebug, }; @@ -75,34 +76,18 @@ pub struct ForAny {} /// For submitting unsigned transactions, `submit_unsigned_transaction` /// utility function can be used. However, this struct is used by `Signer` /// to submit a signed transactions providing the signature along with the call. -pub struct SubmitTransaction, OverarchingCall> { - _phantom: core::marker::PhantomData<(T, OverarchingCall)>, +pub struct SubmitTransaction, RuntimeCall> { + _phantom: core::marker::PhantomData<(T, RuntimeCall)>, } -// TODO [#2415]: Avoid splitting call and the totally opaque `signature`; `CreateTransaction` trait -// should provide something which impls `Encode`, which can be sent onwards to -// `sp_io::offchain::submit_transaction`. There's no great need to split things up as in here. impl SubmitTransaction where - T: SendTransactionTypes, + T: CreateTransactionBase, { - /// Submit transaction onchain by providing the call and an optional signature - pub fn submit_transaction( - call: >::OverarchingCall, - signature: Option<::SignaturePayload>, - ) -> Result<(), ()> { - // TODO: Use regular transaction API instead. - #[allow(deprecated)] - let xt = T::Extrinsic::new(call, signature).ok_or(())?; + /// A convenience method to submit an extrinsic onchain. + pub fn submit_transaction(xt: T::Extrinsic) -> Result<(), ()> { sp_io::offchain::submit_transaction(xt.encode()) } - - /// A convenience method to submit an unsigned transaction onchain. - pub fn submit_unsigned_transaction( - call: >::OverarchingCall, - ) -> Result<(), ()> { - SubmitTransaction::::submit_transaction(call, None) - } } /// Provides an implementation for signing transaction payloads. @@ -289,7 +274,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -315,7 +300,7 @@ impl< } impl< - T: SigningTypes + SendTransactionTypes, + T: SigningTypes + CreateInherent, C: AppCrypto, LocalCall, > SendUnsignedTransaction for Signer @@ -462,38 +447,54 @@ pub trait SigningTypes: crate::Config { type Signature: Clone + PartialEq + core::fmt::Debug + codec::Codec + scale_info::TypeInfo; } -/// A definition of types required to submit transactions from within the runtime. -pub trait SendTransactionTypes { - /// The extrinsic type expected by the runtime. - type Extrinsic: ExtrinsicT + codec::Encode; +/// Common interface for the `CreateTransaction` trait family to unify the `Call` type. +pub trait CreateTransactionBase { + /// The extrinsic. + type Extrinsic: ExtrinsicLike + Encode; + /// The runtime's call type. /// /// This has additional bound to be able to be created from pallet-local `Call` types. - type OverarchingCall: From + codec::Encode; + type RuntimeCall: From + Encode; } -/// Create signed transaction. -/// -/// This trait is meant to be implemented by the runtime and is responsible for constructing -/// a payload to be signed and contained within the extrinsic. -/// This will most likely include creation of `TxExtension` (a tuple of `TransactionExtension`s). -/// Note that the result can be altered by inspecting the `Call` (for instance adjusting -/// fees, or mortality depending on the `pallet` being called). +/// Interface for creating a transaction. +pub trait CreateTransaction: CreateTransactionBase { + /// The extension. + type Extension: TypeInfo; + + /// Create a transaction using the call and the desired transaction extension. 
+ fn create_transaction( + call: >::RuntimeCall, + extension: Self::Extension, + ) -> Self::Extrinsic; +} + +/// Interface for creating an old-school signed transaction. pub trait CreateSignedTransaction: - SendTransactionTypes + SigningTypes + CreateTransactionBase + SigningTypes { + /// TODO: revisit this, currently here for metadata purposes. + type SignaturePayload: SignaturePayload; + /// Attempt to create signed extrinsic data that encodes call from given account. /// /// Runtime implementation is free to construct the payload to sign and the signature /// in any way it wants. /// Returns `None` if signed extrinsic could not be created (either because signing failed /// or because of any other runtime-specific reason). - fn create_transaction>( - call: Self::OverarchingCall, + fn create_signed_transaction>( + call: >::RuntimeCall, public: Self::Public, account: Self::AccountId, nonce: Self::Nonce, - ) -> Option<(Self::OverarchingCall, ::SignaturePayload)>; + ) -> Option; +} + +/// Interface for creating an inherent. +pub trait CreateInherent: CreateTransactionBase { + /// Create an inherent. + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic; } /// A message signer. @@ -521,7 +522,7 @@ pub trait SignMessage { /// Submit a signed transaction to the transaction pool. pub trait SendSignedTransaction< - T: SigningTypes + CreateSignedTransaction, + T: CreateSignedTransaction, C: AppCrypto, LocalCall, > @@ -552,13 +553,14 @@ pub trait SendSignedTransaction< account.id, account_data.nonce, ); - let (call, signature) = T::create_transaction::( + let transaction = T::create_signed_transaction::( call.into(), account.public.clone(), account.id.clone(), account_data.nonce, )?; - let res = SubmitTransaction::::submit_transaction(call, Some(signature)); + + let res = SubmitTransaction::::submit_transaction(transaction); if res.is_ok() { // increment the nonce. This is fine, since the code should always @@ -572,7 +574,7 @@ pub trait SendSignedTransaction< } /// Submit an unsigned transaction onchain with a signed payload -pub trait SendUnsignedTransaction, LocalCall> { +pub trait SendUnsignedTransaction, LocalCall> { /// A submission result. /// /// Should contain the submission result and the account(s) that signed the payload. @@ -595,7 +597,8 @@ pub trait SendUnsignedTransaction Option> { - Some(SubmitTransaction::::submit_unsigned_transaction(call.into())) + let xt = T::create_inherent(call.into()); + Some(SubmitTransaction::::submit_transaction(xt)) } } @@ -638,9 +641,15 @@ mod tests { type Extrinsic = UncheckedExtrinsic; - impl SendTransactionTypes for TestRuntime { + impl CreateTransactionBase for TestRuntime { type Extrinsic = Extrinsic; - type OverarchingCall = RuntimeCall; + type RuntimeCall = RuntimeCall; + } + + impl CreateInherent for TestRuntime { + fn create_inherent(call: Self::RuntimeCall) -> Self::Extrinsic { + UncheckedExtrinsic::new_bare(call) + } } #[derive(codec::Encode, codec::Decode)] diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 5cc9cf22d56e..194bfb92015a 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -505,7 +505,7 @@ impl Pallet { /// /// All dispatchables must be annotated with weight and will have some fee info. This function /// always returns. 
- pub fn query_info( + pub fn query_info( unchecked_extrinsic: Extrinsic, len: u32, ) -> RuntimeDispatchInfo> @@ -532,7 +532,7 @@ impl Pallet { } /// Query the detailed fee of a given `call`. - pub fn query_fee_details( + pub fn query_fee_details( unchecked_extrinsic: Extrinsic, len: u32, ) -> FeeDetails> diff --git a/substrate/primitives/runtime/src/generic/block.rs b/substrate/primitives/runtime/src/generic/block.rs index 8ed79c7c8dcf..a084a3703f9e 100644 --- a/substrate/primitives/runtime/src/generic/block.rs +++ b/substrate/primitives/runtime/src/generic/block.rs @@ -99,7 +99,7 @@ where impl traits::Block for Block where Header: HeaderT + MaybeSerializeDeserialize, - Extrinsic: Member + Codec + traits::Extrinsic, + Extrinsic: Member + Codec + traits::ExtrinsicLike, { type Extrinsic = Extrinsic; type Header = Header; diff --git a/substrate/primitives/runtime/src/generic/mod.rs b/substrate/primitives/runtime/src/generic/mod.rs index 4a1c0df395a9..488c55d4e204 100644 --- a/substrate/primitives/runtime/src/generic/mod.rs +++ b/substrate/primitives/runtime/src/generic/mod.rs @@ -16,7 +16,7 @@ // limitations under the License. //! Generic implementations of [`crate::traits::Header`], [`crate::traits::Block`] and -//! [`crate::traits::Extrinsic`]. +//! [`crate::traits::ExtrinsicLike`]. mod block; mod checked_extrinsic; @@ -37,3 +37,4 @@ pub use self::{ Preamble, SignedPayload, UncheckedExtrinsic, EXTENSION_VERSION, EXTRINSIC_FORMAT_VERSION, }, }; +pub use unchecked_extrinsic::UncheckedSignaturePayload; diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 63cb32753ded..383283d5f749 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -20,8 +20,8 @@ use crate::{ generic::{CheckedExtrinsic, ExtrinsicFormat}, traits::{ - self, transaction_extension::TransactionExtensionBase, Checkable, Dispatchable, Extrinsic, - ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, + self, transaction_extension::TransactionExtensionBase, Checkable, Dispatchable, + ExtrinsicLike, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, SignaturePayload, TransactionExtension, }, transaction_validity::{InvalidTransaction, TransactionValidityError}, @@ -55,7 +55,7 @@ pub const EXTRINSIC_FORMAT_VERSION: ExtrinsicVersion = 5; pub const EXTENSION_VERSION: ExtensionVersion = 0; /// The `SignaturePayload` of `UncheckedExtrinsic`. 
-type UncheckedSignaturePayload = (Address, Signature, Extension); +pub type UncheckedSignaturePayload = (Address, Signature, Extension); impl SignaturePayload for UncheckedSignaturePayload @@ -372,15 +372,9 @@ impl UncheckedExtrinsic Extrinsic +impl ExtrinsicLike for UncheckedExtrinsic { - type Call = Call; - - type SignaturePayload = UncheckedSignaturePayload; - fn is_bare(&self) -> bool { matches!(self.preamble, Preamble::Bare(_)) } @@ -388,18 +382,6 @@ impl Option { Some(matches!(self.preamble, Preamble::Signed(..))) } - - fn new(function: Call, signed_data: Option) -> Option { - Some(if let Some((address, signature, extra)) = signed_data { - Self::new_signed(function, address, signature, extra) - } else { - Self::new_bare(function) - }) - } - - fn new_inherent(function: Call) -> Self { - Self::new_bare(function) - } } impl Checkable @@ -848,14 +830,14 @@ mod tests { #[test] fn unsigned_codec_should_work() { let call: TestCall = vec![0u8; 0].into(); - let ux = Ex::new_inherent(call); + let ux = Ex::new_bare(call); let encoded = ux.encode(); assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); } #[test] fn invalid_length_prefix_is_detected() { - let ux = Ex::new_inherent(vec![0u8; 0].into()); + let ux = Ex::new_bare(vec![0u8; 0].into()); let mut encoded = ux.encode(); let length = Compact::::decode(&mut &encoded[..]).unwrap(); @@ -900,7 +882,7 @@ mod tests { #[test] fn unsigned_check_should_work() { - let ux = Ex::new_inherent(vec![0u8; 0].into()); + let ux = Ex::new_bare(vec![0u8; 0].into()); assert!(ux.is_inherent()); assert_eq!( >::check(ux, &Default::default()), @@ -961,7 +943,7 @@ mod tests { #[test] fn encoding_matches_vec() { - let ex = Ex::new_inherent(vec![0u8; 0].into()); + let ex = Ex::new_bare(vec![0u8; 0].into()); let encoded = ex.encode(); let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); assert_eq!(decoded, ex); @@ -971,7 +953,7 @@ mod tests { #[test] fn conversion_to_opaque() { - let ux = Ex::new_inherent(vec![0u8; 0].into()); + let ux = Ex::new_bare(vec![0u8; 0].into()); let encoded = ux.encode(); let opaque: OpaqueExtrinsic = ux.into(); let opaque_encoded = opaque.encode(); diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index 0c3645edf6e9..60b8c7a73f5e 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -26,7 +26,7 @@ //! communication between the client and the runtime. This includes: //! //! - A set of traits to declare what any block/header/extrinsic type should provide. -//! - [`traits::Block`], [`traits::Header`], [`traits::Extrinsic`] +//! - [`traits::Block`], [`traits::Header`], [`traits::ExtrinsicLike`] //! - A set of types that implement these traits, whilst still providing a high degree of //! configurability via generics. //! - [`generic::Block`], [`generic::Header`], [`generic::UncheckedExtrinsic`] and @@ -950,10 +950,7 @@ impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { } } -// TODO: OpaqueExtrinsics cannot act like regular extrinsics, right?! 
-impl traits::Extrinsic for OpaqueExtrinsic {
-	type Call = ();
-	type SignaturePayload = ();
+impl traits::ExtrinsicLike for OpaqueExtrinsic {
 	fn is_bare(&self) -> bool {
 		false
 	}
diff --git a/substrate/primitives/runtime/src/testing.rs b/substrate/primitives/runtime/src/testing.rs
index 3d6e0b5ab8bc..ad5edfbdaf4f 100644
--- a/substrate/primitives/runtime/src/testing.rs
+++ b/substrate/primitives/runtime/src/testing.rs
@@ -204,7 +204,16 @@ impl traits::HeaderProvider for Block {
 }
 
 impl<
-		Xt: 'static + Codec + Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic,
+		Xt: 'static
+			+ Codec
+			+ Sized
+			+ Send
+			+ Sync
+			+ Serialize
+			+ Clone
+			+ Eq
+			+ Debug
+			+ traits::ExtrinsicLike,
 	> traits::Block for Block<Xt>
 {
 	type Extrinsic = Xt;
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 983fd218458a..94b727e64260 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -1292,7 +1292,7 @@ pub trait Block:
 	+ 'static
 {
 	/// Type for extrinsics.
-	type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize;
+	type Extrinsic: Member + Codec + ExtrinsicLike + MaybeSerialize;
 	/// Header type.
 	type Header: Header + MaybeSerializeDeserialize;
 	/// Block hash type.
@@ -1316,6 +1316,7 @@ pub trait Block:
 }
 
 /// Something that acts like an `Extrinsic`.
+#[deprecated = "Use `ExtrinsicLike` along with the `CreateTransaction` trait family instead"]
 pub trait Extrinsic: Sized {
 	/// The function call.
 	type Call: TypeInfo;
@@ -1329,20 +1330,19 @@ pub trait Extrinsic: Sized {
 
 	/// Is this `Extrinsic` signed?
 	/// If no information is available about signed/unsigned, `None` should be returned.
-	#[deprecated = "Use and implement `!is_bare()` instead"]
 	fn is_signed(&self) -> Option<bool> {
 		None
 	}
 
 	/// Returns `true` if this `Extrinsic` is bare.
 	fn is_bare(&self) -> bool {
-		#[allow(deprecated)]
-		!self.is_signed().unwrap_or(true)
+		!self
+			.is_signed()
+			.expect("`is_signed` must return `Some` on production extrinsics; qed")
 	}
 
 	/// Create a new old-school extrinsic, either a bare extrinsic if `_signed_data` is `None` or
 	/// a signed transaction if it is `Some`.
-	#[deprecated = "Use `new_inherent` or the `CreateTransaction` trait instead"]
 	fn new(_call: Self::Call, _signed_data: Option<Self::SignaturePayload>) -> Option<Self> {
 		None
 	}
@@ -1354,6 +1354,37 @@ pub trait Extrinsic: Sized {
 	}
 }
 
+/// Something that acts like an `Extrinsic`.
+pub trait ExtrinsicLike: Sized {
+	/// Is this `Extrinsic` signed?
+	/// If no information is available about signed/unsigned, `None` should be returned.
+	#[deprecated = "Use and implement `!is_bare()` instead"]
+	fn is_signed(&self) -> Option<bool> {
+		None
+	}
+
+	/// Returns `true` if this `Extrinsic` is bare.
+	fn is_bare(&self) -> bool {
+		#[allow(deprecated)]
+		!self.is_signed().unwrap_or(true)
+	}
+}
+
+#[allow(deprecated)]
+impl<T> ExtrinsicLike for T
+where
+	T: Extrinsic,
+{
+	fn is_signed(&self) -> Option<bool> {
+		#[allow(deprecated)]
+		<T as Extrinsic>::is_signed(&self)
+	}
+
+	fn is_bare(&self) -> bool {
+		<T as Extrinsic>::is_bare(&self)
+	}
+}
+
 /// Something that acts like a [`SignaturePayload`](Extrinsic::SignaturePayload) of an
 /// [`Extrinsic`].
pub trait SignaturePayload {
diff --git a/substrate/primitives/test-primitives/src/lib.rs b/substrate/primitives/test-primitives/src/lib.rs
index 1e3b912eaf48..adc96d773694 100644
--- a/substrate/primitives/test-primitives/src/lib.rs
+++ b/substrate/primitives/test-primitives/src/lib.rs
@@ -28,7 +28,7 @@ use sp_application_crypto::sr25519;
 use alloc::vec::Vec;
 pub use sp_core::{hash::H256, RuntimeDebug};
-use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify};
+use sp_runtime::traits::{BlakeTwo256, ExtrinsicLike, Verify};
 
 /// Extrinsic for test-runtime.
 #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, scale_info::TypeInfo)]
@@ -47,10 +47,7 @@ impl serde::Serialize for Extrinsic {
 	}
 }
 
-impl ExtrinsicT for Extrinsic {
-	type Call = Extrinsic;
-	type SignaturePayload = ();
-
+impl ExtrinsicLike for Extrinsic {
 	fn is_signed(&self) -> Option<bool> {
 		if let Extrinsic::IncludeData(_) = *self {
 			Some(false)
@@ -59,8 +56,12 @@ impl ExtrinsicT for Extrinsic {
 		}
 	}
 
-	fn new(call: Self::Call, _signature_payload: Option<Self::SignaturePayload>) -> Option<Self> {
-		Some(call)
+	fn is_bare(&self) -> bool {
+		if let Extrinsic::IncludeData(_) = *self {
+			true
+		} else {
+			false
+		}
 	}
 }

From 0b52a2c19ebcf5d7a0d07974b70aec656704d249 Mon Sep 17 00:00:00 2001
From: Alin Dima
Date: Mon, 12 Aug 2024 11:03:23 +0300
Subject: [PATCH 17/74] prospective-parachains rework: take II (#4937)

Resolves https://github.com/paritytech/polkadot-sdk/issues/4800

# Problem
In https://github.com/paritytech/polkadot-sdk/pull/4035, we removed support
for parachain forks and cycles and added support for backing unconnected
candidates (candidates for which we don't yet know the full path to the
latest included block), which is useful for elastic scaling (parachains
using multiple cores).

Removing support for backing forks turned out to be a bad idea, as there
are legitimate cases for a parachain to fork (if they have another
consensus mechanism, for example BABE or PoW). This leads to validators
getting lower backing rewards (depending on whether they back the winning
fork or not) and higher pressure on only half of the backing group
(during availability-distribution for example). Since we don't yet have
approval voting rewards, backing rewards are a pretty big deal (which may
change in the future).

# Description

A backing group is now allowed to back forks. Once a candidate becomes
backed (has the minimum backing votes), we don't accept new forks unless
they adhere to the new fork selection rule (have a lower candidate hash).
This helps with keeping the implementation simpler, since forks will only
be taken into account for candidates which are not backed yet (only
seconded).
Having this fork selection rule also helps with reducing the work backing
validators need to do, since they have a shared way of picking the winning
fork. Once they see a candidate backed, they can all decide to back a fork
and not accept new ones.
But they still accept new ones during the seconding phase (until the
backing quorum is reached).
Therefore, a block author which is not part of the backing group will
likely not even see the forks (only the winning one).

Just as before, a parachain producing forks will still not be able to
leverage elastic scaling but will still work with a single core. Also,
cycles are still not accepted.

## Some implementation details

`CandidateStorage` is no longer a subsystem-wide construct. It was
previously holding candidates from all relay chain forks and complicated
the code.
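To make the fork selection rule concrete, here is a minimal sketch of how
two backed forks would be compared. The `fork_selection_rule` function
mirrors the one added in this patch; the `pick_winning_fork` helper around
it is purely illustrative and not part of the diff:

```rust
use std::cmp::Ordering;
use polkadot_primitives::CandidateHash;

/// The fork selection rule from this patch: the fork whose candidate hash
/// is lower wins. All validators apply the same rule, so they converge on
/// the same fork.
fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering {
    hash1.cmp(hash2)
}

/// Illustrative helper (not in the patch): keep `current` unless the newly
/// backed `challenger` fork is favoured by the rule.
fn pick_winning_fork(current: CandidateHash, challenger: CandidateHash) -> CandidateHash {
    match fork_selection_rule(&challenger, &current) {
        Ordering::Less => challenger,
        _ => current,
    }
}
```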
Each fragment chain now holds its candidate chain and its potential
candidates. This should not increase the storage consumption since the
heavy candidate data is already wrapped in an Arc and shared. It however
allows for great simplifications and increases readability.

`FragmentChain`s are now only creating a chain with backed candidates and
the fork selection rule. As said before, `FragmentChain`s are now also
responsible for maintaining their own potential candidate storage.

Since we no longer have the subsystem-wide `CandidateStorage`, when getting
a new leaf update, we use the storage of our latest ancestor, which may
contain candidates seconded/backed that are still in scope.

When a candidate is backed, the fragment chains which hold it are
recreated (due to the fork selection rule, it could trigger a "reorg" of
the fragment chain).

I generally tried to simplify the subsystem and not introduce unnecessary
optimisations that would otherwise complicate the code and not gain us
much (fragment chains wouldn't realistically ever hold many candidates).

TODO:
- [x] update metrics
- [x] update docs and comments
- [x] fix and add unit tests
- [x] tested with fork-producing parachain
- [x] tested with cycle-producing parachain
- [x] versi test
- [x] prdoc
---
 Cargo.lock | 10 +-
 .../core/prospective-parachains/Cargo.toml | 10 +-
 .../core/prospective-parachains/src/error.rs | 15 -
 .../src/fragment_chain/mod.rs | 1357 +++++++----
 .../src/fragment_chain/tests.rs | 2156 ++++++++---------
 .../core/prospective-parachains/src/lib.rs | 638 ++---
 .../prospective-parachains/src/metrics.rs | 105 +-
 .../core/prospective-parachains/src/tests.rs | 691 ++++--
 polkadot/node/core/provisioner/src/lib.rs | 8 +-
 .../statement-distribution/src/v2/mod.rs | 4 +-
 polkadot/node/subsystem-types/src/messages.rs | 40 +-
 .../src/backing_implicit_view.rs | 76 +
 .../src/inclusion_emulator/mod.rs | 114 +-
 prdoc/pr_4937.prdoc | 21 +
 14 files changed, 2921 insertions(+), 2324 deletions(-)
 create mode 100644 prdoc/pr_4937.prdoc

diff --git a/Cargo.lock b/Cargo.lock
index 4db38311fe66..6ebacc9ec5a8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13443,23 +13443,17 @@ name = "polkadot-node-core-prospective-parachains"
 version = "6.0.0"
 dependencies = [
 "assert_matches",
- "bitvec",
 "fatality",
 "futures",
- "parity-scale-codec",
- "polkadot-node-primitives",
 "polkadot-node-subsystem",
 "polkadot-node-subsystem-test-helpers",
- "polkadot-node-subsystem-types",
 "polkadot-node-subsystem-util",
 "polkadot-primitives",
 "polkadot-primitives-test-helpers",
+ "rand",
 "rstest",
- "sc-keystore",
- "sp-application-crypto",
 "sp-core",
- "sp-keyring",
- "sp-keystore",
+ "sp-tracing 16.0.0",
 "thiserror",
 "tracing-gum",
 ]
diff --git a/polkadot/node/core/prospective-parachains/Cargo.toml b/polkadot/node/core/prospective-parachains/Cargo.toml
index 97da5a1e94a0..705014e67a05 100644
--- a/polkadot/node/core/prospective-parachains/Cargo.toml
+++ b/polkadot/node/core/prospective-parachains/Cargo.toml
@@ -12,24 +12,18 @@ workspace = true
 [dependencies]
 futures = { workspace = true }
 gum = { workspace = true, default-features = true }
-codec = { workspace = true, default-features = true }
 thiserror = { workspace = true }
 fatality = { workspace = true }
-bitvec = { workspace = true, default-features = true }
 polkadot-primitives = { workspace = true, default-features = true }
-polkadot-node-primitives = { workspace = true, default-features = true }
 polkadot-node-subsystem = { workspace = true, default-features = true }
 polkadot-node-subsystem-util = {
workspace = true, default-features = true } [dev-dependencies] assert_matches = { workspace = true } polkadot-node-subsystem-test-helpers = { workspace = true } -polkadot-node-subsystem-types = { workspace = true, default-features = true } polkadot-primitives-test-helpers = { workspace = true } +sp-tracing = { workspace = true } sp-core = { workspace = true, default-features = true } -sc-keystore = { workspace = true, default-features = true } -sp-application-crypto = { workspace = true, default-features = true } -sp-keyring = { workspace = true, default-features = true } -sp-keystore = { workspace = true, default-features = true } +rand = { workspace = true } rstest = { workspace = true } diff --git a/polkadot/node/core/prospective-parachains/src/error.rs b/polkadot/node/core/prospective-parachains/src/error.rs index 2b0933ab1c7e..4b332b9c5de5 100644 --- a/polkadot/node/core/prospective-parachains/src/error.rs +++ b/polkadot/node/core/prospective-parachains/src/error.rs @@ -30,18 +30,6 @@ use fatality::Nested; #[allow(missing_docs)] #[fatality::fatality(splitable)] pub enum Error { - #[fatal] - #[error("SubsystemError::Context error: {0}")] - SubsystemContext(String), - - #[fatal] - #[error("Spawning a task failed: {0}")] - SpawnFailed(SubsystemError), - - #[fatal] - #[error("Participation worker receiver exhausted.")] - ParticipationWorkerReceiverExhausted, - #[fatal] #[error("Receiving message from overseer failed: {0}")] SubsystemReceive(#[source] SubsystemError), @@ -55,9 +43,6 @@ pub enum Error { #[error(transparent)] ChainApi(#[from] ChainApiError), - #[error(transparent)] - Subsystem(SubsystemError), - #[error("Request to chain API subsystem dropped")] ChainApiRequestCanceled(oneshot::Canceled), diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs index b5fe70e76923..b060897d4391 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/mod.rs @@ -18,41 +18,58 @@ //! //! # Overview //! -//! This module exposes two main types: [`FragmentChain`] and [`CandidateStorage`] which are meant -//! to be used in close conjunction. Each fragment chain is associated with a particular -//! relay-parent and each node in the chain represents a candidate. Each parachain has a single -//! candidate storage, but can have one chain for each relay chain block in the view. -//! Therefore, the same candidate can be present in multiple fragment chains of a parachain. One of -//! the purposes of the candidate storage is to deduplicate the large candidate data that is being -//! referenced from multiple fragment chains. +//! The main type exposed by this module is the [`FragmentChain`]. //! -//! A chain has an associated [`Scope`] which defines limits on candidates within the chain. -//! Candidates themselves have their own [`Constraints`] which are either the constraints from the -//! scope, or, if there are previous nodes in the chain, a modified version of the previous -//! candidate's constraints. +//! Each fragment chain is associated with a particular relay-parent (an active leaf) and has a +//! [`Scope`], which contains the allowed relay parents (up to `allowed_ancestry_len`), the pending +//! availability candidates and base constraints derived from the latest included candidate. Each +//! parachain has a single `FragmentChain` for each active leaf where it's scheduled. //! -//! 
Another use of the `CandidateStorage` is to keep a record of candidates which may not be yet
-//! included in any chain, but which may become part of a chain in the future. This is needed for
-//! elastic scaling, so that we may parallelise the backing process across different groups. As long
-//! as some basic constraints are not violated by an unconnected candidate (like the relay parent
-//! being in scope), we proceed with the backing process, hoping that its predecessors will be
-//! backed soon enough. This is commonly called a potential candidate. Note that not all potential
-//! candidates will be maintained in the CandidateStorage. The total number of connected + potential
-//! candidates will be at most max_candidate_depth + 1.
+//! A fragment chain consists mainly of the current best backable chain (we'll call this the best
+//! chain) and a storage of unconnected potential candidates (we'll call this the unconnected
+//! storage).
+//!
+//! The best chain contains all the candidates pending availability and a subsequent chain
+//! of candidates that have reached the backing quorum and are better than any other backable fork
+//! according to the fork selection rule (more on this rule later). It has a length of at most
+//! `max_candidate_depth + 1`.
+//!
+//! The unconnected storage keeps a record of seconded/backable candidates that may be
+//! added to the best chain in the future.
+//! Once a candidate is seconded, it becomes part of this unconnected storage.
+//! Only after it is backed may it be added to the best chain (but not necessarily). It's only
+//! added if it builds on the latest candidate in the chain and if there isn't a better backable
+//! candidate according to the fork selection rule.
+//!
+//! An important thing to note is that the candidates present in the unconnected storage may have
+//! any/no relationship between them. In other words, they may form N trees and may even form
+//! cycles. This is needed so that we may begin validating candidates for which we don't yet know
+//! their parent (so we may parallelize the backing process across different groups for elastic
+//! scaling) and so that we accept parachain forks.
+//!
+//! We accept parachain forks only if the fork selection rule allows for it. In other words, if we
+//! have a backed candidate, we begin seconding/validating a fork only if it has a lower candidate
+//! hash. Once both forks are backed, we discard the one with the higher candidate hash.
+//! We assume all validators pick the same fork according to the fork selection rule. If we decided
+//! to not accept parachain forks, candidates could end up getting only half of the backing votes or
+//! even less (for forks of larger arity). This would affect the validator rewards. Still, we don't
+//! guarantee that a fork-producing parachain will be able to fully use elastic scaling.
+//!
+//! Once a candidate is backed and becomes part of the best chain, we can trim from the
+//! unconnected storage candidates which constitute forks on the best chain and no longer have
+//! potential.
 //!
 //! This module also makes use of types provided by the Inclusion Emulator module, such as
 //! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of
 //! prospective fragments.
 //!
-//! # Parachain forks
+//! # Fork choice rule
 //!
-//! Parachains are expected to not create forks, hence the use of fragment chains as opposed to
-//! fragment trees.
If parachains do create forks, their performance in regards to async backing and
-//! elastic scaling will suffer, because different validators will have different views of the
-//! future.
+//! The motivation for the fork choice rule is described in the previous chapter.
 //!
-//! This is a compromise we can make - collators which want to use async backing and elastic scaling
-//! need to cooperate for the highest throughput.
+//! The current rule is: choose the candidate with the lower candidate hash.
+//! The candidate hash is quite random and finding a candidate with a lower hash in order to favour
+//! it would essentially mean solving a proof of work problem.
 //!
 //! # Parachain cycles
 //!
@@ -65,70 +82,117 @@
 //! resolved by having candidates reference their parent by candidate hash.
 //!
 //! However, dealing with cycles increases complexity during the backing/inclusion process for no
-//! practical reason. Therefore, fragment chains will not accept such candidates.
+//! practical reason.
+//! These cycles may be accepted by fragment chains while candidates are part of the unconnected
+//! storage, but they will definitely not make it to the best chain.
 //!
 //! On the other hand, enforcing that a parachain will NEVER be cyclic would be very complicated
 //! (looping through the entire parachain's history on every new candidate or changing the candidate
 //! receipt to reference the parent's candidate hash).
 //!
+//! Therefore, we don't provide a guarantee that a cycle-producing parachain will work (although in
+//! practice it probably will if the cycle length is larger than the number of assigned cores
+//! multiplied by two).
+//!
 //! # Spam protection
 //!
-//! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied,
-//! [`FragmentChain`] complexity is bounded. This means that higher-level code needs to be selective
-//! about limiting the amount of candidates that are considered.
+//! As long as the supplied number of candidates is bounded, [`FragmentChain`] complexity is
+//! bounded. This means that higher-level code needs to be selective about limiting the amount of
+//! candidates that are considered.
+//!
+//! Practically speaking, the collator-protocol will not allow more than `max_candidate_depth + 1`
+//! collations to be fetched at a relay parent and statement-distribution will not allow more than
+//! `max_candidate_depth + 1` seconded candidates at a relay parent per each validator in the
+//! backing group. Considering the `allowed_ancestry_len` configuration value, the number of
+//! candidates in a `FragmentChain` (including its unconnected storage) should not exceed:
+//!
+//! `allowed_ancestry_len * (max_candidate_depth + 1) * backing_group_size`.
 //!
 //! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
 //! Our assumption is that the amount of candidates and parachains we consider will be reasonably
 //! bounded and in practice will not exceed a few thousand at any time. This naive implementation
 //! will still perform fairly well under these conditions, despite being somewhat wasteful of
 //! memory.
+//!
+//! Still, the expensive candidate data (CandidateCommitments) is wrapped in an `Arc` and shared
+//! across fragment chains of the same para on different active leaves.
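As a quick sanity check on the bound stated in the docs above, here is a
small back-of-the-envelope sketch. The three input values are purely
illustrative and not taken from this patch or any real runtime
configuration:

```rust
fn main() {
    let allowed_ancestry_len: u32 = 3; // hypothetical: allowed relay-parent ancestry length
    let max_candidate_depth: u32 = 3; // hypothetical: async backing parameter
    let backing_group_size: u32 = 5; // hypothetical: validators per backing group

    // Upper bound on candidates tracked by one `FragmentChain`
    // (best chain plus unconnected storage), per the formula above.
    let bound = allowed_ancestry_len * (max_candidate_depth + 1) * backing_group_size;
    assert_eq!(bound, 60);
}
```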
#[cfg(test)] mod tests; use std::{ + cmp::{min, Ordering}, collections::{ hash_map::{Entry, HashMap}, - BTreeMap, HashSet, + BTreeMap, HashSet, VecDeque, }, sync::Arc, }; use super::LOG_TARGET; -use polkadot_node_subsystem::messages::{Ancestors, HypotheticalCandidate}; +use polkadot_node_subsystem::messages::Ancestors; use polkadot_node_subsystem_util::inclusion_emulator::{ - ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, + self, ConstraintModifications, Constraints, Fragment, HypotheticalOrConcreteCandidate, + ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, - PersistedValidationData, + BlockNumber, CandidateCommitments, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, + PersistedValidationData, ValidationCodeHash, }; +use thiserror::Error; + +/// Fragment chain related errors. +#[derive(Debug, Clone, PartialEq, Error)] +pub(crate) enum Error { + #[error("Candidate already known")] + CandidateAlreadyKnown, + #[error("Candidate's parent head is equal to its output head. Would introduce a cycle.")] + ZeroLengthCycle, + #[error("Candidate would introduce a cycle")] + Cycle, + #[error("Candidate would introduce two paths to the same output state")] + MultiplePaths, + #[error("Attempting to directly introduce a Backed candidate. It should first be introduced as Seconded")] + IntroduceBackedCandidate, + #[error("Relay parent {0:?} of the candidate precedes the relay parent {1:?} of a pending availability candidate")] + RelayParentPrecedesCandidatePendingAvailability(Hash, Hash), + #[error("Candidate would introduce a fork with a pending availability candidate: {0:?}")] + ForkWithCandidatePendingAvailability(CandidateHash), + #[error("Fork selection rule favours another candidate: {0:?}")] + ForkChoiceRule(CandidateHash), + #[error("Could not find parent of the candidate")] + ParentCandidateNotFound, + #[error("Could not compute candidate constraints: {0:?}")] + ComputeConstraints(inclusion_emulator::ModificationError), + #[error("Candidate violates constraints: {0:?}")] + CheckAgainstConstraints(inclusion_emulator::FragmentValidityError), + #[error("Relay parent would move backwards from the latest candidate in the chain")] + RelayParentMovedBackwards, + #[error(transparent)] + CandidateEntry(#[from] CandidateEntryError), + #[error("Relay parent {0:?} not in scope. Earliest relay parent allowed {1:?}")] + RelayParentNotInScope(Hash, Hash), +} -/// Kinds of failures to import a candidate into storage. -#[derive(Debug, Clone, PartialEq)] -pub enum CandidateStorageInsertionError { - /// An error indicating that a supplied candidate didn't match the persisted - /// validation data provided alongside it. - PersistedValidationDataMismatch, - /// The candidate was already known. - CandidateAlreadyKnown(CandidateHash), +/// The rule for selecting between two backed candidate forks, when adding to the chain. +/// All validators should adhere to this rule, in order to not lose out on rewards in case of +/// forking parachains. +fn fork_selection_rule(hash1: &CandidateHash, hash2: &CandidateHash) -> Ordering { + hash1.cmp(hash2) } -/// Stores candidates and information about them such as their relay-parents and their backing -/// states. +/// Utility for storing candidates and information about them such as their relay-parents and their +/// backing states. This does not assume any restriction on whether or not the candidates form a +/// chain. 
Useful for storing all kinds of candidates. #[derive(Clone, Default)] pub(crate) struct CandidateStorage { - // Index from head data hash to candidate hashes with that head data as a parent. Purely for + // Index from head data hash to candidate hashes with that head data as a parent. Useful for // efficiency when responding to `ProspectiveValidationDataRequest`s or when trying to find a // new candidate to push to a chain. - // Even though having multiple candidates with same parent would be invalid for a parachain, it - // could happen across different relay chain forks, hence the HashSet. by_parent_head: HashMap>, - // Index from head data hash to candidate hashes outputting that head data. Purely for + // Index from head data hash to candidate hashes outputting that head data. For // efficiency when responding to `ProspectiveValidationDataRequest`s. - // Even though having multiple candidates with same output would be invalid for a parachain, - // it could happen across different relay chain forks. by_output_head: HashMap>, // Index from candidate hash to fragment node. @@ -136,63 +200,59 @@ pub(crate) struct CandidateStorage { } impl CandidateStorage { - /// Introduce a new candidate. - pub fn add_candidate( + /// Introduce a new pending availability candidate. + pub fn add_pending_availability_candidate( &mut self, + candidate_hash: CandidateHash, candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - state: CandidateState, - ) -> Result { - let candidate_hash = candidate.hash(); - if self.by_candidate_hash.contains_key(&candidate_hash) { - return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash)) - } + ) -> Result<(), Error> { + let entry = CandidateEntry::new( + candidate_hash, + candidate, + persisted_validation_data, + CandidateState::Backed, + )?; - if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash { - return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) - } + self.add_candidate_entry(entry) + } - let entry = CandidateEntry { - candidate_hash, - parent_head_data_hash: persisted_validation_data.parent_head.hash(), - output_head_data_hash: candidate.commitments.head_data.hash(), - relay_parent: candidate.descriptor.relay_parent, - state, - candidate: Arc::new(ProspectiveCandidate { - commitments: candidate.commitments, - persisted_validation_data, - pov_hash: candidate.descriptor.pov_hash, - validation_code_hash: candidate.descriptor.validation_code_hash, - }), - }; + /// Return the number of stored candidates. + pub fn len(&self) -> usize { + self.by_candidate_hash.len() + } + + /// Introduce a new candidate entry. + fn add_candidate_entry(&mut self, candidate: CandidateEntry) -> Result<(), Error> { + let candidate_hash = candidate.candidate_hash; + if self.by_candidate_hash.contains_key(&candidate_hash) { + return Err(Error::CandidateAlreadyKnown) + } self.by_parent_head - .entry(entry.parent_head_data_hash()) + .entry(candidate.parent_head_data_hash) .or_default() .insert(candidate_hash); self.by_output_head - .entry(entry.output_head_data_hash()) + .entry(candidate.output_head_data_hash) .or_default() .insert(candidate_hash); - // sanity-checked already. - self.by_candidate_hash.insert(candidate_hash, entry); + self.by_candidate_hash.insert(candidate_hash, candidate); - Ok(candidate_hash) + Ok(()) } /// Remove a candidate from the store. 
- pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { - if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash()) - { + if let Entry::Occupied(mut e) = self.by_parent_head.entry(entry.parent_head_data_hash) { e.get_mut().remove(&candidate_hash); if e.get().is_empty() { e.remove(); } } - if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash()) - { + if let Entry::Occupied(mut e) = self.by_output_head.entry(entry.output_head_data_hash) { e.get_mut().remove(&candidate_hash); if e.get().is_empty() { e.remove(); @@ -202,7 +262,7 @@ impl CandidateStorage { } /// Note that an existing candidate has been backed. - pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { + fn mark_backed(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { gum::trace!(target: LOG_TARGET, ?candidate_hash, "Candidate marked as backed"); entry.state = CandidateState::Backed; @@ -211,38 +271,18 @@ impl CandidateStorage { } } - /// Whether a candidate is recorded as being backed. - pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool { - self.by_candidate_hash - .get(candidate_hash) - .map_or(false, |e| e.state == CandidateState::Backed) - } - /// Whether a candidate is contained within the storage already. - pub fn contains(&self, candidate_hash: &CandidateHash) -> bool { + fn contains(&self, candidate_hash: &CandidateHash) -> bool { self.by_candidate_hash.contains_key(candidate_hash) } - /// Return an iterator over the stored candidates. - pub fn candidates(&self) -> impl Iterator { + /// Return an iterator over references to the stored candidates, in arbitrary order. + fn candidates(&self) -> impl Iterator { self.by_candidate_hash.values() } - /// Retain only candidates which pass the predicate. - pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) { - self.by_candidate_hash.retain(|h, _v| pred(h)); - self.by_parent_head.retain(|_parent, children| { - children.retain(|h| pred(h)); - !children.is_empty() - }); - self.by_output_head.retain(|_output, candidates| { - candidates.retain(|h| pred(h)); - !candidates.is_empty() - }); - } - - /// Get head-data by hash. - pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { + /// Try getting head-data by hash. + fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { // First, search for candidates outputting this head data and extract the head data // from their commitments if they exist. // @@ -262,16 +302,8 @@ impl CandidateStorage { }) } - /// Returns candidate's relay parent, if present. - pub(crate) fn relay_parent_of_candidate(&self, candidate_hash: &CandidateHash) -> Option { - self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent) - } - - /// Returns the candidates which have the given head data hash as parent. - /// We don't allow forks in a parachain, but we may have multiple candidates with same parent - /// across different relay chain forks. That's why it returns an iterator (but only one will be - /// valid and used in the end). - fn possible_para_children<'a>( + /// Returns the backed candidates which have the given head data hash as parent. 
+	fn possible_backed_para_children<'a>(
		&'a self,
		parent_head_hash: &'a Hash,
	) -> impl Iterator<Item = &'a CandidateEntry> + 'a {
@@ -280,12 +312,11 @@
 			.get(parent_head_hash)
 			.into_iter()
 			.flat_map(|hashes| hashes.iter())
-			.filter_map(move |h| by_candidate_hash.get(h))
-	}
-
-	#[cfg(test)]
-	pub fn len(&self) -> (usize, usize) {
-		(self.by_parent_head.len(), self.by_candidate_hash.len())
+			.filter_map(move |h| {
+				by_candidate_hash.get(h).and_then(|candidate| {
+					(candidate.state == CandidateState::Backed).then_some(candidate)
+				})
+			})
 	}
 }
@@ -293,14 +324,24 @@ impl CandidateStorage {
 ///
 /// Candidates aren't even considered until they've at least been seconded.
 #[derive(Debug, PartialEq, Clone)]
-pub(crate) enum CandidateState {
+enum CandidateState {
 	/// The candidate has been seconded.
 	Seconded,
 	/// The candidate has been completely backed by the group.
 	Backed,
 }
 
+#[derive(Debug, Clone, PartialEq, Error)]
+/// Possible errors when constructing a candidate entry.
+pub enum CandidateEntryError {
+	#[error("Candidate does not match the persisted validation data provided alongside it")]
+	PersistedValidationDataMismatch,
+	#[error("Candidate's parent head is equal to its output head. Would introduce a cycle")]
+	ZeroLengthCycle,
+}
+
 #[derive(Debug, Clone)]
+/// Representation of a candidate in the [`CandidateStorage`].
 pub(crate) struct CandidateEntry {
 	candidate_hash: CandidateHash,
 	parent_head_data_hash: Hash,
@@ -311,16 +352,79 @@ pub(crate) struct CandidateEntry {
 }
 
 impl CandidateEntry {
+	/// Create a new seconded candidate entry.
+	pub fn new_seconded(
+		candidate_hash: CandidateHash,
+		candidate: CommittedCandidateReceipt,
+		persisted_validation_data: PersistedValidationData,
+	) -> Result<Self, CandidateEntryError> {
+		Self::new(candidate_hash, candidate, persisted_validation_data, CandidateState::Seconded)
+	}
+
 	pub fn hash(&self) -> CandidateHash {
 		self.candidate_hash
 	}
 
-	pub fn parent_head_data_hash(&self) -> Hash {
+	fn new(
+		candidate_hash: CandidateHash,
+		candidate: CommittedCandidateReceipt,
+		persisted_validation_data: PersistedValidationData,
+		state: CandidateState,
+	) -> Result<Self, CandidateEntryError> {
+		if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash {
+			return Err(CandidateEntryError::PersistedValidationDataMismatch)
+		}
+
+		let parent_head_data_hash = persisted_validation_data.parent_head.hash();
+		let output_head_data_hash = candidate.commitments.head_data.hash();
+
+		if parent_head_data_hash == output_head_data_hash {
+			return Err(CandidateEntryError::ZeroLengthCycle)
+		}
+
+		Ok(Self {
+			candidate_hash,
+			parent_head_data_hash,
+			output_head_data_hash,
+			relay_parent: candidate.descriptor.relay_parent,
+			state,
+			candidate: Arc::new(ProspectiveCandidate {
+				commitments: candidate.commitments,
+				persisted_validation_data,
+				pov_hash: candidate.descriptor.pov_hash,
+				validation_code_hash: candidate.descriptor.validation_code_hash,
+			}),
+		})
+	}
+}
+
+impl HypotheticalOrConcreteCandidate for CandidateEntry {
+	fn commitments(&self) -> Option<&CandidateCommitments> {
+		Some(&self.candidate.commitments)
+	}
+
+	fn persisted_validation_data(&self) -> Option<&PersistedValidationData> {
+		Some(&self.candidate.persisted_validation_data)
+	}
+
+	fn validation_code_hash(&self) -> Option<&ValidationCodeHash> {
+		Some(&self.candidate.validation_code_hash)
+	}
+
+	fn parent_head_data_hash(&self) -> Hash {
 		self.parent_head_data_hash
 	}
 
-	pub fn output_head_data_hash(&self) -> Hash {
-		self.output_head_data_hash
+	fn output_head_data_hash(&self) -> Option<Hash> {
Some(self.output_head_data_hash) + } + + fn relay_parent(&self) -> Hash { + self.relay_parent + } + + fn candidate_hash(&self) -> CandidateHash { + self.candidate_hash } } @@ -337,8 +441,6 @@ pub(crate) struct PendingAvailability { /// The scope of a [`FragmentChain`]. #[derive(Debug, Clone)] pub(crate) struct Scope { - /// The assigned para id of this `FragmentChain`. - para: ParaId, /// The relay parent we're currently building on top of. relay_parent: RelayChainBlockInfo, /// The other relay parents candidates are allowed to build upon, mapped by the block number. @@ -356,10 +458,14 @@ pub(crate) struct Scope { /// An error variant indicating that ancestors provided to a scope /// had unexpected order. #[derive(Debug)] -pub struct UnexpectedAncestor { +pub(crate) struct UnexpectedAncestor { /// The block number that this error occurred at. + /// Allow as dead code, but it's being read in logs. + #[allow(dead_code)] pub number: BlockNumber, /// The previous seen block number, which did not match `number`. + /// Allow as dead code, but it's being read in logs. + #[allow(dead_code)] pub prev: BlockNumber, } @@ -381,7 +487,6 @@ impl Scope { /// /// It is allowed to provide zero ancestors. pub fn with_ancestors( - para: ParaId, relay_parent: RelayChainBlockInfo, base_constraints: Constraints, pending_availability: Vec, @@ -408,7 +513,6 @@ impl Scope { } Ok(Scope { - para, relay_parent, base_constraints, pending_availability, @@ -436,24 +540,29 @@ impl Scope { self.ancestors_by_hash.get(hash).map(|info| info.clone()) } + /// Get the base constraints of the scope + pub fn base_constraints(&self) -> &Constraints { + &self.base_constraints + } + /// Whether the candidate in question is one pending availability in this scope. - pub fn get_pending_availability( + fn get_pending_availability( &self, candidate_hash: &CandidateHash, ) -> Option<&PendingAvailability> { self.pending_availability.iter().find(|c| &c.candidate_hash == candidate_hash) } - - /// Get the base constraints of the scope - pub fn base_constraints(&self) -> &Constraints { - &self.base_constraints - } } -pub struct FragmentNode { +#[cfg_attr(test, derive(Clone))] +/// A node that is part of a `BackedChain`. It holds constraints based on the ancestors in the +/// chain. +struct FragmentNode { fragment: Fragment, candidate_hash: CandidateHash, cumulative_modifications: ConstraintModifications, + parent_head_data_hash: Hash, + output_head_data_hash: Hash, } impl FragmentNode { @@ -462,211 +571,336 @@ impl FragmentNode { } } -/// Response given by `can_add_candidate_as_potential` -#[derive(PartialEq, Debug)] -pub enum PotentialAddition { - /// Can be added as either connected or unconnected candidate. - Anyhow, - /// Can only be added as a connected candidate to the chain. - IfConnected, - /// Cannot be added. - None, +impl From<&FragmentNode> for CandidateEntry { + fn from(node: &FragmentNode) -> Self { + // We don't need to perform the checks done in `CandidateEntry::new()`, since a + // `FragmentNode` always comes from a `CandidateEntry` + Self { + candidate_hash: node.candidate_hash, + parent_head_data_hash: node.parent_head_data_hash, + output_head_data_hash: node.output_head_data_hash, + candidate: node.fragment.candidate_clone(), + relay_parent: node.relay_parent(), + // A fragment node is always backed. + state: CandidateState::Backed, + } + } } -/// This is a chain of candidates based on some underlying storage of candidates and a scope. +/// A candidate chain of backed/backable candidates. 
+/// Includes the candidates pending availability and candidates which may be backed on-chain.
+#[derive(Default)]
+#[cfg_attr(test, derive(Clone))]
+struct BackedChain {
+	// Holds the candidate chain.
+	chain: Vec<FragmentNode>,
+	// Index from head data hash to the candidate hash with that head data as a parent.
+	// Only contains the candidates present in the `chain`.
+	by_parent_head: HashMap<Hash, CandidateHash>,
+	// Index from head data hash to the candidate hash outputting that head data.
+	// Only contains the candidates present in the `chain`.
+	by_output_head: HashMap<Hash, CandidateHash>,
+	// A set of the candidate hashes in the `chain`.
+	candidates: HashSet<CandidateHash>,
+}
+
+impl BackedChain {
+	fn push(&mut self, candidate: FragmentNode) {
+		self.candidates.insert(candidate.candidate_hash);
+		self.by_parent_head
+			.insert(candidate.parent_head_data_hash, candidate.candidate_hash);
+		self.by_output_head
+			.insert(candidate.output_head_data_hash, candidate.candidate_hash);
+		self.chain.push(candidate);
+	}
+
+	fn clear(&mut self) -> Vec<FragmentNode> {
+		self.by_parent_head.clear();
+		self.by_output_head.clear();
+		self.candidates.clear();
+
+		std::mem::take(&mut self.chain)
+	}
+
+	fn revert_to_parent_hash<'a>(
+		&'a mut self,
+		parent_head_data_hash: &Hash,
+	) -> impl Iterator<Item = FragmentNode> + 'a {
+		let mut found_index = None;
+		for index in 0..self.chain.len() {
+			let node = &self.chain[index];
+
+			if found_index.is_some() {
+				self.by_parent_head.remove(&node.parent_head_data_hash);
+				self.by_output_head.remove(&node.output_head_data_hash);
+				self.candidates.remove(&node.candidate_hash);
+			} else if &node.output_head_data_hash == parent_head_data_hash {
+				found_index = Some(index);
+			}
+		}
+
+		if let Some(index) = found_index {
+			self.chain.drain(min(index + 1, self.chain.len())..)
+		} else {
+			// Don't remove anything, but use drain to satisfy the compiler.
+			self.chain.drain(0..0)
+		}
+	}
+
+	fn contains(&self, hash: &CandidateHash) -> bool {
+		self.candidates.contains(hash)
+	}
+}
+
+/// This is the fragment chain specific to an active leaf.
 ///
-/// All nodes in the chain must be either pending availability or within the scope. Within the scope
-/// means it's built off of the relay-parent or an ancestor.
+/// It holds the current best backable candidate chain, as well as potential candidates
+/// which could become connected to the chain in the future or which could even overwrite the
+/// existing chain.
+#[cfg_attr(test, derive(Clone))]
 pub(crate) struct FragmentChain {
+	// The current scope, which dictates the on-chain operating constraints that all future
+	// candidates must adhere to.
 	scope: Scope,
 
-	chain: Vec<FragmentNode>,
-
-	candidates: HashSet<CandidateHash>,
+	// The current best chain of backable candidates. It only contains candidates which build on
+	// top of each other and which have reached the backing quorum. In the presence of potential
+	// forks, this chain will pick a fork according to the `fork_selection_rule`.
+	best_chain: BackedChain,
 
-	// Index from head data hash to candidate hashes with that head data as a parent.
-	by_parent_head: HashMap<Hash, CandidateHash>,
-	// Index from head data hash to candidate hashes outputting that head data.
-	by_output_head: HashMap<Hash, CandidateHash>,
+	// The potential candidate storage. Contains candidates which are not yet part of the `chain`
+	// but may become so in the future. These can form any tree shape as well as contain any
+	// unconnected candidates for which we don't know the parent.
+	unconnected: CandidateStorage,
 }
 
 impl FragmentChain {
-	/// Create a new [`FragmentChain`] with given scope and populated from the storage.
- pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { - gum::trace!( - target: LOG_TARGET, - relay_parent = ?scope.relay_parent.hash, - relay_parent_num = scope.relay_parent.number, - para_id = ?scope.para, - ancestors = scope.ancestors.len(), - "Instantiating Fragment Chain", - ); - + /// Create a new [`FragmentChain`] with the given scope and populate it with the candidates + /// pending availability. + pub fn init(scope: Scope, mut candidates_pending_availability: CandidateStorage) -> Self { let mut fragment_chain = Self { scope, - chain: Vec::new(), - candidates: HashSet::new(), - by_parent_head: HashMap::new(), - by_output_head: HashMap::new(), + best_chain: BackedChain::default(), + unconnected: CandidateStorage::default(), }; - fragment_chain.populate_chain(storage); + // We only need to populate the best backable chain. Candidates pending availability must + // form a chain with the latest included head. + fragment_chain.populate_chain(&mut candidates_pending_availability); fragment_chain } - /// Get the scope of the Fragment Chain. + /// Populate the [`FragmentChain`] given the new candidates pending availability and the + /// optional previous fragment chain (of the previous relay parent). + pub fn populate_from_previous(&mut self, prev_fragment_chain: &FragmentChain) { + let mut prev_storage = prev_fragment_chain.unconnected.clone(); + + for candidate in prev_fragment_chain.best_chain.chain.iter() { + // If they used to be pending availability, don't add them. This is fine + // because: + // - if they still are pending availability, they have already been added to the new + // storage. + // - if they were included, no point in keeping them. + // + // This cannot happen for the candidates in the unconnected storage. The pending + // availability candidates will always be part of the best chain. + if prev_fragment_chain + .scope + .get_pending_availability(&candidate.candidate_hash) + .is_none() + { + let _ = prev_storage.add_candidate_entry(candidate.into()); + } + } + + // First populate the best backable chain. + self.populate_chain(&mut prev_storage); + + // Now that we picked the best backable chain, trim the forks generated by candidates which + // are not present in the best chain. + self.trim_uneligible_forks(&mut prev_storage, None); + + // Finally, keep any candidates which haven't been trimmed but still have potential. + self.populate_unconnected_potential_candidates(prev_storage); + } + + /// Get the scope of the [`FragmentChain`]. pub fn scope(&self) -> &Scope { &self.scope } - /// Returns the number of candidates in the chain - pub(crate) fn len(&self) -> usize { - self.candidates.len() + /// Returns the number of candidates in the best backable chain. + pub fn best_chain_len(&self) -> usize { + self.best_chain.chain.len() } - /// Whether the candidate exists. - pub(crate) fn contains_candidate(&self, candidate: &CandidateHash) -> bool { - self.candidates.contains(candidate) + /// Returns the number of candidates in unconnected potential storage. + pub fn unconnected_len(&self) -> usize { + self.unconnected.len() + } + + /// Whether the candidate exists as part of the unconnected potential candidates. + pub fn contains_unconnected_candidate(&self, candidate: &CandidateHash) -> bool { + self.unconnected.contains(candidate) } /// Return a vector of the chain's candidate hashes, in-order. 
-	pub(crate) fn to_vec(&self) -> Vec<CandidateHash> {
-		self.chain.iter().map(|candidate| candidate.candidate_hash).collect()
+	pub fn best_chain_vec(&self) -> Vec<CandidateHash> {
+		self.best_chain.chain.iter().map(|candidate| candidate.candidate_hash).collect()
 	}
 
-	/// Try accumulating more candidates onto the chain.
-	///
-	/// Candidates can only be added if they build on the already existing chain.
-	pub(crate) fn extend_from_storage(&mut self, storage: &CandidateStorage) {
-		self.populate_chain(storage);
+	/// Return a vector of the unconnected potential candidate hashes, in arbitrary order.
+	pub fn unconnected(&self) -> impl Iterator<Item = &CandidateEntry> {
+		self.unconnected.candidates()
 	}
 
-	/// Returns the hypothetical state of a candidate with the given hash and parent head data
-	/// in regards to the existing chain.
-	///
-	/// Returns true if either:
-	/// - the candidate is already present
-	/// - the candidate can be added to the chain
-	/// - the candidate could potentially be added to the chain in the future (its ancestors are
-	///   still unknown but it doesn't violate other rules).
-	///
-	/// If this returns false, the candidate could never be added to the current chain (not now, not
-	/// ever)
-	pub(crate) fn hypothetical_membership(
+	/// Return whether this candidate is backed in this chain or the unconnected storage.
+	pub fn is_candidate_backed(&self, hash: &CandidateHash) -> bool {
+		self.best_chain.candidates.contains(hash) ||
+			matches!(
+				self.unconnected.by_candidate_hash.get(hash),
+				Some(candidate) if candidate.state == CandidateState::Backed
+			)
+	}
+
+	/// Mark a candidate as backed. This can trigger a recreation of the best backable chain.
+	pub fn candidate_backed(&mut self, newly_backed_candidate: &CandidateHash) {
+		// Already backed.
+		if self.best_chain.candidates.contains(newly_backed_candidate) {
+			return
+		}
+		let Some(parent_head_hash) = self
+			.unconnected
+			.by_candidate_hash
+			.get(newly_backed_candidate)
+			.map(|entry| entry.parent_head_data_hash)
+		else {
+			// Candidate is not in unconnected storage.
+			return
+		};
+
+		// Mark the candidate hash.
+		self.unconnected.mark_backed(newly_backed_candidate);
+
+		// Revert to parent_head_hash
+		if !self.revert_to(&parent_head_hash) {
+			// If nothing was reverted, there is nothing we can do for now.
+			return
+		}
+
+		let mut prev_storage = std::mem::take(&mut self.unconnected);
+
+		// Populate the chain.
+		self.populate_chain(&mut prev_storage);
+
+		// Now that we picked the best backable chain, trim the forks generated by candidates
+		// which are not present in the best chain. We can start trimming from this candidate
+		// onwards.
+		self.trim_uneligible_forks(&mut prev_storage, Some(parent_head_hash));
+
+		// Finally, keep any candidates which haven't been trimmed but still have potential.
+		self.populate_unconnected_potential_candidates(prev_storage);
+	}
+
+	/// Checks if this candidate could be added in the future to this chain.
+	/// This will return `Error::CandidateAlreadyKnown` if the candidate is already in the chain or
+	/// the unconnected candidate storage.
+ pub fn can_add_candidate_as_potential( &self, - candidate: HypotheticalCandidate, - candidate_storage: &CandidateStorage, - ) -> bool { + candidate: &impl HypotheticalOrConcreteCandidate, + ) -> Result<(), Error> { let candidate_hash = candidate.candidate_hash(); - // If we've already used this candidate in the chain - if self.candidates.contains(&candidate_hash) { - return true + if self.best_chain.contains(&candidate_hash) || self.unconnected.contains(&candidate_hash) { + return Err(Error::CandidateAlreadyKnown) } - let can_add_as_potential = self.can_add_candidate_as_potential( - candidate_storage, - &candidate.candidate_hash(), - &candidate.relay_parent(), - candidate.parent_head_data_hash(), - candidate.output_head_data_hash(), - ); + self.check_potential(candidate) + } - if can_add_as_potential == PotentialAddition::None { - return false + /// Try adding a seconded candidate, if the candidate has potential. It will never be added to + /// the chain directly in the seconded state, it will only be part of the unconnected storage. + pub fn try_adding_seconded_candidate( + &mut self, + candidate: &CandidateEntry, + ) -> Result<(), Error> { + if candidate.state == CandidateState::Backed { + return Err(Error::IntroduceBackedCandidate); } - let Some(candidate_relay_parent) = self.scope.ancestor(&candidate.relay_parent()) else { - // can_add_candidate_as_potential already checked for this, but just to be safe. - return false - }; + self.can_add_candidate_as_potential(candidate)?; - let identity_modifications = ConstraintModifications::identity(); - let cumulative_modifications = if let Some(last_candidate) = self.chain.last() { - &last_candidate.cumulative_modifications - } else { - &identity_modifications - }; + // This clone is cheap, as it uses an Arc for the expensive stuff. + // We can't consume the candidate because other fragment chains may use it also. + self.unconnected.add_candidate_entry(candidate.clone())?; - let child_constraints = - match self.scope.base_constraints.apply_modifications(&cumulative_modifications) { - Err(e) => { - gum::debug!( - target: LOG_TARGET, - new_parent_head = ?cumulative_modifications.required_parent, - ?candidate_hash, - err = ?e, - "Failed to apply modifications", - ); - - return false - }, - Ok(c) => c, - }; + Ok(()) + } - let parent_head_hash = candidate.parent_head_data_hash(); - if parent_head_hash == child_constraints.required_parent.hash() { - // We do additional checks for complete candidates. - if let HypotheticalCandidate::Complete { - ref receipt, - ref persisted_validation_data, - .. - } = candidate - { - if Fragment::check_against_constraints( - &candidate_relay_parent, - &child_constraints, - &receipt.commitments, - &receipt.descriptor().validation_code_hash, - persisted_validation_data, - ) - .is_err() - { - gum::debug!( - target: LOG_TARGET, - "Fragment::check_against_constraints() returned error", - ); - return false - } - } + /// Try getting the full head data associated with this hash. + pub fn get_head_data_by_hash(&self, head_data_hash: &Hash) -> Option { + // First, see if this is the head data of the latest included candidate. + let required_parent = &self.scope.base_constraints().required_parent; + if &required_parent.hash() == head_data_hash { + return Some(required_parent.clone()) + } - // If we got this far, it can be added to the chain right now. 
-			true
-		} else if can_add_as_potential == PotentialAddition::Anyhow {
-			// Otherwise it is or can be an unconnected candidate, but only if PotentialAddition
-			// does not force us to only add a connected candidate.
-			true
-		} else {
-			false
+		// Cheaply check if the head data is in the best backable chain.
+		let has_head_data_in_chain = self
+			.best_chain
+			.by_parent_head
+			.get(head_data_hash)
+			.or_else(|| self.best_chain.by_output_head.get(head_data_hash))
+			.is_some();
+
+		if has_head_data_in_chain {
+			return self.best_chain.chain.iter().find_map(|candidate| {
+				if &candidate.parent_head_data_hash == head_data_hash {
+					Some(
+						candidate
+							.fragment
+							.candidate()
+							.persisted_validation_data
+							.parent_head
+							.clone(),
+					)
+				} else if &candidate.output_head_data_hash == head_data_hash {
+					Some(candidate.fragment.candidate().commitments.head_data.clone())
+				} else {
+					None
+				}
+			});
 		}
+
+		// Lastly, try getting the head data from the unconnected candidates.
+		self.unconnected.head_data_by_hash(head_data_hash).cloned()
 	}
 
-	/// Select `count` candidates after the given `ancestors` which pass
-	/// the predicate and have not already been backed on chain.
+	/// Select `count` candidates after the given `ancestors` which can be backed on chain next.
 	///
 	/// The intention of the `ancestors` is to allow queries on the basis of
 	/// one or more candidates which were previously pending availability becoming
 	/// available or candidates timing out.
-	pub(crate) fn find_backable_chain(
+	pub fn find_backable_chain(
 		&self,
 		ancestors: Ancestors,
 		count: u32,
-		pred: impl Fn(&CandidateHash) -> bool,
-	) -> Vec<CandidateHash> {
+	) -> Vec<(CandidateHash, Hash)> {
 		if count == 0 {
 			return vec![]
 		}
 		let base_pos = self.find_ancestor_path(ancestors);
 
-		let actual_end_index = std::cmp::min(base_pos + (count as usize), self.chain.len());
+		let actual_end_index =
+			std::cmp::min(base_pos + (count as usize), self.best_chain.chain.len());
 		let mut res = Vec::with_capacity(actual_end_index - base_pos);
 
-		for elem in &self.chain[base_pos..actual_end_index] {
-			if self.scope.get_pending_availability(&elem.candidate_hash).is_none() &&
-				pred(&elem.candidate_hash)
-			{
-				res.push(elem.candidate_hash);
+		for elem in &self.best_chain.chain[base_pos..actual_end_index] {
+			// Only supply candidates which are not yet pending availability. `ancestors` should
+			// have already contained them, but check just in case.
+			if self.scope.get_pending_availability(&elem.candidate_hash).is_none() {
+				res.push((elem.candidate_hash, elem.relay_parent()));
 			} else {
 				break
 			}
@@ -679,11 +913,11 @@ impl FragmentChain {
 	// Stops when the ancestors are all used or when a node in the chain is not present in the
 	// ancestor set. Returns the index in the chain where the search stopped.
 	fn find_ancestor_path(&self, mut ancestors: Ancestors) -> usize {
-		if self.chain.is_empty() {
+		if self.best_chain.chain.is_empty() {
 			return 0;
 		}
 
-		for (index, candidate) in self.chain.iter().enumerate() {
+		for (index, candidate) in self.best_chain.chain.iter().enumerate() {
 			if !ancestors.remove(&candidate.candidate_hash) {
 				return index
 			}
@@ -691,16 +925,16 @@ impl FragmentChain {
 
 		// This means that we found the entire chain in the ancestor set. There won't be anything
 		// left to back.
-		self.chain.len()
+		self.best_chain.chain.len()
	}
 
-	// Return the earliest relay parent a new candidate can have in order to be added to the chain.
-	// This is the relay parent of the last candidate in the chain.
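To make the ancestor walk above concrete, here is a minimal runnable sketch of `find_ancestor_path`, with plain integers standing in for `CandidateHash` and a `HashSet` standing in for `Ancestors` (illustrative types only):

```rust
use std::collections::HashSet;

// Walk the best chain, consuming the ancestor set, and return the index at which
// backing can resume. Mirrors `find_ancestor_path` with u32 stand-in "hashes".
fn find_ancestor_path(chain: &[u32], mut ancestors: HashSet<u32>) -> usize {
    for (index, candidate) in chain.iter().enumerate() {
        if !ancestors.remove(candidate) {
            return index;
        }
    }
    // The entire chain is in the ancestor set; nothing is left to back.
    chain.len()
}

fn main() {
    let chain = [10, 11, 12, 13];
    // Ancestors {10, 11} are consumed, so the next backable candidate is at index 2.
    assert_eq!(find_ancestor_path(&chain, HashSet::from([10, 11])), 2);
    // An ancestor set that skips the chain's first candidate stops the walk immediately.
    assert_eq!(find_ancestor_path(&chain, HashSet::from([11])), 0);
    // An empty ancestor set means backing starts from the beginning of the chain.
    assert_eq!(find_ancestor_path(&chain, HashSet::new()), 0);
}
```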
+ // Return the earliest relay parent a new candidate can have in order to be added to the chain + // right now. This is the relay parent of the last candidate in the chain. // The value returned may not be valid if we want to add a candidate pending availability, which // may have a relay parent which is out of scope. Special handling is needed in that case. // `None` is returned if the candidate's relay parent info cannot be found. fn earliest_relay_parent(&self) -> Option { - if let Some(last_candidate) = self.chain.last() { + if let Some(last_candidate) = self.best_chain.chain.last() { self.scope.ancestor(&last_candidate.relay_parent()).or_else(|| { // if the relay-parent is out of scope _and_ it is in the chain, // it must be a candidate pending availability. @@ -713,152 +947,239 @@ impl FragmentChain { } } - // Checks if this candidate could be added in the future to this chain. - // This assumes that the chain does not already contain this candidate. It may or may not be - // present in the `CandidateStorage`. - // Even if the candidate is a potential candidate, this function will indicate that it can be - // kept only if there's enough room for it. - pub(crate) fn can_add_candidate_as_potential( - &self, - storage: &CandidateStorage, - candidate_hash: &CandidateHash, - relay_parent: &Hash, - parent_head_hash: Hash, - output_head_hash: Option, - ) -> PotentialAddition { - // If we've got enough candidates for the configured depth, no point in adding more. - if self.chain.len() > self.scope.max_depth { - return PotentialAddition::None - } + // Return the earliest relay parent a potential candidate may have for it to ever be added to + // the chain. This is the relay parent of the last candidate pending availability or the + // earliest relay parent in scope. + fn earliest_relay_parent_pending_availability(&self) -> RelayChainBlockInfo { + self.best_chain + .chain + .iter() + .rev() + .find_map(|candidate| { + self.scope + .get_pending_availability(&candidate.candidate_hash) + .map(|c| c.relay_parent.clone()) + }) + .unwrap_or_else(|| self.scope.earliest_relay_parent()) + } - if !self.check_potential(relay_parent, parent_head_hash, output_head_hash) { - return PotentialAddition::None - } + // Populate the unconnected potential candidate storage starting from a previous storage. + fn populate_unconnected_potential_candidates(&mut self, old_storage: CandidateStorage) { + for candidate in old_storage.by_candidate_hash.into_values() { + // Sanity check, all pending availability candidates should be already present in the + // chain. + if self.scope.get_pending_availability(&candidate.candidate_hash).is_some() { + continue + } - let present_in_storage = storage.contains(candidate_hash); + match self.can_add_candidate_as_potential(&candidate) { + Ok(()) => { + let _ = self.unconnected.add_candidate_entry(candidate); + }, + // Swallow these errors as they can legitimately happen when pruning stale + // candidates. + Err(_) => {}, + }; + } + } - let unconnected = self - .find_unconnected_potential_candidates( - storage, - present_in_storage.then_some(candidate_hash), - ) - .len(); + // Check whether a candidate outputting this head data would introduce a cycle or multiple paths + // to the same state. Trivial 0-length cycles are checked in `CandidateEntry::new`. + fn check_cycles_or_invalid_tree(&self, output_head_hash: &Hash) -> Result<(), Error> { + // this should catch a cycle where this candidate would point back to the parent of some + // candidate in the chain. 
+ if self.best_chain.by_parent_head.contains_key(output_head_hash) { + return Err(Error::Cycle) + } - if (self.chain.len() + unconnected) < self.scope.max_depth { - PotentialAddition::Anyhow - } else if (self.chain.len() + unconnected) == self.scope.max_depth { - // If we've only one slot left to fill, it must be filled with a connected candidate. - PotentialAddition::IfConnected - } else { - PotentialAddition::None + // multiple paths to the same state, which can't happen for a chain. + if self.best_chain.by_output_head.contains_key(output_head_hash) { + return Err(Error::MultiplePaths) } + + Ok(()) } - // The candidates which are present in `CandidateStorage`, are not part of this chain but could - // become part of this chain in the future. Capped at the max depth minus the existing chain - // length. - // If `ignore_candidate` is supplied and found in storage, it won't be counted. - pub(crate) fn find_unconnected_potential_candidates( + // Checks the potential of a candidate to be added to the chain now or in the future. + // It works both with concrete candidates for which we have the full PVD and committed receipt, + // but also does some more basic checks for incomplete candidates (before even fetching them). + fn check_potential( &self, - storage: &CandidateStorage, - ignore_candidate: Option<&CandidateHash>, - ) -> Vec { - let mut candidates = vec![]; - for candidate in storage.candidates() { - if let Some(ignore_candidate) = ignore_candidate { - if ignore_candidate == &candidate.candidate_hash { - continue - } - } - // We stop at max_depth + 1 with the search. There's no point in looping further. - if (self.chain.len() + candidates.len()) > self.scope.max_depth { - break - } - if !self.candidates.contains(&candidate.candidate_hash) && - self.check_potential( - &candidate.relay_parent, - candidate.candidate.persisted_validation_data.parent_head.hash(), - Some(candidate.candidate.commitments.head_data.hash()), - ) { - candidates.push(candidate.candidate_hash); + candidate: &impl HypotheticalOrConcreteCandidate, + ) -> Result<(), Error> { + let relay_parent = candidate.relay_parent(); + let parent_head_hash = candidate.parent_head_data_hash(); + + // trivial 0-length cycle. + if let Some(output_head_hash) = candidate.output_head_data_hash() { + if parent_head_hash == output_head_hash { + return Err(Error::ZeroLengthCycle) } } - candidates - } + // Check if the relay parent is in scope. + let Some(relay_parent) = self.scope.ancestor(&relay_parent) else { + return Err(Error::RelayParentNotInScope( + relay_parent, + self.scope.earliest_relay_parent().hash, + )) + }; - // Check if adding a candidate which transitions `parent_head_hash` to `output_head_hash` would - // introduce a fork or a cycle in the parachain. - // `output_head_hash` is optional because we sometimes make this check before retrieving the - // collation. - fn is_fork_or_cycle(&self, parent_head_hash: Hash, output_head_hash: Option) -> bool { - if self.by_parent_head.contains_key(&parent_head_hash) { - // fork. our parent has another child already - return true + // Check if the relay parent moved backwards from the latest candidate pending availability. 
+ let earliest_rp_of_pending_availability = self.earliest_relay_parent_pending_availability(); + if relay_parent.number < earliest_rp_of_pending_availability.number { + return Err(Error::RelayParentPrecedesCandidatePendingAvailability( + relay_parent.hash, + earliest_rp_of_pending_availability.hash, + )) } - if let Some(output_head_hash) = output_head_hash { - if self.by_output_head.contains_key(&output_head_hash) { - // this is not a chain, there are multiple paths to the same state. - return true + // If it's a fork with a backed candidate in the current chain. + if let Some(other_candidate) = self.best_chain.by_parent_head.get(&parent_head_hash) { + if self.scope().get_pending_availability(other_candidate).is_some() { + // Cannot accept a fork with a candidate pending availability. + return Err(Error::ForkWithCandidatePendingAvailability(*other_candidate)) } - // trivial 0-length cycle. - if parent_head_hash == output_head_hash { - return true - } - - // this should catch any other cycles. our output state cannot already be the parent - // state of another candidate, unless this is a cycle, since the already added - // candidates form a chain. - if self.by_parent_head.contains_key(&output_head_hash) { - return true + // If the candidate is backed and in the current chain, accept only a candidate + // according to the fork selection rule. + if fork_selection_rule(other_candidate, &candidate.candidate_hash()) == Ordering::Less { + return Err(Error::ForkChoiceRule(*other_candidate)) } } - false - } + // Try seeing if the parent candidate is in the current chain or if it is the latest + // included candidate. If so, get the constraints the candidate must satisfy. + let (constraints, maybe_min_relay_parent_number) = + if let Some(parent_candidate) = self.best_chain.by_output_head.get(&parent_head_hash) { + let Some(parent_candidate) = + self.best_chain.chain.iter().find(|c| &c.candidate_hash == parent_candidate) + else { + // Should never really happen. + return Err(Error::ParentCandidateNotFound) + }; - // Checks the potential of a candidate to be added to the chain in the future. - // Verifies that the relay parent is in scope and not moving backwards and that we're not - // introducing forks or cycles with other candidates in the chain. - // `output_head_hash` is optional because we sometimes make this check before retrieving the - // collation. - fn check_potential( - &self, - relay_parent: &Hash, - parent_head_hash: Hash, - output_head_hash: Option, - ) -> bool { - if self.is_fork_or_cycle(parent_head_hash, output_head_hash) { - return false + ( + self.scope + .base_constraints + .apply_modifications(&parent_candidate.cumulative_modifications) + .map_err(Error::ComputeConstraints)?, + self.scope.ancestor(&parent_candidate.relay_parent()).map(|rp| rp.number), + ) + } else if self.scope.base_constraints.required_parent.hash() == parent_head_hash { + // It builds on the latest included candidate. + (self.scope.base_constraints.clone(), None) + } else { + // If the parent is not yet part of the chain, there's nothing else we can check for + // now. + return Ok(()) + }; + + // Check for cycles or invalid tree transitions. + if let Some(ref output_head_hash) = candidate.output_head_data_hash() { + self.check_cycles_or_invalid_tree(output_head_hash)?; } - let Some(earliest_rp) = self.earliest_relay_parent() else { return false }; + // Check against constraints if we have a full concrete candidate. 
+ if let (Some(commitments), Some(pvd), Some(validation_code_hash)) = ( + candidate.commitments(), + candidate.persisted_validation_data(), + candidate.validation_code_hash(), + ) { + Fragment::check_against_constraints( + &relay_parent, + &constraints, + commitments, + validation_code_hash, + pvd, + ) + .map_err(Error::CheckAgainstConstraints)?; + } - let Some(relay_parent) = self.scope.ancestor(relay_parent) else { return false }; + if relay_parent.number < constraints.min_relay_parent_number { + return Err(Error::RelayParentMovedBackwards) + } - if relay_parent.number < earliest_rp.number { - return false // relay parent moved backwards. + if let Some(earliest_rp) = maybe_min_relay_parent_number { + if relay_parent.number < earliest_rp { + return Err(Error::RelayParentMovedBackwards) + } } - true + Ok(()) } - // Populate the fragment chain with candidates from CandidateStorage. - // Can be called by the constructor or when introducing a new candidate. - // If we're introducing a new candidate onto an existing chain, we may introduce more than one, - // since we may connect already existing candidates to the chain. - fn populate_chain(&mut self, storage: &CandidateStorage) { - let mut cumulative_modifications = if let Some(last_candidate) = self.chain.last() { - last_candidate.cumulative_modifications.clone() + // Once the backable chain was populated, trim the forks generated by candidates which + // are not present in the best chain. Fan this out into a full breadth-first search. + // If `starting_point` is `Some()`, start the search from the candidates having this parent head + // hash. + fn trim_uneligible_forks(&self, storage: &mut CandidateStorage, starting_point: Option) { + // Start out with the candidates in the chain. They are all valid candidates. + let mut queue: VecDeque<_> = if let Some(starting_point) = starting_point { + [(starting_point, true)].into_iter().collect() } else { - ConstraintModifications::identity() + if self.best_chain.chain.is_empty() { + [(self.scope.base_constraints.required_parent.hash(), true)] + .into_iter() + .collect() + } else { + self.best_chain.chain.iter().map(|c| (c.parent_head_data_hash, true)).collect() + } }; + // To make sure that cycles don't make us loop forever, keep track of the visited parent + // heads. + let mut visited = HashSet::new(); + + while let Some((parent, parent_has_potential)) = queue.pop_front() { + visited.insert(parent); + + let Some(children) = storage.by_parent_head.get(&parent) else { continue }; + // Cannot remove while iterating so store them here temporarily. + let mut to_remove = vec![]; + + for child_hash in children.iter() { + let Some(child) = storage.by_candidate_hash.get(child_hash) else { continue }; + + // Already visited this parent. Either is a cycle or multiple paths that lead to the + // same candidate. Either way, stop this branch to avoid looping forever. + if visited.contains(&child.output_head_data_hash) { + continue + } + + // Only keep a candidate if its full ancestry was already kept as potential and this + // candidate itself has potential. + if parent_has_potential && self.check_potential(child).is_ok() { + queue.push_back((child.output_head_data_hash, true)); + } else { + // Otherwise, remove this candidate and continue looping for its children, but + // mark the parent's potential as `false`. We only want to remove its + // children. 
+ to_remove.push(*child_hash); + queue.push_back((child.output_head_data_hash, false)); + } + } + + for hash in to_remove { + storage.remove_candidate(&hash); + } + } + } + + // Populate the fragment chain with candidates from the supplied `CandidateStorage`. + // Can be called by the constructor or when backing a new candidate. + // When this is called, it may cause the previous chain to be completely erased or it may add + // more than one candidate. + fn populate_chain(&mut self, storage: &mut CandidateStorage) { + let mut cumulative_modifications = + if let Some(last_candidate) = self.best_chain.chain.last() { + last_candidate.cumulative_modifications.clone() + } else { + ConstraintModifications::identity() + }; let Some(mut earliest_rp) = self.earliest_relay_parent() else { return }; loop { - if self.chain.len() > self.scope.max_depth { + if self.best_chain.chain.len() > self.scope.max_depth { break; } @@ -878,113 +1199,157 @@ impl FragmentChain { }; let required_head_hash = child_constraints.required_parent.hash(); - // Even though we don't allow parachain forks under the same active leaf, they may still - // appear under different relay chain forks, hence the iterator below. - let possible_children = storage.possible_para_children(&required_head_hash); - let mut added_child = false; - for candidate in possible_children { - // Add one node to chain if - // 1. it does not introduce a fork or a cycle. - // 2. parent hash is correct. - // 3. relay-parent does not move backwards. - // 4. all non-pending-availability candidates have relay-parent in scope. - // 5. candidate outputs fulfill constraints - - if self.is_fork_or_cycle( - candidate.parent_head_data_hash(), - Some(candidate.output_head_data_hash()), - ) { - continue - } - let pending = self.scope.get_pending_availability(&candidate.candidate_hash); - let Some(relay_parent) = pending - .map(|p| p.relay_parent.clone()) - .or_else(|| self.scope.ancestor(&candidate.relay_parent)) - else { - continue - }; - - // require: candidates don't move backwards - // and only pending availability candidates can be out-of-scope. - // - // earliest_rp can be before the earliest relay parent in the scope - // when the parent is a pending availability candidate as well, but - // only other pending candidates can have a relay parent out of scope. - let min_relay_parent_number = pending - .map(|p| match self.chain.len() { - 0 => p.relay_parent.number, - _ => earliest_rp.number, - }) - .unwrap_or_else(|| earliest_rp.number); - - if relay_parent.number < min_relay_parent_number { - continue // relay parent moved backwards. - } + // Select the few possible backed/backable children which can be added to the chain + // right now. + let possible_children = storage + .possible_backed_para_children(&required_head_hash) + .filter_map(|candidate| { + // Only select a candidate if: + // 1. it does not introduce a fork or a cycle. + // 2. parent hash is correct. + // 3. relay-parent does not move backwards. + // 4. all non-pending-availability candidates have relay-parent in scope. + // 5. candidate outputs fulfill constraints + + let pending = self.scope.get_pending_availability(&candidate.candidate_hash); + let Some(relay_parent) = pending + .map(|p| p.relay_parent.clone()) + .or_else(|| self.scope.ancestor(&candidate.relay_parent)) + else { + return None + }; + + if self.check_cycles_or_invalid_tree(&candidate.output_head_data_hash).is_err() + { + return None + } - // don't add candidates if they're already present in the chain. 
- // this can never happen, as candidates can only be duplicated if there's a cycle - // and we shouldn't have allowed for a cycle to be chained. - if self.contains_candidate(&candidate.candidate_hash) { - continue - } + // require: candidates don't move backwards + // and only pending availability candidates can be out-of-scope. + // + // earliest_rp can be before the earliest relay parent in the scope + // when the parent is a pending availability candidate as well, but + // only other pending candidates can have a relay parent out of scope. + let min_relay_parent_number = pending + .map(|p| match self.best_chain.chain.len() { + 0 => p.relay_parent.number, + _ => earliest_rp.number, + }) + .unwrap_or_else(|| earliest_rp.number); + + if relay_parent.number < min_relay_parent_number { + return None // relay parent moved backwards. + } - let fragment = { - let mut constraints = child_constraints.clone(); - if let Some(ref p) = pending { - // overwrite for candidates pending availability as a special-case. - constraints.min_relay_parent_number = p.relay_parent.number; + // don't add candidates if they're already present in the chain. + // this can never happen, as candidates can only be duplicated if there's a + // cycle and we shouldn't have allowed for a cycle to be chained. + if self.best_chain.contains(&candidate.candidate_hash) { + return None } - let f = Fragment::new( - relay_parent.clone(), - constraints, - // It's cheap to clone because it's wrapped in an Arc - candidate.candidate.clone(), - ); - - match f { - Ok(f) => f, - Err(e) => { - gum::debug!( - target: LOG_TARGET, - err = ?e, - ?relay_parent, - candidate_hash = ?candidate.candidate_hash, - "Failed to instantiate fragment", - ); - - break - }, + let fragment = { + let mut constraints = child_constraints.clone(); + if let Some(ref p) = pending { + // overwrite for candidates pending availability as a special-case. + constraints.min_relay_parent_number = p.relay_parent.number; + } + + let f = Fragment::new( + relay_parent.clone(), + constraints, + // It's cheap to clone because it's wrapped in an Arc + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + err = ?e, + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", + ); + + return None + }, + } + }; + + Some(( + fragment, + candidate.candidate_hash, + candidate.output_head_data_hash, + candidate.parent_head_data_hash, + )) + }); + + // Choose the best candidate. + let best_candidate = + possible_children.min_by(|(_, ref child1, _, _), (_, ref child2, _, _)| { + // Always pick a candidate pending availability as best. + if self.scope.get_pending_availability(child1).is_some() { + Ordering::Less + } else if self.scope.get_pending_availability(child2).is_some() { + Ordering::Greater + } else { + // Otherwise, use the fork selection rule. + fork_selection_rule(child1, child2) } - }; + }); + + if let Some((fragment, candidate_hash, output_head_data_hash, parent_head_data_hash)) = + best_candidate + { + // Remove the candidate from storage. + storage.remove_candidate(&candidate_hash); // Update the cumulative constraint modifications. 
cumulative_modifications.stack(fragment.constraint_modifications()); // Update the earliest rp - earliest_rp = relay_parent; + earliest_rp = fragment.relay_parent().clone(); let node = FragmentNode { fragment, - candidate_hash: candidate.candidate_hash, + candidate_hash, + parent_head_data_hash, + output_head_data_hash, cumulative_modifications: cumulative_modifications.clone(), }; - self.chain.push(node); - self.candidates.insert(candidate.candidate_hash); - // We've already checked for forks and cycles. - self.by_parent_head - .insert(candidate.parent_head_data_hash(), candidate.candidate_hash); - self.by_output_head - .insert(candidate.output_head_data_hash(), candidate.candidate_hash); - added_child = true; - // We can only add one child for a candidate. (it's a chain, not a tree) - break; - } - - if !added_child { + // Add the candidate to the chain now. + self.best_chain.push(node); + } else { break } } } + + // Revert the best backable chain so that the last candidate will be one outputting the given + // `parent_head_hash`. If the `parent_head_hash` is exactly the required parent of the base + // constraints (builds on the latest included candidate), revert the entire chain. + // Return false if we couldn't find the parent head hash. + fn revert_to(&mut self, parent_head_hash: &Hash) -> bool { + let mut removed_items = None; + if &self.scope.base_constraints.required_parent.hash() == parent_head_hash { + removed_items = Some(self.best_chain.clear()); + } + + if removed_items.is_none() && self.best_chain.by_output_head.contains_key(parent_head_hash) + { + removed_items = Some(self.best_chain.revert_to_parent_hash(parent_head_hash).collect()); + } + + let Some(removed_items) = removed_items else { return false }; + + // Even if it's empty, we need to return true, because we'll be able to add a new candidate + // to the chain. 
+ for node in &removed_items { + let _ = self.unconnected.add_candidate_entry(node.into()); + } + true + } } diff --git a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs index 26ee94d59d8e..9886d19e5224 100644 --- a/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/fragment_chain/tests.rs @@ -17,8 +17,12 @@ use super::*; use assert_matches::assert_matches; use polkadot_node_subsystem_util::inclusion_emulator::InboundHrmpLimitations; -use polkadot_primitives::{BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData}; +use polkadot_primitives::{ + BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData, Id as ParaId, +}; use polkadot_primitives_test_helpers as test_helpers; +use rand::{seq::SliceRandom, thread_rng}; +use std::ops::Range; fn make_constraints( min_relay_parent_number: BlockNumber, @@ -54,7 +58,7 @@ fn make_committed_candidate( let persisted_validation_data = PersistedValidationData { parent_head, relay_parent_number, - relay_parent_storage_root: Hash::repeat_byte(69), + relay_parent_storage_root: Hash::zero(), max_pov_size: 1_000_000, }; @@ -83,9 +87,20 @@ fn make_committed_candidate( (persisted_validation_data, candidate) } +fn populate_chain_from_previous_storage( + scope: &Scope, + storage: &CandidateStorage, +) -> FragmentChain { + let mut chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + let mut prev_chain = chain.clone(); + prev_chain.unconnected = storage.clone(); + + chain.populate_from_previous(&prev_chain); + chain +} + #[test] fn scope_rejects_ancestors_that_skip_blocks() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 10, hash: Hash::repeat_byte(10), @@ -104,7 +119,6 @@ fn scope_rejects_ancestors_that_skip_blocks() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -117,7 +131,6 @@ fn scope_rejects_ancestors_that_skip_blocks() { #[test] fn scope_rejects_ancestor_for_0_block() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 0, hash: Hash::repeat_byte(0), @@ -136,7 +149,6 @@ fn scope_rejects_ancestor_for_0_block() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -149,7 +161,6 @@ fn scope_rejects_ancestor_for_0_block() { #[test] fn scope_only_takes_ancestors_up_to_min() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 5, hash: Hash::repeat_byte(0), @@ -179,7 +190,6 @@ fn scope_only_takes_ancestors_up_to_min() { let pending_availability = Vec::new(); let scope = Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -194,7 +204,6 @@ fn scope_only_takes_ancestors_up_to_min() { #[test] fn scope_rejects_unordered_ancestors() { - let para_id = ParaId::from(5u32); let relay_parent = RelayChainBlockInfo { number: 5, hash: Hash::repeat_byte(0), @@ -225,7 +234,6 @@ fn scope_rejects_unordered_ancestors() { assert_matches!( Scope::with_ancestors( - para_id, relay_parent, base_constraints, pending_availability, @@ -257,718 +265,695 @@ fn candidate_storage_methods() { let mut wrong_pvd = pvd.clone(); wrong_pvd.max_pov_size = 0; assert_matches!( - storage.add_candidate(candidate.clone(), wrong_pvd, CandidateState::Seconded), - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) + 
CandidateEntry::new( + candidate_hash, + candidate.clone(), + wrong_pvd.clone(), + CandidateState::Seconded + ), + Err(CandidateEntryError::PersistedValidationDataMismatch) + ); + assert_matches!( + CandidateEntry::new_seconded(candidate_hash, candidate.clone(), wrong_pvd), + Err(CandidateEntryError::PersistedValidationDataMismatch) ); + // Zero-length cycle. + { + let mut candidate = candidate.clone(); + candidate.commitments.head_data = HeadData(vec![1; 10]); + let mut pvd = pvd.clone(); + pvd.parent_head = HeadData(vec![1; 10]); + candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + assert_matches!( + CandidateEntry::new_seconded(candidate_hash, candidate, pvd), + Err(CandidateEntryError::ZeroLengthCycle) + ); + } assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); - // Add a valid candidate - storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) - .unwrap(); + // Add a valid candidate. + let candidate_entry = CandidateEntry::new( + candidate_hash, + candidate.clone(), + pvd.clone(), + CandidateState::Seconded, + ) + .unwrap(); + storage.add_candidate_entry(candidate_entry.clone()).unwrap(); assert!(storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 1); - assert_eq!(storage.possible_para_children(&candidate.descriptor.para_head).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), Some(relay_parent)); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); assert_eq!( storage.head_data_by_hash(&candidate.descriptor.para_head).unwrap(), &candidate.commitments.head_data ); assert_eq!(storage.head_data_by_hash(&parent_head_hash).unwrap(), &pvd.parent_head); - assert_eq!(storage.is_backed(&candidate_hash), false); + // Now mark it as backed + storage.mark_backed(&candidate_hash); + // Marking it twice is fine. storage.mark_backed(&candidate_hash); - assert_eq!(storage.is_backed(&candidate_hash), true); + assert_eq!( + storage + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); // Re-adding a candidate fails. assert_matches!( - storage.add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded), - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(hash)) if candidate_hash == hash + storage.add_candidate_entry(candidate_entry), + Err(Error::CandidateAlreadyKnown) ); // Remove candidate and re-add it later in backed state. storage.remove_candidate(&candidate_hash); assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); + + // Removing it twice is fine. 
+ storage.remove_candidate(&candidate_hash); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.possible_backed_para_children(&parent_head_hash).count(), 0); assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Backed) + .add_pending_availability_candidate(candidate_hash, candidate.clone(), pvd) .unwrap(); - assert_eq!(storage.is_backed(&candidate_hash), true); - - // Test retain - storage.retain(|_| true); assert!(storage.contains(&candidate_hash)); - storage.retain(|_| false); - assert!(!storage.contains(&candidate_hash)); - assert_eq!(storage.possible_para_children(&parent_head_hash).count(), 0); - assert_eq!(storage.relay_parent_of_candidate(&candidate_hash), None); - assert_eq!(storage.head_data_by_hash(&candidate.descriptor.para_head), None); - assert_eq!(storage.head_data_by_hash(&parent_head_hash), None); - assert_eq!(storage.is_backed(&candidate_hash), false); + + assert_eq!( + storage + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + assert_eq!(storage.possible_backed_para_children(&candidate.descriptor.para_head).count(), 0); + + // Now add a second candidate in Seconded state. This will be a fork. + let (pvd_2, candidate_2) = make_committed_candidate( + ParaId::from(5u32), + relay_parent, + 8, + vec![4, 5, 6].into(), + vec![2, 3, 4].into(), + 7, + ); + let candidate_hash_2 = candidate_2.hash(); + let candidate_entry_2 = + CandidateEntry::new_seconded(candidate_hash_2, candidate_2, pvd_2).unwrap(); + + storage.add_candidate_entry(candidate_entry_2).unwrap(); + assert_eq!( + storage + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + vec![candidate_hash] + ); + + // Now mark it as backed. + storage.mark_backed(&candidate_hash_2); + assert_eq!( + storage + .possible_backed_para_children(&parent_head_hash) + .map(|c| c.candidate_hash) + .collect::>(), + [candidate_hash, candidate_hash_2].into_iter().collect() + ); } #[test] -fn populate_and_extend_from_storage_empty() { +fn init_and_populate_from_empty() { // Empty chain and empty storage. 
-	let storage = CandidateStorage::default();
 	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
-	let pending_availability = Vec::new();
 
 	let scope = Scope::with_ancestors(
-		ParaId::from(2),
 		RelayChainBlockInfo {
 			number: 1,
 			hash: Hash::repeat_byte(1),
 			storage_root: Hash::repeat_byte(2),
 		},
 		base_constraints,
-		pending_availability,
+		Vec::new(),
 		4,
 		vec![],
 	)
 	.unwrap();
 
-	let mut chain = FragmentChain::populate(scope, &storage);
-	assert!(chain.to_vec().is_empty());
-
-	chain.extend_from_storage(&storage);
-	assert!(chain.to_vec().is_empty());
+	let chain = FragmentChain::init(scope.clone(), CandidateStorage::default());
+	assert_eq!(chain.best_chain_len(), 0);
+	assert_eq!(chain.unconnected_len(), 0);
+
+	let mut new_chain = FragmentChain::init(scope, CandidateStorage::default());
+	new_chain.populate_from_previous(&chain);
+	assert_eq!(new_chain.best_chain_len(), 0);
+	assert_eq!(new_chain.unconnected_len(), 0);
 }
 
 #[test]
-fn populate_and_extend_from_storage_with_existing_empty_to_vec() {
+fn test_populate_and_check_potential() {
 	let mut storage = CandidateStorage::default();
 	let para_id = ParaId::from(5u32);
 
-	let relay_parent_a = Hash::repeat_byte(1);
-	let relay_parent_b = Hash::repeat_byte(2);
-	let relay_parent_c = Hash::repeat_byte(3);
+	let relay_parent_x = Hash::repeat_byte(1);
+	let relay_parent_y = Hash::repeat_byte(2);
+	let relay_parent_z = Hash::repeat_byte(3);
+	let relay_parent_x_info =
+		RelayChainBlockInfo { number: 0, hash: relay_parent_x, storage_root: Hash::zero() };
+	let relay_parent_y_info =
+		RelayChainBlockInfo { number: 1, hash: relay_parent_y, storage_root: Hash::zero() };
+	let relay_parent_z_info =
+		RelayChainBlockInfo { number: 2, hash: relay_parent_z, storage_root: Hash::zero() };
+
+	let ancestors = vec![
+		// These need to be ordered in reverse.
+		relay_parent_y_info.clone(),
+		relay_parent_x_info.clone(),
+	];
+
+	let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+
+	// Candidates A -> B -> C.
They are all backed let (pvd_a, candidate_a) = make_committed_candidate( para_id, - relay_parent_a, - 0, + relay_parent_x_info.hash, + relay_parent_x_info.number, vec![0x0a].into(), vec![0x0b].into(), - 0, + relay_parent_x_info.number, ); let candidate_a_hash = candidate_a.hash(); - + let candidate_a_entry = + CandidateEntry::new(candidate_a_hash, candidate_a, pvd_a.clone(), CandidateState::Backed) + .unwrap(); + storage.add_candidate_entry(candidate_a_entry.clone()).unwrap(); let (pvd_b, candidate_b) = make_committed_candidate( para_id, - relay_parent_b, - 1, + relay_parent_y_info.hash, + relay_parent_y_info.number, vec![0x0b].into(), vec![0x0c].into(), - 1, + relay_parent_y_info.number, ); let candidate_b_hash = candidate_b.hash(); - + let candidate_b_entry = + CandidateEntry::new(candidate_b_hash, candidate_b, pvd_b, CandidateState::Backed).unwrap(); + storage.add_candidate_entry(candidate_b_entry.clone()).unwrap(); let (pvd_c, candidate_c) = make_committed_candidate( para_id, - relay_parent_c, - 2, + relay_parent_z_info.hash, + relay_parent_z_info.number, vec![0x0c].into(), vec![0x0d].into(), - 2, + relay_parent_z_info.number, ); let candidate_c_hash = candidate_c.hash(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_c.relay_parent_number, - hash: relay_parent_c, - storage_root: pvd_c.relay_parent_storage_root, - }; - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let ancestors = vec![ - // These need to be ordered in reverse. - relay_parent_b_info.clone(), - relay_parent_a_info.clone(), - ]; - - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Backed) - .unwrap(); - storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Backed) - .unwrap(); + let candidate_c_entry = + CandidateEntry::new(candidate_c_hash, candidate_c, pvd_c, CandidateState::Backed).unwrap(); + storage.add_candidate_entry(candidate_c_entry.clone()).unwrap(); // Candidate A doesn't adhere to the base constraints. { for wrong_constraints in [ // Different required parent - make_constraints(0, vec![0], vec![0x0e].into()), + make_constraints( + relay_parent_x_info.number, + vec![relay_parent_x_info.number], + vec![0x0e].into(), + ), // Min relay parent number is wrong - make_constraints(1, vec![0], vec![0x0a].into()), + make_constraints(relay_parent_y_info.number, vec![0], vec![0x0a].into()), ] { let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), wrong_constraints.clone(), - pending_availability.clone(), + vec![], 4, ancestors.clone(), ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); + let chain = populate_chain_from_previous_storage(&scope, &storage); - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); + assert!(chain.best_chain_vec().is_empty()); // If the min relay parent number is wrong, candidate A can never become valid. 
// Otherwise, if only the required parent doesn't match, candidate A is still a // potential candidate. - if wrong_constraints.min_relay_parent_number == 1 { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::None + if wrong_constraints.min_relay_parent_number == relay_parent_y_info.number { + // If A is not a potential candidate, its descendants will also not be added. + assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) ); + // However, if taken independently, both B and C still have potential, since we + // don't know that A doesn't. + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); } else { assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - - // All other candidates can always be potential candidates. - for (candidate, pvd) in - [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_a_hash, candidate_b_hash, candidate_c_hash].into_iter().collect() ); } } } - // Various max depths. + // Various depths { - // depth is 0, will only allow 1 candidate + // Depth is 0, only allows one candidate, but the others will be kept as potential. let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 0, ancestors.clone(), ) .unwrap(); - // Before populating the chain, all candidates are potential candidates. However, they can - // only be added as connected candidates, because only one candidates is allowed by max - // depth - let chain = FragmentChain::populate(scope.clone(), &CandidateStorage::default()); - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &CandidateStorage::default(), - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::IfConnected - ); - } - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); - // since depth is maxed out, we can't add more potential candidates - // candidate A is no longer a potential candidate because it's already present. 
- for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_b_hash, candidate_c_hash].into_iter().collect() + ); // depth is 1, allows two candidates let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 1, ancestors.clone(), ) .unwrap(); - // Before populating the chain, all candidates can be added as potential. - let mut modified_storage = CandidateStorage::default(); - let chain = FragmentChain::populate(scope.clone(), &modified_storage); - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } - // Add an unconnected candidate. We now should only allow a Connected candidate, because max - // depth only allows one more candidate. - modified_storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) - .unwrap(); - let chain = FragmentChain::populate(scope.clone(), &modified_storage); - for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::IfConnected - ); - } + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - // Now try populating from all candidates. - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // since depth is maxed out, we can't add more potential candidates - // candidate A and B are no longer a potential candidate because they're already present. 
- for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::>(), + [candidate_c_hash].into_iter().collect() + ); - // depths larger than 2, allows all candidates + // depth is larger than 2, allows all three candidates for depth in 2..6 { let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], depth, ancestors.clone(), ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - // Candidates are no longer potential candidates because they're already part of the - // chain. - for (candidate, pvd) in [ - (candidate_a.clone(), pvd_a.clone()), - (candidate_b.clone(), pvd_b.clone()), - (candidate_c.clone(), pvd_c.clone()), - ] { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&candidate_a_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); + + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); } } - // Wrong relay parents + // Relay parents out of scope { - // Candidates A has relay parent out of scope. - let ancestors_without_a = vec![relay_parent_b_info.clone()]; + // Candidate A has relay parent out of scope. Candidates B and C will also be deleted since + // they form a chain with A. + let ancestors_without_x = vec![relay_parent_y_info.clone()]; let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 4, - ancestors_without_a, + ancestors_without_x, ) .unwrap(); + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); - let mut chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); - - // Candidate A is not a potential candidate, but candidates B and C still are. 
- assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate_a.hash(), - &candidate_a.descriptor.relay_parent, - pvd_a.parent_head.hash(), - Some(candidate_a.commitments.head_data.hash()), - ), - PotentialAddition::None + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) ); - for (candidate, pvd) in - [(candidate_b.clone(), pvd_b.clone()), (candidate_c.clone(), pvd_c.clone())] - { - assert_eq!( - chain.can_add_candidate_as_potential( - &storage, - &candidate.hash(), - &candidate.descriptor.relay_parent, - pvd.parent_head.hash(), - Some(candidate.commitments.head_data.hash()), - ), - PotentialAddition::Anyhow - ); - } + // However, if taken independently, both B and C still have potential, since we + // don't know that A doesn't. + assert!(chain.can_add_candidate_as_potential(&candidate_b_entry).is_ok()); + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); - // Candidate C has the same relay parent as candidate A's parent. Relay parent not allowed - // to move backwards - let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 1, - vec![0x0c].into(), - vec![0x0d].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); + // Candidates A and B have relay parents out of scope. Candidate C will also be deleted + // since it forms a chain with A and B. let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 4, - ancestors.clone(), + vec![], ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); + let chain = populate_chain_from_previous_storage(&scope, &storage); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::RelayParentNotInScope(_, _)) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_b_entry), + Err(Error::RelayParentNotInScope(_, _)) ); + // However, if taken independently, C still has potential, since we + // don't know that A and B don't + assert!(chain.can_add_candidate_as_potential(&candidate_c_entry).is_ok()); } - // Parachain fork and cycles are not allowed. + // Parachain cycle is not allowed. Make C have the same parent as A. { - // Candidate C has the same parent as candidate B. 
- let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0b].into(), - vec![0x0d].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - // We'll either have A->B or A->C. It's not deterministic because CandidateStorage uses - // HashSets and HashMaps. - if chain.to_vec() == vec![candidate_a_hash, candidate_b_hash] { - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } else if chain.to_vec() == vec![candidate_a_hash, wrong_candidate_c.hash()] { - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, wrong_candidate_c.hash()]); - // Candidate B is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &candidate_b.hash(), - &candidate_b.descriptor.relay_parent, - pvd_b.parent_head.hash(), - Some(candidate_b.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - } else { - panic!("Unexpected chain: {:?}", chain.to_vec()); - } - - // Candidate C is a 0-length cycle. - // Candidate C has the same parent as candidate B. let mut modified_storage = storage.clone(); modified_storage.remove_candidate(&candidate_c_hash); let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( para_id, - relay_parent_c, - 2, - vec![0x0c].into(), + relay_parent_z_info.hash, + relay_parent_z_info.number, vec![0x0c].into(), - 2, + vec![0x0a].into(), + relay_parent_z_info.number, ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None - ); - - // Candidate C points back to the pre-state of candidate C. 
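The cycle case constructs C so that its output head data loops back onto A's required parent. As far as these tests show, the check amounts to refusing any candidate whose output head data is already occupied by the chain; a sketch under that reading, with toy types:

use std::collections::HashSet;

// `chain` is a list of (parent_head, output_head) links in order. Appending
// `candidate` closes a cycle if its output head is the chain's required parent
// or any head data the chain has already produced.
fn would_cycle(chain: &[(Vec<u8>, Vec<u8>)], candidate: &(Vec<u8>, Vec<u8>)) -> bool {
    let mut occupied: HashSet<&[u8]> = HashSet::new();
    if let Some((root_parent, _)) = chain.first() {
        occupied.insert(root_parent.as_slice());
    }
    occupied.extend(chain.iter().map(|(_, output)| output.as_slice()));
    occupied.contains(candidate.1.as_slice())
}

With the chain A: 0x0a -> 0x0b and B: 0x0b -> 0x0c, a C committing 0x0c -> 0x0a trips the check, which is the Error::Cycle asserted below.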
- let mut modified_storage = storage.clone(); - modified_storage.remove_candidate(&candidate_c_hash); - let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( - para_id, - relay_parent_c, - 2, - vec![0x0c].into(), - vec![0x0b].into(), - 2, - ); - modified_storage - .add_candidate(wrong_candidate_c.clone(), wrong_pvd_c.clone(), CandidateState::Seconded) - .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + relay_parent_z_info.clone(), base_constraints.clone(), - pending_availability.clone(), + vec![], 4, ancestors.clone(), ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - chain.extend_from_storage(&modified_storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - // Candidate C is not even a potential candidate. - assert_eq!( - chain.can_add_candidate_as_potential( - &modified_storage, - &wrong_candidate_c.hash(), - &wrong_candidate_c.descriptor.relay_parent, - wrong_pvd_c.parent_head.hash(), - Some(wrong_candidate_c.commitments.head_data.hash()), - ), - PotentialAddition::None + + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + Err(Error::Cycle) ); + // However, if taken independently, C still has potential, since we don't know about A and B. + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&wrong_candidate_c_entry).is_ok()); } - // Test with candidates pending availability - { - // Valid options - for pending in [ - vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }], - vec![ - PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_b_hash, - relay_parent: relay_parent_b_info.clone(), - }, - ], - vec![ - PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_b_hash, - relay_parent: relay_parent_b_info.clone(), - }, - PendingAvailability { - candidate_hash: candidate_c_hash, - relay_parent: relay_parent_c_info.clone(), - }, - ], - ] { - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - pending, - 3, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } + // Candidate C has the same relay parent as candidate A's parent.
Relay parent not allowed + // to move backwards + let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0c].into(), + vec![0x0d].into(), + 0, + ); + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c.hash(), + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 4, + ancestors.clone(), + ) + .unwrap(); - // Relay parents of pending availability candidates can be out of scope - // Relay parent of candidate A is out of scope. - let ancestors_without_a = vec![relay_parent_b_info.clone()]; - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), - base_constraints.clone(), - vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info.clone(), - }], - 4, - ancestors_without_a, - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); - // Even relay parents of pending availability candidates which are out of scope cannot move - // backwards. - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info.clone(), + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + Err(Error::RelayParentMovedBackwards) + ); + + // Candidate C is an unconnected candidate. + // C's relay parent is allowed to move backwards from B's relay parent, because C may later on + // trigger a reorg and B may get removed. 
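A compact restatement of the relay-parent rule being tested here, as a sketch over plain block numbers (illustrative, not the crate's API): within the best chain, relay parents may only stay put or move forward; the check is deferred for unconnected candidates, since a future reorg might remove the parent they would otherwise violate.

// Returns true if the relay parents along a root-to-tip chain never decrease.
fn relay_parents_monotonic(relay_parent_numbers: &[u64]) -> bool {
    relay_parent_numbers.windows(2).all(|pair| pair[0] <= pair[1])
}

Appending the modified C (relay parent X) after B (relay parent Y, where X is older than Y) fails this check, hence Error::RelayParentMovedBackwards; the unconnected C is tolerated because it is not yet part of the chain.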
+ let mut modified_storage = storage.clone(); + modified_storage.remove_candidate(&candidate_c_hash); + let (unconnected_pvd_c, unconnected_candidate_c) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0d].into(), + vec![0x0e].into(), + 0, + ); + let unconnected_candidate_c_hash = unconnected_candidate_c.hash(); + let unconnected_candidate_c_entry = CandidateEntry::new( + unconnected_candidate_c_hash, + unconnected_candidate_c, + unconnected_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage + .add_candidate_entry(unconnected_candidate_c_entry.clone()) + .unwrap(); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 4, + ancestors.clone(), + ) + .unwrap(); + let chain = FragmentChain::init(scope.clone(), CandidateStorage::default()); + assert!(chain.can_add_candidate_as_potential(&unconnected_candidate_c_entry).is_ok()); + + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [unconnected_candidate_c_hash].into_iter().collect() + ); + + // Candidate A is a pending availability candidate and Candidate C is an unconnected candidate. + // C's relay parent is not allowed to move backwards from A's relay parent because we're sure A + // will not get removed in the future, as it's already on-chain (unless it times out + // availability, a case we don't care to optimise for). + + modified_storage.remove_candidate(&candidate_a_hash); + let (modified_pvd_a, modified_candidate_a) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0x0a].into(), + vec![0x0b].into(), + relay_parent_y_info.number, + ); + let modified_candidate_a_hash = modified_candidate_a.hash(); + modified_storage + .add_candidate_entry( + CandidateEntry::new( + modified_candidate_a_hash, + modified_candidate_a, + modified_pvd_a, + CandidateState::Backed, + ) + .unwrap(), + ) + .unwrap(); + + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: modified_candidate_a_hash, + relay_parent: relay_parent_y_info.clone(), + }], + 4, + ancestors.clone(), + ) + .unwrap(); + + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&unconnected_candidate_c_entry), + Err(Error::RelayParentPrecedesCandidatePendingAvailability(_, _)) + ); + + // Not allowed to fork from a candidate pending availability + let (wrong_pvd_c, wrong_candidate_c) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0x0a].into(), + vec![0x0b2].into(), + 0, + ); + let wrong_candidate_c_hash = wrong_candidate_c.hash(); + let wrong_candidate_c_entry = CandidateEntry::new( + wrong_candidate_c_hash, + wrong_candidate_c, + wrong_pvd_c, + CandidateState::Backed, + ) + .unwrap(); + modified_storage.add_candidate_entry(wrong_candidate_c_entry.clone()).unwrap(); + + // Does not even matter if the fork selection rule would have picked up the new candidate, as + // the other is already pending availability.
+ assert_eq!( + fork_selection_rule(&wrong_candidate_c_hash, &modified_candidate_a_hash), + Ordering::Less + ); + + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: modified_candidate_a_hash, + relay_parent: relay_parent_y_info.clone(), + }], + 4, + ancestors.clone(), + ) + .unwrap(); + + let chain = populate_chain_from_previous_storage(&scope, &modified_storage); + assert_eq!(chain.best_chain_vec(), vec![modified_candidate_a_hash, candidate_b_hash]); + assert_eq!(chain.unconnected_len(), 0); + assert_matches!( + chain.can_add_candidate_as_potential(&wrong_candidate_c_entry), + Err(Error::ForkWithCandidatePendingAvailability(_)) + ); + + // Test with candidates pending availability + { + // Valid options + for pending in [ + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info.clone(), + }, + ], + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info.clone(), + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_z_info.clone(), + }, + ], + ] { + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + pending, + 3, + ancestors.clone(), + ) + .unwrap(); + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + } + + // Relay parents of pending availability candidates can be out of scope + // Relay parent of candidate A is out of scope. + let ancestors_without_x = vec![relay_parent_y_info.clone()]; + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info.clone(), + }], + 4, + ancestors_without_x, + ) + .unwrap(); + let chain = populate_chain_from_previous_storage(&scope, &storage); + + assert_eq!( + chain.best_chain_vec(), + vec![candidate_a_hash, candidate_b_hash, candidate_c_hash] + ); + assert_eq!(chain.unconnected_len(), 0); + + // Even relay parents of pending availability candidates which are out of scope cannot + // move backwards. 
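The assertion above leans on the crate's fork_selection_rule; as exercised in these tests it is a deterministic tie-breaker between two candidates competing for the same parent head data, preferring the lower candidate hash, and it is overridden whenever one contender is already pending availability. A sketch under that reading (the hash type and helper names are illustrative):

use std::cmp::Ordering;

// Lower candidate hash wins the fork. `Ordering::Less` means `a` is preferred.
fn fork_selection(a: &[u8; 32], b: &[u8; 32]) -> Ordering {
    a.cmp(b)
}

// A candidate pending availability wins regardless of the hash order.
fn pick_fork(a_pending: bool, b_pending: bool, a: &[u8; 32], b: &[u8; 32]) -> Ordering {
    match (a_pending, b_pending) {
        (true, false) => Ordering::Less,
        (false, true) => Ordering::Greater,
        _ => fork_selection(a, b),
    }
}

This is why the wrong C loses above even though plain hash ordering would have preferred it: its competitor is already pending availability.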
+ let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), base_constraints.clone(), vec![ PendingAvailability { candidate_hash: candidate_a_hash, relay_parent: RelayChainBlockInfo { - hash: relay_parent_a_info.hash, + hash: relay_parent_x_info.hash, number: 1, - storage_root: relay_parent_a_info.storage_root, + storage_root: relay_parent_x_info.storage_root, }, }, PendingAvailability { candidate_hash: candidate_b_hash, relay_parent: RelayChainBlockInfo { - hash: relay_parent_b_info.hash, + hash: relay_parent_y_info.hash, number: 0, - storage_root: relay_parent_b_info.storage_root, + storage_root: relay_parent_y_info.storage_root, }, }, ], @@ -976,271 +961,418 @@ fn populate_and_extend_from_storage_with_existing_empty_to_vec() { vec![], ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); - - chain.extend_from_storage(&storage); - assert!(chain.to_vec().is_empty()); + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert!(chain.best_chain_vec().is_empty()); + assert_eq!(chain.unconnected_len(), 0); } -} -#[test] -fn extend_from_storage_with_existing_to_vec() { - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_d = Hash::repeat_byte(3); + // More complex case: + // max_depth is 2 (a chain of max depth 3). + // A -> B -> C are the best backable chain. + // D is backed but would exceed the max depth. + // F is unconnected and seconded. + // A1 has same parent as A, is backed but has a higher candidate hash. It'll therefore be + // deleted. + // A1 has underneath a subtree that will all need to be trimmed. A1 -> B1. B1 -> C1 + // and B1 -> C2. (C1 is backed). + // A2 is seconded but is kept because it has a lower candidate hash than A. + // A2 points to B2, which is backed. + // + // Check that D, F, A2 and B2 are kept as unconnected potential candidates. 
- let (pvd_a, candidate_a) = make_committed_candidate( + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 2, + ancestors.clone(), + ) + .unwrap(); + + // Candidate D + let (pvd_d, candidate_d) = make_committed_candidate( para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0d].into(), + vec![0x0e].into(), + relay_parent_z_info.number, ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( + let candidate_d_hash = candidate_d.hash(); + let candidate_d_entry = + CandidateEntry::new(candidate_d_hash, candidate_d, pvd_d, CandidateState::Backed).unwrap(); + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_d_entry) + .is_ok()); + storage.add_candidate_entry(candidate_d_entry).unwrap(); + + // Candidate F + let (pvd_f, candidate_f) = make_committed_candidate( para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0f].into(), + vec![0xf1].into(), + 1000, ); - let candidate_b_hash = candidate_b.hash(); + let candidate_f_hash = candidate_f.hash(); + let candidate_f_entry = + CandidateEntry::new(candidate_f_hash, candidate_f, pvd_f, CandidateState::Seconded) + .unwrap(); + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_f_entry) + .is_ok()); + storage.add_candidate_entry(candidate_f_entry.clone()).unwrap(); - let (pvd_c, candidate_c) = make_committed_candidate( + // Candidate A1 + let (pvd_a1, candidate_a1) = make_committed_candidate( para_id, - // Use the same relay parent number as B to test that it doesn't need to change between - // candidates. - relay_parent_b, - 1, - vec![0x0c].into(), - vec![0x0d].into(), - 1, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0a].into(), + vec![0xb1].into(), + relay_parent_x_info.number, ); - let candidate_c_hash = candidate_c.hash(); + let candidate_a1_hash = candidate_a1.hash(); + let candidate_a1_entry = + CandidateEntry::new(candidate_a1_hash, candidate_a1, pvd_a1, CandidateState::Backed) + .unwrap(); + // Candidate A1 is created so that its hash is greater than the candidate A hash. + assert_eq!(fork_selection_rule(&candidate_a_hash, &candidate_a1_hash), Ordering::Less); - // Candidate D will never be added to the chain. - let (pvd_d, candidate_d) = make_committed_candidate( - para_id, - relay_parent_d, - 2, - vec![0x0e].into(), - vec![0x0f].into(), - 1, + assert_matches!( + populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_a1_entry), + Err(Error::ForkChoiceRule(other)) if candidate_a_hash == other ); - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_d_info = RelayChainBlockInfo { - number: pvd_d.relay_parent_number, - hash: relay_parent_d, - storage_root: pvd_d.relay_parent_storage_root, - }; + storage.add_candidate_entry(candidate_a1_entry.clone()).unwrap(); - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); + // Candidate B1. 
+ let (pvd_b1, candidate_b1) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0xb1].into(), + vec![0xc1].into(), + relay_parent_x_info.number, + ); + let candidate_b1_hash = candidate_b1.hash(); + let candidate_b1_entry = + CandidateEntry::new(candidate_b1_hash, candidate_b1, pvd_b1, CandidateState::Seconded) + .unwrap(); + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_b1_entry) + .is_ok()); - let ancestors = vec![ - // These need to be ordered in reverse. - relay_parent_b_info.clone(), - relay_parent_a_info.clone(), - ]; + storage.add_candidate_entry(candidate_b1_entry).unwrap(); - // Already had A and C in the storage. Introduce B, which should add both B and C to the chain - // now. - { - let mut storage = CandidateStorage::default(); - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) + // Candidate C1. + let (pvd_c1, candidate_c1) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0xc1].into(), + vec![0xd1].into(), + relay_parent_x_info.number, + ); + let candidate_c1_hash = candidate_c1.hash(); + let candidate_c1_entry = + CandidateEntry::new(candidate_c1_hash, candidate_c1, pvd_c1, CandidateState::Backed) .unwrap(); - storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_c1_entry) + .is_ok()); + + storage.add_candidate_entry(candidate_c1_entry).unwrap(); + + // Candidate C2. + let (pvd_c2, candidate_c2) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0xc1].into(), + vec![0xd2].into(), + relay_parent_x_info.number, + ); + let candidate_c2_hash = candidate_c2.hash(); + let candidate_c2_entry = + CandidateEntry::new(candidate_c2_hash, candidate_c2, pvd_c2, CandidateState::Seconded) .unwrap(); - storage - .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_c2_entry) + .is_ok()); + storage.add_candidate_entry(candidate_c2_entry).unwrap(); + + // Candidate A2. + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_x_info.hash, + relay_parent_x_info.number, + vec![0x0a].into(), + vec![0xb3].into(), + relay_parent_x_info.number, + ); + let candidate_a2_hash = candidate_a2.hash(); + let candidate_a2_entry = + CandidateEntry::new(candidate_a2_hash, candidate_a2, pvd_a2, CandidateState::Seconded) .unwrap(); + // Candidate A2 is created so that its hash is lower than the candidate A hash. + assert_eq!(fork_selection_rule(&candidate_a2_hash, &candidate_a_hash), Ordering::Less); - let scope = Scope::with_ancestors( - para_id, - relay_parent_d_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), - ) - .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash]); + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_a2_entry) + .is_ok()); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) + storage.add_candidate_entry(candidate_a2_entry).unwrap(); + + // Candidate B2.
+ let (pvd_b2, candidate_b2) = make_committed_candidate( + para_id, + relay_parent_y_info.hash, + relay_parent_y_info.number, + vec![0xb3].into(), + vec![0xb4].into(), + relay_parent_y_info.number, + ); + let candidate_b2_hash = candidate_b2.hash(); + let candidate_b2_entry = + CandidateEntry::new(candidate_b2_hash, candidate_b2, pvd_b2, CandidateState::Backed) .unwrap(); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } + assert!(populate_chain_from_previous_storage(&scope, &storage) + .can_add_candidate_as_potential(&candidate_b2_entry) + .is_ok()); + storage.add_candidate_entry(candidate_b2_entry).unwrap(); - // Already had A and B in the chain. Introduce C. + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [candidate_d_hash, candidate_f_hash, candidate_a2_hash, candidate_b2_hash] + .into_iter() + .collect() + ); + // Cannot add as potential an already present candidate (whether it's in the best chain or in + // unconnected storage) + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::CandidateAlreadyKnown) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_f_entry), + Err(Error::CandidateAlreadyKnown) + ); + + // Simulate a best chain reorg by backing A2. { - let mut storage = CandidateStorage::default(); - storage - .add_candidate(candidate_a.clone(), pvd_a.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_b.clone(), pvd_b.clone(), CandidateState::Seconded) - .unwrap(); - storage - .add_candidate(candidate_d.clone(), pvd_d.clone(), CandidateState::Seconded) - .unwrap(); + let mut chain = chain.clone(); + chain.candidate_backed(&candidate_a2_hash); + assert_eq!(chain.best_chain_vec(), vec![candidate_a2_hash, candidate_b2_hash]); + // F is kept as it was truly unconnected. The rest will be trimmed. + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [candidate_f_hash].into_iter().collect() + ); - let scope = Scope::with_ancestors( - para_id, - relay_parent_d_info.clone(), - base_constraints.clone(), - pending_availability.clone(), - 4, - ancestors.clone(), + // A and A1 will never have potential again. + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a1_entry), + Err(Error::ForkChoiceRule(_)) + ); + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::ForkChoiceRule(_)) + ); + } + + // Candidate F has an invalid hrmp watermark. However, it was not checked beforehand as we don't + // have its parent yet. Add its parent now. This will not impact anything as E is not yet part + // of the best chain.
+ + let (pvd_e, candidate_e) = make_committed_candidate( + para_id, + relay_parent_z_info.hash, + relay_parent_z_info.number, + vec![0x0e].into(), + vec![0x0f].into(), + relay_parent_z_info.number, + ); + let candidate_e_hash = candidate_e.hash(); + storage + .add_candidate_entry( + CandidateEntry::new(candidate_e_hash, candidate_e, pvd_e, CandidateState::Seconded) + .unwrap(), ) .unwrap(); - let mut chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - storage - .add_candidate(candidate_c.clone(), pvd_c.clone(), CandidateState::Seconded) - .unwrap(); - chain.extend_from_storage(&storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); - } + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [ + candidate_d_hash, + candidate_f_hash, + candidate_a2_hash, + candidate_b2_hash, + candidate_e_hash + ] + .into_iter() + .collect() + ); + + // Simulate the fact that candidates A, B, C are now pending availability. + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![ + PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_x_info, + }, + PendingAvailability { + candidate_hash: candidate_b_hash, + relay_parent: relay_parent_y_info, + }, + PendingAvailability { + candidate_hash: candidate_c_hash, + relay_parent: relay_parent_z_info.clone(), + }, + ], + 2, + ancestors.clone(), + ) + .unwrap(); + + // A2 and B2 will now be trimmed + let chain = populate_chain_from_previous_storage(&scope, &storage); + assert_eq!(chain.best_chain_vec(), vec![candidate_a_hash, candidate_b_hash, candidate_c_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [candidate_d_hash, candidate_f_hash, candidate_e_hash].into_iter().collect() + ); + // Cannot add as potential an already pending availability candidate + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_a_entry), + Err(Error::CandidateAlreadyKnown) + ); + + // Simulate the fact that candidates A, B and C have been included. + + let base_constraints = make_constraints(0, vec![0], HeadData(vec![0x0d])); + let scope = Scope::with_ancestors( + relay_parent_z_info.clone(), + base_constraints.clone(), + vec![], + 2, + ancestors.clone(), + ) + .unwrap(); + + let prev_chain = chain; + let mut chain = FragmentChain::init(scope, CandidateStorage::default()); + chain.populate_from_previous(&prev_chain); + assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash]); + assert_eq!( + chain.unconnected().map(|c| c.candidate_hash).collect::<HashSet<_>>(), + [candidate_e_hash, candidate_f_hash].into_iter().collect() + ); + + // Mark E as backed. F will be dropped for invalid watermark. No other unconnected candidates.
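The transition just exercised — A, B and C included and the base constraints' required parent advancing to 0x0d — can be restated as rebuilding the chain from the new required parent, which naturally drops every candidate behind the new base. A minimal sketch with toy head-data links (this models the effect, not the crate's populate_from_previous; it assumes the input is acyclic, as guaranteed by the cycle check above):

// Rebuild a chain from candidates of the previous view, starting at the new
// required parent; candidates behind the new base simply never match.
fn rebase(prev: &[(Vec<u8>, Vec<u8>)], new_required_parent: &[u8]) -> Vec<(Vec<u8>, Vec<u8>)> {
    let mut chain = Vec::new();
    let mut required = new_required_parent.to_vec();
    while let Some(link) = prev.iter().find(|(parent, _)| parent == &required) {
        required = link.1.clone();
        chain.push(link.clone());
    }
    chain
}

Starting from 0x0d, only D (0x0d -> 0x0e) still chains, matching the vec![candidate_d_hash] assertion above.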
+ chain.candidate_backed(&candidate_e_hash); + assert_eq!(chain.best_chain_vec(), vec![candidate_d_hash, candidate_e_hash]); + assert_eq!(chain.unconnected_len(), 0); + + assert_matches!( + chain.can_add_candidate_as_potential(&candidate_f_entry), + Err(Error::CheckAgainstConstraints(_)) + ); } #[test] -fn test_find_ancestor_path_and_find_backable_chain_empty_to_vec() { - let para_id = ParaId::from(5u32); +fn test_find_ancestor_path_and_find_backable_chain_empty_best_chain() { let relay_parent = Hash::repeat_byte(1); let required_parent: HeadData = vec![0xff].into(); let max_depth = 10; // Empty chain - let storage = CandidateStorage::default(); let base_constraints = make_constraints(0, vec![0], required_parent.clone()); let relay_parent_info = RelayChainBlockInfo { number: 0, hash: relay_parent, storage_root: Hash::zero() }; - let scope = Scope::with_ancestors( - para_id, - relay_parent_info, - base_constraints, - vec![], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - assert!(chain.to_vec().is_empty()); + let scope = + Scope::with_ancestors(relay_parent_info, base_constraints, vec![], max_depth, vec![]) + .unwrap(); + let chain = FragmentChain::init(scope, CandidateStorage::default()); + assert_eq!(chain.best_chain_len(), 0); assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); - assert_eq!(chain.find_backable_chain(Ancestors::new(), 2, |_| true), vec![]); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2), vec![]); // Invalid candidate. let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!(chain.find_backable_chain(ancestors, 2, |_| true), vec![]); + assert_eq!(chain.find_backable_chain(ancestors, 2), vec![]); } #[test] -fn test_find_ancestor_path_and_find_backable_to_vec() { +fn test_find_ancestor_path_and_find_backable_chain() { let para_id = ParaId::from(5u32); let relay_parent = Hash::repeat_byte(1); let required_parent: HeadData = vec![0xff].into(); let max_depth = 5; let relay_parent_number = 0; - let relay_parent_storage_root = Hash::repeat_byte(69); + let relay_parent_storage_root = Hash::zero(); let mut candidates = vec![]; - - // Candidate 0 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - required_parent.clone(), - vec![0].into(), - 0, - )); - // Candidate 1 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![0].into(), - vec![1].into(), - 0, - )); - // Candidate 2 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![1].into(), - vec![2].into(), - 0, - )); - // Candidate 3 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![2].into(), - vec![3].into(), - 0, - )); - // Candidate 4 - candidates.push(make_committed_candidate( - para_id, - relay_parent, - 0, - vec![3].into(), - vec![4].into(), - 0, - )); - // Candidate 5 + + // Candidate 0 candidates.push(make_committed_candidate( para_id, relay_parent, 0, - vec![4].into(), - vec![5].into(), + required_parent.clone(), + vec![0].into(), 0, )); - let base_constraints = make_constraints(0, vec![0], required_parent.clone()); + // Candidates 1..=5 + for index in 1..=5 { + candidates.push(make_committed_candidate( + para_id, + relay_parent, + 0, + vec![index - 1].into(), + vec![index].into(), + 0, + )); + } + let mut storage = CandidateStorage::default(); + for (pvd, candidate) in candidates.iter() { + storage + .add_candidate_entry( + 
CandidateEntry::new_seconded(candidate.hash(), candidate.clone(), pvd.clone()) + .unwrap(), + ) + .unwrap(); + } + + let candidates = candidates + .into_iter() + .map(|(_pvd, candidate)| candidate.hash()) + .collect::<Vec<_>>(); + let hashes = + |range: Range<usize>| range.map(|i| (candidates[i], relay_parent)).collect::<Vec<_>>(); + let relay_parent_info = RelayChainBlockInfo { number: relay_parent_number, hash: relay_parent, storage_root: relay_parent_storage_root, }; - for (pvd, candidate) in candidates.iter() { - storage - .add_candidate(candidate.clone(), pvd.clone(), CandidateState::Seconded) - .unwrap(); - } - let candidates = candidates.into_iter().map(|(_pvd, candidate)| candidate).collect::<Vec<_>>(); + let base_constraints = make_constraints(0, vec![0], required_parent.clone()); let scope = Scope::with_ancestors( - para_id, relay_parent_info.clone(), base_constraints.clone(), vec![], @@ -1248,506 +1380,140 @@ fn test_find_ancestor_path_and_find_backable_to_vec() { vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, &storage); + let mut chain = populate_chain_from_previous_storage(&scope, &storage); + // For now, candidates are only seconded, not backed. So the best chain is empty and no + // candidate will be returned. assert_eq!(candidates.len(), 6); - assert_eq!(chain.to_vec().len(), 6); + assert_eq!(chain.best_chain_len(), 0); + assert_eq!(chain.unconnected_len(), 6); + + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } + + // Do tests with only a couple of candidates being backed. + { + let mut chain = chain.clone(); + chain.candidate_backed(&candidates[5]); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } + chain.candidate_backed(&candidates[3]); + chain.candidate_backed(&candidates[4]); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } + + chain.candidate_backed(&candidates[1]); + for count in 0..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count).len(), 0); + } + + chain.candidate_backed(&candidates[0]); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); + for count in 2..10 { + assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..2)); + } + + // Now back the missing piece. + chain.candidate_backed(&candidates[2]); + assert_eq!(chain.best_chain_len(), 6); + for count in 0..10 { + assert_eq!( + chain.find_backable_chain(Ancestors::new(), count), + (0..6) + .take(count as usize) + .map(|i| (candidates[i], relay_parent)) + .collect::<Vec<_>>() + ); + } + } + + // Now back all candidates. Back them in a random order. The result should always be the same. + let mut candidates_shuffled = candidates.clone(); + candidates_shuffled.shuffle(&mut thread_rng()); + for candidate in candidates_shuffled.iter() { + chain.candidate_backed(candidate); + storage.mark_backed(candidate); + } // No ancestors supplied.
assert_eq!(chain.find_ancestor_path(Ancestors::new()), 0); - assert_eq!(chain.find_backable_chain(Ancestors::new(), 0, |_| true), vec![]); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 1, |_| true), - [0].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 2, |_| true), - [0, 1].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 5, |_| true), - [0, 1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 0), vec![]); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 1), hashes(0..1)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 2), hashes(0..2)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 5), hashes(0..5)); for count in 6..10 { - assert_eq!( - chain.find_backable_chain(Ancestors::new(), count, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(Ancestors::new(), count), hashes(0..6)); } - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 7, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - assert_eq!( - chain.find_backable_chain(Ancestors::new(), 10, |_| true), - [0, 1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 7), hashes(0..6)); + assert_eq!(chain.find_backable_chain(Ancestors::new(), 10), hashes(0..6)); // Ancestor which is not part of the chain. Will be ignored. let ancestors: Ancestors = [CandidateHash::default()].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - let ancestors: Ancestors = - [candidates[1].hash(), CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); + + let ancestors: Ancestors = [candidates[1], CandidateHash::default()].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - let ancestors: Ancestors = - [candidates[0].hash(), CandidateHash::default()].into_iter().collect(); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); + + let ancestors: Ancestors = [candidates[0], CandidateHash::default()].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(1..5)); // Ancestors which are part of the chain but don't form a path from root. Will be ignored. - let ancestors: Ancestors = [candidates[1].hash(), candidates[2].hash()].into_iter().collect(); + let ancestors: Ancestors = [candidates[1], candidates[2]].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 0); - assert_eq!( - chain.find_backable_chain(ancestors, 4, |_| true), - [0, 1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors, 4), hashes(0..4)); // Valid ancestors.
- let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); + let ancestors: Ancestors = [candidates[2], candidates[0], candidates[1]].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 3); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 2, |_| true), - [3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 2), hashes(3..5)); for count in 3..10 { - assert_eq!( - chain.find_backable_chain(ancestors.clone(), count, |_| true), - [3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors.clone(), count), hashes(3..6)); } // Valid ancestors with candidates which have been omitted due to timeouts - let ancestors: Ancestors = [candidates[0].hash(), candidates[2].hash()].into_iter().collect(); + let ancestors: Ancestors = [candidates[0], candidates[2]].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 1); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 3, |_| true), - [1, 2, 3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 4, |_| true), - [1, 2, 3, 4].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 3), hashes(1..4)); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 4), hashes(1..5)); for count in 5..10 { - assert_eq!( - chain.find_backable_chain(ancestors.clone(), count, |_| true), - [1, 2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors.clone(), count), hashes(1..6)); } - let ancestors: Ancestors = [candidates[0].hash(), candidates[1].hash(), candidates[3].hash()] - .into_iter() - .collect(); + let ancestors: Ancestors = [candidates[0], candidates[1], candidates[3]].into_iter().collect(); assert_eq!(chain.find_ancestor_path(ancestors.clone()), 2); - assert_eq!( - chain.find_backable_chain(ancestors.clone(), 4, |_| true), - [2, 3, 4, 5].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); + assert_eq!(chain.find_backable_chain(ancestors.clone(), 4), hashes(2..6)); // Requested count is 0. - assert_eq!(chain.find_backable_chain(ancestors, 0, |_| true), vec![]); - - // Stop when we've found a candidate for which pred returns false. - let ancestors: Ancestors = [candidates[2].hash(), candidates[0].hash(), candidates[1].hash()] - .into_iter() - .collect(); - for count in 1..10 { - assert_eq!( - // Stop at 4.
- chain.find_backable_chain(ancestors.clone(), count, |hash| hash != - &candidates[4].hash()), - [3].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() - ); - } + assert_eq!(chain.find_backable_chain(ancestors, 0), vec![]); // Stop when we've found a candidate which is pending availability { let scope = Scope::with_ancestors( - para_id, relay_parent_info.clone(), base_constraints, // Mark the third candidate as pending availability vec![PendingAvailability { - candidate_hash: candidates[3].hash(), + candidate_hash: candidates[3], relay_parent: relay_parent_info, }], max_depth, vec![], ) .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - let ancestors: Ancestors = - [candidates[0].hash(), candidates[1].hash()].into_iter().collect(); + let chain = populate_chain_from_previous_storage(&scope, &storage); + let ancestors: Ancestors = [candidates[0], candidates[1]].into_iter().collect(); assert_eq!( // Stop at 4. - chain.find_backable_chain(ancestors.clone(), 3, |_| true), - [2].into_iter().map(|i| candidates[i].hash()).collect::<Vec<_>>() + chain.find_backable_chain(ancestors.clone(), 3), + hashes(2..3) ); - } -} - -#[test] -fn hypothetical_membership() { - let mut storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0b].into(), - vec![0x0c].into(), - 0, - ); - let candidate_b_hash = candidate_b.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); - storage.add_candidate(candidate_b, pvd_b, CandidateState::Seconded).unwrap(); - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info.clone(), - base_constraints.clone(), - vec![], - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert_eq!(chain.to_vec().len(), 2); - - // Check candidates which are already present - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_a_hash, - }, - &storage, - )); - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_b_hash, - }, - &storage, - )); - - // Forks not allowed.
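A hedged sketch of how find_ancestor_path and find_backable_chain behave in the assertions above (the types are illustrative stand-ins): the ancestor set buys you the longest prefix of the best chain it fully covers, and the backable slice that follows stops early at a candidate pending availability.

use std::collections::HashSet;

type CandidateHash = [u8; 32];

// Longest prefix of the best chain entirely contained in `ancestors`;
// the first miss ends the walk, so non-prefix ancestors are ignored.
fn find_ancestor_path(best_chain: &[CandidateHash], ancestors: &HashSet<CandidateHash>) -> usize {
    best_chain.iter().take_while(|hash| ancestors.contains(*hash)).count()
}

// Up to `count` candidates after that prefix, halting at one pending availability.
fn find_backable_chain(
    best_chain: &[CandidateHash],
    ancestors: &HashSet<CandidateHash>,
    count: usize,
    pending_availability: &HashSet<CandidateHash>,
) -> Vec<CandidateHash> {
    let start = find_ancestor_path(best_chain, ancestors);
    best_chain[start..]
        .iter()
        .take_while(|hash| !pending_availability.contains(*hash))
        .take(count)
        .copied()
        .collect()
}

This is why ancestors {0, 1} with candidate 3 pending availability return only candidate 2 above: the prefix ends at index 2 and the walk halts before the pending candidate.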
- assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(21)), - }, - &storage, - )); - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(22)), - }, - &storage, - )); - - // Unknown candidate which builds on top of the current chain. - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); - - // Unknown unconnected candidate which may be valid. - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); - - // The number of unconnected candidates is limited (chain.len() + unconnected) <= max_depth - { - // C will be an unconnected candidate. - let (pvd_c, candidate_c) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0e].into(), - vec![0x0f].into(), - 0, - ); - let candidate_c_hash = candidate_c.hash(); - - // Add an invalid candidate in the storage. This would introduce a fork. Just to test that - // it's ignored. - let (invalid_pvd, invalid_candidate) = make_committed_candidate( - para_id, - relay_parent_a, - 1, - vec![0x0a].into(), - vec![0x0b].into(), - 0, + chain.find_backable_chain(ancestors.clone(), 3), + hashes(2..3) ); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - vec![], - 2, - vec![], - ) - .unwrap(); - let mut storage = storage.clone(); - storage.add_candidate(candidate_c, pvd_c, CandidateState::Seconded).unwrap(); - - let chain = FragmentChain::populate(scope, &storage); - assert_eq!(chain.to_vec(), vec![candidate_a_hash, candidate_b_hash]); - - storage - .add_candidate(invalid_candidate, invalid_pvd, CandidateState::Seconded) - .unwrap(); - - // Check that C is accepted as a potential unconnected candidate. - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0e]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_hash: candidate_c_hash, - candidate_para: para_id - }, - &storage, - )); - - // Since C is already an unconnected candidate in the storage. 
- assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0f]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: CandidateHash(Hash::repeat_byte(23)), - }, - &storage, - )); } } - -#[test] -fn hypothetical_membership_stricter_on_complete_candidates() { - let storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 1000, // watermark is illegal - ); - - let candidate_a_hash = candidate_a.hash(); - - let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); - let pending_availability = Vec::new(); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - - let max_depth = 4; - let scope = Scope::with_ancestors( - para_id, - relay_parent_a_info, - base_constraints, - pending_availability, - max_depth, - vec![], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_para: para_id, - candidate_hash: candidate_a_hash, - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Complete { - receipt: Arc::new(candidate_a), - persisted_validation_data: pvd_a, - candidate_hash: candidate_a_hash, - }, - &storage, - )); -} - -#[test] -fn hypothetical_membership_with_pending_availability_in_scope() { - let mut storage = CandidateStorage::default(); - - let para_id = ParaId::from(5u32); - let relay_parent_a = Hash::repeat_byte(1); - let relay_parent_b = Hash::repeat_byte(2); - let relay_parent_c = Hash::repeat_byte(3); - - let (pvd_a, candidate_a) = make_committed_candidate( - para_id, - relay_parent_a, - 0, - vec![0x0a].into(), - vec![0x0b].into(), - 0, - ); - let candidate_a_hash = candidate_a.hash(); - - let (pvd_b, candidate_b) = make_committed_candidate( - para_id, - relay_parent_b, - 1, - vec![0x0b].into(), - vec![0x0c].into(), - 1, - ); - - // Note that relay parent `a` is not allowed. 
- let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); - - let relay_parent_a_info = RelayChainBlockInfo { - number: pvd_a.relay_parent_number, - hash: relay_parent_a, - storage_root: pvd_a.relay_parent_storage_root, - }; - let pending_availability = vec![PendingAvailability { - candidate_hash: candidate_a_hash, - relay_parent: relay_parent_a_info, - }]; - - let relay_parent_b_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number, - hash: relay_parent_b, - storage_root: pvd_b.relay_parent_storage_root, - }; - let relay_parent_c_info = RelayChainBlockInfo { - number: pvd_b.relay_parent_number + 1, - hash: relay_parent_c, - storage_root: Hash::zero(), - }; - - let max_depth = 4; - storage.add_candidate(candidate_a, pvd_a, CandidateState::Seconded).unwrap(); - storage.add_candidate(candidate_b, pvd_b, CandidateState::Backed).unwrap(); - storage.mark_backed(&candidate_a_hash); - - let scope = Scope::with_ancestors( - para_id, - relay_parent_c_info, - base_constraints, - pending_availability, - max_depth, - vec![relay_parent_b_info], - ) - .unwrap(); - let chain = FragmentChain::populate(scope, &storage); - - assert_eq!(chain.to_vec().len(), 2); - - let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_a, - candidate_hash: candidate_a_hash, - candidate_para: para_id - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), - candidate_relay_parent: relay_parent_c, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); - - assert!(!chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), - candidate_relay_parent: relay_parent_c, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); - - assert!(chain.hypothetical_membership( - HypotheticalCandidate::Incomplete { - parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), - candidate_relay_parent: relay_parent_b, - candidate_para: para_id, - candidate_hash: candidate_d_hash, - }, - &storage, - )); -} diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs index e4b6deffdf4a..ecb1f3a476ec 100644 --- a/polkadot/node/core/prospective-parachains/src/lib.rs +++ b/polkadot/node/core/prospective-parachains/src/lib.rs @@ -26,9 +26,11 @@ //! //! This subsystem also handles concerns such as the relay-chain being forkful and session changes. 
+#![deny(unused_crate_dependencies)] + use std::collections::{HashMap, HashSet}; -use fragment_chain::{FragmentChain, PotentialAddition}; +use fragment_chain::CandidateStorage; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ @@ -41,6 +43,7 @@ use polkadot_node_subsystem::{ overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ + backing_implicit_view::{BlockInfoProspectiveParachains as BlockInfo, View as ImplicitView}, inclusion_emulator::{Constraints, RelayChainBlockInfo}, request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, @@ -55,8 +58,7 @@ use polkadot_primitives::{ use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, fragment_chain::{ - CandidateState, CandidateStorage, CandidateStorageInsertionError, - Scope as FragmentChainScope, + CandidateEntry, Error as FragmentChainError, FragmentChain, Scope as FragmentChainScope, }, }; @@ -71,20 +73,33 @@ use self::metrics::Metrics; const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { - // Scheduling info for paras and upcoming paras. + // The fragment chains for current and upcoming scheduled paras. fragment_chains: HashMap<ParaId, FragmentChain>, - pending_availability: HashSet<CandidateHash>, } struct View { - // Active or recent relay-chain blocks by block hash. - active_leaves: HashMap<Hash, RelayBlockViewData>, - candidate_storage: HashMap<ParaId, CandidateStorage>, + // Per relay parent fragment chains. These include all relay parents under the implicit view. + per_relay_parent: HashMap<Hash, RelayBlockViewData>, + // The hashes of the currently active leaves. This is a subset of the keys in + // `per_relay_parent`. + active_leaves: HashSet<Hash>, + // The backing implicit view. + implicit_view: ImplicitView, } impl View { + // Initialize with empty values. fn new() -> Self { - View { active_leaves: HashMap::new(), candidate_storage: HashMap::new() } + View { + per_relay_parent: HashMap::new(), + active_leaves: HashSet::new(), + implicit_view: ImplicitView::default(), + } + } + + // Get the fragment chains of this leaf. + fn get_fragment_chains(&self, leaf: &Hash) -> Option<&HashMap<ParaId, FragmentChain>> { + self.per_relay_parent.get(&leaf).map(|view_data| &view_data.fragment_chains) } } @@ -142,9 +157,9 @@ async fn run_iteration( FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { ProspectiveParachainsMessage::IntroduceSecondedCandidate(request, tx) => - handle_introduce_seconded_candidate(&mut *ctx, view, request, tx, metrics).await, + handle_introduce_seconded_candidate(view, request, tx, metrics).await, ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => - handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await, + handle_candidate_backed(view, para, candidate_hash, metrics).await, ProspectiveParachainsMessage::GetBackableCandidates( relay_parent, para, @@ -170,17 +185,32 @@ async fn handle_active_leaves_update( update: ActiveLeavesUpdate, metrics: &Metrics, ) -> JfyiErrorResult<()> { - // 1. clean up inactive leaves - // 2. determine all scheduled paras at the new block - // 3. construct new fragment chain for each para for each new leaf - // 4. prune candidate storage.
+ // For any new active leaf: + // - determine the scheduled paras + // - pre-populate the candidate storage with pending availability candidates and candidates from + // the parent leaf + // - populate the fragment chain + // - add it to the implicit view + // + // Then mark the newly-deactivated leaves as deactivated and update the implicit view. + // Finally, remove any relay parents that are no longer part of the implicit view. + + let _timer = metrics.time_handle_active_leaves_update(); - for deactivated in &update.deactivated { - view.active_leaves.remove(deactivated); - } + gum::trace!( + target: LOG_TARGET, + activated = ?update.activated, + deactivated = ?update.deactivated, + "Handle ActiveLeavesUpdate" + ); let mut temp_header_cache = HashMap::new(); + // There can only be one newly activated leaf, `update.activated` is an `Option`. for activated in update.activated.into_iter() { + if update.deactivated.contains(&activated.hash) { + continue + } + let hash = activated.hash; let mode = prospective_parachains_mode(ctx.sender(), hash) @@ -199,38 +229,34 @@ async fn handle_active_leaves_update( return Ok(()) }; - let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; + let scheduled_paras = fetch_upcoming_paras(ctx, hash).await?; - let block_info: RelayChainBlockInfo = - match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { - None => { - gum::warn!( - target: LOG_TARGET, - block_hash = ?hash, - "Failed to get block info for newly activated leaf block." - ); + let block_info = match fetch_block_info(ctx, &mut temp_header_cache, hash).await? { + None => { + gum::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Failed to get block info for newly activated leaf block." + ); - // `update.activated` is an option, but we can use this - // to exit the 'loop' and skip this block without skipping - // pruning logic. - continue - }, - Some(info) => info, - }; + // `update.activated` is an option, but we can use this + // to exit the 'loop' and skip this block without skipping + // pruning logic. + continue + }, + Some(info) => info, + }; let ancestry = - fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; + fetch_ancestry(ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; - let mut all_pending_availability = HashSet::new(); + let prev_fragment_chains = + ancestry.first().and_then(|prev_leaf| view.get_fragment_chains(&prev_leaf.hash)); - // Find constraints. let mut fragment_chains = HashMap::new(); for para in scheduled_paras { - let candidate_storage = - view.candidate_storage.entry(para).or_insert_with(CandidateStorage::default); - - let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; - + // Find constraints and pending availability candidates. + let backing_state = fetch_backing_state(ctx, hash, para).await?; let Some((constraints, pending_availability)) = backing_state else { // This indicates a runtime conflict of some kind. 
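The skip at the top of the loop above means a leaf that is both activated and deactivated within the same update is never processed. A tiny sketch of that rule, using plain `u64` hashes and a hypothetical `should_process` helper:

fn should_process(activated: Option<u64>, deactivated: &[u64]) -> Option<u64> {
    // A leaf activated and deactivated in the same update is a no-op.
    activated.filter(|hash| !deactivated.contains(hash))
}

fn main() {
    assert_eq!(should_process(Some(7), &[7]), None); // activated and dropped at once
    assert_eq!(should_process(Some(7), &[3]), Some(7)); // normal activation
}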
gum::debug!( @@ -243,8 +269,6 @@ async fn handle_active_leaves_update( continue }; - all_pending_availability.extend(pending_availability.iter().map(|c| c.candidate_hash)); - let pending_availability = preprocess_candidates_pending_availability( ctx, &mut temp_header_cache, @@ -254,16 +278,18 @@ async fn handle_active_leaves_update( .await?; let mut compact_pending = Vec::with_capacity(pending_availability.len()); + let mut pending_availability_storage = CandidateStorage::default(); + for c in pending_availability { - let res = candidate_storage.add_candidate( + let candidate_hash = c.compact.candidate_hash; + let res = pending_availability_storage.add_pending_availability_candidate( + candidate_hash, c.candidate, c.persisted_validation_data, - CandidateState::Backed, ); - let candidate_hash = c.compact.candidate_hash; match res { - Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {}, + Ok(_) | Err(FragmentChainError::CandidateAlreadyKnown) => {}, Err(err) => { gum::warn!( target: LOG_TARGET, @@ -280,119 +306,138 @@ async fn handle_active_leaves_update( compact_pending.push(c.compact); } - let scope = FragmentChainScope::with_ancestors( - para, - block_info.clone(), + let scope = match FragmentChainScope::with_ancestors( + block_info.clone().into(), constraints, compact_pending, max_candidate_depth, - ancestry.iter().cloned(), - ) - .expect("ancestors are provided in reverse order and correctly; qed"); + ancestry + .iter() + .map(|a| RelayChainBlockInfo::from(a.clone())) + .collect::>(), + ) { + Ok(scope) => scope, + Err(unexpected_ancestors) => { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + max_candidate_depth, + ?ancestry, + leaf = ?hash, + "Relay chain ancestors have wrong order: {:?}", + unexpected_ancestors + ); + continue + }, + }; gum::trace!( target: LOG_TARGET, relay_parent = ?hash, min_relay_parent = scope.earliest_relay_parent().number, para_id = ?para, + ancestors = ?ancestry, "Creating fragment chain" ); - let chain = FragmentChain::populate(scope, &*candidate_storage); + let number_of_pending_candidates = pending_availability_storage.len(); + + // Init the fragment chain with the pending availability candidates. + let mut chain = FragmentChain::init(scope, pending_availability_storage); + + if chain.best_chain_len() < number_of_pending_candidates { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Not all pending availability candidates could be introduced. Actual vs expected count: {}, {}", + chain.best_chain_len(), + number_of_pending_candidates + ) + } + + // If we know the previous fragment chain, use that for further populating the fragment + // chain. 
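The `Scope::with_ancestors` call above rejects an ancestry that is not supplied in strictly descending order, logging the offending entries. A simplified model of just that ordering check, assuming only block numbers matter (the real scope also validates hashes and carries constraints and pending availability):

fn check_ancestry(leaf_number: u64, ancestors: &[u64]) -> Result<(), u64> {
    let mut expected = leaf_number;
    for &number in ancestors {
        // Each ancestor must sit exactly one block below the previous entry;
        // anything else is the "unexpected ancestors" case warned about above.
        if expected == 0 || number != expected - 1 {
            return Err(number)
        }
        expected = number;
    }
    Ok(())
}

fn main() {
    assert_eq!(check_ancestry(100, &[99, 98, 97]), Ok(()));
    assert_eq!(check_ancestry(100, &[99, 97]), Err(97));
}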
+ if let Some(prev_fragment_chain) = + prev_fragment_chains.and_then(|chains| chains.get(¶)) + { + chain.populate_from_previous(prev_fragment_chain); + } gum::trace!( target: LOG_TARGET, relay_parent = ?hash, para_id = ?para, - "Populated fragment chain with {} candidates", - chain.len() + "Populated fragment chain with {} candidates: {:?}", + chain.best_chain_len(), + chain.best_chain_vec() + ); + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?hash, + para_id = ?para, + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() ); fragment_chains.insert(para, chain); } - view.active_leaves.insert( - hash, - RelayBlockViewData { fragment_chains, pending_availability: all_pending_availability }, - ); - } + view.per_relay_parent.insert(hash, RelayBlockViewData { fragment_chains }); - if !update.deactivated.is_empty() { - // This has potential to be a hotspot. - prune_view_candidate_storage(view, metrics); - } + view.active_leaves.insert(hash); - Ok(()) -} + view.implicit_view + .activate_leaf_from_prospective_parachains(block_info, &ancestry); + } -fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { - let _timer = metrics.time_prune_view_candidate_storage(); + for deactivated in update.deactivated { + view.active_leaves.remove(&deactivated); + view.implicit_view.deactivate_leaf(deactivated); + } - let active_leaves = &view.active_leaves; - let mut live_candidates = HashSet::new(); - let mut live_paras = HashSet::new(); - for sub_view in active_leaves.values() { - live_candidates.extend(sub_view.pending_availability.iter().cloned()); + { + let remaining: HashSet<_> = view.implicit_view.all_allowed_relay_parents().collect(); - for (para_id, fragment_chain) in &sub_view.fragment_chains { - live_candidates.extend(fragment_chain.to_vec()); - live_paras.insert(*para_id); - } + view.per_relay_parent.retain(|r, _| remaining.contains(&r)); } - let connected_candidates_count = live_candidates.len(); - for (leaf, sub_view) in active_leaves.iter() { - for (para_id, fragment_chain) in &sub_view.fragment_chains { - if let Some(storage) = view.candidate_storage.get(para_id) { - let unconnected_potential = - fragment_chain.find_unconnected_potential_candidates(storage, None); - if !unconnected_potential.is_empty() { - gum::trace!( - target: LOG_TARGET, - ?leaf, - "Keeping {} unconnected candidates for paraid {} in storage: {:?}", - unconnected_potential.len(), - para_id, - unconnected_potential - ); + if metrics.0.is_some() { + let mut active_connected = 0; + let mut active_unconnected = 0; + let mut candidates_in_implicit_view = 0; + + for (hash, RelayBlockViewData { fragment_chains, .. }) in view.per_relay_parent.iter() { + if view.active_leaves.contains(hash) { + for chain in fragment_chains.values() { + active_connected += chain.best_chain_len(); + active_unconnected += chain.unconnected_len(); + } + } else { + for chain in fragment_chains.values() { + candidates_in_implicit_view += chain.best_chain_len(); + candidates_in_implicit_view += chain.unconnected_len(); } - live_candidates.extend(unconnected_potential); } } - } - - view.candidate_storage.retain(|para_id, storage| { - if !live_paras.contains(¶_id) { - return false - } - - storage.retain(|h| live_candidates.contains(&h)); - - // Even if `storage` is now empty, we retain. - // This maintains a convenient invariant that para-id storage exists - // as long as there's an active head which schedules the para. 
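The metrics pass above splits candidate counts by whether a relay parent is an active leaf or is only retained by the implicit view. A condensed, self-contained model where each fragment chain is reduced to a `(best_chain_len, unconnected_len)` pair:

use std::collections::{HashMap, HashSet};

fn tally(
    per_relay_parent: &HashMap<u64, Vec<(usize, usize)>>,
    active_leaves: &HashSet<u64>,
) -> (usize, usize, usize) {
    let (mut connected, mut unconnected, mut implicit) = (0, 0, 0);
    for (hash, chains) in per_relay_parent {
        for &(best, unc) in chains {
            if active_leaves.contains(hash) {
                // Active leaves report connected and unconnected separately.
                connected += best;
                unconnected += unc;
            } else {
                // Implicit-view-only relay parents report one combined figure.
                implicit += best + unc;
            }
        }
    }
    (connected, unconnected, implicit)
}

fn main() {
    let mut per_rp = HashMap::new();
    per_rp.insert(1, vec![(2, 1)]); // active leaf
    per_rp.insert(2, vec![(1, 1)]); // implicit-view-only relay parent
    let active: HashSet<u64> = [1].into_iter().collect();
    assert_eq!(tally(&per_rp, &active), (2, 1, 2));
}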
- true - }); - for (para_id, storage) in view.candidate_storage.iter() { - gum::trace!( - target: LOG_TARGET, - "Keeping a total of {} connected candidates for paraid {} in storage", - storage.candidates().count(), - para_id, - ); + metrics.record_candidate_count(active_connected as u64, active_unconnected as u64); + metrics.record_candidate_count_in_implicit_view(candidates_in_implicit_view as u64); } - metrics.record_candidate_storage_size( - connected_candidates_count as u64, - live_candidates.len().saturating_sub(connected_candidates_count) as u64, - ); + let num_active_leaves = view.active_leaves.len() as u64; + let num_inactive_leaves = + (view.per_relay_parent.len() as u64).saturating_sub(num_active_leaves); + metrics.record_leaves_count(num_active_leaves, num_inactive_leaves); + + Ok(()) } struct ImportablePendingAvailability { candidate: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, - compact: crate::fragment_chain::PendingAvailability, + compact: fragment_chain::PendingAvailability, } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] @@ -435,9 +480,9 @@ async fn preprocess_candidates_pending_availability( relay_parent_number: relay_parent.number, relay_parent_storage_root: relay_parent.storage_root, }, - compact: crate::fragment_chain::PendingAvailability { + compact: fragment_chain::PendingAvailability { candidate_hash: pending.candidate_hash, - relay_parent, + relay_parent: relay_parent.into(), }, }); @@ -447,9 +492,7 @@ async fn preprocess_candidates_pending_availability( Ok(importable) } -#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_introduce_seconded_candidate( - _ctx: &mut Context, +async fn handle_introduce_seconded_candidate( view: &mut View, request: IntroduceSecondedCandidateRequest, tx: oneshot::Sender, @@ -463,167 +506,163 @@ async fn handle_introduce_seconded_candidate( persisted_validation_data: pvd, } = request; - let Some(storage) = view.candidate_storage.get_mut(¶) else { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - candidate_hash = ?candidate.hash(), - "Received seconded candidate for inactive para", - ); + let candidate_hash = candidate.hash(); + let candidate_entry = match CandidateEntry::new_seconded(candidate_hash, candidate, pvd) { + Ok(candidate) => candidate, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + para = ?para, + "Cannot add seconded candidate: {}", + err + ); - let _ = tx.send(false); - return + let _ = tx.send(false); + return + }, }; - let parent_head_hash = pvd.parent_head.hash(); - let output_head_hash = Some(candidate.commitments.head_data.hash()); + let mut added = false; + let mut para_scheduled = false; + // We don't iterate only through the active leaves. We also update the deactivated parents in + // the implicit view, so that their upcoming children may see these candidates. + for (relay_parent, rp_data) in view.per_relay_parent.iter_mut() { + let Some(chain) = rp_data.fragment_chains.get_mut(¶) else { continue }; + let is_active_leaf = view.active_leaves.contains(relay_parent); - // We first introduce the candidate in the storage and then try to extend the chain. - // If the candidate gets included in the chain, we can keep it in storage. - // If it doesn't, check that it's still a potential candidate in at least one fragment chain. - // If it's not, we can remove it. 
+ para_scheduled = true; - let candidate_hash = - match storage.add_candidate(candidate.clone(), pvd, CandidateState::Seconded) { - Ok(c) => c, - Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + match chain.try_adding_seconded_candidate(&candidate_entry) { + Ok(()) => { gum::debug!( target: LOG_TARGET, - para = ?para, - "Attempting to introduce an already known candidate: {:?}", - candidate.hash() + ?para, + ?relay_parent, + ?is_active_leaf, + "Added seconded candidate {:?}", + candidate_hash ); - // Candidate already known. - let _ = tx.send(true); - return + added = true; }, - Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { - // We can't log the candidate hash without either doing more ~expensive - // hashing but this branch indicates something is seriously wrong elsewhere - // so it's doubtful that it would affect debugging. - - gum::warn!( + Err(FragmentChainError::CandidateAlreadyKnown) => { + gum::debug!( target: LOG_TARGET, - para = ?para, - "Received seconded candidate had mismatching validation data", + ?para, + ?relay_parent, + ?is_active_leaf, + "Attempting to introduce an already known candidate: {:?}", + candidate_hash ); - - let _ = tx.send(false); - return + added = true; }, - }; - - let mut keep_in_storage = false; - for (relay_parent, leaf_data) in view.active_leaves.iter_mut() { - if let Some(chain) = leaf_data.fragment_chains.get_mut(¶) { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - "Candidates in chain before trying to introduce a new one: {:?}", - chain.to_vec() - ); - chain.extend_from_storage(&*storage); - if chain.contains_candidate(&candidate_hash) { - keep_in_storage = true; - - gum::trace!( + Err(err) => { + gum::debug!( target: LOG_TARGET, + ?para, ?relay_parent, - para = ?para, ?candidate_hash, - "Added candidate to chain.", - ); - } else { - match chain.can_add_candidate_as_potential( - &storage, - &candidate_hash, - &candidate.descriptor.relay_parent, - parent_head_hash, - output_head_hash, - ) { - PotentialAddition::Anyhow => { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - ?candidate_hash, - "Kept candidate as unconnected potential.", - ); - - keep_in_storage = true; - }, - _ => { - gum::trace!( - target: LOG_TARGET, - para = ?para, - ?relay_parent, - "Not introducing a new candidate: {:?}", - candidate_hash - ); - }, - } - } + ?is_active_leaf, + "Cannot introduce seconded candidate: {}", + err + ) + }, } } - // If there is at least one leaf where this candidate can be added or potentially added in the - // future, keep it in storage. 
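The match above sets `added` on both `Ok` and `CandidateAlreadyKnown`; together with the `para_scheduled` flag this determines the reply. A reduced sketch with each relay parent's outcome flattened to an `Option<bool>`:

fn introduce(outcomes: &[Option<bool>]) -> (bool, bool) {
    // None = para not scheduled at that relay parent,
    // Some(true) = added or already known, Some(false) = rejected.
    let para_scheduled = outcomes.iter().any(|o| o.is_some());
    let added = outcomes.iter().any(|o| *o == Some(true));
    (para_scheduled, added)
}

fn main() {
    assert_eq!(introduce(&[None, None]), (false, false)); // warns: inactive para
    assert_eq!(introduce(&[Some(false), Some(true)]), (true, true));
}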
- if !keep_in_storage { - storage.remove_candidate(&candidate_hash); + if !para_scheduled { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received seconded candidate for inactive para", + ); + } + if !added { gum::debug!( target: LOG_TARGET, para = ?para, candidate = ?candidate_hash, - "Newly-seconded candidate cannot be kept under any active leaf", + "Newly-seconded candidate cannot be kept under any relay parent", ); } - let _ = tx.send(keep_in_storage); + let _ = tx.send(added); } -#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_backed( - _ctx: &mut Context, +async fn handle_candidate_backed( view: &mut View, para: ParaId, candidate_hash: CandidateHash, + metrics: &Metrics, ) { - let Some(storage) = view.candidate_storage.get_mut(¶) else { - gum::warn!( - target: LOG_TARGET, - para_id = ?para, - ?candidate_hash, - "Received instruction to back a candidate for unscheduled para", - ); + let _timer = metrics.time_candidate_backed(); - return - }; + let mut found_candidate = false; + let mut found_para = false; + + // We don't iterate only through the active leaves. We also update the deactivated parents in + // the implicit view, so that their upcoming children may see these candidates. + for (relay_parent, rp_data) in view.per_relay_parent.iter_mut() { + let Some(chain) = rp_data.fragment_chains.get_mut(¶) else { continue }; + let is_active_leaf = view.active_leaves.contains(relay_parent); + + found_para = true; + if chain.is_candidate_backed(&candidate_hash) { + gum::debug!( + target: LOG_TARGET, + ?para, + ?candidate_hash, + ?is_active_leaf, + "Received redundant instruction to mark as backed an already backed candidate", + ); + found_candidate = true; + } else if chain.contains_unconnected_candidate(&candidate_hash) { + found_candidate = true; + // Mark the candidate as backed. This can recreate the fragment chain. + chain.candidate_backed(&candidate_hash); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para, + ?is_active_leaf, + "Candidate backed. Candidate chain for para: {:?}", + chain.best_chain_vec() + ); + + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + ?para, + ?is_active_leaf, + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() + ); + } + } - if !storage.contains(&candidate_hash) { + if !found_para { gum::warn!( target: LOG_TARGET, - para_id = ?para, + ?para, ?candidate_hash, - "Received instruction to back unknown candidate", + "Received instruction to back a candidate for unscheduled para", ); return } - if storage.is_backed(&candidate_hash) { + if !found_candidate { + // This can be harmless. It can happen if we received a better backed candidate before and + // dropped this other candidate already. 
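The new `handle_candidate_backed` above amounts to a small decision table. The sketch below flattens the per-relay-parent aggregation into three booleans; the real code accumulates `found_para` and `found_candidate` across all fragment chains before logging:

#[derive(Debug, PartialEq)]
enum Outcome {
    UnscheduledPara,
    UnknownCandidate,
    AlreadyBacked,
    MarkedBacked,
}

fn on_candidate_backed(found_para: bool, already_backed: bool, unconnected: bool) -> Outcome {
    if !found_para {
        Outcome::UnscheduledPara
    } else if already_backed {
        Outcome::AlreadyBacked
    } else if unconnected {
        // Marking a candidate as backed may recreate the best chain.
        Outcome::MarkedBacked
    } else {
        // Possibly harmless: a better candidate may have replaced this one.
        Outcome::UnknownCandidate
    }
}

fn main() {
    assert_eq!(on_candidate_backed(true, false, true), Outcome::MarkedBacked);
    assert_eq!(on_candidate_backed(false, false, false), Outcome::UnscheduledPara);
}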
gum::debug!( target: LOG_TARGET, - para_id = ?para, + ?para, ?candidate_hash, - "Received redundant instruction to mark candidate as backed", + "Received instruction to back unknown candidate", ); - - return } - - storage.mark_backed(&candidate_hash); } fn answer_get_backable_candidates( @@ -634,7 +673,7 @@ fn answer_get_backable_candidates( ancestors: Ancestors, tx: oneshot::Sender>, ) { - let Some(data) = view.active_leaves.get(&relay_parent) else { + if !view.active_leaves.contains(&relay_parent) { gum::debug!( target: LOG_TARGET, ?relay_parent, @@ -644,26 +683,25 @@ fn answer_get_backable_candidates( let _ = tx.send(vec![]); return - }; - - let Some(chain) = data.fragment_chains.get(¶) else { + } + let Some(data) = view.per_relay_parent.get(&relay_parent) else { gum::debug!( target: LOG_TARGET, ?relay_parent, para_id = ?para, - "Requested backable candidate for inactive para." + "Requested backable candidate for inexistent relay-parent." ); let _ = tx.send(vec![]); return }; - let Some(storage) = view.candidate_storage.get(¶) else { - gum::warn!( + let Some(chain) = data.fragment_chains.get(¶) else { + gum::debug!( target: LOG_TARGET, ?relay_parent, para_id = ?para, - "No candidate storage for active para", + "Requested backable candidate for inactive para." ); let _ = tx.send(vec![]); @@ -674,38 +712,19 @@ fn answer_get_backable_candidates( target: LOG_TARGET, ?relay_parent, para_id = ?para, - "Candidate storage for para: {:?}", - storage.candidates().map(|candidate| candidate.hash()).collect::>() + "Candidate chain for para: {:?}", + chain.best_chain_vec() ); gum::trace!( target: LOG_TARGET, ?relay_parent, para_id = ?para, - "Candidate chain for para: {:?}", - chain.to_vec() + "Potential candidate storage for para: {:?}", + chain.unconnected().map(|candidate| candidate.hash()).collect::>() ); - let backable_candidates: Vec<_> = chain - .find_backable_chain(ancestors.clone(), count, |candidate| storage.is_backed(candidate)) - .into_iter() - .filter_map(|child_hash| { - storage.relay_parent_of_candidate(&child_hash).map_or_else( - || { - // Here, we'd actually need to trim all of the candidates that follow. Or - // not, the runtime will do this. Impossible scenario anyway. 
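`answer_get_backable_candidates` above short-circuits to an empty reply at each missing precondition rather than erroring. A self-contained miniature of that guard chain, with best chains stored as plain vectors:

use std::collections::{HashMap, HashSet};

fn answer(
    active_leaves: &HashSet<u64>,
    chains: &HashMap<u64, HashMap<u32, Vec<u64>>>, // relay parent -> para -> best chain
    relay_parent: u64,
    para: u32,
) -> Vec<u64> {
    // Guard 1: the requested relay parent must be an active leaf.
    if !active_leaves.contains(&relay_parent) {
        return Vec::new()
    }
    // Guard 2: the relay parent must actually be tracked.
    let Some(per_para) = chains.get(&relay_parent) else { return Vec::new() };
    // Guard 3: the para must have a fragment chain there.
    let Some(chain) = per_para.get(&para) else { return Vec::new() };
    chain.clone()
}

fn main() {
    let mut chains = HashMap::new();
    chains.insert(1, HashMap::from([(7u32, vec![100u64, 101])]));
    let active: HashSet<u64> = [1].into_iter().collect();
    assert_eq!(answer(&active, &chains, 1, 7), vec![100, 101]);
    assert_eq!(answer(&active, &chains, 2, 7), Vec::<u64>::new());
}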
- gum::error!( - target: LOG_TARGET, - ?child_hash, - para_id = ?para, - "Candidate is present in fragment chain but not in candidate's storage!", - ); - None - }, - |parent_hash| Some((child_hash, parent_hash)), - ) - }) - .collect(); + let backable_candidates = chain.find_backable_chain(ancestors.clone(), count); if backable_candidates.is_empty() { gum::trace!( @@ -742,19 +761,32 @@ fn answer_hypothetical_membership_request( } let required_active_leaf = request.fragment_chain_relay_parent; - for (active_leaf, leaf_view) in view + for active_leaf in view .active_leaves .iter() - .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) + .filter(|h| required_active_leaf.as_ref().map_or(true, |x| h == &x)) { + let Some(leaf_view) = view.per_relay_parent.get(&active_leaf) else { continue }; for &mut (ref candidate, ref mut membership) in &mut response { let para_id = &candidate.candidate_para(); let Some(fragment_chain) = leaf_view.fragment_chains.get(para_id) else { continue }; - let Some(candidate_storage) = view.candidate_storage.get(para_id) else { continue }; - if fragment_chain.hypothetical_membership(candidate.clone(), candidate_storage) { - membership.push(*active_leaf); - } + let res = fragment_chain.can_add_candidate_as_potential(candidate); + match res { + Err(FragmentChainError::CandidateAlreadyKnown) | Ok(()) => { + membership.push(*active_leaf); + }, + Err(err) => { + gum::debug!( + target: LOG_TARGET, + para = ?para_id, + leaf = ?active_leaf, + candidate = ?candidate.candidate_hash(), + "Candidate is not a hypothetical member: {}", + err + ) + }, + }; } } @@ -767,9 +799,11 @@ fn answer_minimum_relay_parents_request( tx: oneshot::Sender>, ) { let mut v = Vec::new(); - if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { - for (para_id, fragment_chain) in &leaf_data.fragment_chains { - v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); + if view.active_leaves.contains(&relay_parent) { + if let Some(leaf_data) = view.per_relay_parent.get(&relay_parent) { + for (para_id, fragment_chain) in &leaf_data.fragment_chains { + v.push((*para_id, fragment_chain.scope().earliest_relay_parent().number)); + } } } @@ -781,37 +815,21 @@ fn answer_prospective_validation_data_request( request: ProspectiveValidationDataRequest, tx: oneshot::Sender>, ) { - // 1. Try to get the head-data from the candidate store if known. - // 2. Otherwise, it might exist as the base in some relay-parent and we can find it by iterating - // fragment chains. - // 3. Otherwise, it is unknown. - // 4. Also try to find the relay parent block info by scanning fragment chains. - // 5. If head data and relay parent block info are found - success. Otherwise, failure. - - let storage = match view.candidate_storage.get(&request.para_id) { - None => { - let _ = tx.send(None); - return - }, - Some(s) => s, - }; + // Try getting the needed data from any fragment chain. 
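Two behaviours from above in miniature: `find_backable_chain` returns at most `count` candidates from the best chain after skipping the prefix the caller already holds as ancestors, and the hypothetical-membership check treats `CandidateAlreadyKnown` the same as acceptance. Simplified stand-in types; the real ancestor matching happens inside the fragment chain:

use std::collections::HashSet;

fn find_backable_chain(best_chain: &[u64], ancestors: &HashSet<u64>, count: usize) -> Vec<u64> {
    // Skip the longest prefix already known to the caller, then take `count`.
    let skip = best_chain.iter().take_while(|c| ancestors.contains(*c)).count();
    best_chain.iter().skip(skip).take(count).copied().collect()
}

// Stand-in for fragment_chain::Error as consulted by the membership check above.
enum Verdict {
    Ok,
    AlreadyKnown,
    Rejected,
}

fn is_hypothetical_member(verdict: Verdict) -> bool {
    matches!(verdict, Verdict::Ok | Verdict::AlreadyKnown)
}

fn main() {
    let ancestors: HashSet<u64> = [10, 11].into_iter().collect();
    assert_eq!(find_backable_chain(&[10, 11, 12, 13], &ancestors, 5), vec![12, 13]);
    assert!(is_hypothetical_member(Verdict::Ok));
    assert!(is_hypothetical_member(Verdict::AlreadyKnown));
    assert!(!is_hypothetical_member(Verdict::Rejected));
}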
let (mut head_data, parent_head_data_hash) = match request.parent_head_data { - ParentHeadData::OnlyHash(parent_head_data_hash) => ( - storage.head_data_by_hash(&parent_head_data_hash).map(|x| x.clone()), - parent_head_data_hash, - ), + ParentHeadData::OnlyHash(parent_head_data_hash) => (None, parent_head_data_hash), ParentHeadData::WithData { head_data, hash } => (Some(head_data), hash), }; let mut relay_parent_info = None; let mut max_pov_size = None; - for fragment_chain in view - .active_leaves - .values() - .filter_map(|x| x.fragment_chains.get(&request.para_id)) - { + for fragment_chain in view.active_leaves.iter().filter_map(|x| { + view.per_relay_parent + .get(&x) + .and_then(|data| data.fragment_chains.get(&request.para_id)) + }) { if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() { break } @@ -819,10 +837,7 @@ fn answer_prospective_validation_data_request( relay_parent_info = fragment_chain.scope().ancestor(&request.candidate_relay_parent); } if head_data.is_none() { - let required_parent = &fragment_chain.scope().base_constraints().required_parent; - if required_parent.hash() == parent_head_data_hash { - head_data = Some(required_parent.clone()); - } + head_data = fragment_chain.get_head_data_by_hash(&parent_head_data_hash); } if max_pov_size.is_none() { let contains_ancestor = @@ -925,7 +940,7 @@ async fn fetch_ancestry( cache: &mut HashMap, relay_hash: Hash, ancestors: usize, -) -> JfyiErrorResult> { +) -> JfyiErrorResult> { if ancestors == 0 { return Ok(Vec::new()) } @@ -1004,12 +1019,13 @@ async fn fetch_block_info( ctx: &mut Context, cache: &mut HashMap, relay_hash: Hash, -) -> JfyiErrorResult> { +) -> JfyiErrorResult> { let header = fetch_block_header_with_cache(ctx, cache, relay_hash).await?; - Ok(header.map(|header| RelayChainBlockInfo { + Ok(header.map(|header| BlockInfo { hash: relay_hash, number: header.number, + parent_hash: header.parent_hash, storage_root: header.state_root, })) } diff --git a/polkadot/node/core/prospective-parachains/src/metrics.rs b/polkadot/node/core/prospective-parachains/src/metrics.rs index 5abd9f56f306..78561bc878ac 100644 --- a/polkadot/node/core/prospective-parachains/src/metrics.rs +++ b/polkadot/node/core/prospective-parachains/src/metrics.rs @@ -17,15 +17,18 @@ use polkadot_node_subsystem::prometheus::Opts; use polkadot_node_subsystem_util::metrics::{ self, - prometheus::{self, GaugeVec, U64}, + prometheus::{self, Gauge, GaugeVec, U64}, }; #[derive(Clone)] pub(crate) struct MetricsInner { - prune_view_candidate_storage: prometheus::Histogram, - introduce_seconded_candidate: prometheus::Histogram, - hypothetical_membership: prometheus::Histogram, - candidate_storage_count: prometheus::GaugeVec, + time_active_leaves_update: prometheus::Histogram, + time_introduce_seconded_candidate: prometheus::Histogram, + time_candidate_backed: prometheus::Histogram, + time_hypothetical_membership: prometheus::Histogram, + candidate_count: prometheus::GaugeVec, + active_leaves_count: prometheus::GaugeVec, + implicit_view_candidate_count: prometheus::Gauge, } /// Candidate backing metrics. @@ -33,13 +36,11 @@ pub(crate) struct MetricsInner { pub struct Metrics(pub(crate) Option); impl Metrics { - /// Provide a timer for handling `prune_view_candidate_storage` which observes on drop. - pub fn time_prune_view_candidate_storage( + /// Provide a timer for handling `ActiveLeavesUpdate` which observes on drop. 
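The gather loop above fills three independent pieces of the response (head data, relay parent info, max PoV size) from whichever active-leaf fragment chain can provide them, stopping early once all are known. A generic sketch with the scope and chain lookups replaced by pre-computed options:

fn gather<T: Copy>(sources: &[(Option<T>, Option<T>, Option<T>)]) -> (Option<T>, Option<T>, Option<T>) {
    let (mut head_data, mut relay_parent_info, mut max_pov_size) = (None, None, None);
    for (h, r, m) in sources {
        // Stop scanning once every piece of the answer is filled in.
        if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() {
            break
        }
        head_data = head_data.or(*h);
        relay_parent_info = relay_parent_info.or(*r);
        max_pov_size = max_pov_size.or(*m);
    }
    (head_data, relay_parent_info, max_pov_size)
}

fn main() {
    let sources = [(None, Some(1), None), (Some(2), None, Some(3))];
    assert_eq!(gather(&sources), (Some(2), Some(1), Some(3)));
}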
+ pub fn time_handle_active_leaves_update( &self, ) -> Option { - self.0 - .as_ref() - .map(|metrics| metrics.prune_view_candidate_storage.start_timer()) + self.0.as_ref().map(|metrics| metrics.time_active_leaves_update.start_timer()) } /// Provide a timer for handling `IntroduceSecondedCandidate` which observes on drop. @@ -48,31 +49,47 @@ impl Metrics { ) -> Option { self.0 .as_ref() - .map(|metrics| metrics.introduce_seconded_candidate.start_timer()) + .map(|metrics| metrics.time_introduce_seconded_candidate.start_timer()) + } + + /// Provide a timer for handling `CandidateBacked` which observes on drop. + pub fn time_candidate_backed(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.time_candidate_backed.start_timer()) } /// Provide a timer for handling `GetHypotheticalMembership` which observes on drop. pub fn time_hypothetical_membership_request( &self, ) -> Option { - self.0.as_ref().map(|metrics| metrics.hypothetical_membership.start_timer()) + self.0 + .as_ref() + .map(|metrics| metrics.time_hypothetical_membership.start_timer()) } - /// Record the size of the candidate storage. First param is the connected candidates count, - /// second param is the unconnected candidates count. - pub fn record_candidate_storage_size(&self, connected_count: u64, unconnected_count: u64) { + /// Record number of candidates across all fragment chains. First param is the connected + /// candidates count, second param is the unconnected candidates count. + pub fn record_candidate_count(&self, connected_count: u64, unconnected_count: u64) { self.0.as_ref().map(|metrics| { + metrics.candidate_count.with_label_values(&["connected"]).set(connected_count); metrics - .candidate_storage_count - .with_label_values(&["connected"]) - .set(connected_count) + .candidate_count + .with_label_values(&["unconnected"]) + .set(unconnected_count); }); + } + /// Record the number of candidates present in the implicit view of the subsystem. + pub fn record_candidate_count_in_implicit_view(&self, count: u64) { self.0.as_ref().map(|metrics| { - metrics - .candidate_storage_count - .with_label_values(&["unconnected"]) - .set(unconnected_count) + metrics.implicit_view_candidate_count.set(count); + }); + } + + /// Record the number of active/inactive leaves kept by the subsystem. 
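All the timers above rely on prometheus's observe-on-drop pattern. A dependency-free sketch of that pattern, printing instead of recording into a histogram:

use std::time::Instant;

struct Timer {
    started: Instant,
    label: &'static str,
}

impl Drop for Timer {
    fn drop(&mut self) {
        // The real metric observes into a histogram; here we just print.
        println!("{} took {:?}", self.label, self.started.elapsed());
    }
}

fn time(label: &'static str) -> Timer {
    Timer { started: Instant::now(), label }
}

fn main() {
    let _timer = time("handle_active_leaves_update");
    // ... work happens here; the observation fires when `_timer` drops.
}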
+ pub fn record_leaves_count(&self, active_count: u64, inactive_count: u64) { + self.0.as_ref().map(|metrics| { + metrics.active_leaves_count.with_label_values(&["active"]).set(active_count); + metrics.active_leaves_count.with_label_values(&["inactive"]).set(inactive_count); }); } } @@ -80,37 +97,61 @@ impl Metrics { impl metrics::Metrics for Metrics { fn try_register(registry: &prometheus::Registry) -> Result { let metrics = MetricsInner { - prune_view_candidate_storage: prometheus::register( + time_active_leaves_update: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_prospective_parachains_prune_view_candidate_storage", - "Time spent within `prospective_parachains::prune_view_candidate_storage`", + "polkadot_parachain_prospective_parachains_time_active_leaves_update", + "Time spent within `prospective_parachains::handle_active_leaves_update`", ))?, registry, )?, - introduce_seconded_candidate: prometheus::register( + time_introduce_seconded_candidate: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_prospective_parachains_introduce_seconded_candidate", + "polkadot_parachain_prospective_parachains_time_introduce_seconded_candidate", "Time spent within `prospective_parachains::handle_introduce_seconded_candidate`", ))?, registry, )?, - hypothetical_membership: prometheus::register( + time_candidate_backed: prometheus::register( prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( - "polkadot_parachain_prospective_parachains_hypothetical_membership", + "polkadot_parachain_prospective_parachains_time_candidate_backed", + "Time spent within `prospective_parachains::handle_candidate_backed`", + ))?, + registry, + )?, + time_hypothetical_membership: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_time_hypothetical_membership", "Time spent responding to `GetHypotheticalMembership`", ))?, registry, )?, - candidate_storage_count: prometheus::register( + candidate_count: prometheus::register( GaugeVec::new( Opts::new( - "polkadot_parachain_prospective_parachains_candidate_storage_count", - "Number of candidates present in the candidate storage, split by connected and unconnected" + "polkadot_parachain_prospective_parachains_candidate_count", + "Number of candidates present across all fragment chains, split by connected and unconnected" ), &["type"], )?, registry, )?, + active_leaves_count: prometheus::register( + GaugeVec::new( + Opts::new( + "polkadot_parachain_prospective_parachains_active_leaves_count", + "Number of leaves kept by the subsystem, split by active/inactive" + ), + &["type"], + )?, + registry, + )?, + implicit_view_candidate_count: prometheus::register( + Gauge::new( + "polkadot_parachain_prospective_parachains_implicit_view_candidate_count", + "Number of candidates present in the implicit view" + )?, + registry + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/polkadot/node/core/prospective-parachains/src/tests.rs b/polkadot/node/core/prospective-parachains/src/tests.rs index 221fbf4c4e60..14a093239e8e 100644 --- a/polkadot/node/core/prospective-parachains/src/tests.rs +++ b/polkadot/node/core/prospective-parachains/src/tests.rs @@ -111,6 +111,8 @@ fn get_parent_hash(hash: Hash) -> Hash { fn test_harness>( test: impl FnOnce(VirtualOverseer) -> T, ) -> View { + sp_tracing::init_for_tests(); + let pool = sp_core::testing::TaskExecutor::new(); let (mut context, 
virtual_overseer) = @@ -203,6 +205,32 @@ async fn activate_leaf( activate_leaf_with_params(virtual_overseer, leaf, test_state, ASYNC_BACKING_PARAMETERS).await; } +async fn activate_leaf_with_parent_hash_fn( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + test_state: &TestState, + parent_hash_fn: impl Fn(Hash) -> Hash, +) { + let TestLeaf { number, hash, .. } = leaf; + + let activated = new_leaf(*hash, *number); + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + handle_leaf_activation( + virtual_overseer, + leaf, + test_state, + ASYNC_BACKING_PARAMETERS, + parent_hash_fn, + ) + .await; +} + async fn activate_leaf_with_params( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, @@ -219,7 +247,14 @@ async fn activate_leaf_with_params( )))) .await; - handle_leaf_activation(virtual_overseer, leaf, test_state, async_backing_params).await; + handle_leaf_activation( + virtual_overseer, + leaf, + test_state, + async_backing_params, + get_parent_hash, + ) + .await; } async fn handle_leaf_activation( @@ -227,6 +262,7 @@ async fn handle_leaf_activation( leaf: &TestLeaf, test_state: &TestState, async_backing_params: AsyncBackingParams, + parent_hash_fn: impl Fn(Hash) -> Hash, ) { let TestLeaf { number, hash, para_data } = leaf; @@ -281,7 +317,7 @@ async fn handle_leaf_activation( let min_min = para_data.iter().map(|(_, data)| data.min_relay_parent).min().unwrap_or(*number); let ancestry_len = number - min_min; let ancestry_hashes: Vec = - std::iter::successors(Some(*hash), |h| Some(get_parent_hash(*h))) + std::iter::successors(Some(*hash), |h| Some(parent_hash_fn(*h))) .skip(1) .take(ancestry_len as usize) .collect(); @@ -307,16 +343,20 @@ async fn handle_leaf_activation( ); } + let mut used_relay_parents = HashSet::new(); for (hash, number) in ancestry_iter { - send_block_header(virtual_overseer, hash, number).await; - assert_matches!( - virtual_overseer.recv().await, - AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) - ) if parent == hash => { - tx.send(Ok(1)).unwrap(); - } - ); + if !used_relay_parents.contains(&hash) { + send_block_header(virtual_overseer, hash, number).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(1)).unwrap(); + } + ); + used_relay_parents.insert(hash); + } } let paras: HashSet<_> = test_state.claim_queue.values().flatten().collect(); @@ -353,12 +393,16 @@ async fn handle_leaf_activation( ); for pending in pending_availability { - send_block_header( - virtual_overseer, - pending.descriptor.relay_parent, - pending.relay_parent_number, - ) - .await; + if !used_relay_parents.contains(&pending.descriptor.relay_parent) { + send_block_header( + virtual_overseer, + pending.descriptor.relay_parent, + pending.relay_parent_number, + ) + .await; + + used_relay_parents.insert(pending.descriptor.relay_parent); + } } } @@ -513,6 +557,26 @@ async fn get_pvd( assert_eq!(resp, expected_pvd); } +macro_rules! make_and_back_candidate { + ($test_state:ident, $virtual_overseer:ident, $leaf:ident, $parent:expr, $index:expr) => {{ + let (mut candidate, pvd) = make_candidate( + $leaf.hash, + $leaf.number, + 1.into(), + $parent.commitments.head_data.clone(), + HeadData(vec![$index]), + $test_state.validation_code_hash, + ); + // Set a field to make this candidate unique. 
+ candidate.descriptor.para_head = Hash::from_low_u64_le($index); + let candidate_hash = candidate.hash(); + introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; + back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; + + (candidate, candidate_hash) + }}; +} + #[test] fn should_do_no_work_if_async_backing_disabled_for_leaf() { async fn activate_leaf_async_backing_disabled(virtual_overseer: &mut VirtualOverseer) { @@ -542,7 +606,6 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { }); assert!(view.active_leaves.is_empty()); - assert!(view.candidate_storage.is_empty()); } // Send some candidates and make sure all are found: @@ -718,10 +781,6 @@ fn introduce_candidates_basic() { }); assert_eq!(view.active_leaves.len(), 3); - assert_eq!(view.candidate_storage.len(), 2); - // Two parents and two candidates per para. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2)); } #[test] @@ -766,28 +825,36 @@ fn introduce_candidate_multiple_times() { 1.into(), Ancestors::default(), 5, - response_a, + response_a.clone(), ) .await; - // Introduce the same candidate multiple times. It'll return true but it won't be added. - // We'll check below that the candidate count remains 1. + // Introduce the same candidate multiple times. It'll return true but it will only be added + // once. for _ in 0..5 { introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) .await; } + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + 1.into(), + Ancestors::default(), + 5, + response_a, + ) + .await; + virtual_overseer }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } #[test] -fn fragment_chain_length_is_bounded() { +fn fragment_chain_best_chain_length_is_bounded() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -835,12 +902,11 @@ fn fragment_chain_length_is_bounded() { ); // Introduce candidates A and B. Since max depth is 1, only these two will be allowed. - introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) - .await; - introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) - .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; - // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. + // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates and + // they won't be part of the best chain. back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; @@ -855,103 +921,25 @@ fn fragment_chain_length_is_bounded() { ) .await; - // Introducing C will fail. 
- introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) - .await; - - virtual_overseer - }); - - assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); -} - -#[test] -fn unconnected_candidate_count_is_bounded() { - let test_state = TestState::default(); - let view = test_harness(|mut virtual_overseer| async move { - // Leaf A - let leaf_a = TestLeaf { - number: 100, - hash: Hash::from_low_u64_be(130), - para_data: vec![ - (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), - (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), - ], - }; - // Activate leaves. - activate_leaf_with_params( - &mut virtual_overseer, - &leaf_a, - &test_state, - AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3 }, - ) - .await; - - // Candidates A, B and C are all potential candidates but don't form a chain. - let (candidate_a, pvd_a) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![1]), - HeadData(vec![2]), - test_state.validation_code_hash, - ); - let (candidate_b, pvd_b) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![3]), - HeadData(vec![4]), - test_state.validation_code_hash, - ); - let (candidate_c, pvd_c) = make_candidate( - leaf_a.hash, - leaf_a.number, - 1.into(), - HeadData(vec![4]), - HeadData(vec![5]), - test_state.validation_code_hash, - ); - - // Introduce candidates A and B. Although max depth is 1 (which should allow for two - // candidates), only 1 is allowed, because the last candidate must be a connected candidate. - introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) - .await; - introduce_seconded_candidate_failed( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - ) - .await; - - // Back candidates. Otherwise, we cannot check membership with GetBackableCandidates. - back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + // Introducing C will not fail. It will be kept as unconnected storage. + introduce_seconded_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; + // When being backed, candidate C will be dropped. + back_candidate(&mut virtual_overseer, &candidate_c, candidate_c.hash()).await; - // Check candidate tree membership. Should be empty. get_backable_candidates( &mut virtual_overseer, &leaf_a, 1.into(), Ancestors::default(), 5, - vec![], + vec![(candidate_a.hash(), leaf_a.hash), (candidate_b.hash(), leaf_a.hash)], ) .await; - // Introducing C will also fail. - introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c.clone()) - .await; - virtual_overseer }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Send some candidates, check if the candidate won't be found once its relay parent leaves the @@ -1178,7 +1166,6 @@ fn introduce_candidate_parent_leaving_view() { }); assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); } // Introduce a candidate to multiple forks, see how the membership is returned. 
@@ -1249,13 +1236,12 @@ fn introduce_candidate_on_multiple_forks() { }); assert_eq!(view.active_leaves.len(), 2); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (1, 1)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } #[test] fn unconnected_candidates_become_connected() { + // This doesn't test all the complicated cases with many unconnected candidates, as it's more + // extensively tested in the `fragment_chain::tests` module. let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -1351,9 +1337,6 @@ fn unconnected_candidates_become_connected() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Backs some candidates and tests `GetBackableCandidates` when requesting a single candidate. @@ -1435,6 +1418,10 @@ fn check_backable_query_single_candidate() { back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + // Back an unknown candidate. It doesn't return anything but it's ignored. Will not have any + // effect on the backable candidates. + back_candidate(&mut virtual_overseer, &candidate_b, CandidateHash(Hash::random())).await; + // Should not get any backable candidates for the other para. get_backable_candidates( &mut virtual_overseer, @@ -1490,35 +1477,11 @@ fn check_backable_query_single_candidate() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // Two parents and two candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Backs some candidates and tests `GetBackableCandidates` when requesting a multiple candidates. #[test] fn check_backable_query_multiple_candidates() { - macro_rules! make_and_back_candidate { - ($test_state:ident, $virtual_overseer:ident, $leaf:ident, $parent:expr, $index:expr) => {{ - let (mut candidate, pvd) = make_candidate( - $leaf.hash, - $leaf.number, - 1.into(), - $parent.commitments.head_data.clone(), - HeadData(vec![$index]), - $test_state.validation_code_hash, - ); - // Set a field to make this candidate unique. - candidate.descriptor.para_head = Hash::from_low_u64_le($index); - let candidate_hash = candidate.hash(); - introduce_seconded_candidate(&mut $virtual_overseer, candidate.clone(), pvd).await; - back_candidate(&mut $virtual_overseer, &candidate, candidate_hash).await; - - (candidate, candidate_hash) - }}; - } - let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -1786,10 +1749,6 @@ fn check_backable_query_multiple_candidates() { }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); - // 4 candidates on para 1. - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (4, 4)); - assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); } // Test hypothetical membership query. @@ -1885,11 +1844,13 @@ fn check_hypothetical_membership_query() { introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) .await; - // Get membership of candidates after adding A. 
C is not a potential candidate because we - // may only add one more candidate, which must be a connected candidate. - for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] - { + // Get membership of candidates after adding A. They all are still unconnected candidates + // (not part of the best backable chain). + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { get_hypothetical_membership( &mut virtual_overseer, candidate.hash(), @@ -1900,14 +1861,24 @@ fn check_hypothetical_membership_query() { .await; } - get_hypothetical_membership( - &mut virtual_overseer, - candidate_c.hash(), - candidate_c.clone(), - pvd_c.clone(), - vec![], - ) - .await; + // Back A. Now A is part of the best chain the rest can be added as unconnected. + + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { + get_hypothetical_membership( + &mut virtual_overseer, + candidate.hash(), + candidate, + pvd, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + } // Candidate D has invalid relay parent. let (candidate_d, pvd_d) = make_candidate( @@ -1920,14 +1891,17 @@ fn check_hypothetical_membership_query() { ); introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_d, pvd_d).await; - // Add candidate B. + // Add candidate B and back it. introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()) .await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_b.hash()).await; // Get membership of candidates after adding B. - for (candidate, pvd) in - [(candidate_a.clone(), pvd_a.clone()), (candidate_b.clone(), pvd_b.clone())] - { + for (candidate, pvd) in [ + (candidate_a.clone(), pvd_a.clone()), + (candidate_b.clone(), pvd_b.clone()), + (candidate_c.clone(), pvd_c.clone()), + ] { get_hypothetical_membership( &mut virtual_overseer, candidate.hash(), @@ -1938,24 +1912,10 @@ fn check_hypothetical_membership_query() { .await; } - get_hypothetical_membership( - &mut virtual_overseer, - candidate_c.hash(), - candidate_c.clone(), - pvd_c.clone(), - vec![], - ) - .await; - - // Add candidate C. It will fail because we have enough candidates for the configured depth. - introduce_seconded_candidate_failed(&mut virtual_overseer, candidate_c, pvd_c).await; - virtual_overseer }); assert_eq!(view.active_leaves.len(), 2); - assert_eq!(view.candidate_storage.len(), 2); - assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); } #[test] @@ -2005,6 +1965,16 @@ fn check_pvd_query() { test_state.validation_code_hash, ); + // Candidate E. + let (candidate_e, pvd_e) = make_candidate( + leaf_a.hash, + leaf_a.number, + 1.into(), + HeadData(vec![5]), + HeadData(vec![6]), + test_state.validation_code_hash, + ); + // Get pvd of candidate A before adding it. get_pvd( &mut virtual_overseer, @@ -2067,20 +2037,20 @@ fn check_pvd_query() { introduce_seconded_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. - get_pvd( - &mut virtual_overseer, - 1.into(), - leaf_a.hash, - HeadData(vec![2]), - Some(pvd_c.clone()), - ) - .await; + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![2]), Some(pvd_c)).await; + + // Get pvd of candidate E before adding it. 
It won't be found, as we don't have its parent. + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![5]), None).await; + + // Add candidate E and check again. Should succeed this time. + introduce_seconded_candidate(&mut virtual_overseer, candidate_e, pvd_e.clone()).await; + + get_pvd(&mut virtual_overseer, 1.into(), leaf_a.hash, HeadData(vec![5]), Some(pvd_e)).await; virtual_overseer }); assert_eq!(view.active_leaves.len(), 1); - assert_eq!(view.candidate_storage.len(), 2); } // Test simultaneously activating and deactivating leaves, and simultaneously deactivating @@ -2150,6 +2120,7 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { &leaf_c, &test_state, ASYNC_BACKING_PARAMETERS, + get_parent_hash, ) .await; @@ -2171,13 +2142,6 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) .await; - handle_leaf_activation( - &mut virtual_overseer, - &leaf_a, - &test_state, - ASYNC_BACKING_PARAMETERS, - ) - .await; // Remove the leaf again. Send some unnecessary hashes. let update = ActiveLeavesUpdate { @@ -2192,7 +2156,322 @@ fn correctly_updates_leaves(#[case] runtime_api_version: u32) { }); assert_eq!(view.active_leaves.len(), 0); - assert_eq!(view.candidate_storage.len(), 0); +} + +#[test] +fn handle_active_leaves_update_gets_candidates_from_parent() { + let para_id = ParaId::from(1); + let mut test_state = TestState::default(); + test_state.claim_queue = test_state + .claim_queue + .into_iter() + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) + .collect(); + assert_eq!(test_state.claim_queue.len(), 1); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![(para_id, PerParaData::new(97, HeadData(vec![1, 2, 3])))], + }; + // Activate leaf A. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidates A, B, C and D all form a chain + let (candidate_a, pvd_a) = make_candidate( + leaf_a.hash, + leaf_a.number, + para_id, + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + + let (candidate_b, candidate_hash_b) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_a, 2); + let (candidate_c, candidate_hash_c) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_b, 3); + let (candidate_d, candidate_hash_d) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_c, 4); + + let mut all_candidates_resp = vec![ + (candidate_hash_a, leaf_a.hash), + (candidate_hash_b, leaf_a.hash), + (candidate_hash_c, leaf_a.hash), + (candidate_hash_d, leaf_a.hash), + ]; + + // Check candidate tree membership. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::default(), + 5, + all_candidates_resp.clone(), + ) + .await; + + // Activate leaf B, which makes candidates A and B pending availability. 
+ // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(129), + para_data: vec![( + para_id, + PerParaData::new_with_pending( + 98, + HeadData(vec![1, 2, 3]), + vec![ + CandidatePendingAvailability { + candidate_hash: candidate_a.hash(), + descriptor: candidate_a.descriptor.clone(), + commitments: candidate_a.commitments.clone(), + relay_parent_number: leaf_a.number, + max_pov_size: MAX_POV_SIZE, + }, + CandidatePendingAvailability { + candidate_hash: candidate_b.hash(), + descriptor: candidate_b.descriptor.clone(), + commitments: candidate_b.commitments.clone(), + relay_parent_number: leaf_a.number, + max_pov_size: MAX_POV_SIZE, + }, + ], + ), + )], + }; + // Activate leaf B. + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::default(), + 5, + all_candidates_resp.clone(), + ) + .await; + + // Now deactivate leaf A. + deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + Ancestors::default(), + 5, + vec![], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + // Now add leaf C, which will be a sibling (fork) of leaf B. It should also inherit the + // candidates of leaf A (their common parent). + let leaf_c = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(12), + para_data: vec![( + para_id, + PerParaData::new_with_pending(98, HeadData(vec![1, 2, 3]), vec![]), + )], + }; + + activate_leaf_with_parent_hash_fn(&mut virtual_overseer, &leaf_c, &test_state, |hash| { + if hash == leaf_c.hash { + leaf_a.hash + } else { + get_parent_hash(hash) + } + }) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![(candidate_c.hash(), leaf_a.hash), (candidate_d.hash(), leaf_a.hash)], + ) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + para_id, + Ancestors::new(), + 5, + all_candidates_resp.clone(), + ) + .await; + + // Deactivate C and add another candidate that will be present on the deactivated parent A. + // When activating C again it should also get the new candidate. Deactivated leaves are + // still updated with new candidates. 
+ deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + + let (candidate_e, _) = + make_and_back_candidate!(test_state, virtual_overseer, leaf_a, &candidate_d, 5); + activate_leaf_with_parent_hash_fn(&mut virtual_overseer, &leaf_c, &test_state, |hash| { + if hash == leaf_c.hash { + leaf_a.hash + } else { + get_parent_hash(hash) + } + }) + .await; + + get_backable_candidates( + &mut virtual_overseer, + &leaf_b, + para_id, + [candidate_a.hash(), candidate_b.hash()].into_iter().collect(), + 5, + vec![ + (candidate_c.hash(), leaf_a.hash), + (candidate_d.hash(), leaf_a.hash), + (candidate_e.hash(), leaf_a.hash), + ], + ) + .await; + + all_candidates_resp.push((candidate_e.hash(), leaf_a.hash)); + get_backable_candidates( + &mut virtual_overseer, + &leaf_c, + para_id, + Ancestors::new(), + 5, + all_candidates_resp, + ) + .await; + + // Querying the backable candidates for deactivated leaf won't work. + get_backable_candidates( + &mut virtual_overseer, + &leaf_a, + para_id, + Ancestors::new(), + 5, + vec![], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 2); + assert_eq!(view.per_relay_parent.len(), 3); +} + +#[test] +fn handle_active_leaves_update_bounded_implicit_view() { + let para_id = ParaId::from(1); + let mut test_state = TestState::default(); + test_state.claim_queue = test_state + .claim_queue + .into_iter() + .filter(|(_, paras)| matches!(paras.front(), Some(para) if para == ¶_id)) + .collect(); + assert_eq!(test_state.claim_queue.len(), 1); + + let mut leaves = vec![TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![( + para_id, + PerParaData::new(100 - ALLOWED_ANCESTRY_LEN, HeadData(vec![1, 2, 3])), + )], + }]; + + for index in 1..10 { + let prev_leaf = &leaves[index - 1]; + leaves.push(TestLeaf { + number: prev_leaf.number - 1, + hash: get_parent_hash(prev_leaf.hash), + para_data: vec![( + para_id, + PerParaData::new( + prev_leaf.number - 1 - ALLOWED_ANCESTRY_LEN, + HeadData(vec![1, 2, 3]), + ), + )], + }); + } + leaves.reverse(); + + let view = test_harness(|mut virtual_overseer| async { + // Activate first 10 leaves. + for leaf in &leaves[0..10] { + activate_leaf(&mut virtual_overseer, leaf, &test_state).await; + } + + // Now deactivate first 9 leaves. + for leaf in &leaves[0..9] { + deactivate_leaf(&mut virtual_overseer, leaf.hash).await; + } + + virtual_overseer + }); + + // Only latest leaf is active. + assert_eq!(view.active_leaves.len(), 1); + // We keep allowed_ancestry_len implicit leaves. The latest leaf is also present here. 
+ assert_eq!( + view.per_relay_parent.len() as u32, + ASYNC_BACKING_PARAMETERS.allowed_ancestry_len + 1 + ); + + assert_eq!(view.active_leaves, [leaves[9].hash].into_iter().collect()); + assert_eq!( + view.per_relay_parent.into_keys().collect::>(), + leaves[6..].into_iter().map(|l| l.hash).collect::>() + ); } #[test] @@ -2251,7 +2530,8 @@ fn persists_pending_availability_candidate() { ); let candidate_hash_b = candidate_b.hash(); - introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()) + .await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; let candidate_a_pending_av = CandidatePendingAvailability { @@ -2275,6 +2555,15 @@ fn persists_pending_availability_candidate() { }; activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + get_hypothetical_membership( + &mut virtual_overseer, + candidate_hash_a, + candidate_a, + pvd_a, + vec![leaf_a.hash, leaf_b.hash], + ) + .await; + introduce_seconded_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; diff --git a/polkadot/node/core/provisioner/src/lib.rs b/polkadot/node/core/provisioner/src/lib.rs index 3f622a60a059..ffc5859b7756 100644 --- a/polkadot/node/core/provisioner/src/lib.rs +++ b/polkadot/node/core/provisioner/src/lib.rs @@ -273,7 +273,7 @@ async fn handle_communication( let span = state.span.child("provisionable-data"); let _timer = metrics.time_provisionable_data(); - gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data."); + gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data: {:?}", &data); note_provisionable_data(state, &span, data); } @@ -794,9 +794,11 @@ async fn select_candidates( relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { - gum::trace!(target: LOG_TARGET, + gum::trace!( + target: LOG_TARGET, leaf_hash=?relay_parent, - "before GetBackedCandidates"); + "before GetBackedCandidates" + ); let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => diff --git a/polkadot/node/network/statement-distribution/src/v2/mod.rs b/polkadot/node/network/statement-distribution/src/v2/mod.rs index 47d350849b20..109c29f520c5 100644 --- a/polkadot/node/network/statement-distribution/src/v2/mod.rs +++ b/polkadot/node/network/statement-distribution/src/v2/mod.rs @@ -2237,7 +2237,9 @@ async fn fragment_chain_update_inner( // 2. 
find out which are in the frontier gum::debug!( target: LOG_TARGET, - "Calling getHypotheticalMembership from statement distribution" + active_leaf_hash = ?active_leaf_hash, + "Calling getHypotheticalMembership from statement distribution for candidates: {:?}", + &hypotheticals.iter().map(|hypo| hypo.candidate_hash()).collect::>() ); let candidate_memberships = { let (tx, rx) = oneshot::channel(); diff --git a/polkadot/node/subsystem-types/src/messages.rs b/polkadot/node/subsystem-types/src/messages.rs index 4d27ac9b70e3..d067ca468011 100644 --- a/polkadot/node/subsystem-types/src/messages.rs +++ b/polkadot/node/subsystem-types/src/messages.rs @@ -43,13 +43,13 @@ use polkadot_node_primitives::{ }; use polkadot_primitives::{ async_backing, slashing, ApprovalVotingParams, AuthorityDiscoveryId, BackedCandidate, - BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, ExecutorParams, GroupIndex, - GroupRotationInfo, Hash, HeadData, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, - InboundHrmpMessage, MultiDisputeStatementSet, NodeFeatures, OccupiedCoreAssumption, - PersistedValidationData, PvfCheckStatement, PvfExecKind, SessionIndex, SessionInfo, - SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash, - ValidatorId, ValidatorIndex, ValidatorSignature, + BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash, CandidateIndex, + CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreIndex, CoreState, DisputeState, + ExecutorParams, GroupIndex, GroupRotationInfo, Hash, HeadData, Header as BlockHeader, + Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, + NodeFeatures, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, PvfExecKind, + SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ @@ -1126,6 +1126,32 @@ impl HypotheticalCandidate { HypotheticalCandidate::Incomplete { .. } => None, } } + + /// Get the candidate commitments, if the candidate is complete. + pub fn commitments(&self) -> Option<&CandidateCommitments> { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => Some(&receipt.commitments), + HypotheticalCandidate::Incomplete { .. } => None, + } + } + + /// Get the persisted validation data, if the candidate is complete. + pub fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + match *self { + HypotheticalCandidate::Complete { ref persisted_validation_data, .. } => + Some(persisted_validation_data), + HypotheticalCandidate::Incomplete { .. } => None, + } + } + + /// Get the validation code hash, if the candidate is complete. + pub fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => + Some(&receipt.descriptor.validation_code_hash), + HypotheticalCandidate::Incomplete { .. 
} => None, + } + } } /// Request specifying which candidates are either already included diff --git a/polkadot/node/subsystem-util/src/backing_implicit_view.rs b/polkadot/node/subsystem-util/src/backing_implicit_view.rs index 23a758d25715..a805ef8165e5 100644 --- a/polkadot/node/subsystem-util/src/backing_implicit_view.rs +++ b/polkadot/node/subsystem-util/src/backing_implicit_view.rs @@ -25,6 +25,7 @@ use polkadot_primitives::{BlockNumber, Hash, Id as ParaId}; use std::collections::HashMap; use crate::{ + inclusion_emulator::RelayChainBlockInfo, request_session_index_for_child, runtime::{self, prospective_parachains_mode, recv_runtime, ProspectiveParachainsMode}, }; @@ -121,6 +122,26 @@ struct BlockInfo { parent_hash: Hash, } +/// Information about a relay-chain block, to be used when calling this module from prospective +/// parachains. +#[derive(Debug, Clone, PartialEq)] +pub struct BlockInfoProspectiveParachains { + /// The hash of the relay-chain block. + pub hash: Hash, + /// The hash of the parent relay-chain block. + pub parent_hash: Hash, + /// The number of the relay-chain block. + pub number: BlockNumber, + /// The storage-root of the relay-chain block. + pub storage_root: Hash, +} + +impl From for RelayChainBlockInfo { + fn from(value: BlockInfoProspectiveParachains) -> Self { + Self { hash: value.hash, number: value.number, storage_root: value.storage_root } + } +} + impl View { /// Get an iterator over active leaves in the view. pub fn leaves(&self) -> impl Iterator { @@ -178,6 +199,61 @@ impl View { } } + /// Activate a leaf in the view. To be used by the prospective parachains subsystem. + /// + /// This will not request any additional data, as prospective parachains already provides all + /// the required info. + /// NOTE: using `activate_leaf` instead of this function will result in a + /// deadlock, as it calls prospective-parachains under the hood. + /// + /// No-op for known leaves. + pub fn activate_leaf_from_prospective_parachains( + &mut self, + leaf: BlockInfoProspectiveParachains, + ancestors: &[BlockInfoProspectiveParachains], + ) { + if self.leaves.contains_key(&leaf.hash) { + return + } + + // Retain at least `MINIMUM_RETAIN_LENGTH` blocks in storage. + // This helps to avoid Chain API calls when activating leaves in the + // same chain. + let retain_minimum = std::cmp::min( + ancestors.last().map(|a| a.number).unwrap_or(0), + leaf.number.saturating_sub(MINIMUM_RETAIN_LENGTH), + ); + + self.leaves.insert(leaf.hash, ActiveLeafPruningInfo { retain_minimum }); + let mut allowed_relay_parents = AllowedRelayParents { + allowed_relay_parents_contiguous: Vec::with_capacity(ancestors.len()), + // In this case, initialise this to an empty map, as prospective parachains already has + // this data and it won't query the implicit view for it. + minimum_relay_parents: HashMap::new(), + }; + + for ancestor in ancestors { + self.block_info_storage.insert( + ancestor.hash, + BlockInfo { + block_number: ancestor.number, + maybe_allowed_relay_parents: None, + parent_hash: ancestor.parent_hash, + }, + ); + allowed_relay_parents.allowed_relay_parents_contiguous.push(ancestor.hash); + } + + self.block_info_storage.insert( + leaf.hash, + BlockInfo { + block_number: leaf.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + parent_hash: leaf.parent_hash, + }, + ); + } + /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well. /// /// Returns hashes of blocks pruned from storage. 
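For clarity, a minimal usage sketch of the new `activate_leaf_from_prospective_parachains` API follows. The leaf and ancestor values are hypothetical, and a `Default` impl on `View` is assumed here; this is an illustration of the intended call pattern, not code from the patch.

use polkadot_node_subsystem_util::backing_implicit_view::{
	BlockInfoProspectiveParachains, View,
};
use polkadot_primitives::Hash;

fn sketch() {
	// Assumed: `View` can be constructed empty via `Default`.
	let mut view = View::default();

	// One ancestor already known to prospective-parachains (values made up).
	let ancestor = BlockInfoProspectiveParachains {
		hash: Hash::from_low_u64_be(9),
		parent_hash: Hash::from_low_u64_be(8),
		number: 99,
		storage_root: Hash::zero(),
	};
	let leaf = BlockInfoProspectiveParachains {
		hash: Hash::from_low_u64_be(10),
		parent_hash: ancestor.hash,
		number: 100,
		storage_root: Hash::zero(),
	};

	// Populates the view directly from data prospective-parachains already
	// holds, so no runtime or Chain API requests are issued (and no deadlock,
	// unlike calling `activate_leaf` from within prospective-parachains).
	view.activate_leaf_from_prospective_parachains(leaf.clone(), &[ancestor]);

	// Re-activating a known leaf is a no-op.
	view.activate_leaf_from_prospective_parachains(leaf, &[]);
}

Note that the retain logic above takes `ancestors.last()` as the lowest block number, so the ancestors slice is expected to be ordered from newest to oldest.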
diff --git a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
index 2272f048089c..0c3b40743495 100644
--- a/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
+++ b/polkadot/node/subsystem-util/src/inclusion_emulator/mod.rs
@@ -39,8 +39,8 @@
 ///
 /// # Usage
 ///
-/// It's expected that the users of this module will be building up chains of
-/// [`Fragment`]s and consistently pruning and adding to the chains.
+/// It's expected that the users of this module will be building up chains or trees of
+/// [`Fragment`]s and consistently pruning and adding to them.
 ///
 /// ## Operating Constraints
 ///
@@ -56,55 +56,19 @@
 ///
 /// ## Fragment Chains
 ///
-/// For simplicity and practicality, we expect that collators of the same parachain are
-/// cooperating and don't create parachain forks or cycles on the same relay chain active leaf.
-/// Therefore, higher-level code should maintain one fragment chain for each active leaf (not a
-/// fragment tree). If parachains do create forks, their performance in regards to async
-/// backing and elastic scaling will suffer, because different validators will have different
-/// predictions of the future.
+/// For the sake of this module, we don't care how higher-level code is managing parachain
+/// fragments, whether they're kept as a chain or a tree. In reality,
+/// prospective-parachains maintains, for every active leaf, a chain of the "best" backable
+/// candidates and a storage of potential candidates which may be added to this chain in the
+/// future.
 ///
 /// As the relay-chain grows, some predictions come true and others come false.
-/// And new predictions get made. These three changes correspond distinctly to the
-/// 3 primary operations on fragment chains.
+/// And new predictions get made. Higher-level code is responsible for adding to and pruning
+/// the fragment chains.
 ///
 /// Avoiding fragment-chain blowup is beyond the scope of this module. Higher-level code must
 /// ensure proper spam protection.
 ///
-/// ### Pruning Fragment Chains
-///
-/// When the relay-chain advances, we want to compare the new constraints of that relay-parent
-/// to the root of the fragment chain we have. There are 3 cases:
-///
-/// 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
-///    This is the "prediction still uncertain" case. (Corresponds to some candidates still
-///    being pending availability).
-///
-/// 2. The root fragment (potentially along with a number of descendants) is invalid under the
-///    new constraints because it has been included by the relay-chain. In this case, we can
-///    discard the included chain and split & re-root the chain under its descendants and
-///    compare to the new constraints again. This is the "prediction came true" case.
-///
-/// 3. The root fragment becomes invalid under the new constraints for any reason (if for
-///    example the parachain produced a fork and the block producer picked a different
-///    candidate to back). In this case we can discard the entire fragment chain. This is the
-///    "prediction came false" case.
-///
-/// This is all a bit of a simplification because it assumes that the relay-chain advances
-/// without forks and is finalized instantly. In practice, the set of fragment-chains needs to
-/// be observable from the perspective of a few different possible forks of the relay-chain and
-/// not pruned too eagerly.

-///
-/// Note that the fragments themselves don't need to change and the only thing we care about
-/// is whether the predictions they represent are still valid.
-///
-/// ### Extending Fragment Chains
-///
-/// As predictions fade into the past, new ones should be stacked on top.
-///
-/// Every new relay-chain block is an opportunity to make a new prediction about the future.
-/// Higher-level logic should decide whether to build upon an existing chain or whether
-/// to create a new fragment-chain.
-///
 /// ### Code Upgrades
 ///
 /// Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade
@@ -116,9 +80,11 @@
 ///
 /// That means a few blocks of execution time lost, which is not a big deal for code upgrades
 /// that in practice happen at most once every few weeks.
+use polkadot_node_subsystem::messages::HypotheticalCandidate;
 use polkadot_primitives::{
-	async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments, Hash,
-	HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash,
+	async_backing::Constraints as PrimitiveConstraints, BlockNumber, CandidateCommitments,
+	CandidateHash, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction,
+	ValidationCodeHash,
 };
 use std::{collections::HashMap, sync::Arc};

@@ -702,6 +668,11 @@ impl Fragment {
 		&self.candidate
 	}

+	/// Get a cheap ref-counted copy of the underlying prospective candidate.
+	pub fn candidate_clone(&self) -> Arc<ProspectiveCandidate> {
+		self.candidate.clone()
+	}
+
 	/// Modifications to constraints based on the outputs of the candidate.
 	pub fn constraint_modifications(&self) -> &ConstraintModifications {
 		&self.modifications
@@ -791,6 +762,55 @@ fn validate_against_constraints(
 		.map_err(FragmentValidityError::OutputsInvalid)
 }

+/// Trait for a hypothetical or concrete candidate, as needed when assessing the validity of a
+/// potential candidate.
+pub trait HypotheticalOrConcreteCandidate {
+	/// Return a reference to the candidate commitments, if present.
+	fn commitments(&self) -> Option<&CandidateCommitments>;
+	/// Return a reference to the persisted validation data, if present.
+	fn persisted_validation_data(&self) -> Option<&PersistedValidationData>;
+	/// Return a reference to the validation code hash, if present.
+	fn validation_code_hash(&self) -> Option<&ValidationCodeHash>;
+	/// Return the parent head hash.
+	fn parent_head_data_hash(&self) -> Hash;
+	/// Return the output head hash, if present.
+	fn output_head_data_hash(&self) -> Option<Hash>;
+	/// Return the relay parent hash.
+	fn relay_parent(&self) -> Hash;
+	/// Return the candidate hash.
+ fn candidate_hash(&self) -> CandidateHash; +} + +impl HypotheticalOrConcreteCandidate for HypotheticalCandidate { + fn commitments(&self) -> Option<&CandidateCommitments> { + self.commitments() + } + + fn persisted_validation_data(&self) -> Option<&PersistedValidationData> { + self.persisted_validation_data() + } + + fn validation_code_hash(&self) -> Option<&ValidationCodeHash> { + self.validation_code_hash() + } + + fn parent_head_data_hash(&self) -> Hash { + self.parent_head_data_hash() + } + + fn output_head_data_hash(&self) -> Option { + self.output_head_data_hash() + } + + fn relay_parent(&self) -> Hash { + self.relay_parent() + } + + fn candidate_hash(&self) -> CandidateHash { + self.candidate_hash() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/prdoc/pr_4937.prdoc b/prdoc/pr_4937.prdoc new file mode 100644 index 000000000000..37b7bc3dda59 --- /dev/null +++ b/prdoc/pr_4937.prdoc @@ -0,0 +1,21 @@ +title: "prospective-parachains rework: take II" + +doc: + - audience: Node Dev + description: | + Add back support for backing parachain forks. Once a candidate reaches the backing quorum, + validators use a shared way of picking the winning fork to back on-chain. This was done in + order to increase the likelihood that all backers will vote on the winning fork. + The functionality of backing unconnected candidates introduced by the previous rework is preserved. + +crates: + - name: polkadot-node-core-prospective-parachains + bump: minor + - name: polkadot-node-subsystem-types + bump: minor + - name: polkadot-node-subsystem-util + bump: minor + - name: polkadot-node-core-provisioner + bump: none + - name: polkadot-statement-distribution + bump: none From ebcbca3ff606b22b5eb81bcbfaa9309752d64dde Mon Sep 17 00:00:00 2001 From: jserrat <35823283+Jpserrat@users.noreply.github.com> Date: Mon, 12 Aug 2024 05:40:04 -0300 Subject: [PATCH 18/74] xcm-executor: allow deposit of multiple assets if at least one of them satisfies ED (#4460) Closes #4242 XCM programs that deposit assets to some new (empty) account will now succeed if at least one of the deposited assets satisfies ED. Before this change, the requirement was that the _first_ asset had to satisfy ED, but assets order can be changed during reanchoring so it is not reliable. With this PR, ordering doesn't matter, any one(s) of them can satisfy ED for the whole deposit to work. 
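To make the new semantics concrete, here is a self-contained toy model of the idea, not the xcm-executor's actual code: all type and function names below are illustrative stand-ins, and the ED rule is deliberately simplified. Assets whose deposit fails the existential-deposit check are set aside and retried after the others, so any one of the deposited assets can create the account, regardless of ordering.

// Toy model of "any asset may satisfy ED"; all names here are stand-ins.
#[derive(Clone, Debug)]
struct Asset { id: u32, amount: u128 }

#[derive(Debug, PartialEq)]
enum DepositError { BelowExistentialDeposit }

// Toy ED rule: a deposit into a non-existent account only succeeds if it is
// at least 10 units of the native asset (id 0).
fn deposit_one(account_exists: &mut bool, asset: &Asset) -> Result<(), DepositError> {
	if *account_exists || (asset.id == 0 && asset.amount >= 10) {
		*account_exists = true;
		Ok(())
	} else {
		Err(DepositError::BelowExistentialDeposit)
	}
}

// Deposit all assets, letting ANY of them satisfy ED: assets failing the ED
// check are set aside and retried once the rest have been deposited, so the
// order of the assets no longer matters.
fn deposit_all(mut account_exists: bool, assets: &[Asset]) -> Result<(), DepositError> {
	let mut retry = Vec::new();
	for asset in assets {
		if deposit_one(&mut account_exists, asset).is_err() {
			retry.push(asset);
		}
	}
	for asset in retry {
		deposit_one(&mut account_exists, asset)?;
	}
	Ok(())
}

fn main() {
	// The ED-satisfying asset comes last (e.g. because reanchoring reordered
	// the assets), yet the whole deposit still succeeds.
	let assets = vec![Asset { id: 5, amount: 7 }, Asset { id: 0, amount: 50 }];
	assert!(deposit_all(false, &assets).is_ok());
}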
Kusama address: FkB6QEo8VnV3oifugNj5NeVG3Mvq1zFbrUu4P5YwRoe5mQN --------- Co-authored-by: Adrian Catangiu Co-authored-by: Francisco Aguirre Co-authored-by: command-bot <> --- .../emulated/common/src/lib.rs | 1 + .../tests/assets/asset-hub-rococo/src/lib.rs | 3 +- .../src/tests/reserve_transfer.rs | 20 +- .../tests/assets/asset-hub-westend/src/lib.rs | 3 +- .../src/tests/reserve_transfer.rs | 20 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 4 +- .../tests/people/people-rococo/src/lib.rs | 1 - .../people-rococo/src/tests/teleport.rs | 21 +- .../tests/people/people-westend/src/lib.rs | 1 - .../people-westend/src/tests/teleport.rs | 21 +- .../xcm/pallet_xcm_benchmarks_fungible.rs | 51 ++- .../xcm/pallet_xcm_benchmarks_fungible.rs | 76 ++--- .../xcm/pallet_xcm_benchmarks_fungible.rs | 42 +-- .../xcm/pallet_xcm_benchmarks_fungible.rs | 74 ++-- .../xcm/pallet_xcm_benchmarks_fungible.rs | 36 +- .../xcm/pallet_xcm_benchmarks_fungible.rs | 36 +- .../people-rococo/src/weights/xcm/mod.rs | 16 +- .../xcm/pallet_xcm_benchmarks_fungible.rs | 203 ++++++----- .../xcm/pallet_xcm_benchmarks_generic.rs | 318 ++++++++---------- .../people-westend/src/weights/xcm/mod.rs | 21 +- .../xcm/pallet_xcm_benchmarks_fungible.rs | 203 ++++++----- .../xcm/pallet_xcm_benchmarks_generic.rs | 318 ++++++++---------- .../xcm/pallet_xcm_benchmarks_fungible.rs | 62 ++-- .../xcm/pallet_xcm_benchmarks_fungible.rs | 70 ++-- polkadot/xcm/xcm-executor/src/lib.rs | 55 ++- prdoc/pr_4460.prdoc | 24 ++ 26 files changed, 869 insertions(+), 831 deletions(-) create mode 100644 prdoc/pr_4460.prdoc diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 7077fbbb0a9a..b3dec175e112 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -132,6 +132,7 @@ pub mod accounts { pub const EVE_STASH: &str = "Eve//stash"; pub const FERDIE_STASH: &str = "Ferdie//stash"; pub const FERDIE_BEEFY: &str = "Ferdie//stash"; + pub const DUMMY_EMPTY: &str = "JohnDoe"; pub fn init_balances() -> Vec { vec![ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 6309c0584107..1a30fac6ba91 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -35,7 +35,8 @@ mod imports { // Cumulus pub use asset_test_utils::xcm_helpers; pub use emulated_integration_tests_common::{ - test_parachain_is_trusted_teleporter, + accounts::DUMMY_EMPTY, + get_account_id_from_seed, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 313fa953dd05..329dbcac4b42 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -941,6 +941,9 @@ fn reserve_transfer_assets_from_system_para_to_para() { /// Reserve Transfers of a 
random asset and native asset from Parachain to System Para should /// work +/// Receiver is empty account to show deposit works as long as transfer includes enough DOT for ED. +/// Once we have https://github.com/paritytech/polkadot-sdk/issues/5298, +/// we should do equivalent test with USDT instead of DOT. #[test] fn reserve_transfer_assets_from_para_to_system_para() { // Init values for Parachain @@ -965,24 +968,23 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Fund Parachain's sender account with some foreign assets PenpalA::mint_foreign_asset( penpal_asset_owner_signer.clone(), - asset_location_on_penpal, + asset_location_on_penpal.clone(), sender.clone(), asset_amount_to_send * 2, ); // Fund Parachain's sender account with some system assets PenpalA::mint_foreign_asset( penpal_asset_owner_signer, - system_asset_location_on_penpal, + system_asset_location_on_penpal.clone(), sender.clone(), fee_amount_to_send * 2, ); + // Beneficiary is a new (empty) account + let receiver = get_account_id_from_seed::(DUMMY_EMPTY); // Init values for System Parachain - let receiver = AssetHubRococoReceiver::get(); let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - let system_para_native_asset_location = RelayLocation::get(); - let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); let ah_asset_owner = AssetHubRococoAssetOwner::get(); let ah_asset_owner_signer = ::RuntimeOrigin::signed(ah_asset_owner); @@ -1017,11 +1019,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query initial balances let sender_system_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &sender) + >::balance(system_asset_location_on_penpal.clone(), &sender) }); let sender_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location.clone(), &sender) + >::balance(asset_location_on_penpal.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; let receiver_assets_before = AssetHubRococo::execute_with(|| { @@ -1038,11 +1040,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query final balances let sender_system_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &sender) + >::balance(system_asset_location_on_penpal, &sender) }); let sender_foreign_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &sender) + >::balance(asset_location_on_penpal, &sender) }); let receiver_balance_after = test.receiver.balance; let receiver_assets_after = AssetHubRococo::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 060c3fb39254..437268a19e59 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -35,7 +35,8 @@ mod imports { // Cumulus pub use asset_test_utils::xcm_helpers; pub use emulated_integration_tests_common::{ - test_parachain_is_trusted_teleporter, + accounts::DUMMY_EMPTY, + 
get_account_id_from_seed, test_parachain_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 82ef74fdab10..729de65382f8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -942,6 +942,9 @@ fn reserve_transfer_assets_from_system_para_to_para() { /// Reserve Transfers of a random asset and native asset from Parachain to System Para should /// work +/// Receiver is empty account to show deposit works as long as transfer includes enough DOT for ED. +/// Once we have https://github.com/paritytech/polkadot-sdk/issues/5298, +/// we should do equivalent test with USDT instead of DOT. #[test] fn reserve_transfer_assets_from_para_to_system_para() { // Init values for Parachain @@ -966,25 +969,24 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Fund Parachain's sender account with some foreign assets PenpalA::mint_foreign_asset( penpal_asset_owner_signer.clone(), - asset_location_on_penpal, + asset_location_on_penpal.clone(), sender.clone(), asset_amount_to_send * 2, ); // Fund Parachain's sender account with some system assets PenpalA::mint_foreign_asset( penpal_asset_owner_signer, - system_asset_location_on_penpal, + system_asset_location_on_penpal.clone(), sender.clone(), fee_amount_to_send * 2, ); + // Beneficiary is a new (empty) account + let receiver = get_account_id_from_seed::(DUMMY_EMPTY); // Init values for System Parachain - let receiver = AssetHubWestendReceiver::get(); let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - let system_para_native_asset_location = RelayLocation::get(); - let system_para_foreign_asset_location = PenpalLocalReservableFromAssetHub::get(); let ah_asset_owner = AssetHubWestendAssetOwner::get(); let ah_asset_owner_signer = ::RuntimeOrigin::signed(ah_asset_owner); @@ -1019,11 +1021,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query initial balances let sender_system_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location.clone(), &sender) + >::balance(system_asset_location_on_penpal.clone(), &sender) }); let sender_foreign_assets_before = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location.clone(), &sender) + >::balance(asset_location_on_penpal.clone(), &sender) }); let receiver_balance_before = test.receiver.balance; let receiver_assets_before = AssetHubWestend::execute_with(|| { @@ -1040,11 +1042,11 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Query final balances let sender_system_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_native_asset_location, &sender) + >::balance(system_asset_location_on_penpal, &sender) }); let sender_foreign_assets_after = PenpalA::execute_with(|| { type ForeignAssets = ::ForeignAssets; - >::balance(system_para_foreign_asset_location, &sender) + 
>::balance(asset_location_on_penpal, &sender) }); let receiver_balance_after = test.receiver.balance; let receiver_assets_after = AssetHubWestend::execute_with(|| { diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 40a1968ec557..3e8d3357caa8 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -669,8 +669,8 @@ fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_insufficie #[test] fn send_token_from_ethereum_to_non_existent_account_on_asset_hub_with_sufficient_fee_but_do_not_satisfy_ed( ) { - // On AH the xcm fee is 33_873_024 and the ED is 3_300_000 - send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 36_000_000); + // On AH the xcm fee is 26_789_690 and the ED is 3_300_000 + send_token_from_ethereum_to_asset_hub_with_fee([1; 32], 30_000_000); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs index 3c0533f775e2..b725c24fbea3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs @@ -20,7 +20,6 @@ mod imports { // Substrate pub use frame_support::{ assert_ok, - pallet_prelude::Weight, sp_runtime::{AccountId32, DispatchResult}, traits::fungibles::Inspect, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index 4410d1bd40dc..c28b305b2c94 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -17,7 +17,7 @@ use crate::imports::*; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(627_959_000, 7_200))); + Rococo::assert_xcm_pallet_attempted_complete(None); assert_expected_events!( Rococo, @@ -39,11 +39,7 @@ fn relay_origin_assertions(t: RelayToSystemParaTest) { fn relay_dest_assertions(t: SystemParaToRelayTest) { type RuntimeEvent = ::RuntimeEvent; - Rococo::assert_ump_queue_processed( - true, - Some(PeopleRococo::para_id()), - Some(Weight::from_parts(304_266_000, 7_186)), - ); + Rococo::assert_ump_queue_processed(true, Some(PeopleRococo::para_id()), None); assert_expected_events!( Rococo, @@ -62,20 +58,13 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { } fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { - Rococo::assert_ump_queue_processed( - false, - Some(PeopleRococo::para_id()), - Some(Weight::from_parts(157_718_000, 3_593)), - ); + Rococo::assert_ump_queue_processed(false, Some(PeopleRococo::para_id()), None); } fn para_origin_assertions(t: SystemParaToRelayTest) { type RuntimeEvent = ::RuntimeEvent; - PeopleRococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 600_000_000, - 7_000, - ))); + 
PeopleRococo::assert_xcm_pallet_attempted_complete(None); PeopleRococo::assert_parachain_system_ump_sent(); @@ -94,7 +83,7 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { fn para_dest_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - PeopleRococo::assert_dmp_queue_complete(Some(Weight::from_parts(162_456_000, 0))); + PeopleRococo::assert_dmp_queue_complete(None); assert_expected_events!( PeopleRococo, diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs index 689409fe5040..386a1b91c85b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs @@ -19,7 +19,6 @@ mod imports { // Substrate pub use frame_support::{ assert_ok, - pallet_prelude::Weight, sp_runtime::{AccountId32, DispatchResult}, traits::fungibles::Inspect, }; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 6fd3cdeb61fb..294583399011 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -17,7 +17,7 @@ use crate::imports::*; fn relay_origin_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(627_959_000, 7_200))); + Westend::assert_xcm_pallet_attempted_complete(None); assert_expected_events!( Westend, @@ -39,11 +39,7 @@ fn relay_origin_assertions(t: RelayToSystemParaTest) { fn relay_dest_assertions(t: SystemParaToRelayTest) { type RuntimeEvent = ::RuntimeEvent; - Westend::assert_ump_queue_processed( - true, - Some(PeopleWestend::para_id()), - Some(Weight::from_parts(304_266_000, 7_186)), - ); + Westend::assert_ump_queue_processed(true, Some(PeopleWestend::para_id()), None); assert_expected_events!( Westend, @@ -62,20 +58,13 @@ fn relay_dest_assertions(t: SystemParaToRelayTest) { } fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { - Westend::assert_ump_queue_processed( - false, - Some(PeopleWestend::para_id()), - Some(Weight::from_parts(157_718_000, 3_593)), - ); + Westend::assert_ump_queue_processed(false, Some(PeopleWestend::para_id()), None); } fn para_origin_assertions(t: SystemParaToRelayTest) { type RuntimeEvent = ::RuntimeEvent; - PeopleWestend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts( - 351_425_000, - 3_593, - ))); + PeopleWestend::assert_xcm_pallet_attempted_complete(None); PeopleWestend::assert_parachain_system_ump_sent(); @@ -94,7 +83,7 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { fn para_dest_assertions(t: RelayToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; - PeopleWestend::assert_dmp_queue_complete(Some(Weight::from_parts(162_456_000, 0))); + PeopleWestend::assert_dmp_queue_complete(None); assert_expected_events!( PeopleWestend, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 03d3785dccbd..3d6ae6ddd1d2 100644 --- 
a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 21_643_000 picoseconds. - Weight::from_parts(22_410_000, 3593) + // Minimum execution time: 34_180_000 picoseconds. + Weight::from_parts(34_745_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 43_758_000 picoseconds. - Weight::from_parts(44_654_000, 6196) + // Minimum execution time: 42_638_000 picoseconds. + Weight::from_parts(43_454_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,20 +90,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 87_978_000 picoseconds. - Weight::from_parts(88_517_000, 8799) + // Minimum execution time: 102_916_000 picoseconds. + Weight::from_parts(105_699_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 6_883_000 picoseconds. - Weight::from_parts(6_979_000, 1489) - .saturating_add(T::DbWeight::get().reads(1)) + // Estimated: `0` + // Minimum execution time: 1_805_000 picoseconds. + Weight::from_parts(1_901_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -125,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 198_882_000 picoseconds. - Weight::from_parts(199_930_000, 6196) + // Minimum execution time: 108_018_000 picoseconds. + Weight::from_parts(110_310_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -134,8 +131,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_343_000 picoseconds. - Weight::from_parts(3_487_000, 0) + // Minimum execution time: 3_507_000 picoseconds. 
+ Weight::from_parts(3_724_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -143,13 +140,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 19_399_000 picoseconds. - Weight::from_parts(19_659_000, 3593) + // Minimum execution time: 26_269_000 picoseconds. + Weight::from_parts(26_706_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -160,6 +155,8 @@ impl WeightInfo { // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) @@ -168,8 +165,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 59_017_000 picoseconds. - Weight::from_parts(60_543_000, 6196) + // Minimum execution time: 84_759_000 picoseconds. + Weight::from_parts(86_157_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -193,8 +190,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 45_409_000 picoseconds. - Weight::from_parts(47_041_000, 3610) + // Minimum execution time: 50_876_000 picoseconds. + Weight::from_parts(51_512_000, 3610) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index fe8d18613925..f7891aedc496 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,24 +1,25 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -53,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 20_295_000 picoseconds. - Weight::from_parts(21_142_000, 3593) + // Minimum execution time: 32_612_000 picoseconds. + Weight::from_parts(33_359_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -64,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 42_356_000 picoseconds. - Weight::from_parts(43_552_000, 6196) + // Minimum execution time: 41_144_000 picoseconds. + Weight::from_parts(41_788_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -89,20 +90,17 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `8799` - // Minimum execution time: 85_553_000 picoseconds. - Weight::from_parts(87_177_000, 8799) + // Minimum execution time: 101_340_000 picoseconds. + Weight::from_parts(103_686_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } - // Storage: `ParachainInfo::ParachainId` (r:1 w:0) - // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) pub fn reserve_asset_deposited() -> Weight { // Proof Size summary in bytes: // Measured: `0` - // Estimated: `1489` - // Minimum execution time: 6_166_000 picoseconds. - Weight::from_parts(6_352_000, 1489) - .saturating_add(T::DbWeight::get().reads(1)) + // Estimated: `0` + // Minimum execution time: 1_682_000 picoseconds. + Weight::from_parts(1_734_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -124,8 +122,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 184_462_000 picoseconds. 
- Weight::from_parts(189_593_000, 6196) + // Minimum execution time: 107_335_000 picoseconds. + Weight::from_parts(109_665_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -133,8 +131,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_018_000 picoseconds. - Weight::from_parts(3_098_000, 0) + // Minimum execution time: 3_345_000 picoseconds. + Weight::from_parts(3_548_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -142,13 +140,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 18_583_000 picoseconds. - Weight::from_parts(19_057_000, 3593) + // Minimum execution time: 25_560_000 picoseconds. + Weight::from_parts(26_779_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -159,6 +155,8 @@ impl WeightInfo { // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) @@ -167,8 +165,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `6196` - // Minimum execution time: 56_666_000 picoseconds. - Weight::from_parts(58_152_000, 6196) + // Minimum execution time: 84_453_000 picoseconds. + Weight::from_parts(86_755_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,8 +190,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 44_197_000 picoseconds. - Weight::from_parts(45_573_000, 3610) + // Minimum execution time: 50_463_000 picoseconds. + Weight::from_parts(51_587_000, 3610) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 057dc4313510..f2cee0e3e807 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! 
THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-11-14, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-yprdrvc7-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 19_610_000 picoseconds. - Weight::from_parts(19_980_000, 3593) + // Minimum execution time: 30_988_000 picoseconds. + Weight::from_parts(31_496_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,8 +65,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 44_411_000 picoseconds. - Weight::from_parts(45_110_000, 6196) + // Minimum execution time: 42_805_000 picoseconds. + Weight::from_parts(44_207_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -90,8 +90,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `223` // Estimated: `8799` - // Minimum execution time: 89_739_000 picoseconds. - Weight::from_parts(91_256_000, 8799) + // Minimum execution time: 103_376_000 picoseconds. + Weight::from_parts(104_770_000, 8799) .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } @@ -124,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 60_045_000 picoseconds. - Weight::from_parts(60_710_000, 6196) + // Minimum execution time: 71_234_000 picoseconds. + Weight::from_parts(72_990_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -133,8 +133,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_257_000 picoseconds. - Weight::from_parts(3_392_000, 0) + // Minimum execution time: 2_636_000 picoseconds. + Weight::from_parts(2_777_000, 0) } // Storage: `System::Account` (r:1 w:1) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -142,13 +142,11 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 19_423_000 picoseconds. - Weight::from_parts(19_823_000, 3593) + // Minimum execution time: 23_839_000 picoseconds. 
+ Weight::from_parts(24_568_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: `System::Account` (r:2 w:2) - // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -159,6 +157,8 @@ impl WeightInfo { // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) @@ -167,8 +167,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `122` // Estimated: `6196` - // Minimum execution time: 60_484_000 picoseconds. - Weight::from_parts(61_634_000, 6196) + // Minimum execution time: 78_345_000 picoseconds. + Weight::from_parts(80_558_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -192,8 +192,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3593` - // Minimum execution time: 44_863_000 picoseconds. - Weight::from_parts(45_549_000, 3593) + // Minimum execution time: 46_614_000 picoseconds. + Weight::from_parts(47_354_000, 3593) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 4310b2456475..5bd1d1680aa1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-26, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-vmdtonbz-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` -//! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot-parachain @@ -33,10 +33,10 @@ // --heap-pages=4096 // --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::fungible -// --chain=bridge-hub-rococo-dev +// --chain=bridge-hub-westend-dev // --header=./cumulus/file_header.txt // --template=./cumulus/templates/xcm-bench-template.hbs -// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/ +// --output=./cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -54,8 +54,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 19_037_000 picoseconds. - Weight::from_parts(19_602_000, 3593) + // Minimum execution time: 30_218_000 picoseconds. + Weight::from_parts(30_783_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -65,15 +65,13 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 43_115_000 picoseconds. - Weight::from_parts(43_897_000, 6196) + // Minimum execution time: 42_631_000 picoseconds. + Weight::from_parts(43_127_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } // Storage: `System::Account` (r:3 w:3) // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -90,11 +88,11 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `294` + // Measured: `260` // Estimated: `8799` - // Minimum execution time: 90_267_000 picoseconds. - Weight::from_parts(91_460_000, 8799) - .saturating_add(T::DbWeight::get().reads(11)) + // Minimum execution time: 100_978_000 picoseconds. + Weight::from_parts(102_819_000, 8799) + .saturating_add(T::DbWeight::get().reads(10)) .saturating_add(T::DbWeight::get().writes(5)) } // Storage: `Benchmark::Override` (r:0 w:0) @@ -106,8 +104,6 @@ impl WeightInfo { // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. Weight::from_parts(18_446_744_073_709_551_000, 0) } - // Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) - // Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0) // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) // Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0) @@ -126,19 +122,19 @@ impl WeightInfo { // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `242` + // Measured: `208` // Estimated: `6196` - // Minimum execution time: 60_477_000 picoseconds. 
@@ -126,19 +122,19 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_reserve_withdraw() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `242`
+		// Measured: `208`
 		// Estimated: `6196`
-		// Minimum execution time: 60_477_000 picoseconds.
-		Weight::from_parts(61_314_000, 6196)
-			.saturating_add(T::DbWeight::get().reads(10))
+		// Minimum execution time: 71_533_000 picoseconds.
+		Weight::from_parts(72_922_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
 	pub fn receive_teleported_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_996_000 picoseconds.
-		Weight::from_parts(3_107_000, 0)
+		// Minimum execution time: 2_863_000 picoseconds.
+		Weight::from_parts(2_997_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -146,15 +142,11 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `52`
 	// Estimated: `3593`
-	// Minimum execution time: 18_907_000 picoseconds.
-	Weight::from_parts(19_475_000, 3593)
+	// Minimum execution time: 23_763_000 picoseconds.
+	Weight::from_parts(24_438_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(1))
 		.saturating_add(T::DbWeight::get().writes(1))
 	}
-	// Storage: `System::Account` (r:2 w:2)
-	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
-	// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
-	// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
@@ -165,21 +157,21 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
 	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
 	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `193`
+		// Measured: `159`
 		// Estimated: `6196`
-		// Minimum execution time: 59_143_000 picoseconds.
-		Weight::from_parts(60_316_000, 6196)
-			.saturating_add(T::DbWeight::get().reads(10))
+		// Minimum execution time: 78_182_000 picoseconds.
+		Weight::from_parts(79_575_000, 6196)
+			.saturating_add(T::DbWeight::get().reads(9))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
-	// Storage: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
-	// Proof: UNKNOWN KEY `0x48297505634037ef48c848c99c0b1f1b` (r:1 w:0)
 	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
 	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
 	// Storage: `ParachainSystem::UpwardDeliveryFeeFactor` (r:1 w:0)
@@ -198,11 +190,11 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
-		// Measured: `141`
-		// Estimated: `3606`
-		// Minimum execution time: 44_459_000 picoseconds.
-		Weight::from_parts(45_365_000, 3606)
-			.saturating_add(T::DbWeight::get().reads(9))
+		// Measured: `107`
+		// Estimated: `3593`
+		// Minimum execution time: 46_767_000 picoseconds.
+		Weight::from_parts(47_823_000, 3593)
+			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
 }
diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 73a719805307..c8dbdadf7b15 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
@@ -54,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `101`
 	// Estimated: `3593`
-	// Minimum execution time: 26_642_000 picoseconds.
-	Weight::from_parts(27_583_000, 3593)
+	// Minimum execution time: 29_812_000 picoseconds.
+	Weight::from_parts(30_526_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(1))
 		.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -65,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `101`
 	// Estimated: `6196`
-	// Minimum execution time: 35_124_000 picoseconds.
-	Weight::from_parts(36_510_000, 6196)
+	// Minimum execution time: 39_430_000 picoseconds.
+	Weight::from_parts(39_968_000, 6196)
 		.saturating_add(T::DbWeight::get().reads(2))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -88,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `207`
 	// Estimated: `6196`
-	// Minimum execution time: 55_950_000 picoseconds.
-	Weight::from_parts(57_207_000, 6196)
+	// Minimum execution time: 65_555_000 picoseconds.
+	Weight::from_parts(67_161_000, 6196)
 		.saturating_add(T::DbWeight::get().reads(8))
 		.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -118,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3571`
-	// Minimum execution time: 23_747_000 picoseconds.
-	Weight::from_parts(24_424_000, 3571)
+	// Minimum execution time: 30_491_000 picoseconds.
+	Weight::from_parts(31_991_000, 3571)
 		.saturating_add(T::DbWeight::get().reads(6))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -127,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `0`
 	// Estimated: `0`
-	// Minimum execution time: 1_853_000 picoseconds.
-	Weight::from_parts(1_998_000, 0)
+	// Minimum execution time: 2_568_000 picoseconds.
+	Weight::from_parts(2_703_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -136,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `0`
 	// Estimated: `3593`
-	// Minimum execution time: 19_164_000 picoseconds.
-	Weight::from_parts(19_643_000, 3593)
+	// Minimum execution time: 22_159_000 picoseconds.
+	Weight::from_parts(22_517_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(1))
 		.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -159,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3593`
-	// Minimum execution time: 48_708_000 picoseconds.
-	Weight::from_parts(49_610_000, 3593)
+	// Minimum execution time: 57_126_000 picoseconds.
+	Weight::from_parts(58_830_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(7))
 		.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -180,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3571`
-	// Minimum execution time: 20_586_000 picoseconds.
-	Weight::from_parts(21_147_000, 3571)
+	// Minimum execution time: 26_589_000 picoseconds.
+	Weight::from_parts(27_285_000, 3571)
 		.saturating_add(T::DbWeight::get().reads(6))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index ddfc599fa579..935636651eb9 100644
--- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -17,9 +17,9 @@
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
-//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
 //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024
 
 // Executed Command:
@@ -54,8 +54,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `101`
 	// Estimated: `3593`
-	// Minimum execution time: 26_842_000 picoseconds.
-	Weight::from_parts(27_606_000, 3593)
+	// Minimum execution time: 29_866_000 picoseconds.
+	Weight::from_parts(30_363_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(1))
 		.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -65,8 +65,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `101`
 	// Estimated: `6196`
-	// Minimum execution time: 35_076_000 picoseconds.
-	Weight::from_parts(36_109_000, 6196)
+	// Minimum execution time: 39_434_000 picoseconds.
+	Weight::from_parts(40_274_000, 6196)
 		.saturating_add(T::DbWeight::get().reads(2))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -88,8 +88,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `207`
 	// Estimated: `6196`
-	// Minimum execution time: 56_951_000 picoseconds.
-	Weight::from_parts(58_286_000, 6196)
+	// Minimum execution time: 66_303_000 picoseconds.
+	Weight::from_parts(68_294_000, 6196)
 		.saturating_add(T::DbWeight::get().reads(8))
 		.saturating_add(T::DbWeight::get().writes(4))
 	}
@@ -118,8 +118,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3571`
-	// Minimum execution time: 23_796_000 picoseconds.
-	Weight::from_parts(24_692_000, 3571)
+	// Minimum execution time: 30_523_000 picoseconds.
+	Weight::from_parts(31_289_000, 3571)
 		.saturating_add(T::DbWeight::get().reads(6))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -127,8 +127,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `0`
 	// Estimated: `0`
-	// Minimum execution time: 1_990_000 picoseconds.
-	Weight::from_parts(2_142_000, 0)
+	// Minimum execution time: 2_517_000 picoseconds.
+	Weight::from_parts(2_634_000, 0)
 	}
 	// Storage: `System::Account` (r:1 w:1)
 	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
@@ -136,8 +136,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `0`
 	// Estimated: `3593`
-	// Minimum execution time: 19_572_000 picoseconds.
-	Weight::from_parts(20_017_000, 3593)
+	// Minimum execution time: 22_151_000 picoseconds.
+	Weight::from_parts(22_907_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(1))
 		.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -159,8 +159,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3593`
-	// Minimum execution time: 49_336_000 picoseconds.
-	Weight::from_parts(50_507_000, 3593)
+	// Minimum execution time: 57_763_000 picoseconds.
+	Weight::from_parts(58_941_000, 3593)
 		.saturating_add(T::DbWeight::get().reads(7))
 		.saturating_add(T::DbWeight::get().writes(3))
 	}
@@ -180,8 +180,8 @@ impl<T: frame_system::Config> WeightInfo<T> {
 	// Proof Size summary in bytes:
 	// Measured: `106`
 	// Estimated: `3571`
-	// Minimum execution time: 21_230_000 picoseconds.
-	Weight::from_parts(21_870_000, 3571)
+	// Minimum execution time: 26_322_000 picoseconds.
+	Weight::from_parts(27_197_000, 3571)
 		.saturating_add(T::DbWeight::get().reads(6))
 		.saturating_add(T::DbWeight::get().writes(2))
 	}
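Two regularities can be read straight off the tables above, which helps when sanity-checking regenerated files. For `MaxEncodedLen`-mode storage proofs, `Estimated` equals the sum of the per-item `added` sizes plus a constant 990; for `Measured`-mode entries, `Estimated` is `Measured` plus a constant 3465. The constants 990 and 3465 are empirical regularities of the numbers printed in these hunks, not values quoted from the benchmarking code. Plain arithmetic, checkable in isolation:

    fn main() {
        // One, two, three `System::Account` proofs (added: 2603 each) + 990.
        assert_eq!(3593, 1 * 2603 + 990);
        assert_eq!(6196, 2 * 2603 + 990);
        assert_eq!(8799, 3 * 2603 + 990);
        // `Measured`-mode pairs (Measured, Estimated) from the hunks above.
        for (measured, estimated) in [(70, 3535), (32, 3497), (38, 3503), (90, 3555), (106, 3571)] {
            assert_eq!(estimated - measured, 3465);
        }
    }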
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
index 11c1bad9aa17..58007173ae1d 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs
@@ -5,7 +5,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -60,10 +60,8 @@ impl<Call> XcmWeightInfo<Call> for PeopleRococoXcmWeight<Call> {
 	fn withdraw_asset(assets: &Assets) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve
-	fn reserve_asset_deposited(_assets: &Assets) -> Weight {
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		Weight::from_parts(1_000_000_000_u64, 0)
+	fn reserve_asset_deposited(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &Assets) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -114,12 +112,8 @@ impl<Call> XcmWeightInfo<Call> for PeopleRococoXcmWeight<Call> {
 	fn report_error(_query_response_info: &QueryResponseInfo) -> Weight {
 		XcmGeneric::<Runtime>::report_error()
 	}
-	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset())
@@ -132,7 +126,7 @@ impl<Call> XcmWeightInfo<Call> for PeopleRococoXcmWeight<Call> {
 		_reserve: &Location,
 		_xcm: &Xcm<()>,
 	) -> Weight {
-		assets.weigh_assets(XcmGeneric::<Runtime>::initiate_reserve_withdraw())
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw())
 	}
 	fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
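The pattern repeated throughout this mod.rs: each fungible-asset instruction is now priced by scaling a single-asset benchmark result by the assets in the message, instead of the hardcoded 1-second placeholder. `weigh_assets` is a helper trait in the real file; the free function below is a simplified stand-in, and its scale-by-count body is an assumption made for illustration, not code copied from the source:

    use frame_support::weights::Weight;

    // Assumed behaviour of the `weigh_assets` helper: scale the benchmarked
    // single-asset weight by the number of assets being weighed.
    fn weigh_assets(asset_count: u64, single_asset_weight: Weight) -> Weight {
        single_asset_weight.saturating_mul(asset_count)
    }

    fn main() {
        // E.g. withdrawing 3 assets at the benchmarked cost of one withdraw.
        let w = weigh_assets(3, Weight::from_parts(30_428_000, 3593));
        assert_eq!(w.ref_time(), 3 * 30_428_000);
    }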
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
index 2364798596d5..4dd44e66dd5e 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
@@ -1,41 +1,42 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_xcm_benchmarks::fungible`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("people-rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./artifacts/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=people-kusama-dev
-// --execution=wasm
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::fungible
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::fungible
+// --chain=people-rococo-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,110 +48,140 @@ use core::marker::PhantomData;
 
 /// Weights for `pallet_xcm_benchmarks::fungible`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo<T> {
-	// Storage: System Account (r:1 w:1)
-	// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn withdraw_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `101`
 		// Estimated: `3593`
-		// Minimum execution time: 23_309_000 picoseconds.
-		Weight::from_parts(23_777_000, 3593)
+		// Minimum execution time: 30_428_000 picoseconds.
+		Weight::from_parts(31_184_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	// Storage: System Account (r:2 w:2)
-	// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn transfer_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `153`
 		// Estimated: `6196`
-		// Minimum execution time: 48_808_000 picoseconds.
-		Weight::from_parts(49_427_000, 6196)
+		// Minimum execution time: 41_912_000 picoseconds.
+		Weight::from_parts(43_346_000, 6196)
			.saturating_add(T::DbWeight::get().reads(2))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	// Storage: System Account (r:2 w:2)
-	// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `System::Account` (r:2 w:2)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn transfer_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `223`
 		// Estimated: `6196`
-		// Minimum execution time: 71_204_000 picoseconds.
-		Weight::from_parts(72_121_000, 6196)
+		// Minimum execution time: 67_706_000 picoseconds.
+		Weight::from_parts(69_671_000, 6196)
 			.saturating_add(T::DbWeight::get().reads(8))
 			.saturating_add(T::DbWeight::get().writes(4))
 	}
+	// Storage: `Benchmark::Override` (r:0 w:0)
+	// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	pub fn reserve_asset_deposited() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `0`
+		// Estimated: `0`
+		// Minimum execution time: 18_446_744_073_709_551_000 picoseconds.
+		Weight::from_parts(18_446_744_073_709_551_000, 0)
+	}
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	pub fn initiate_reserve_withdraw() -> Weight {
+		// Proof Size summary in bytes:
+		// Measured: `70`
+		// Estimated: `3535`
+		// Minimum execution time: 29_790_000 picoseconds.
+		Weight::from_parts(30_655_000, 3535)
+			.saturating_add(T::DbWeight::get().reads(6))
+			.saturating_add(T::DbWeight::get().writes(2))
+	}
 	pub fn receive_teleported_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 3_559_000 picoseconds.
-		Weight::from_parts(3_616_000, 0)
+		// Minimum execution time: 2_438_000 picoseconds.
+		Weight::from_parts(2_597_000, 0)
 	}
-	// Storage: System Account (r:1 w:1)
-	// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
 	pub fn deposit_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `52`
 		// Estimated: `3593`
-		// Minimum execution time: 25_042_000 picoseconds.
-		Weight::from_parts(25_630_000, 3593)
+		// Minimum execution time: 24_040_000 picoseconds.
+		Weight::from_parts(24_538_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	// Storage: System Account (r:1 w:1)
-	// Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen)
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `System::Account` (r:1 w:1)
+	// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn deposit_reserve_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `122`
 		// Estimated: `3593`
-		// Minimum execution time: 49_030_000 picoseconds.
-		Weight::from_parts(49_828_000, 3593)
+		// Minimum execution time: 58_275_000 picoseconds.
+		Weight::from_parts(59_899_000, 3593)
 			.saturating_add(T::DbWeight::get().reads(7))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn initiate_teleport() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `70`
 		// Estimated: `3535`
-		// Minimum execution time: 27_142_000 picoseconds.
-		Weight::from_parts(27_416_000, 3535)
+		// Minimum execution time: 25_638_000 picoseconds.
+		Weight::from_parts(26_514_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
index a50c8860c48f..729a32117041 100644
--- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
@@ -1,41 +1,42 @@
 // Copyright (C) Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: Apache-2.0
+// This file is part of Cumulus.
 
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// Cumulus is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Cumulus is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Cumulus. If not, see <http://www.gnu.org/licenses/>.
 
 //! Autogenerated weights for `pallet_xcm_benchmarks::generic`
 //!
-//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0
+//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]`
 //! WORST CASE MAP SIZE: `1000000`
-//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-kusama-dev"), DB CACHE: 1024
+//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz`
+//! WASM-EXECUTION: Compiled, CHAIN: Some("people-rococo-dev"), DB CACHE: 1024
 
 // Executed Command:
-// ./artifacts/polkadot-parachain
+// target/production/polkadot-parachain
 // benchmark
 // pallet
-// --template=./templates/xcm-bench-template.hbs
-// --chain=people-kusama-dev
-// --execution=wasm
-// --wasm-execution=compiled
-// --pallet=pallet_xcm_benchmarks::generic
-// --extrinsic=*
 // --steps=50
 // --repeat=20
-// --json
-// --header=./file_header.txt
-// --output=./cumulus/parachains/runtimes/people/people-kusama/src/weights/xcm/pallet_xcm_benchmarks_generic.rs
+// --extrinsic=*
+// --wasm-execution=compiled
+// --heap-pages=4096
+// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json
+// --pallet=pallet_xcm_benchmarks::generic
+// --chain=people-rococo-dev
+// --header=./cumulus/file_header.txt
+// --template=./cumulus/templates/xcm-bench-template.hbs
+// --output=./cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/
 
 #![cfg_attr(rustfmt, rustfmt_skip)]
 #![allow(unused_parens)]
@@ -47,24 +48,24 @@ use core::marker::PhantomData;
 
 /// Weights for `pallet_xcm_benchmarks::generic`.
 pub struct WeightInfo<T>(PhantomData<T>);
 impl<T: frame_system::Config> WeightInfo<T> {
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_holding() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `70`
 		// Estimated: `3535`
-		// Minimum execution time: 30_210_000 picoseconds.
-		Weight::from_parts(30_864_000, 3535)
+		// Minimum execution time: 29_430_000 picoseconds.
+		Weight::from_parts(30_111_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -72,97 +73,97 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_808_000 picoseconds.
-		Weight::from_parts(2_848_000, 0)
+		// Minimum execution time: 607_000 picoseconds.
+		Weight::from_parts(672_000, 0)
 	}
-	// Storage: PolkadotXcm Queries (r:1 w:0)
-	// Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured)
+	// Storage: `PolkadotXcm::Queries` (r:1 w:0)
+	// Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub fn query_response() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `32`
 		// Estimated: `3497`
-		// Minimum execution time: 10_353_000 picoseconds.
-		Weight::from_parts(10_569_000, 3497)
+		// Minimum execution time: 7_445_000 picoseconds.
+		Weight::from_parts(7_623_000, 3497)
 			.saturating_add(T::DbWeight::get().reads(1))
 	}
 	pub fn transact() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 12_074_000 picoseconds.
-		Weight::from_parts(12_280_000, 0)
+		// Minimum execution time: 6_749_000 picoseconds.
+		Weight::from_parts(7_073_000, 0)
 	}
 	pub fn refund_surplus() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 3_080_000 picoseconds.
-		Weight::from_parts(3_161_000, 0)
+		// Minimum execution time: 1_275_000 picoseconds.
+		Weight::from_parts(1_409_000, 0)
 	}
 	pub fn set_error_handler() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_649_000 picoseconds.
-		Weight::from_parts(2_732_000, 0)
+		// Minimum execution time: 670_000 picoseconds.
+		Weight::from_parts(709_000, 0)
 	}
 	pub fn set_appendix() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_652_000 picoseconds.
-		Weight::from_parts(2_749_000, 0)
+		// Minimum execution time: 635_000 picoseconds.
+		Weight::from_parts(723_000, 0)
 	}
 	pub fn clear_error() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_642_000 picoseconds.
-		Weight::from_parts(2_704_000, 0)
+		// Minimum execution time: 650_000 picoseconds.
+		Weight::from_parts(699_000, 0)
 	}
 	pub fn descend_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 3_438_000 picoseconds.
-		Weight::from_parts(3_508_000, 0)
+		// Minimum execution time: 678_000 picoseconds.
+		Weight::from_parts(728_000, 0)
 	}
 	pub fn clear_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_626_000 picoseconds.
-		Weight::from_parts(2_701_000, 0)
+		// Minimum execution time: 657_000 picoseconds.
+		Weight::from_parts(703_000, 0)
 	}
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_error() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `70`
 		// Estimated: `3535`
-		// Minimum execution time: 24_737_000 picoseconds.
-		Weight::from_parts(25_106_000, 3535)
+		// Minimum execution time: 25_795_000 picoseconds.
+		Weight::from_parts(26_415_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
-	// Storage: PolkadotXcm AssetTraps (r:1 w:1)
-	// Proof Skipped: PolkadotXcm AssetTraps (max_values: None, max_size: None, mode: Measured)
+	// Storage: `PolkadotXcm::AssetTraps` (r:1 w:1)
+	// Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub fn claim_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `90`
 		// Estimated: `3555`
-		// Minimum execution time: 14_712_000 picoseconds.
-		Weight::from_parts(14_976_000, 3555)
+		// Minimum execution time: 10_792_000 picoseconds.
+		Weight::from_parts(11_061_000, 3555)
 			.saturating_add(T::DbWeight::get().reads(1))
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
@@ -170,114 +171,93 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_689_000 picoseconds.
-		Weight::from_parts(2_739_000, 0)
+		// Minimum execution time: 624_000 picoseconds.
+		Weight::from_parts(682_000, 0)
 	}
-	// Storage: PolkadotXcm VersionNotifyTargets (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn subscribe_version() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `38`
 		// Estimated: `3503`
-		// Minimum execution time: 26_478_000 picoseconds.
-		Weight::from_parts(26_695_000, 3503)
+		// Minimum execution time: 23_906_000 picoseconds.
+		Weight::from_parts(24_740_000, 3503)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(3))
 	}
-	// Storage: PolkadotXcm VersionNotifyTargets (r:0 w:1)
-	// Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured)
+	// Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1)
+	// Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`)
 	pub fn unsubscribe_version() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 4_811_000 picoseconds.
-		Weight::from_parts(5_062_000, 0)
+		// Minimum execution time: 2_621_000 picoseconds.
+		Weight::from_parts(2_788_000, 0)
 			.saturating_add(T::DbWeight::get().writes(1))
 	}
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
-	pub fn initiate_reserve_withdraw() -> Weight {
-		// Proof Size summary in bytes:
-		// Measured: `70`
-		// Estimated: `3535`
-		// Minimum execution time: 26_945_000 picoseconds.
-		Weight::from_parts(28_093_000, 3535)
-			.saturating_add(T::DbWeight::get().reads(6))
-			.saturating_add(T::DbWeight::get().writes(2))
-	}
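The block deleted just above is the counterpart of two changes seen earlier in this patch: `initiate_reserve_withdraw` is now benchmarked in the fungible set, and each runtime's mod.rs routes the instruction there instead of to the generic table. A schematic of the routing change, with a simplified struct standing in for the generated `XcmFungibleWeight` table (the value is taken from the new people-rococo fungible hunk above):

    use frame_support::weights::Weight;

    // Simplified stand-in for the generated fungible weight table.
    struct XcmFungibleWeight;

    impl XcmFungibleWeight {
        fn initiate_reserve_withdraw() -> Weight { Weight::from_parts(30_655_000, 3535) }
    }

    // mod.rs after this patch: the instruction is priced from the fungible
    // table; the generic table no longer carries this function at all.
    fn initiate_reserve_withdraw_weight() -> Weight {
        XcmFungibleWeight::initiate_reserve_withdraw()
    }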
 	pub fn burn_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 4_144_000 picoseconds.
-		Weight::from_parts(4_217_000, 0)
+		// Minimum execution time: 954_000 picoseconds.
+		Weight::from_parts(1_046_000, 0)
 	}
 	pub fn expect_asset() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_726_000 picoseconds.
-		Weight::from_parts(2_802_000, 0)
+		// Minimum execution time: 742_000 picoseconds.
+		Weight::from_parts(790_000, 0)
 	}
 	pub fn expect_origin() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_719_000 picoseconds.
-		Weight::from_parts(2_790_000, 0)
+		// Minimum execution time: 664_000 picoseconds.
+		Weight::from_parts(722_000, 0)
 	}
 	pub fn expect_error() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_660_000 picoseconds.
-		Weight::from_parts(2_742_000, 0)
+		// Minimum execution time: 619_000 picoseconds.
+		Weight::from_parts(672_000, 0)
 	}
 	pub fn expect_transact_status() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_874_000 picoseconds.
-		Weight::from_parts(2_940_000, 0)
+		// Minimum execution time: 798_000 picoseconds.
+		Weight::from_parts(851_000, 0)
 	}
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn query_pallet() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `70`
 		// Estimated: `3535`
-		// Minimum execution time: 27_235_000 picoseconds.
-		Weight::from_parts(27_811_000, 3535)
+		// Minimum execution time: 29_580_000 picoseconds.
+		Weight::from_parts(31_100_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -285,27 +265,27 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 4_807_000 picoseconds.
-		Weight::from_parts(4_918_000, 0)
+		// Minimum execution time: 3_150_000 picoseconds.
+		Weight::from_parts(3_326_000, 0)
 	}
-	// Storage: ParachainInfo ParachainId (r:1 w:0)
-	// Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen)
-	// Storage: PolkadotXcm SupportedVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured)
-	// Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1)
-	// Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: PolkadotXcm SafeXcmVersion (r:1 w:0)
-	// Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem HostConfiguration (r:1 w:0)
-	// Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured)
-	// Storage: ParachainSystem PendingUpwardMessages (r:1 w:1)
-	// Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured)
+	// Storage: `ParachainInfo::ParachainId` (r:1 w:0)
+	// Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`)
+	// Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1)
+	// Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0)
+	// Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::HostConfiguration` (r:1 w:0)
+	// Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
+	// Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1)
+	// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`)
 	pub fn report_transact_status() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `70`
 		// Estimated: `3535`
-		// Minimum execution time: 24_698_000 picoseconds.
-		Weight::from_parts(25_077_000, 3535)
+		// Minimum execution time: 26_152_000 picoseconds.
+		Weight::from_parts(26_635_000, 3535)
 			.saturating_add(T::DbWeight::get().reads(6))
 			.saturating_add(T::DbWeight::get().writes(2))
 	}
@@ -313,35 +293,35 @@ impl<T: frame_system::Config> WeightInfo<T> {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_613_000 picoseconds.
-		Weight::from_parts(2_703_000, 0)
+		// Minimum execution time: 693_000 picoseconds.
+		Weight::from_parts(724_000, 0)
 	}
 	pub fn set_topic() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_602_000 picoseconds.
-		Weight::from_parts(2_661_000, 0)
+		// Minimum execution time: 632_000 picoseconds.
+		Weight::from_parts(678_000, 0)
 	}
 	pub fn clear_topic() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_557_000 picoseconds.
-		Weight::from_parts(2_655_000, 0)
+		// Minimum execution time: 646_000 picoseconds.
+		Weight::from_parts(694_000, 0)
 	}
 	pub fn set_fees_mode() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_724_000 picoseconds.
-		Weight::from_parts(2_760_000, 0)
+		// Minimum execution time: 622_000 picoseconds.
+		Weight::from_parts(656_000, 0)
 	}
 	pub fn unpaid_execution() -> Weight {
 		// Proof Size summary in bytes:
 		// Measured: `0`
 		// Estimated: `0`
-		// Minimum execution time: 2_764_000 picoseconds.
-		Weight::from_parts(2_872_000, 0)
+		// Minimum execution time: 639_000 picoseconds.
+		Weight::from_parts(679_000, 0)
 	}
 }
diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
index b1fc7ad8ed83..b44e8d4b61b8 100644
--- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
+++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs
@@ -5,7 +5,7 @@
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -60,10 +60,8 @@ impl<Call> XcmWeightInfo<Call> for PeopleWestendXcmWeight<Call> {
 	fn withdraw_asset(assets: &Assets) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::withdraw_asset())
 	}
-	// Currently there is no trusted reserve
-	fn reserve_asset_deposited(_assets: &Assets) -> Weight {
-		// TODO: hardcoded - fix https://github.com/paritytech/cumulus/issues/1974
-		Weight::from_parts(1_000_000_000_u64, 0)
+	fn reserve_asset_deposited(assets: &Assets) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::reserve_asset_deposited())
 	}
 	fn receive_teleported_asset(assets: &Assets) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::receive_teleported_asset())
@@ -114,12 +112,8 @@ impl<Call> XcmWeightInfo<Call> for PeopleWestendXcmWeight<Call> {
 	fn report_error(_query_response_info: &QueryResponseInfo) -> Weight {
 		XcmGeneric::<Runtime>::report_error()
 	}
-	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(1_000_000_000_u64, 0);
-		let weight = assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset());
-		hardcoded_weight.min(weight)
+	fn deposit_asset(assets: &AssetFilter, _dest: &Location) -> Weight {
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_asset())
 	}
 	fn deposit_reserve_asset(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
 		assets.weigh_assets(XcmFungibleWeight::<Runtime>::deposit_reserve_asset())
@@ -132,13 +126,10 @@ impl<Call> XcmWeightInfo<Call> for PeopleWestendXcmWeight<Call> {
 		_reserve: &Location,
 		_xcm: &Xcm<()>,
 	) -> Weight {
-		assets.weigh_assets(XcmGeneric::<Runtime>::initiate_reserve_withdraw())
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_reserve_withdraw())
 	}
 	fn initiate_teleport(assets: &AssetFilter, _dest: &Location, _xcm: &Xcm<()>) -> Weight {
-		// Hardcoded till the XCM pallet is fixed
-		let hardcoded_weight = Weight::from_parts(200_000_000_u64, 0);
-		let weight = assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport());
-		hardcoded_weight.min(weight)
+		assets.weigh_assets(XcmFungibleWeight::<Runtime>::initiate_teleport())
 	}
 	fn report_holding(_response_info: &QueryResponseInfo, _assets: &AssetFilter) -> Weight {
 		XcmGeneric::<Runtime>::report_holding()
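The removed `initiate_teleport` lines above show the pattern this whole patch retires: the benchmarked figure used to be clamped by a hardcoded ceiling via `Weight::min`, and now the benchmark result is used directly. A before/after distillation of exactly those lines:

    use frame_support::weights::Weight;

    // Before: clamp the benchmarked weight to the 200_000_000 ps placeholder,
    // mirroring the removed `hardcoded_weight.min(weight)` lines above.
    fn old_initiate_teleport(benchmarked: Weight) -> Weight {
        let hardcoded_weight = Weight::from_parts(200_000_000_u64, 0);
        hardcoded_weight.min(benchmarked)
    }

    // After: trust the benchmark result as-is.
    fn new_initiate_teleport(benchmarked: Weight) -> Weight {
        benchmarked
    }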
b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -1,41 +1,42 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <http://www.gnu.org/licenses/>. //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=people-polkadot-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::fungible -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::fungible +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,110 +48,140 @@ use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::fungible`.
pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: System Account (r:1 w:1) - // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn withdraw_asset() -> Weight { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 23_363_000 picoseconds. - Weight::from_parts(23_663_000, 3593) + // Minimum execution time: 30_040_000 picoseconds. + Weight::from_parts(30_758_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: System Account (r:2 w:2) - // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn transfer_asset() -> Weight { // Proof Size summary in bytes: // Measured: `153` // Estimated: `6196` - // Minimum execution time: 49_093_000 picoseconds. - Weight::from_parts(49_719_000, 6196) + // Minimum execution time: 42_135_000 picoseconds. + Weight::from_parts(42_970_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: System Account (r:2 w:2) - // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `System::Account` (r:2 w:2) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` 
(r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `223` // Estimated: `6196` - // Minimum execution time: 74_134_000 picoseconds. - Weight::from_parts(74_719_000, 6196) + // Minimum execution time: 67_385_000 picoseconds. + Weight::from_parts(69_776_000, 6196) .saturating_add(T::DbWeight::get().reads(8)) .saturating_add(T::DbWeight::get().writes(4)) } + // Storage: `Benchmark::Override` (r:0 w:0) + // Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) + pub fn reserve_asset_deposited() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 18_446_744_073_709_551_000 picoseconds. + Weight::from_parts(18_446_744_073_709_551_000, 0) + } + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + pub fn initiate_reserve_withdraw() -> Weight { + // Proof Size summary in bytes: + // Measured: `70` + // Estimated: `3535` + // Minimum execution time: 29_804_000 picoseconds. + Weight::from_parts(30_662_000, 3535) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(2)) + } pub fn receive_teleported_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_726_000 picoseconds. - Weight::from_parts(3_881_000, 0) + // Minimum execution time: 2_358_000 picoseconds. + Weight::from_parts(2_497_000, 0) } - // Storage: System Account (r:1 w:1) - // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) pub fn deposit_asset() -> Weight { // Proof Size summary in bytes: // Measured: `52` // Estimated: `3593` - // Minimum execution time: 25_903_000 picoseconds. - Weight::from_parts(26_150_000, 3593) + // Minimum execution time: 23_732_000 picoseconds. 
+ Weight::from_parts(24_098_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: System Account (r:1 w:1) - // Proof: System Account (max_values: None, max_size: Some(128), added: 2603, mode: MaxEncodedLen) - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `System::Account` (r:1 w:1) + // Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: // Measured: `122` // Estimated: `3593` - // Minimum execution time: 51_084_000 picoseconds. - Weight::from_parts(51_859_000, 3593) + // Minimum execution time: 58_449_000 picoseconds. 
+ Weight::from_parts(60_235_000, 3593) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn initiate_teleport() -> Weight { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 28_038_000 picoseconds. - Weight::from_parts(28_438_000, 3535) + // Minimum execution time: 25_708_000 picoseconds. + Weight::from_parts(26_495_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 861f03819959..1377d31f2db7 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -1,41 +1,42 @@ // Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 +// This file is part of Cumulus. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see <http://www.gnu.org/licenses/>. //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-05-31, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm4`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("people-polkadot-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: -// ./artifacts/polkadot-parachain +// target/production/polkadot-parachain // benchmark // pallet -// --template=./templates/xcm-bench-template.hbs -// --chain=people-polkadot-dev -// --execution=wasm -// --wasm-execution=compiled -// --pallet=pallet_xcm_benchmarks::generic -// --extrinsic=* // --steps=50 // --repeat=20 -// --json -// --header=./file_header.txt -// --output=./cumulus/parachains/runtimes/people/people-polkadot/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_xcm_benchmarks::generic +// --chain=people-westend-dev +// --header=./cumulus/file_header.txt +// --template=./cumulus/templates/xcm-bench-template.hbs +// --output=./cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -47,24 +48,24 @@ use core::marker::PhantomData; /// Weights for `pallet_xcm_benchmarks::generic`.
pub struct WeightInfo(PhantomData); impl WeightInfo { - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_holding() -> Weight { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 30_819_000 picoseconds. - Weight::from_parts(31_157_000, 3535) + // Minimum execution time: 29_537_000 picoseconds. + Weight::from_parts(30_513_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -72,97 +73,97 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_869_000 picoseconds. - Weight::from_parts(2_920_000, 0) + // Minimum execution time: 683_000 picoseconds. + Weight::from_parts(738_000, 0) } - // Storage: PolkadotXcm Queries (r:1 w:0) - // Proof Skipped: PolkadotXcm Queries (max_values: None, max_size: None, mode: Measured) + // Storage: `PolkadotXcm::Queries` (r:1 w:0) + // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn query_response() -> Weight { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 10_268_000 picoseconds. - Weight::from_parts(10_496_000, 3497) + // Minimum execution time: 7_498_000 picoseconds. + Weight::from_parts(7_904_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 11_990_000 picoseconds. - Weight::from_parts(12_206_000, 0) + // Minimum execution time: 7_029_000 picoseconds. 
+ Weight::from_parts(7_325_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_170_000 picoseconds. - Weight::from_parts(3_308_000, 0) + // Minimum execution time: 1_343_000 picoseconds. + Weight::from_parts(1_410_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_650_000 picoseconds. - Weight::from_parts(2_783_000, 0) + // Minimum execution time: 696_000 picoseconds. + Weight::from_parts(734_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_681_000 picoseconds. - Weight::from_parts(2_829_000, 0) + // Minimum execution time: 690_000 picoseconds. + Weight::from_parts(740_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_622_000 picoseconds. - Weight::from_parts(2_688_000, 0) + // Minimum execution time: 667_000 picoseconds. + Weight::from_parts(697_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_385_000 picoseconds. - Weight::from_parts(3_538_000, 0) + // Minimum execution time: 692_000 picoseconds. + Weight::from_parts(743_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_630_000 picoseconds. - Weight::from_parts(2_720_000, 0) + // Minimum execution time: 670_000 picoseconds. + Weight::from_parts(712_000, 0) } - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + 
// Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_error() -> Weight { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 24_446_000 picoseconds. - Weight::from_parts(24_854_000, 3535) + // Minimum execution time: 26_405_000 picoseconds. + Weight::from_parts(26_877_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } - // Storage: PolkadotXcm AssetTraps (r:1 w:1) - // Proof Skipped: PolkadotXcm AssetTraps (max_values: None, max_size: None, mode: Measured) + // Storage: `PolkadotXcm::AssetTraps` (r:1 w:1) + // Proof: `PolkadotXcm::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn claim_asset() -> Weight { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 14_713_000 picoseconds. - Weight::from_parts(15_010_000, 3555) + // Minimum execution time: 10_953_000 picoseconds. + Weight::from_parts(11_345_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -170,114 +171,93 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_702_000 picoseconds. - Weight::from_parts(2_744_000, 0) + // Minimum execution time: 644_000 picoseconds. + Weight::from_parts(693_000, 0) } - // Storage: PolkadotXcm VersionNotifyTargets (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn subscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 
25_955_000 picoseconds. - Weight::from_parts(26_632_000, 3503) + // Minimum execution time: 24_157_000 picoseconds. + Weight::from_parts(24_980_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } - // Storage: PolkadotXcm VersionNotifyTargets (r:0 w:1) - // Proof Skipped: PolkadotXcm VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + // Storage: `PolkadotXcm::VersionNotifyTargets` (r:0 w:1) + // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) pub fn unsubscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_965_000 picoseconds. - Weight::from_parts(5_168_000, 0) + // Minimum execution time: 2_767_000 picoseconds. + Weight::from_parts(2_844_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) - pub fn initiate_reserve_withdraw() -> Weight { - // Proof Size summary in bytes: - // Measured: `70` - // Estimated: `3535` - // Minimum execution time: 27_707_000 picoseconds. - Weight::from_parts(28_081_000, 3535) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(2)) - } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_215_000 picoseconds. - Weight::from_parts(4_362_000, 0) + // Minimum execution time: 1_079_000 picoseconds. + Weight::from_parts(1_141_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_843_000 picoseconds. - Weight::from_parts(2_957_000, 0) + // Minimum execution time: 776_000 picoseconds. + Weight::from_parts(829_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_751_000 picoseconds. - Weight::from_parts(2_809_000, 0) + // Minimum execution time: 696_000 picoseconds. + Weight::from_parts(740_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_674_000 picoseconds. - Weight::from_parts(2_737_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(684_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_891_000 picoseconds. - Weight::from_parts(2_952_000, 0) + // Minimum execution time: 825_000 picoseconds. 
+ Weight::from_parts(853_000, 0) } - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn query_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 28_600_000 picoseconds. - Weight::from_parts(29_001_000, 3535) + // Minimum execution time: 30_222_000 picoseconds. + Weight::from_parts(31_110_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -285,27 +265,27 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_748_000 picoseconds. - Weight::from_parts(4_813_000, 0) + // Minimum execution time: 3_108_000 picoseconds. 
+ Weight::from_parts(3_325_000, 0) } - // Storage: ParachainInfo ParachainId (r:1 w:0) - // Proof: ParachainInfo ParachainId (max_values: Some(1), max_size: Some(4), added: 499, mode: MaxEncodedLen) - // Storage: PolkadotXcm SupportedVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SupportedVersion (max_values: None, max_size: None, mode: Measured) - // Storage: PolkadotXcm VersionDiscoveryQueue (r:1 w:1) - // Proof Skipped: PolkadotXcm VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - // Storage: PolkadotXcm SafeXcmVersion (r:1 w:0) - // Proof Skipped: PolkadotXcm SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem HostConfiguration (r:1 w:0) - // Proof Skipped: ParachainSystem HostConfiguration (max_values: Some(1), max_size: None, mode: Measured) - // Storage: ParachainSystem PendingUpwardMessages (r:1 w:1) - // Proof Skipped: ParachainSystem PendingUpwardMessages (max_values: Some(1), max_size: None, mode: Measured) + // Storage: `ParachainInfo::ParachainId` (r:1 w:0) + // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) + // Storage: `PolkadotXcm::SupportedVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::VersionDiscoveryQueue` (r:1 w:1) + // Proof: `PolkadotXcm::VersionDiscoveryQueue` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `PolkadotXcm::SafeXcmVersion` (r:1 w:0) + // Proof: `PolkadotXcm::SafeXcmVersion` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::HostConfiguration` (r:1 w:0) + // Proof: `ParachainSystem::HostConfiguration` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) + // Storage: `ParachainSystem::PendingUpwardMessages` (r:1 w:1) + // Proof: `ParachainSystem::PendingUpwardMessages` (`max_values`: Some(1), `max_size`: None, mode: `Measured`) pub fn report_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 25_483_000 picoseconds. - Weight::from_parts(25_737_000, 3535) + // Minimum execution time: 26_548_000 picoseconds. + Weight::from_parts(26_911_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -313,35 +293,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_755_000 picoseconds. - Weight::from_parts(2_817_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(726_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_700_000 picoseconds. - Weight::from_parts(2_773_000, 0) + // Minimum execution time: 649_000 picoseconds. + Weight::from_parts(700_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_711_000, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(686_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_710_000 picoseconds. - Weight::from_parts(2_762_000, 0) + // Minimum execution time: 652_000 picoseconds. 
+ Weight::from_parts(703_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_839_000 picoseconds. - Weight::from_parts(2_931_000, 0) + // Minimum execution time: 673_000 picoseconds. + Weight::from_parts(742_000, 0) } } diff --git a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 60c40429b1ac..7d743b209124 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-09-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -55,8 +55,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 23_189_000 picoseconds. - Weight::from_parts(23_896_000, 3593) + // Minimum execution time: 30_672_000 picoseconds. + Weight::from_parts(31_677_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,8 +66,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 50_299_000 picoseconds. - Weight::from_parts(50_962_000, 6196) + // Minimum execution time: 41_132_000 picoseconds. + Weight::from_parts(41_654_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -83,10 +83,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `243` + // Measured: `281` // Estimated: `6196` - // Minimum execution time: 71_748_000 picoseconds. - Weight::from_parts(74_072_000, 6196) + // Minimum execution time: 97_174_000 picoseconds. + Weight::from_parts(99_537_000, 6196) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -105,16 +105,18 @@ impl WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 27_806_000 picoseconds. 
- Weight::from_parts(28_594_000, 3607) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 67_105_000 picoseconds. + Weight::from_parts(68_659_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -122,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 21_199_000 picoseconds. - Weight::from_parts(21_857_000, 3593) + // Minimum execution time: 30_780_000 picoseconds. + Weight::from_parts(31_496_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -133,27 +135,27 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 23_578_000 picoseconds. - Weight::from_parts(24_060_000, 3593) + // Minimum execution time: 23_411_000 picoseconds. + Weight::from_parts(23_891_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 48_522_000 picoseconds. - Weight::from_parts(49_640_000, 3607) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 61_541_000 picoseconds. + Weight::from_parts(63_677_000, 3645) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -169,10 +171,10 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `142` - // Estimated: `3607` - // Minimum execution time: 50_429_000 picoseconds. - Weight::from_parts(51_295_000, 3607) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 48_574_000 picoseconds. 
+ Weight::from_parts(49_469_000, 3645) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs index 9939f16aa29f..e0c61c8e2bf2 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_fungible.rs @@ -16,10 +16,10 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::fungible` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-10-03, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-08, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-nbnwcyh-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: @@ -55,8 +55,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `3593` - // Minimum execution time: 24_815_000 picoseconds. - Weight::from_parts(25_098_000, 3593) + // Minimum execution time: 31_780_000 picoseconds. + Weight::from_parts(32_602_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -66,12 +66,12 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `101` // Estimated: `6196` - // Minimum execution time: 51_268_000 picoseconds. - Weight::from_parts(51_857_000, 6196) + // Minimum execution time: 41_818_000 picoseconds. + Weight::from_parts(42_902_000, 6196) .saturating_add(T::DbWeight::get().reads(2)) .saturating_add(T::DbWeight::get().writes(2)) } - /// Storage: `System::Account` (r:2 w:2) + /// Storage: `System::Account` (r:3 w:3) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -83,12 +83,12 @@ impl WeightInfo { /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn transfer_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `210` - // Estimated: `6196` - // Minimum execution time: 74_113_000 picoseconds. - Weight::from_parts(74_721_000, 6196) - .saturating_add(T::DbWeight::get().reads(6)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `351` + // Estimated: `8799` + // Minimum execution time: 101_949_000 picoseconds. 
+ Weight::from_parts(104_190_000, 8799) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(5)) } /// Storage: `Benchmark::Override` (r:0 w:0) /// Proof: `Benchmark::Override` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -105,16 +105,18 @@ impl WeightInfo { /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_reserve_withdraw() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3574` - // Minimum execution time: 28_919_000 picoseconds. - Weight::from_parts(29_703_000, 3574) - .saturating_add(T::DbWeight::get().reads(4)) - .saturating_add(T::DbWeight::get().writes(2)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 70_123_000 picoseconds. + Weight::from_parts(72_564_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) + .saturating_add(T::DbWeight::get().writes(4)) } /// Storage: `System::Account` (r:1 w:1) /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) @@ -122,8 +124,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3593` - // Minimum execution time: 21_685_000 picoseconds. - Weight::from_parts(22_528_000, 3593) + // Minimum execution time: 31_868_000 picoseconds. + Weight::from_parts(32_388_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -133,27 +135,27 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `3593` - // Minimum execution time: 25_192_000 picoseconds. - Weight::from_parts(25_445_000, 3593) + // Minimum execution time: 24_532_000 picoseconds. + Weight::from_parts(25_166_000, 3593) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } - /// Storage: `System::Account` (r:1 w:1) - /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn deposit_reserve_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3593` - // Minimum execution time: 49_349_000 picoseconds. 
- Weight::from_parts(50_476_000, 3593) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 63_378_000 picoseconds. + Weight::from_parts(65_002_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -169,10 +171,10 @@ impl<T: frame_system::Config> WeightInfo<T> { /// Storage: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn initiate_teleport() -> Weight { // Proof Size summary in bytes: - // Measured: `109` - // Estimated: `3593` - // Minimum execution time: 51_386_000 picoseconds. - Weight::from_parts(52_141_000, 3593) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 49_174_000 picoseconds. + Weight::from_parts(50_356_000, 3612) .saturating_add(T::DbWeight::get().reads(5)) .saturating_add(T::DbWeight::get().writes(3)) } diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 1daf5ae750cf..5101a97dc842 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -858,14 +858,7 @@ impl<Config: config::Config> XcmExecutor<Config> { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { let deposited = self.holding.saturating_take(assets); - for asset in deposited.into_assets_iter() { - Config::AssetTransactor::deposit_asset( - &asset, - &beneficiary, - Some(&self.context), - )?; - } - Ok(()) + self.deposit_assets_with_retry(&deposited, &beneficiary) }); if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { self.holding = old_holding; @@ -890,9 +883,7 @@ impl<Config: config::Config> XcmExecutor<Config> { // now take assets to deposit (excluding transport_fee) let deposited = self.holding.saturating_take(assets); - for asset in deposited.assets_iter() { - Config::AssetTransactor::deposit_asset(&asset, &dest, Some(&self.context))?; - } + self.deposit_assets_with_retry(&deposited, &dest)?; // Note that we pass `None` as `maybe_failed_bin` and drop any assets which // cannot be reanchored because we have already called `deposit_asset` on all // assets. @@ -1282,4 +1273,46 @@ impl<Config: config::Config> XcmExecutor<Config> { }), } } + + /// Deposit `to_deposit` assets to `beneficiary`, without giving up on the first (transient) + /// error, and retrying once just in case one of the subsequently deposited assets satisfies + /// some requirement. + /// + /// The most common transient error is: the `beneficiary` account does not yet exist and the + /// first asset(s) in the (sorted) list does not satisfy ED, but a subsequent one in the list does. + /// + /// This function can write into storage and also return an error at the same time, so it + /// should always be called within a transactional context. + fn deposit_assets_with_retry( + &mut self, + to_deposit: &AssetsInHolding, + beneficiary: &Location, + ) -> Result<(), XcmError> { + let mut failed_deposits = Vec::with_capacity(to_deposit.len()); + + let mut deposit_result = Ok(()); + for asset in to_deposit.assets_iter() { + deposit_result = + Config::AssetTransactor::deposit_asset(&asset, &beneficiary, Some(&self.context)); + // If the deposit failed for this asset, mark it for retry after depositing the others.
+ if deposit_result.is_err() { + failed_deposits.push(asset); + } + } + if failed_deposits.len() == to_deposit.len() { + tracing::debug!( + target: "xcm::execute", + ?deposit_result, + "Deposit for each asset failed, returning the last error as there is no point in retrying any of them", + ); + return deposit_result; + } + tracing::trace!(target: "xcm::execute", ?failed_deposits, "Deposits to retry"); + + // Retry previously failed deposits, this time short-circuiting on any error. + for asset in failed_deposits { + Config::AssetTransactor::deposit_asset(&asset, &beneficiary, Some(&self.context))?; + } + Ok(()) + } } diff --git a/prdoc/pr_4460.prdoc b/prdoc/pr_4460.prdoc new file mode 100644 index 000000000000..81636c3313fc --- /dev/null +++ b/prdoc/pr_4460.prdoc @@ -0,0 +1,24 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: "xcm-executor: allow deposit of multiple assets if at least one of them satisfies ED" + +doc: + - audience: Runtime Dev + description: | + XCM programs that deposit assets to some new (empty) account will now succeed if at least + one of the deposited assets satisfies ED. Before this change, the requirement was that the + _first_ asset had to satisfy ED, but asset order can change during reanchoring, so it + is not reliable. Now ordering doesn't matter: any one (or more) of them can satisfy ED for the + whole deposit to work. + - audience: Runtime User + description: | + XCM programs that deposit assets to some new (empty) account will now succeed if at least + one of the deposited assets satisfies ED. Before this change, the requirement was that the + _first_ asset had to satisfy ED, but asset order can change during reanchoring, so it + is not reliable. Now ordering doesn't matter: any one (or more) of them can satisfy ED for the + whole deposit to work. + +crates: + - name: staging-xcm-executor + bump: patch From 1f49358db0033e57a790eac6daccc45beba81863 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Mon, 12 Aug 2024 11:44:35 +0100 Subject: [PATCH 19/74] Fix favicon link to fix CI (#5319) The polkadot.network website was recently refreshed and the `favicon-32x32.png` was removed. It was linked in some docs, so those docs have been updated to point to a working favicon on the new website. Previously, the lychee link checker was failing on all PRs.
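The two-pass deposit scheme that PR #4460 introduces in `deposit_assets_with_retry` above stands on its own: one pass that records failures instead of aborting, an all-failed short-circuit that returns the last error, and a retry pass that propagates any error that remains. A minimal, self-contained sketch of that pattern follows; it does not use the executor's real types — a plain closure stands in for `Config::AssetTransactor::deposit_asset`, and `u32` amounts stand in for assets.

/// Two-pass deposit: try every asset once, then retry only the failures.
fn deposit_with_retry<A: Clone, E>(
    assets: &[A],
    mut deposit: impl FnMut(&A) -> Result<(), E>,
) -> Result<(), E> {
    // First pass: attempt every asset, remembering failures instead of bailing out.
    let mut failed = Vec::with_capacity(assets.len());
    let mut last_result = Ok(());
    for asset in assets {
        last_result = deposit(asset);
        if last_result.is_err() {
            failed.push(asset.clone());
        }
    }
    // If nothing succeeded, a retry cannot change the outcome: surface the last error.
    if failed.len() == assets.len() {
        return last_result;
    }
    // Second pass: a success above may have unblocked these (e.g. created the
    // beneficiary account), so retry them, now short-circuiting on any error.
    for asset in failed {
        deposit(&asset)?;
    }
    Ok(())
}

fn main() {
    // The beneficiary "account" exists once any single deposit of at least 10
    // (a stand-in existential deposit) has landed.
    let mut exists = false;
    let result = deposit_with_retry(&[1u32, 50, 2], |amount| {
        if !exists && *amount < 10 {
            return Err("below existential deposit");
        }
        exists = true;
        Ok(())
    });
    // `1` fails the first pass, `50` creates the account, the retry deposits `1`.
    assert_eq!(result, Ok(()));
}

As in the real function, this only behaves sensibly inside a transactional context: the executor wraps the call in `Config::TransactionalProcessor::process`, so partial writes from a failed run are rolled back.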
--- docs/sdk/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sdk/src/lib.rs b/docs/sdk/src/lib.rs index bc0970c01f1f..6dc87858530c 100644 --- a/docs/sdk/src/lib.rs +++ b/docs/sdk/src/lib.rs @@ -25,7 +25,7 @@ #![doc = simple_mermaid::mermaid!("../../mermaid/IA.mmd")] #![warn(rustdoc::broken_intra_doc_links)] #![warn(rustdoc::private_intra_doc_links)] -#![doc(html_favicon_url = "https://polkadot.network/favicon-32x32.png")] +#![doc(html_favicon_url = "https://polkadot.com/favicon.ico")] #![doc( html_logo_url = "https://europe1.discourse-cdn.com/standard21/uploads/polkadot2/original/1X/eb57081e2bb7c39e5fcb1a98b443e423fa4448ae.svg" )] From fc906d5d0fb7796ef54ba670101cf37b0aad6794 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Mon, 12 Aug 2024 16:56:00 +0300 Subject: [PATCH 20/74] fix av-distribution Jaeger spans mem leak (#5321) Fixes https://github.com/paritytech/polkadot-sdk/issues/5258 --- .../network/availability-distribution/src/lib.rs | 12 ++++++------ .../availability-distribution/src/requester/mod.rs | 8 +++++--- .../availability-distribution/src/requester/tests.rs | 4 ++-- prdoc/pr_5321.prdoc | 11 +++++++++++ 4 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 prdoc/pr_5321.prdoc diff --git a/polkadot/node/network/availability-distribution/src/lib.rs b/polkadot/node/network/availability-distribution/src/lib.rs index ec2c01f99b01..d3185e0af809 100644 --- a/polkadot/node/network/availability-distribution/src/lib.rs +++ b/polkadot/node/network/availability-distribution/src/lib.rs @@ -25,7 +25,7 @@ use polkadot_node_subsystem::{ jaeger, messages::AvailabilityDistributionMessage, overseer, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_primitives::Hash; +use polkadot_primitives::{BlockNumber, Hash}; use std::collections::HashMap; /// Error and [`Result`] type for this subsystem. @@ -104,7 +104,7 @@ impl AvailabilityDistributionSubsystem { /// Start processing work as passed on from the Overseer. 
 	async fn run<Context>(self, mut ctx: Context) -> std::result::Result<(), FatalError> {
 		let Self { mut runtime, recvs, metrics, req_protocol_names } = self;
-		let mut spans: HashMap<Hash, jaeger::PerLeafSpan> = HashMap::new();
+		let mut spans: HashMap<Hash, (BlockNumber, jaeger::PerLeafSpan)> = HashMap::new();
 
 		let IncomingRequestReceivers {
 			pov_req_receiver,
@@ -162,7 +162,7 @@ impl AvailabilityDistributionSubsystem {
 					};
 					let span =
 						jaeger::PerLeafSpan::new(cloned_leaf.span, "availability-distribution");
-					spans.insert(cloned_leaf.hash, span);
+					spans.insert(cloned_leaf.hash, (cloned_leaf.number, span));
 					log_error(
 						requester
 							.get_mut()
@@ -172,8 +172,8 @@ impl AvailabilityDistributionSubsystem {
 						&mut warn_freq,
 					)?;
 				},
-				FromOrchestra::Signal(OverseerSignal::BlockFinalized(hash, _)) => {
-					spans.remove(&hash);
+				FromOrchestra::Signal(OverseerSignal::BlockFinalized(_hash, finalized_number)) => {
+					spans.retain(|_hash, (block_number, _span)| *block_number > finalized_number);
 				},
 				FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()),
 				FromOrchestra::Communication {
@@ -189,7 +189,7 @@ impl AvailabilityDistributionSubsystem {
 				} => {
 					let span = spans
 						.get(&relay_parent)
-						.map(|span| span.child("fetch-pov"))
+						.map(|(_, span)| span.child("fetch-pov"))
 						.unwrap_or_else(|| jaeger::Span::new(&relay_parent, "fetch-pov"))
 						.with_trace_id(candidate_hash)
 						.with_candidate(candidate_hash)
diff --git a/polkadot/node/network/availability-distribution/src/requester/mod.rs b/polkadot/node/network/availability-distribution/src/requester/mod.rs
index efbdceb43bdd..0175161af70d 100644
--- a/polkadot/node/network/availability-distribution/src/requester/mod.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/mod.rs
@@ -39,7 +39,9 @@ use polkadot_node_subsystem_util::{
 	availability_chunks::availability_chunk_index,
 	runtime::{get_occupied_cores, RuntimeInfo},
 };
-use polkadot_primitives::{CandidateHash, CoreIndex, Hash, OccupiedCore, SessionIndex};
+use polkadot_primitives::{
+	BlockNumber, CandidateHash, CoreIndex, Hash, OccupiedCore, SessionIndex,
+};
 
 use super::{FatalError, Metrics, Result, LOG_TARGET};
 
@@ -112,14 +114,14 @@ impl Requester {
 		ctx: &mut Context,
 		runtime: &mut RuntimeInfo,
 		update: ActiveLeavesUpdate,
-		spans: &HashMap<Hash, jaeger::PerLeafSpan>,
+		spans: &HashMap<Hash, (BlockNumber, jaeger::PerLeafSpan)>,
 	) -> Result<()> {
 		gum::trace!(target: LOG_TARGET, ?update, "Update fetching heads");
 		let ActiveLeavesUpdate { activated, deactivated } = update;
 		if let Some(leaf) = activated {
 			let span = spans
 				.get(&leaf.hash)
-				.map(|span| span.child("update-fetching-heads"))
+				.map(|(_, span)| span.child("update-fetching-heads"))
 				.unwrap_or_else(|| jaeger::Span::new(&leaf.hash, "update-fetching-heads"))
 				.with_string_tag("leaf", format!("{:?}", leaf.hash))
 				.with_stage(jaeger::Stage::AvailabilityDistribution);
diff --git a/polkadot/node/network/availability-distribution/src/requester/tests.rs b/polkadot/node/network/availability-distribution/src/requester/tests.rs
index 09567a8f87d3..decb3156004e 100644
--- a/polkadot/node/network/availability-distribution/src/requester/tests.rs
+++ b/polkadot/node/network/availability-distribution/src/requester/tests.rs
@@ -208,7 +208,7 @@ fn check_ancestry_lookup_in_same_session() {
 	test_harness(test_state.clone(), |mut ctx| async move {
 		let chain = &test_state.relay_chain;
-		let spans: HashMap<Hash, jaeger::PerLeafSpan> = HashMap::new();
+		let spans: HashMap<Hash, (BlockNumber, jaeger::PerLeafSpan)> = HashMap::new();
 		let block_number = 1;
 		let update = ActiveLeavesUpdate {
 			activated: Some(new_leaf(chain[block_number], block_number as u32)),
@@ -281,7 +281,7 @@ fn check_ancestry_lookup_in_different_sessions() {
 	test_harness(test_state.clone(), |mut ctx| async move {
 		let chain = &test_state.relay_chain;
-		let spans: HashMap<Hash, jaeger::PerLeafSpan> = HashMap::new();
+		let spans: HashMap<Hash, (BlockNumber, jaeger::PerLeafSpan)> = HashMap::new();
 		let block_number = 3;
 		let update = ActiveLeavesUpdate {
 			activated: Some(new_leaf(chain[block_number], block_number as u32)),
diff --git a/prdoc/pr_5321.prdoc b/prdoc/pr_5321.prdoc
new file mode 100644
index 000000000000..97f75d28dd52
--- /dev/null
+++ b/prdoc/pr_5321.prdoc
@@ -0,0 +1,11 @@
+title: fix availability-distribution Jaeger spans memory leak
+
+doc:
+  - audience: Node Dev
+    description: |
+      Fixes a memory leak which caused the Jaeger span storage in availability-distribution to never be pruned, growing indefinitely.
+      This was caused by improper handling of finalized heads. More info in https://github.com/paritytech/polkadot-sdk/issues/5258
+
+crates:
+  - name: polkadot-availability-distribution
+    bump: patch

From b52cfc2605362b53a1a570c3c1b41d15481d0990 Mon Sep 17 00:00:00 2001
From: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com>
Date: Mon, 12 Aug 2024 17:58:33 +0200
Subject: [PATCH 21/74] chain-spec: minor clarification on the genesis config
 patch (#5324)

Added minor clarification on the genesis config patch
([link](https://substrate.stackexchange.com/questions/11813/in-the-genesis-config-what-does-the-patch-key-do/11825#11825))

---------

Co-authored-by: command-bot <>
---
 substrate/client/chain-spec/src/lib.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/substrate/client/chain-spec/src/lib.rs b/substrate/client/chain-spec/src/lib.rs
index c43f9e89b8a9..5451428d3481 100644
--- a/substrate/client/chain-spec/src/lib.rs
+++ b/substrate/client/chain-spec/src/lib.rs
@@ -172,6 +172,12 @@
 //!
 //!
 //!
+//! The main purpose of the `RuntimeGenesisConfig` patch is to:
+//! - minimize the maintenance effort when RuntimeGenesisConfig is changed in the future (e.g. new
+//!   pallets added to the runtime or a pallet's genesis config changed),
+//! - increase readability - it only contains the relevant fields,
+//! - allow applying numerous changes in distinct domains (e.g. for zombienet).
+//!
 //! For production or long-lasting blockchains, using the `raw` format in the chain specification is
 //! recommended. Only the `raw` format guarantees that storage root hash will remain unchanged when
 //! the `RuntimeGenesisConfig` format changes due to software upgrade.
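To make the patch idea above concrete, here is a minimal sketch of assembling a runtime genesis patch with `serde_json` (the `balances` and `sudo` fields and the account are illustrative placeholders; the available keys depend on the pallets actually present in the runtime):

use serde_json::json;

fn genesis_patch() -> serde_json::Value {
	// Only fields that deviate from the default `RuntimeGenesisConfig` are listed;
	// everything omitted keeps its default value.
	json!({
		"balances": {
			"balances": [["5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", 1_000_000_000u64]]
		},
		"sudo": { "key": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY" }
	})
}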
From bcc96733f7940edea9f3a782aea78a92c8bb61cf Mon Sep 17 00:00:00 2001
From: Nazar Mokrynskyi
Date: Mon, 12 Aug 2024 22:01:02 +0300
Subject: [PATCH 22/74] Remove unnecessary mut (#5318)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Trivial leftover from https://github.com/paritytech/polkadot-sdk/pull/4844

Co-authored-by: Adrian Catangiu
Co-authored-by: Bastian Köcher
---
 substrate/client/consensus/common/src/import_queue.rs | 4 ++--
 .../consensus/common/src/import_queue/basic_queue.rs | 7 +++----
 substrate/client/network/test/src/block_import.rs | 6 +++---
 3 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/substrate/client/consensus/common/src/import_queue.rs b/substrate/client/consensus/common/src/import_queue.rs
index 35fc8ad4a402..1baa67398a49 100644
--- a/substrate/client/consensus/common/src/import_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue.rs
@@ -225,7 +225,7 @@ pub async fn import_single_block<B: BlockT, V: Verifier<B>>(
 	import_handle: &mut impl BlockImport<B, Error = ConsensusError>,
 	block_origin: BlockOrigin,
 	block: IncomingBlock<B>,
-	verifier: &mut V,
+	verifier: &V,
 ) -> BlockImportResult<B> {
 	match verify_single_block_metered(import_handle, block_origin, block, verifier, None).await? {
 		SingleBlockVerificationOutcome::Imported(import_status) => Ok(import_status),
@@ -295,7 +295,7 @@ pub(crate) async fn verify_single_block_metered<B: BlockT, V: Verifier<B>>(
 	import_handle: &impl BlockImport<B, Error = ConsensusError>,
 	block_origin: BlockOrigin,
 	block: IncomingBlock<B>,
-	verifier: &mut V,
+	verifier: &V,
 	metrics: Option<&Metrics>,
 ) -> Result<SingleBlockVerificationOutcome<B>, BlockImportError> {
 	let peer = block.origin;
diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
index 05f2b2527961..6a307acb2517 100644
--- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs
+++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs
@@ -222,7 +222,7 @@ mod worker_messages {
 /// Returns when `block_import` ended.
 async fn block_import_process<B: BlockT>(
 	mut block_import: BoxBlockImport<B>,
-	mut verifier: impl Verifier<B>,
+	verifier: impl Verifier<B>,
 	mut result_sender: BufferedLinkSender<B>,
 	mut block_import_receiver: TracingUnboundedReceiver<worker_messages::ImportBlocks<B>>,
 	metrics: Option<Metrics>,
@@ -241,8 +241,7 @@ async fn block_import_process<B: BlockT>(
 		};
 
 		let res =
-			import_many_blocks(&mut block_import, origin, blocks, &mut verifier, metrics.clone())
-				.await;
+			import_many_blocks(&mut block_import, origin, blocks, &verifier, metrics.clone()).await;
 
 		result_sender.blocks_processed(res.imported, res.block_count, res.results);
 	}
@@ -388,7 +387,7 @@ async fn import_many_blocks<B: BlockT, V: Verifier<B>>(
 	import_handle: &mut BoxBlockImport<B>,
 	blocks_origin: BlockOrigin,
 	blocks: Vec<IncomingBlock<B>>,
-	verifier: &mut V,
+	verifier: &V,
 	metrics: Option<Metrics>,
 ) -> ImportManyBlocksResult<B> {
 	let count = blocks.len();
diff --git a/substrate/client/network/test/src/block_import.rs b/substrate/client/network/test/src/block_import.rs
index 690a579e0272..dad5a27e9535 100644
--- a/substrate/client/network/test/src/block_import.rs
+++ b/substrate/client/network/test/src/block_import.rs
@@ -78,7 +78,7 @@ fn import_single_good_block_works() {
 		&mut substrate_test_runtime_client::new(),
 		BlockOrigin::File,
 		block,
-		&mut PassThroughVerifier::new(true),
+		&PassThroughVerifier::new(true),
 	)) {
 		Ok(BlockImportStatus::ImportedUnknown(ref num, ref aux, ref org))
 			if *num == number && *aux == expected_aux && *org == Some(peer_id.into()) => {},
@@ -93,7 +93,7 @@ fn import_single_good_known_block_is_ignored() {
 		&mut client,
 		BlockOrigin::File,
 		block,
-		&mut PassThroughVerifier::new(true),
+		&PassThroughVerifier::new(true),
 	)) {
 		Ok(BlockImportStatus::ImportedKnown(ref n, _)) if *n == number => {},
 		_ => panic!(),
@@ -108,7 +108,7 @@ fn import_single_good_block_without_header_fails() {
 		&mut substrate_test_runtime_client::new(),
 		BlockOrigin::File,
 		block,
-		&mut PassThroughVerifier::new(true),
+		&PassThroughVerifier::new(true),
 	)) {
 		Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id.into()) => {},
 		_ => panic!(),

From aca25a009f7492d3c5ef07d62363f6812688355b Mon Sep 17 00:00:00 2001
From: Javier Bullrich
Date: Mon, 12 Aug 2024 20:49:44 +0100
Subject: [PATCH 23/74] ci: Paused `cmd-action` commenter (#5287)

Paused the action which comments on every command starting with `bot `
until we can fix all the commands which are not working.

---
 .github/workflows/command-inform.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/command-inform.yml b/.github/workflows/command-inform.yml
index 2825f4a60460..afdcf4c1b7b9 100644
--- a/.github/workflows/command-inform.yml
+++ b/.github/workflows/command-inform.yml
@@ -7,7 +7,8 @@ on:
 jobs:
   comment:
     runs-on: ubuntu-latest
-    if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ')
+    # Temporarily disable the bot until the new command bot works properly
+    if: github.event.issue.pull_request && startsWith(github.event.comment.body, 'bot ') && false
     steps:
       - name: Inform that the new command exist
         uses: actions/github-script@v7

From 8e8dc618d1a73e7ef169b54551edc6904beb9ae8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Thei=C3=9Fen?=
Date: Mon, 12 Aug 2024 21:19:39 +0200
Subject: [PATCH 24/74] `polkadot-node-core-pvf-common`: Fix test compilation
 error (#5310)

This crate only uses `tempfile` on linux but includes it unconditionally in
its `Cargo.toml`. It also sets `#![deny(unused_crate_dependencies)]`. This
leads to a hard error on anything that is not Linux.

This PR fixes this error. I am wondering why CI didn't catch that.
Shouldn't the test at least be compiled (but not run) on macOS?

---
 polkadot/node/core/pvf/common/Cargo.toml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/polkadot/node/core/pvf/common/Cargo.toml b/polkadot/node/core/pvf/common/Cargo.toml
index 18b3f959c955..bf663b4cfcea 100644
--- a/polkadot/node/core/pvf/common/Cargo.toml
+++ b/polkadot/node/core/pvf/common/Cargo.toml
@@ -42,6 +42,8 @@ seccompiler = "0.4.0"
 
 [dev-dependencies]
 assert_matches = { workspace = true }
+
+[target.'cfg(target_os = "linux")'.dev-dependencies]
 tempfile = { workspace = true }
 
 [features]

From 79e9aa5811dc108a0d423926b2440ac127635fbc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 12 Aug 2024 21:22:05 +0200
Subject: [PATCH 25/74] Bump the known_good_semver group across 1 directory
 with 3 updates (#5315)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps the known_good_semver group with 2 updates in the / directory:
[serde](https://github.com/serde-rs/serde) and
[serde_json](https://github.com/serde-rs/json).

Updates `serde` from 1.0.204 to 1.0.206
Release notes

Sourced from serde's releases.

v1.0.206

  • Improve support for flatten attribute inside of enums (#2567, thanks @Mingun)

v1.0.205

  • Use serialize_entry instead of serialize_key + serialize_value when serializing flattened newtype enum variants (#2785, thanks @Mingun)
  • Avoid triggering a collection_is_never_read lint in the deserialization of enums containing flattened fields (#2791)

Updates `serde_derive` from 1.0.204 to 1.0.206; its release notes are identical to serde's above.

Updates `serde_json` from 1.0.121 to 1.0.124

Release notes

Sourced from serde_json's releases.

v1.0.124

  • Fix skip_to_escape on big-endian architectures (#1173)

v1.0.123

  • Vectorized string parsing (#1161)

v1.0.122

  • Support using json! in no-std crates (#1166)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 12 ++++++------ Cargo.toml | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6ebacc9ec5a8..87ff24663ffc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18838,9 +18838,9 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc76f558e0cbb2a839d37354c575f1dc3fdc6546b5be373ba43d95f231bf7c12" +checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" dependencies = [ "serde_derive", ] @@ -18865,9 +18865,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.204" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0cd7e117be63d3c3678776753929474f3b04a43a080c744d6b0ae2a8c28e222" +checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.36", @@ -18896,9 +18896,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ "indexmap 2.2.3", "itoa", diff --git a/Cargo.toml b/Cargo.toml index 7ae7c3bd1811..2333dd15fce8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1172,10 +1172,10 @@ secp256k1 = { version = "0.28.0", default-features = false } secrecy = { version = "0.8.0", default-features = false } seedling-runtime = { path = "cumulus/parachains/runtimes/starters/seedling" } separator = { version = "0.4.1" } -serde = { version = "1.0.204", default-features = false } +serde = { version = "1.0.206", default-features = false } serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } -serde_json = { version = "1.0.121", default-features = false } +serde_json = { version = "1.0.124", default-features = false } serde_yaml = { version = "0.9" } serial_test = { version = "2.0.0" } sha1 = { version = "0.10.6" } From bc22f086bb54fd2665e5376382014a3636ad5808 Mon Sep 17 00:00:00 2001 From: Elias Rad <146735585+nnsW3@users.noreply.github.com> Date: Mon, 12 Aug 2024 23:24:48 +0300 Subject: [PATCH 26/74] Fix spelling issues (#5206) Hello I found several spelling errors. Br, Elias. --- docs/contributor/CONTRIBUTING.md | 4 ++-- docs/contributor/DOCUMENTATION_GUIDELINES.md | 4 ++-- docs/contributor/PULL_REQUEST_TEMPLATE.md | 2 +- substrate/docs/SECURITY.md | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/contributor/CONTRIBUTING.md b/docs/contributor/CONTRIBUTING.md index d8f956b82d2d..2e2d7a7fb4f7 100644 --- a/docs/contributor/CONTRIBUTING.md +++ b/docs/contributor/CONTRIBUTING.md @@ -42,13 +42,13 @@ The set of labels and their description can be found [here](https://paritytech.g 3. If you’re still working on your PR, please submit as “Draft”. Once a PR is ready for review change the status to “Open”, so that the maintainers get to review your PR. Generally PRs should sit for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -4. With respect to auditing, please see [AUDIT.md](../AUDIT.md). 
In general, merging to master can happen independent of +4. With respect to auditing, please see [AUDIT.md](../AUDIT.md). In general, merging to master can happen independently of audit. 5. PRs will be able to be merged once all reviewers' comments are addressed and CI is successful. **Noting breaking changes:** When breaking APIs, the PR description should mention what was changed alongside some examples on how to change the code to make it work/compile. It should also mention potential storage migrations and if -they require some special setup aside adding it to the list of migrations in the runtime. +they require some special setup aside from adding it to the list of migrations in the runtime. ## Reviewing pull requests diff --git a/docs/contributor/DOCUMENTATION_GUIDELINES.md b/docs/contributor/DOCUMENTATION_GUIDELINES.md index cc1082347d20..5ac99fff1cdb 100644 --- a/docs/contributor/DOCUMENTATION_GUIDELINES.md +++ b/docs/contributor/DOCUMENTATION_GUIDELINES.md @@ -136,7 +136,7 @@ the `macro@my_macro_name` syntax in your link. Read more about how to correctly The above five guidelines must always be reasonably respected in the documentation. -The following are a set of notes that may not necessarily hold in all circumstances: +The following is a set of notes that may not necessarily hold in all circumstances: --- @@ -205,7 +205,7 @@ properly do this. ## Pallet Crates -The guidelines so far have been general in nature, and are applicable to crates that are pallets and crates that're not +The guidelines so far have been general in nature, and are applicable to crates that are pallets and crates that are not pallets. The following is relevant to how to document parts of a crate that is a pallet. See diff --git a/docs/contributor/PULL_REQUEST_TEMPLATE.md b/docs/contributor/PULL_REQUEST_TEMPLATE.md index 083b30b4a356..9612ab6d8d3e 100644 --- a/docs/contributor/PULL_REQUEST_TEMPLATE.md +++ b/docs/contributor/PULL_REQUEST_TEMPLATE.md @@ -20,7 +20,7 @@ reviewed by reviewers, if the PR does NOT have the `R0-Silent` label. In case of ## Review Notes -*In depth notes about the **implenentation** details of your PR. This should be the main guide for reviewers to +*In depth notes about the **implementation** details of your PR. This should be the main guide for reviewers to understand your approach and effectively review it. If too long, use [`
`<details>`](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/details)*.

diff --git a/substrate/docs/SECURITY.md b/substrate/docs/SECURITY.md
index 0d2064863d85..f52da0327132 100644
--- a/substrate/docs/SECURITY.md
+++ b/substrate/docs/SECURITY.md
@@ -8,7 +8,7 @@ required to address security issues.
 ## Reporting a Vulnerability
 
 Security vulnerabilities in Parity software should be reported by email to security@parity.io. If you think your report
-might be eligible for the Parity Bug Bounty Program, your email should be send to bugbounty@parity.io.
+might be eligible for the Parity Bug Bounty Program, your email should be sent to bugbounty@parity.io.
 
 Your report should include the following:

From 819a5818284a96a5a5bd65ce67e69bab860d4534 Mon Sep 17 00:00:00 2001
From: eskimor
Date: Mon, 12 Aug 2024 22:34:22 +0200
Subject: [PATCH 27/74] Bump authoring duration for async backing to 2s.
 (#5195)

Should be safe on all production networks. I noticed that Paseo needs to be
updated, it is lagging behind in a couple of things.

Execution environment parameters should be updated to those of Polkadot:

```
[
  { MaxMemoryPages: 8,192 }
  { PvfExecTimeout: [ Backing 2,500 ] }
  { PvfExecTimeout: [ Approval 15,000 ] }
]
```

---------

Co-authored-by: eskimor
---
 cumulus/polkadot-parachain/src/service.rs | 2 +-
 docs/sdk/src/guides/async_backing_guide.rs | 4 ++--
 prdoc/pr_5195.prdoc | 15 +++++++++++++++
 3 files changed, 18 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_5195.prdoc

diff --git a/cumulus/polkadot-parachain/src/service.rs b/cumulus/polkadot-parachain/src/service.rs
index 80698a2d7115..3b9ae6bd4457 100644
--- a/cumulus/polkadot-parachain/src/service.rs
+++ b/cumulus/polkadot-parachain/src/service.rs
@@ -832,7 +832,7 @@ where
 				relay_chain_slot_duration,
 				proposer: Proposer::new(proposer_factory),
 				collator_service,
-				authoring_duration: Duration::from_millis(1500),
+				authoring_duration: Duration::from_millis(2000),
 				reinitialize: false,
 			},
 		};
diff --git a/docs/sdk/src/guides/async_backing_guide.rs b/docs/sdk/src/guides/async_backing_guide.rs
index a9fda2c3aa8a..25ef3a12cbf0 100644
--- a/docs/sdk/src/guides/async_backing_guide.rs
+++ b/docs/sdk/src/guides/async_backing_guide.rs
@@ -174,7 +174,7 @@
 //! - In the `para_client` field, pass in a cloned para client rather than the original
 //! - Add a `para_backend` parameter after `para_client`, passing in our para backend
 //! - Provide a `code_hash_provider` closure like that shown below
-//! - Increase `authoring_duration` from 500 milliseconds to 1500
+//! - Increase `authoring_duration` from 500 milliseconds to 2000
 //! ```ignore
 //! let params = AuraParams {
 //! 	..
 //!
@@ -185,7 +185,7 @@
 //! 		client.code_at(block_hash).ok().map(|c| ValidationCode::from(c).hash())
 //! 	},
 //! 	..
-//! 	authoring_duration: Duration::from_millis(1500),
+//! 	authoring_duration: Duration::from_millis(2000),
 //! 	..
 //! };
 //! ```
diff --git a/prdoc/pr_5195.prdoc b/prdoc/pr_5195.prdoc
new file mode 100644
index 000000000000..cfd435fa289d
--- /dev/null
+++ b/prdoc/pr_5195.prdoc
@@ -0,0 +1,15 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Bump Aura authoring duration to 2s.
+
+doc:
+  - audience: Node Dev
+    description: |
+      This PR bumps the Aura authoring duration in the asynchronous backing
+      guide and the polkadot-parachain service file to 2s in order to make
+      better use of the provided coretime.
+
+crates:
+  - name: polkadot-parachain-bin
+    bump: patch

From c5f6b700bd1f3fd6a7fa40405987782bdecec636 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 13 Aug 2024 10:28:31 +0200
Subject: [PATCH 28/74] Bump libp2p-identity from 0.2.8 to 0.2.9 (#5232)

Bumps [libp2p-identity](https://github.com/libp2p/rust-libp2p) from 0.2.8 to 0.2.9.
Release notes

Sourced from libp2p-identity's releases.

libp2p-v0.53.2

See individual changelogs for details.

libp2p-v0.53.1

See individual changelogs for details.

libp2p-v0.53.0

The most ergonomic version of rust-libp2p yet!

We've been busy again, with over 250 PRs being merged into master since v0.52.0 (excluding dependency updates).

Backwards-compatible features

Numerous improvements landed as patch releases since the v0.52.0 release, for example a new, type-safe SwarmBuilder that also encompasses the most common transport protocols:

let mut swarm =
libp2p::SwarmBuilder::with_new_identity()
    .with_tokio()
    .with_tcp(
        tcp::Config::default().port_reuse(true).nodelay(true),
        noise::Config::new,
        yamux::Config::default,
    )?
    .with_quic()
    .with_dns()?
    .with_relay_client(noise::Config::new, yamux::Config::default)?
    .with_behaviour(|keypair, relay_client| Behaviour {
        relay_client,
        ping: ping::Behaviour::default(),
        dcutr: dcutr::Behaviour::new(keypair.public().to_peer_id()),
    })?
    .build();

The new builder makes heavy use of the type-system to guide you towards a correct composition of all transports. For example, it is important to compose the DNS transport as a wrapper around all other transports but before the relay transport. Luckily, you no longer need to worry about these details as the builder takes care of that for you! Have a look yourself if you dare here but be warned, the internals are a bit wild :)

Some more features that we were able to ship in v0.52.X patch-releases include:

We always try to ship as many features as possible in a backwards-compatible way to get them to you faster. Often times, these come with deprecations to give you a heads-up about what will change in a future version. We advise updating to each intermediate version rather than skipping directly to the most recent one, to avoid missing any crucial deprecation warnings. We highly recommend you stay up-to-date with the latest version to make upgrades as smooth as possible.

Some improvements we unfortunately cannot ship in a way that Rust considers a non-breaking change but with every release, we attempt to smoothen the way for future upgrades.

#[non_exhaustive] on key enums

... (truncated)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 87ff24663ffc..33ebf14be0e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6778,9 +6778,9 @@ checksum = "6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -7910,9 +7910,9 @@ dependencies = [ [[package]] name = "libp2p-identity" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "999ec70441b2fb35355076726a6bc466c932e9bdc66f6a11c6c0aa17c7ab9be0" +checksum = "55cca1eb2bc1fd29f099f3daaab7effd01e1a54b7c577d0ed082521034d912e8" dependencies = [ "bs58 0.5.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 2333dd15fce8..74b12f96603a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -819,7 +819,7 @@ lazy_static = { version = "1.4.0" } libc = { version = "0.2.155" } libfuzzer-sys = { version = "0.4" } libp2p = { version = "0.52.4" } -libp2p-identity = { version = "0.2.3" } +libp2p-identity = { version = "0.2.9" } libsecp256k1 = { version = "0.7.0", default-features = false } linked-hash-map = { version = "0.5.4" } linked_hash_set = { version = "0.1.4" } From ae1b84df5c5276f235ddd1447b8b1afd22f4c377 Mon Sep 17 00:00:00 2001 From: Maksym H <1177472+mordamax@users.noreply.github.com> Date: Tue, 13 Aug 2024 11:59:09 +0100 Subject: [PATCH 29/74] Create subsystem-benchmarks.yml (#5325) Closes https://github.com/paritytech/ci_cd/issues/1014 Adds subsystem-benchmarking in GHA (only works with temp label) --- .github/workflows/subsystem-benchmarks.yml | 82 ++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 .github/workflows/subsystem-benchmarks.yml diff --git a/.github/workflows/subsystem-benchmarks.yml b/.github/workflows/subsystem-benchmarks.yml new file mode 100644 index 000000000000..7c19b420a6ac --- /dev/null +++ b/.github/workflows/subsystem-benchmarks.yml @@ -0,0 +1,82 @@ +on: + push: + branches: + - master + pull_request: + types: [ opened, synchronize, reopened, closed, labeled ] + merge_group: + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: write + +jobs: + set-image: + # TODO: remove once migration is complete or this workflow is fully stable + if: contains(github.event.label.name, 'GHA-migration') + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. 
+ runs-on: ubuntu-latest + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + + build: + needs: [ set-image ] + runs-on: arc-runners-polkadot-sdk-benchmark + container: + image: ${{ needs.set-image.outputs.IMAGE }} + env: + BENCH_DIR: ./charts/bench/${{ matrix.features.bench }} + BENCH_FILE_NAME: ${{ matrix.features.bench }} + strategy: + fail-fast: false + matrix: + features: [ + { name: "polkadot-availability-recovery", bench: "availability-recovery-regression-bench" }, + { name: "polkadot-availability-distribution", bench: "availability-distribution-regression-bench" }, + { name: "polkadot-node-core-approval-voting", bench: "approval-voting-regression-bench" }, + { name: "polkadot-statement-distribution", bench: "statement-distribution-regression-bench" } + ] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Check Rust + run: | + rustup show + rustup +nightly show + + - name: Run Benchmarks + continue-on-error: true + id: run-benchmarks + run: | + cargo bench -p ${{ matrix.features.name }} --bench ${{ matrix.features.bench }} --features subsystem-benchmarks || echo "Benchmarks failed" + ls -lsa ./charts + mkdir -p $BENCH_DIR || echo "Directory exists" + cp charts/${BENCH_FILE_NAME}.json $BENCH_DIR + ls -lsa $BENCH_DIR + # Fixes "detected dubious ownership" error in the ci + git config --global --add safe.directory '*' + + - name: Publish result to GH Pages + if: ${{ steps.run-benchmarks.outcome == 'success' }} + uses: benchmark-action/github-action-benchmark@v1 + with: + tool: "customSmallerIsBetter" + name: ${{ env.BENCH_FILE_NAME }} + output-file-path: ${{ env.BENCH_DIR }}/${{ env.BENCH_FILE_NAME }}.json + benchmark-data-dir-path: ${{ env.BENCH_DIR }} + github-token: ${{ secrets.GITHUB_TOKEN }} + comment-on-alert: ${{ github.event_name == 'pull_request' }} # will comment on PRs if regression is detected + auto-push: false # TODO: enable when gitlab part is removed ${{ github.ref == 'refs/heads/master' }} + From 0d7bd6badce2d53bb332fc48d4a32e828267cf7e Mon Sep 17 00:00:00 2001 From: Branislav Kontur Date: Tue, 13 Aug 2024 13:11:12 +0200 Subject: [PATCH 30/74] Small nits found accidentally along the way (#5341) --- Cargo.lock | 8 -------- cumulus/primitives/aura/Cargo.toml | 10 ---------- cumulus/primitives/parachain-inherent/Cargo.toml | 4 ---- cumulus/primitives/utility/Cargo.toml | 5 ----- polkadot/xcm/xcm-executor/src/lib.rs | 2 +- 5 files changed, 1 insertion(+), 28 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 33ebf14be0e8..60be41563299 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4256,12 +4256,8 @@ dependencies = [ name = "cumulus-primitives-aura" version = "0.7.0" dependencies = [ - "parity-scale-codec", - "polkadot-core-primitives", - "polkadot-primitives", "sp-api", "sp-consensus-aura", - "sp-runtime", ] [[package]] @@ -4289,8 +4285,6 @@ dependencies = [ "scale-info", "sp-core", "sp-inherents", - "sp-runtime", - "sp-state-machine", "sp-trie", ] @@ -4343,8 +4337,6 @@ dependencies = [ "pallet-asset-conversion", "parity-scale-codec", "polkadot-runtime-common", - "polkadot-runtime-parachains", - "sp-io", "sp-runtime", "staging-xcm", "staging-xcm-builder", diff --git a/cumulus/primitives/aura/Cargo.toml b/cumulus/primitives/aura/Cargo.toml index 062b9ce736e7..185b2d40833f 100644 --- a/cumulus/primitives/aura/Cargo.toml +++ b/cumulus/primitives/aura/Cargo.toml @@ -10,24 +10,14 @@ description = "Core primitives for Aura in Cumulus" 
workspace = true [dependencies] -codec = { features = ["derive"], workspace = true } # Substrate sp-api = { workspace = true } sp-consensus-aura = { workspace = true } -sp-runtime = { workspace = true } - -# Polkadot -polkadot-core-primitives = { workspace = true } -polkadot-primitives = { workspace = true } [features] default = ["std"] std = [ - "codec/std", - "polkadot-core-primitives/std", - "polkadot-primitives/std", "sp-api/std", "sp-consensus-aura/std", - "sp-runtime/std", ] diff --git a/cumulus/primitives/parachain-inherent/Cargo.toml b/cumulus/primitives/parachain-inherent/Cargo.toml index 172af4b9ec63..a4271d3fd9cc 100644 --- a/cumulus/primitives/parachain-inherent/Cargo.toml +++ b/cumulus/primitives/parachain-inherent/Cargo.toml @@ -17,8 +17,6 @@ scale-info = { features = ["derive"], workspace = true } # Substrate sp-core = { workspace = true } sp-inherents = { workspace = true } -sp-runtime = { optional = true, workspace = true } -sp-state-machine = { optional = true, workspace = true } sp-trie = { workspace = true } # Cumulus @@ -33,7 +31,5 @@ std = [ "scale-info/std", "sp-core/std", "sp-inherents/std", - "sp-runtime?/std", - "sp-state-machine?/std", "sp-trie/std", ] diff --git a/cumulus/primitives/utility/Cargo.toml b/cumulus/primitives/utility/Cargo.toml index 82d18c8c0aac..2ca8b82001d5 100644 --- a/cumulus/primitives/utility/Cargo.toml +++ b/cumulus/primitives/utility/Cargo.toml @@ -15,13 +15,11 @@ log = { workspace = true } # Substrate frame-support = { workspace = true } -sp-io = { workspace = true } sp-runtime = { workspace = true } pallet-asset-conversion = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true } -polkadot-runtime-parachains = { workspace = true } xcm = { workspace = true } xcm-executor = { workspace = true } xcm-builder = { workspace = true } @@ -38,8 +36,6 @@ std = [ "log/std", "pallet-asset-conversion/std", "polkadot-runtime-common/std", - "polkadot-runtime-parachains/std", - "sp-io/std", "sp-runtime/std", "xcm-builder/std", "xcm-executor/std", @@ -51,7 +47,6 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "pallet-asset-conversion/runtime-benchmarks", "polkadot-runtime-common/runtime-benchmarks", - "polkadot-runtime-parachains/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "xcm-builder/runtime-benchmarks", "xcm-executor/runtime-benchmarks", diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index 5101a97dc842..74561e931e7e 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -809,7 +809,7 @@ impl XcmExecutor { }; let actual_weight = maybe_actual_weight.unwrap_or(weight); let surplus = weight.saturating_sub(actual_weight); - // We assume that the `Config::Weigher` will counts the `require_weight_at_most` + // We assume that the `Config::Weigher` will count the `require_weight_at_most` // for the estimate of how much weight this instruction will take. Now that we know // that it's less, we credit it. // From b78d7955e280309dc7cbd6b0b40819e942dfc597 Mon Sep 17 00:00:00 2001 From: Oliver Tale-Yazdi Date: Tue, 13 Aug 2024 15:25:09 +0200 Subject: [PATCH 31/74] [Bot] Add prdoc generation (#5331) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a bot to automatically generate prdocs that have all the crates populated. 
---------

Signed-off-by: Oliver Tale-Yazdi
Co-authored-by: Bastian Köcher
---
 .github/commands-readme.md | 11 +++
 .github/scripts/generate-prdoc.py | 112 ++++++++++++++++++++++++++++
 .github/workflows/command-prdoc.yml | 78 +++++++++++++++++++
 3 files changed, 201 insertions(+)
 create mode 100644 .github/scripts/generate-prdoc.py
 create mode 100644 .github/workflows/command-prdoc.yml

diff --git a/.github/commands-readme.md b/.github/commands-readme.md
index 793524e056f8..ce4e0fd0d789 100644
--- a/.github/commands-readme.md
+++ b/.github/commands-readme.md
@@ -10,6 +10,7 @@ The current available command actions are:
 
 - [Command FMT](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-fmt.yml)
 - [Command Update UI](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-update-ui.yml)
+- [Command Prdoc](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-prdoc.yml)
 - [Command Sync](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-sync.yml)
 - [Command Bench](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench.yml)
 - [Command Bench All](https://github.com/paritytech/polkadot-sdk/actions/workflows/command-bench-all.yml)
@@ -235,6 +236,16 @@ You can use the following [`gh cli`](https://cli.github.com/) inside the repo:
 gh workflow run command-bench-overheard.yml -f pr=1000 -f benchmark=substrate -f runtime=rococo -f target_dir=substrate
 ```
 
+### PrDoc
+
+Generate a PrDoc with the `crates` section populated from all modified crates.
+
+Options:
+- `pr`: The PR number to generate the PrDoc for.
+- `audience`: The audience that the changes concern.
+- `bump`: A default bump level for all crates. The PrDoc will likely need to be edited to reflect the actual changes after generation.
+- `overwrite`: Whether to overwrite any existing PrDoc.
+
 ### Sync
 
 Run sync and commit back results to PR.
diff --git a/.github/scripts/generate-prdoc.py b/.github/scripts/generate-prdoc.py
new file mode 100644
index 000000000000..b7b2e6f970fa
--- /dev/null
+++ b/.github/scripts/generate-prdoc.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python3
+
+"""
+Generate the PrDoc for a Pull Request with a specific number, audience and bump level.
+
+It downloads and parses the patch from the GitHub API to populate the prdoc with all modified crates.
+This will delete any prdoc that already exists for the PR if `--force` is passed.
+
+Usage:
+	python generate-prdoc.py --pr 1234 --audience "TODO" --bump "TODO"
+"""
+
+import argparse
+import os
+import re
+import sys
+import subprocess
+import toml
+import yaml
+import requests
+
+from github import Github
+import whatthepatch
+from cargo_workspace import Workspace
+
+# Download the patch and pass the info into `create_prdoc`.
+def from_pr_number(n, audience, bump, force):
+	print(f"Fetching PR '{n}' from GitHub")
+	g = Github()
+
+	repo = g.get_repo("paritytech/polkadot-sdk")
+	pr = repo.get_pull(n)
+
+	patch_url = pr.patch_url
+	patch = requests.get(patch_url).text
+
+	create_prdoc(n, audience, pr.title, pr.body, patch, bump, force)
+
+def create_prdoc(pr, audience, title, description, patch, bump, force):
+	path = f"prdoc/pr_{pr}.prdoc"
+
+	if os.path.exists(path):
+		if force == True:
+			print(f"Overwriting existing PrDoc for PR {pr}")
+		else:
+			print(f"PrDoc already exists for PR {pr}.
Use --force to overwrite.") + sys.exit(1) + else: + print(f"No preexisting PrDoc for PR {pr}") + + prdoc = { "doc": [{}], "crates": [] } + + prdoc["title"] = title + prdoc["doc"][0]["audience"] = audience + prdoc["doc"][0]["description"] = description + + workspace = Workspace.from_path(".") + + modified_paths = [] + for diff in whatthepatch.parse_patch(patch): + modified_paths.append(diff.header.new_path) + + modified_crates = {} + for p in modified_paths: + # Go up until we find a Cargo.toml + p = os.path.join(workspace.path, p) + while not os.path.exists(os.path.join(p, "Cargo.toml")): + p = os.path.dirname(p) + + with open(os.path.join(p, "Cargo.toml")) as f: + manifest = toml.load(f) + + if not "package" in manifest: + print(f"File was not in any crate: {p}") + continue + + crate_name = manifest["package"]["name"] + if workspace.crate_by_name(crate_name).publish: + modified_crates[crate_name] = True + else: + print(f"Skipping unpublished crate: {crate_name}") + + print(f"Modified crates: {modified_crates.keys()}") + + for crate_name in modified_crates.keys(): + entry = { "name": crate_name } + + if bump == 'silent' or bump == 'ignore' or bump == 'no change': + entry["validate"] = False + else: + entry["bump"] = bump + + print(f"Adding crate {entry}") + prdoc["crates"].append(entry) + + # write the parsed PR documentation back to the file + with open(path, "w") as f: + yaml.dump(prdoc, f) + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--pr", type=int, required=True) + parser.add_argument("--audience", type=str, default="TODO") + parser.add_argument("--bump", type=str, default="TODO") + parser.add_argument("--force", type=str) + return parser.parse_args() + +if __name__ == "__main__": + args = parse_args() + force = True if args.force.lower() == "true" else False + print(f"Args: {args}, force: {force}") + from_pr_number(args.pr, args.audience, args.bump, force) diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml new file mode 100644 index 000000000000..da8f14089cb8 --- /dev/null +++ b/.github/workflows/command-prdoc.yml @@ -0,0 +1,78 @@ +name: Command PrDoc + +on: + workflow_dispatch: + inputs: + pr: + type: number + description: Number of the Pull Request + required: true + bump: + type: choice + description: Default bump level for all crates + default: "TODO" + required: true + options: + - "TODO" + - "no change" + - "patch" + - "minor" + - "major" + audience: + type: choice + description: Audience of the PrDoc + default: "TODO" + required: true + options: + - "TODO" + - "Runtime Dev" + - "Runtime User" + - "Node Dev" + - "Node User" + overwrite: + type: choice + description: Overwrite existing PrDoc + default: "true" + required: true + options: + - "true" + - "false" + +concurrency: + group: command-prdoc + cancel-in-progress: true + +jobs: + cmd-prdoc: + runs-on: ubuntu-latest + timeout-minutes: 20 + permissions: + contents: write + pull-requests: write + steps: + - name: Download repo + uses: actions/checkout@v4 + - name: Install gh cli + id: gh + uses: ./.github/actions/set-up-gh + with: + pr-number: ${{ inputs.pr }} + GH_TOKEN: ${{ github.token }} + - name: Generate PrDoc + run: | + python3 -m pip install -q cargo-workspace PyGithub whatthepatch pyyaml toml + + python3 .github/scripts/generate-prdoc.py --pr "${{ inputs.pr }}" --bump "${{ inputs.bump }}" --audience "${{ inputs.audience }}" --force "${{ inputs.overwrite }}" + + - name: Report failure + if: ${{ failure() }} + run: gh pr comment ${{ inputs.pr }} 
--body "<h2>Command failed ❌</h2>
Run by @${{ github.actor }} for ${{ github.workflow }} failed. See logs here." + env: + RUN: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + GH_TOKEN: ${{ github.token }} + - name: Push Commit + uses: stefanzweifel/git-auto-commit-action@v5 + with: + commit_message: Add PrDoc (auto generated) + branch: ${{ steps.gh.outputs.branch }} + file_pattern: 'prdoc/*.prdoc' From 4f4de36489ffeb973244a4350a2c7b6b443b676b Mon Sep 17 00:00:00 2001 From: georgepisaltu Date: Tue, 13 Aug 2024 17:22:55 +0300 Subject: [PATCH 32/74] Add transaction extension example Signed-off-by: georgepisaltu --- Cargo.lock | 17 ++ Cargo.toml | 1 + substrate/frame/examples/Cargo.toml | 2 + .../authorization-tx-extension/Cargo.toml | 45 +++ .../src/extensions.rs | 163 +++++++++++ .../authorization-tx-extension/src/lib.rs | 155 ++++++++++ .../authorization-tx-extension/src/mock.rs | 107 +++++++ .../authorization-tx-extension/src/tests.rs | 269 ++++++++++++++++++ substrate/frame/examples/src/lib.rs | 8 +- .../support/src/transaction_extensions.rs | 11 + .../primitives/runtime/src/traits/mod.rs | 8 +- 11 files changed, 783 insertions(+), 3 deletions(-) create mode 100644 substrate/frame/examples/authorization-tx-extension/Cargo.toml create mode 100644 substrate/frame/examples/authorization-tx-extension/src/extensions.rs create mode 100644 substrate/frame/examples/authorization-tx-extension/src/lib.rs create mode 100644 substrate/frame/examples/authorization-tx-extension/src/mock.rs create mode 100644 substrate/frame/examples/authorization-tx-extension/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 814311c84368..90776ef504eb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10631,6 +10631,22 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-example-authorization-tx-extension" +version = "4.0.0-dev" +dependencies = [ + "docify", + "frame-support", + "frame-system", + "log", + "parity-scale-codec", + "scale-info", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", +] + [[package]] name = "pallet-example-basic" version = "27.0.0" @@ -10756,6 +10772,7 @@ version = "4.0.0-dev" dependencies = [ "pallet-default-config-example", "pallet-dev-mode", + "pallet-example-authorization-tx-extension", "pallet-example-basic", "pallet-example-frame-crate", "pallet-example-kitchensink", diff --git a/Cargo.toml b/Cargo.toml index 7ae7c3bd1811..4967fa836f92 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -914,6 +914,7 @@ pallet-example-offchain-worker = { path = "substrate/frame/examples/offchain-wor pallet-example-single-block-migrations = { path = "substrate/frame/examples/single-block-migrations", default-features = false } pallet-example-split = { path = "substrate/frame/examples/split", default-features = false } pallet-example-tasks = { path = "substrate/frame/examples/tasks", default-features = false } +pallet-example-authorization-tx-extension = { path = "substrate/frame/examples/authorization-tx-extension", default-features = false } pallet-examples = { path = "substrate/frame/examples" } pallet-fast-unstake = { path = "substrate/frame/fast-unstake", default-features = false } pallet-glutton = { path = "substrate/frame/glutton", default-features = false } diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml index ee0f8df29cf5..cb04e890b56a 100644 --- a/substrate/frame/examples/Cargo.toml +++ b/substrate/frame/examples/Cargo.toml @@ -25,6 +25,7 @@ pallet-example-offchain-worker = { workspace = true } pallet-example-split = { workspace = true 
} pallet-example-single-block-migrations = { workspace = true } pallet-example-tasks = { workspace = true } +pallet-example-authorization-tx-extension = { workspace = true } [features] default = ["std"] @@ -38,6 +39,7 @@ std = [ "pallet-example-single-block-migrations/std", "pallet-example-split/std", "pallet-example-tasks/std", + "pallet-example-authorization-tx-extension/std", ] try-runtime = [ "pallet-default-config-example/try-runtime", diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml new file mode 100644 index 000000000000..4cd637772851 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "pallet-example-authorization-tx-extension" +version = "4.0.0-dev" +authors.workspace = true +edition.workspace = true +license = "MIT-0" +homepage.workspace = true +repository.workspace = true +description = "FRAME example authorization transaction extension pallet" +publish = false + +[lints] +workspace = true + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { workspace = true } +docify = { workspace = true } +log = { workspace = true } +scale-info = { features = ["derive"], workspace = true } + +frame-support = { features = ["experimental"], workspace = true } +frame-system = { workspace = true } + +sp-io = { workspace = true } +sp-runtime = { workspace = true } + +[dev-dependencies] +sp-core = { workspace = true } +sp-keyring = { workspace = true, default-features = true } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "log/std", + "scale-info/std", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", +] diff --git a/substrate/frame/examples/authorization-tx-extension/src/extensions.rs b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs new file mode 100644 index 000000000000..035d6e540f71 --- /dev/null +++ b/substrate/frame/examples/authorization-tx-extension/src/extensions.rs @@ -0,0 +1,163 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use core::{fmt, marker::PhantomData}; + +use codec::{Decode, Encode}; +use frame_support::traits::OriginTrait; +use scale_info::TypeInfo; +use sp_runtime::{ + impl_tx_ext_default, + traits::{ + DispatchInfoOf, IdentifyAccount, OriginOf, TransactionExtension, TransactionExtensionBase, + ValidateResult, Verify, + }, + transaction_validity::{InvalidTransaction, ValidTransaction}, +}; + +use crate::pallet_coownership::{Config, Origin}; + +/// Helper struct to organize the data needed for signature verification of both parties involved. 
+#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)]
+pub struct AuthCredentials<Signer, Signature> {
+    first: (Signer, Signature),
+    second: (Signer, Signature),
+}
+
+/// Extension that, if activated by providing a pair of signers and signatures, will authorize a
+/// coowner origin of the two signers. Both signers have to construct their signatures on all of
+/// the data that follows this extension in the `TransactionExtension` pipeline, their implications
+/// and the call. In effect, they re-sign the transaction from this point onwards in the pipeline
+/// by using the `inherited_implication`, as shown below.
+#[derive(Clone, Eq, PartialEq, Encode, Decode, TypeInfo)]
+#[scale_info(skip_type_params(T, Signer, Signature))]
+pub struct AuthorizeCoownership<T, Signer, Signature> {
+    inner: Option<AuthCredentials<Signer, Signature>>,
+    _phantom: PhantomData<T>,
+}
+
+impl<T, Signer, Signature> Default for AuthorizeCoownership<T, Signer, Signature> {
+    fn default() -> Self {
+        Self { inner: None, _phantom: Default::default() }
+    }
+}
+
+impl<T, Signer, Signature> AuthorizeCoownership<T, Signer, Signature> {
+    /// Creates an active extension that will try to authorize the coownership origin.
+    pub fn new(first: (Signer, Signature), second: (Signer, Signature)) -> Self {
+        Self { inner: Some(AuthCredentials { first, second }), _phantom: Default::default() }
+    }
+}
+
+impl<T, Signer, Signature> fmt::Debug for AuthorizeCoownership<T, Signer, Signature> {
+    #[cfg(feature = "std")]
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(f, "AuthorizeCoownership")
+    }
+
+    #[cfg(not(feature = "std"))]
+    fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
+        Ok(())
+    }
+}
+
+impl<T: Config + Send + Sync, Signer, Signature> TransactionExtensionBase
+    for AuthorizeCoownership<T, Signer, Signature>
+where
+    Signer: Clone + Eq + PartialEq + Encode + Decode + TypeInfo + Send + Sync + 'static,
+    Signature: Verify<Signer = Signer>
+        + Clone
+        + Eq
+        + PartialEq
+        + Encode
+        + Decode
+        + TypeInfo
+        + Send
+        + Sync
+        + 'static,
+{
+    const IDENTIFIER: &'static str = "AuthorizeCoownership";
+    type Implicit = ();
+}
+
+impl<T: Config + Send + Sync, Signer, Signature> TransactionExtension<T::RuntimeCall>
+    for AuthorizeCoownership<T, Signer, Signature>
+where
+    Signer: IdentifyAccount<AccountId = T::AccountId>
+        + Clone
+        + Eq
+        + PartialEq
+        + Encode
+        + Decode
+        + TypeInfo
+        + Send
+        + Sync
+        + 'static,
+    Signature: Verify<Signer = Signer>
+        + Clone
+        + Eq
+        + PartialEq
+        + Encode
+        + Decode
+        + TypeInfo
+        + Send
+        + Sync
+        + 'static,
+{
+    type Val = ();
+    type Pre = ();
+
+    fn validate(
+        &self,
+        origin: OriginOf<T::RuntimeCall>,
+        _call: &T::RuntimeCall,
+        _info: &DispatchInfoOf<T::RuntimeCall>,
+        _len: usize,
+        _self_implicit: Self::Implicit,
+        inherited_implication: &impl codec::Encode,
+    ) -> ValidateResult<Self::Val, T::RuntimeCall> {
+        // If the extension is inactive, just move on in the pipeline.
+        let Some(auth) = &self.inner else {
+            return Ok((ValidTransaction::default(), (), origin));
+        };
+        let first_account = auth.first.0.clone().into_account();
+        let second_account = auth.second.0.clone().into_account();
+
+        // Construct the payload to sign using the `inherited_implication`.
+        let msg = inherited_implication.using_encoded(sp_io::hashing::blake2_256);
+
+        // Both parties' signatures must be correct for the origin to be authorized.
+        // In a production environment, we would just return an `InvalidTransaction::BadProof` if a
+        // signature isn't valid, but we return these custom errors to be able to assert them in
+        // tests.
+        if !auth.first.1.verify(&msg[..], &first_account) {
+            Err(InvalidTransaction::Custom(100))?
+        }
+        if !auth.second.1.verify(&msg[..], &second_account) {
+            Err(InvalidTransaction::Custom(200))?
+        }
+        // Construct a `pallet_coownership::Origin`.
+        let local_origin = Origin::Coowners(first_account, second_account);
+        // Mutate the origin to a `pallet_coownership::Origin`, keep the filter.
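+        // (`set_caller_from` only swaps the caller of the origin, so any call filter configured
+        // on the incoming origin is preserved.)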
+        let mut origin = <T as Config>::RuntimeOrigin::from(origin);
+        origin.set_caller_from(<T as Config>::PalletsOrigin::from(local_origin));
+        // Make sure to return the new origin.
+        Ok((ValidTransaction::default(), (), origin.into()))
+    }
+    // We're not doing any special logic in `TransactionExtension::prepare`, so just impl a default.
+    impl_tx_ext_default!(T::RuntimeCall; prepare);
+}
diff --git a/substrate/frame/examples/authorization-tx-extension/src/lib.rs b/substrate/frame/examples/authorization-tx-extension/src/lib.rs
new file mode 100644
index 000000000000..60d198bdebfe
--- /dev/null
+++ b/substrate/frame/examples/authorization-tx-extension/src/lib.rs
@@ -0,0 +1,155 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Authorization Transaction Extension Example Pallet
+//!
+//! **This pallet serves as an example and is not meant to be used in production.**
+//!
+//! FRAME Transaction Extension reference implementation, demonstrating origin mutation, origin
+//! authorization and integration in a `TransactionExtension` pipeline.
+//!
+//! The [TransactionExtension](sp_runtime::traits::TransactionExtension) used in this example is
+//! [AuthorizeCoownership](extensions::AuthorizeCoownership). If activated, the extension will
+//! authorize 2 signers as coowners, with a [coowner origin](pallet_coownership::Origin) specific to
+//! the [coownership example pallet](pallet_coownership), by validating a signature of the rest of
+//! the transaction from each party. This covers any extensions after ours in the pipeline, their
+//! implicits and the actual call. The extension pipeline used in our example checks the genesis
+//! hash, transaction version and mortality of the transaction after `AuthorizeCoownership` runs,
+//! as we want these checks to apply regardless of what origin passes through them, and we want
+//! their implicit data captured by any signature authorization happening earlier in the pipeline.
+//!
+//! In this example, aside from the [AuthorizeCoownership](extensions::AuthorizeCoownership)
+//! extension, we use the following pallets:
+//! - [pallet_coownership] - provides a coowner origin and the functionality to authorize it.
+//! - [pallet_assets] - a dummy asset pallet that tracks assets, identified by an
+//!   [AssetId](pallet_assets::AssetId), and their respective owners, which can be either an
+//!   [account](pallet_assets::Owner::Single) or a [pair of owners](pallet_assets::Owner::Double).
+//!
+//! Assets are created in [pallet_assets] using the
+//! [create_asset](pallet_assets::Call::create_asset) call, which accepts traditionally signed
+//! origins (a single account) or coowner origins, authorized through the
+//! [CoownerOrigin](pallet_assets::Config::CoownerOrigin) type.
+//!
+//! ### Example
+#![doc = docify::embed!("src/tests.rs", create_coowned_asset_works)]
+//!
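+//! A condensed view of the extension pipeline wired up in this example's mock runtime (see
+//! `mock.rs` for the full definition):
+//! ```ignore
+//! pub type TxExtension = (
+//!     // Outer signature, covering everything that follows it in the pipeline.
+//!     VerifyMultiSignature<MultiSignature>,
+//!     CheckNonce<Runtime>,
+//!     // If active, swaps the origin for a `pallet_coownership` coowner origin.
+//!     AuthorizeCoownership<Runtime, MultiSigner, MultiSignature>,
+//!     CheckGenesis<Runtime>,
+//!     CheckTxVersion<Runtime>,
+//!     CheckEra<Runtime>,
+//! );
+//! ```
+//!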
+//! This example does not focus on any pallet logic or syntax, but rather on `TransactionExtension`
+//! functionality. The pallets used are just skeletons to provide storage state and custom origin
+//! choices and requirements, as shown in the examples. Any weight and/or
+//! transaction fee is out of scope for this example.
+
+#![cfg_attr(not(feature = "std"), no_std)]
+
+pub mod extensions;
+
+#[cfg(test)]
+mod mock;
+#[cfg(test)]
+mod tests;
+
+extern crate alloc;
+
+use frame_support::pallet_prelude::*;
+use frame_system::pallet_prelude::*;
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet_coownership {
+    use super::*;
+    use frame_support::traits::OriginTrait;
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        /// The aggregated origin which the dispatch will take.
+        type RuntimeOrigin: OriginTrait<PalletsOrigin = Self::PalletsOrigin>
+            + From<Self::PalletsOrigin>
+            + IsType<<Self as frame_system::Config>::RuntimeOrigin>;
+
+        /// The caller origin, overarching type of all pallets origins.
+        type PalletsOrigin: From<Origin<Self>> + TryInto<Origin<Self>, Error = Self::PalletsOrigin>;
+    }
+
+    #[pallet::pallet]
+    pub struct Pallet<T>(_);
+
+    /// Origin that this pallet can authorize. For the purposes of this example, it's just two
+    /// accounts that own something together.
+    #[pallet::origin]
+    #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
+    pub enum Origin<T: Config> {
+        Coowners(T::AccountId, T::AccountId),
+    }
+}
+
+#[frame_support::pallet(dev_mode)]
+pub mod pallet_assets {
+    use super::*;
+
+    pub type AssetId = u32;
+
+    /// Type that describes possible owners of a particular asset.
+    #[derive(Clone, PartialEq, Eq, RuntimeDebug, Encode, Decode, MaxEncodedLen, TypeInfo)]
+    pub enum Owner<AccountId> {
+        Single(AccountId),
+        Double(AccountId, AccountId),
+    }
+
+    #[pallet::config]
+    pub trait Config: frame_system::Config {
+        /// Type that can authorize an account pair coowner origin.
+        type CoownerOrigin: EnsureOrigin<
+            Self::RuntimeOrigin,
+            Success = (Self::AccountId, Self::AccountId),
+        >;
+    }
+
+    /// Map that holds the owner information for each asset it manages.
+    #[pallet::storage]
+    pub type AssetOwners<T> =
+        StorageMap<_, Blake2_128Concat, AssetId, Owner<<T as frame_system::Config>::AccountId>>;
+
+    #[pallet::pallet]
+    pub struct Pallet<T>(_);
+
+    #[pallet::error]
+    pub enum Error<T> {
+        /// Asset already exists.
+        AlreadyExists,
+    }
+
+    #[pallet::call]
+    impl<T: Config> Pallet<T> {
+        /// Simple call that just creates an asset with a specific `AssetId`. This call will fail if
+        /// there is already an asset with the same `AssetId`.
+        ///
+        /// The origin is either a single account (traditionally signed origin) or a coowner origin.
+        #[pallet::call_index(0)]
+        pub fn create_asset(origin: OriginFor<T>, asset_id: AssetId) -> DispatchResult {
+            let owner: Owner<T::AccountId> = match T::CoownerOrigin::try_origin(origin) {
+                Ok((first, second)) => Owner::Double(first, second),
+                Err(origin) => ensure_signed(origin).map(|account| Owner::Single(account))?,
+            };
+            AssetOwners::<T>::try_mutate(asset_id, |maybe_owner| {
+                if maybe_owner.is_some() {
+                    return Err(Error::<T>::AlreadyExists);
+                }
+                *maybe_owner = Some(owner);
+                Ok(())
+            })?;
+            Ok(())
+        }
+    }
+}
diff --git a/substrate/frame/examples/authorization-tx-extension/src/mock.rs b/substrate/frame/examples/authorization-tx-extension/src/mock.rs
new file mode 100644
index 000000000000..2074d81c48c5
--- /dev/null
+++ b/substrate/frame/examples/authorization-tx-extension/src/mock.rs
@@ -0,0 +1,107 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::*;
+use extensions::AuthorizeCoownership;
+use frame_support::{derive_impl, transaction_extensions::VerifyMultiSignature};
+use frame_system::{CheckEra, CheckGenesis, CheckNonce, CheckTxVersion};
+use sp_runtime::{
+    generic,
+    traits::{BlakeTwo256, IdentifyAccount, IdentityLookup, Verify},
+    BuildStorage, MultiSignature, MultiSigner,
+};
+
+/// Our `TransactionExtension` fit for general transactions.
+pub type TxExtension = (
+    // Validate the signature of regular account transactions (substitutes the old signed
+    // transaction).
+    VerifyMultiSignature<MultiSignature>,
+    // Nonce check (and increment) for the caller.
+    CheckNonce<Runtime>,
+    // If activated, will mutate the origin to a `pallet_coownership` origin of 2 accounts that own
+    // something.
+    AuthorizeCoownership<Runtime, MultiSigner, MultiSignature>,
+    // Some other extensions that we want to run for every possible origin and we want captured in
+    // any and all signature and authorization schemes (such as the traditional account signature
+    // or the double signature in `pallet_coownership`).
+    CheckGenesis<Runtime>,
+    CheckTxVersion<Runtime>,
+    CheckEra<Runtime>,
+);
+/// Convenience type to more easily construct the signature to be signed in case
+/// `AuthorizeCoownership` is activated.
+pub type InnerTxExtension = (CheckGenesis<Runtime>, CheckTxVersion<Runtime>, CheckEra<Runtime>);
+pub type UncheckedExtrinsic =
+    generic::UncheckedExtrinsic<AccountId, RuntimeCall, Signature, TxExtension>;
+pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
+pub type Block = generic::Block<Header, UncheckedExtrinsic>;
+pub type AccountId = <<Signature as Verify>::Signer as IdentifyAccount>::AccountId;
+pub type Signature = MultiSignature;
+pub type BlockNumber = u32;
+
+// For testing the pallet, we construct a mock runtime.
+frame_support::construct_runtime!(
+    pub enum Runtime
+    {
+        System: frame_system,
+
+        Assets: pallet_assets,
+        Coownership: pallet_coownership,
+    }
+);
+
+#[derive_impl(frame_system::config_preludes::TestDefaultConfig)]
+impl frame_system::Config for Runtime {
+    type AccountId = AccountId;
+    type Block = Block;
+    type Lookup = IdentityLookup<AccountId>;
+}
+
+/// Type that enables any pallet to ask for a coowner origin.
+pub struct EnsureCoowner;
+impl EnsureOrigin<RuntimeOrigin> for EnsureCoowner {
+    type Success = (AccountId, AccountId);
+
+    fn try_origin(o: RuntimeOrigin) -> Result<Self::Success, RuntimeOrigin> {
+        match o.clone().into() {
+            Ok(pallet_coownership::Origin::<Runtime>::Coowners(first, second)) =>
+                Ok((first, second)),
+            _ => Err(o),
+        }
+    }
+}
+
+impl pallet_assets::Config for Runtime {
+    type CoownerOrigin = EnsureCoowner;
+}
+
+impl pallet_coownership::Config for Runtime {
+    type RuntimeOrigin = RuntimeOrigin;
+    type PalletsOrigin = OriginCaller;
+}
+
+// This function basically just builds a genesis storage key/value store according to
+// our desired mockup.
+pub fn new_test_ext() -> sp_io::TestExternalities {
+    let t = RuntimeGenesisConfig {
+        // We use default for brevity, but you can configure as desired if needed.
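+        // (Only the `system` genesis config is relevant here; the two example pallets keep no
+        // genesis state.)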
+        system: Default::default(),
+    }
+    .build_storage()
+    .unwrap();
+    t.into()
+}
diff --git a/substrate/frame/examples/authorization-tx-extension/src/tests.rs b/substrate/frame/examples/authorization-tx-extension/src/tests.rs
new file mode 100644
index 000000000000..91d0f61e95db
--- /dev/null
+++ b/substrate/frame/examples/authorization-tx-extension/src/tests.rs
@@ -0,0 +1,269 @@
+// This file is part of Substrate.
+
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Tests for pallet-example-authorization-tx-extension.
+
+use codec::Encode;
+use frame_support::{
+    assert_noop,
+    dispatch::GetDispatchInfo,
+    pallet_prelude::{InvalidTransaction, TransactionValidityError},
+    transaction_extensions::VerifyMultiSignature,
+};
+use sp_keyring::AccountKeyring;
+use sp_runtime::{
+    traits::{Applyable, Checkable, IdentityLookup, TransactionExtensionBase},
+    MultiSignature, MultiSigner,
+};
+
+use crate::{extensions::AuthorizeCoownership, mock::*, pallet_assets};
+
+#[test]
+fn create_asset_works() {
+    new_test_ext().execute_with(|| {
+        let alice_keyring = AccountKeyring::Alice;
+        let alice_account = AccountId::from(alice_keyring.public());
+        // Simple call to create asset with Id `42`.
+        let create_asset_call =
+            RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 });
+        // Create extension that will be used for dispatch.
+        let initial_nonce = 23;
+        let tx_ext = (
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::default(),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create the transaction signature, to be used in the top level `VerifyMultiSignature`
+        // extension.
+        let tx_sign = MultiSignature::Sr25519(
+            (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap())
+                .using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Add the signature to the extension.
+        let tx_ext = (
+            VerifyMultiSignature::new(tx_sign, alice_account.clone()),
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::default(),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create the transaction and we're ready for dispatch.
+        let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext);
+        // Check Extrinsic validity and apply it.
+        let uxt_info = uxt.get_dispatch_info();
+        let uxt_len = uxt.using_encoded(|e| e.len());
+        // Manually pay for Alice's nonce.
+        frame_system::Account::<Runtime>::mutate(&alice_account, |info| {
+            info.nonce = initial_nonce;
+            info.providers = 1;
+        });
+        // Check should pass.
+        let xt = <UncheckedExtrinsic as Checkable<IdentityLookup<AccountId>>>::check(
+            uxt,
+            &Default::default(),
+        )
+        .unwrap();
+        // Apply the extrinsic.
+        let res = xt.apply::<Runtime>(&uxt_info, uxt_len).unwrap();
+
+        // Asserting the results.
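+        // Alice's nonce should have been incremented and asset `42` should be owned solely by
+        // her.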
+        assert_eq!(frame_system::Account::<Runtime>::get(&alice_account).nonce, initial_nonce + 1);
+        assert_eq!(
+            pallet_assets::AssetOwners::<Runtime>::get(42),
+            Some(pallet_assets::Owner::<AccountId>::Single(alice_account))
+        );
+        assert!(res.is_ok());
+    });
+}
+
+#[docify::export]
+#[test]
+fn create_coowned_asset_works() {
+    new_test_ext().execute_with(|| {
+        let alice_keyring = AccountKeyring::Alice;
+        let bob_keyring = AccountKeyring::Bob;
+        let charlie_keyring = AccountKeyring::Charlie;
+        let alice_account = AccountId::from(alice_keyring.public());
+        let bob_account = AccountId::from(bob_keyring.public());
+        let charlie_account = AccountId::from(charlie_keyring.public());
+        // Simple call to create asset with Id `42`.
+        let create_asset_call =
+            RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 });
+        // Create the inner transaction extension, to be signed by our coowners, Alice and Bob.
+        let inner_ext: InnerTxExtension = (
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create the payload Alice and Bob need to sign.
+        let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap());
+        // Create Alice's signature.
+        let alice_inner_sig = MultiSignature::Sr25519(
+            inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Create Bob's signature.
+        let bob_inner_sig = MultiSignature::Sr25519(
+            inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Create the transaction extension, to be signed by the submitter of the extrinsic, let's
+        // have it be Charlie.
+        let initial_nonce = 23;
+        let tx_ext = (
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::new(
+                (alice_keyring.into(), alice_inner_sig.clone()),
+                (bob_keyring.into(), bob_inner_sig.clone()),
+            ),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create Charlie's transaction signature, to be used in the top level
+        // `VerifyMultiSignature` extension.
+        let tx_sign = MultiSignature::Sr25519(
+            (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap())
+                .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Add the signature to the extension.
+        let tx_ext = (
+            VerifyMultiSignature::new(tx_sign, charlie_account.clone()),
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::new(
+                (alice_keyring.into(), alice_inner_sig),
+                (bob_keyring.into(), bob_inner_sig),
+            ),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create the transaction and we're ready for dispatch.
+        let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext);
+        // Check Extrinsic validity and apply it.
+        let uxt_info = uxt.get_dispatch_info();
+        let uxt_len = uxt.using_encoded(|e| e.len());
+        // Manually pay for Charlie's nonce.
+        frame_system::Account::<Runtime>::mutate(&charlie_account, |info| {
+            info.nonce = initial_nonce;
+            info.providers = 1;
+        });
+        // Check should pass.
+        let xt = <UncheckedExtrinsic as Checkable<IdentityLookup<AccountId>>>::check(
+            uxt,
+            &Default::default(),
+        )
+        .unwrap();
+        // Apply the extrinsic.
+        let res = xt.apply::<Runtime>(&uxt_info, uxt_len).unwrap();
+
+        // Asserting the results.
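+        // The dispatch succeeds, Charlie (the submitter) pays for the nonce, and Alice and Bob
+        // are recorded as co-owners of asset `42`.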
+        assert!(res.is_ok());
+        assert_eq!(frame_system::Account::<Runtime>::get(charlie_account).nonce, initial_nonce + 1);
+        assert_eq!(
+            pallet_assets::AssetOwners::<Runtime>::get(42),
+            Some(pallet_assets::Owner::<AccountId>::Double(alice_account, bob_account))
+        );
+    });
+}
+
+#[test]
+fn inner_authorization_works() {
+    new_test_ext().execute_with(|| {
+        let alice_keyring = AccountKeyring::Alice;
+        let bob_keyring = AccountKeyring::Bob;
+        let charlie_keyring = AccountKeyring::Charlie;
+        let charlie_account = AccountId::from(charlie_keyring.public());
+        // Simple call to create asset with Id `42`.
+        let create_asset_call =
+            RuntimeCall::Assets(pallet_assets::Call::create_asset { asset_id: 42 });
+        // Create the inner transaction extension, to be signed by our coowners, Alice and Bob. They
+        // are going to sign this transaction as a mortal one.
+        let inner_ext: InnerTxExtension = (
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            // Sign with mortal era check.
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::mortal(4, 0)),
+        );
+        // Create the payload Alice and Bob need to sign.
+        let inner_payload = (&create_asset_call, &inner_ext, inner_ext.implicit().unwrap());
+        // Create Alice's signature.
+        let alice_inner_sig = MultiSignature::Sr25519(
+            inner_payload.using_encoded(|e| alice_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Create Bob's signature.
+        let bob_inner_sig = MultiSignature::Sr25519(
+            inner_payload.using_encoded(|e| bob_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Create the transaction extension, to be signed by the submitter of the extrinsic, let's
+        // have it be Charlie.
+        let initial_nonce = 23;
+        let tx_ext = (
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::new(
+                (alice_keyring.into(), alice_inner_sig.clone()),
+                (bob_keyring.into(), bob_inner_sig.clone()),
+            ),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            // Construct the transaction as immortal with a different era check.
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create Charlie's transaction signature, to be used in the top level
+        // `VerifyMultiSignature` extension.
+        let tx_sign = MultiSignature::Sr25519(
+            (&create_asset_call, &tx_ext, tx_ext.implicit().unwrap())
+                .using_encoded(|e| charlie_keyring.sign(&sp_io::hashing::blake2_256(e))),
+        );
+        // Add the signature to the extension that Charlie signed.
+        let tx_ext = (
+            VerifyMultiSignature::new(tx_sign, charlie_account.clone()),
+            frame_system::CheckNonce::<Runtime>::from(initial_nonce),
+            AuthorizeCoownership::<Runtime, MultiSigner, MultiSignature>::new(
+                (alice_keyring.into(), alice_inner_sig),
+                (bob_keyring.into(), bob_inner_sig),
+            ),
+            frame_system::CheckGenesis::<Runtime>::new(),
+            frame_system::CheckTxVersion::<Runtime>::new(),
+            // Construct the transaction as immortal with a different era check.
+            frame_system::CheckEra::<Runtime>::from(sp_runtime::generic::Era::immortal()),
+        );
+        // Create the transaction and we're ready for dispatch.
+        let uxt = UncheckedExtrinsic::new_transaction(create_asset_call, tx_ext);
+        // Check Extrinsic validity and apply it.
+        let uxt_info = uxt.get_dispatch_info();
+        let uxt_len = uxt.using_encoded(|e| e.len());
+        // Manually pay for Charlie's nonce.
+        frame_system::Account::<Runtime>::mutate(charlie_account, |info| {
+            info.nonce = initial_nonce;
+            info.providers = 1;
+        });
+        // Check should pass.
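+        // (`check` succeeds; the failure only surfaces on `apply` below, when the transaction
+        // extension pipeline runs `validate`.)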
+        let xt = <UncheckedExtrinsic as Checkable<IdentityLookup<AccountId>>>::check(
+            uxt,
+            &Default::default(),
+        )
+        .unwrap();
+        // The extrinsic should fail as the signature for the `AuthorizeCoownership` doesn't work
+        // for the provided payload with the changed transaction mortality.
+        assert_noop!(
+            xt.apply::<Runtime>(&uxt_info, uxt_len),
+            TransactionValidityError::Invalid(InvalidTransaction::Custom(100))
+        );
+    });
+}
diff --git a/substrate/frame/examples/src/lib.rs b/substrate/frame/examples/src/lib.rs
index dee23a41379f..d0d30830f2f0 100644
--- a/substrate/frame/examples/src/lib.rs
+++ b/substrate/frame/examples/src/lib.rs
@@ -40,12 +40,16 @@
 //! - [`pallet_example_split`]: A simple example of a FRAME pallet demonstrating the ability to
 //!   split sections across multiple files.
 //!
-//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be
-//!   built using only the `frame` umbrella crate.
+//! - [`pallet_example_frame_crate`]: Example pallet showcasing how one can be built using only the
+//!   `frame` umbrella crate.
 //!
 //! - [`pallet_example_single_block_migrations`]: An example pallet demonstrating best-practices for
 //!   writing storage migrations.
 //!
 //! - [`pallet_example_tasks`]: This pallet demonstrates the use of `Tasks` to execute service work.
 //!
+//! - [`pallet_example_authorization_tx_extension`]: An example `TransactionExtension` that
+//!   authorizes a custom origin through signature validation, along with two support pallets to
+//!   showcase the usage.
+//!
 //! **Tip**: Use `cargo doc --package <pallet-name> --open` to view each pallet's documentation.
diff --git a/substrate/frame/support/src/transaction_extensions.rs b/substrate/frame/support/src/transaction_extensions.rs
index b3dbbfe1acd1..56a3560b636a 100644
--- a/substrate/frame/support/src/transaction_extensions.rs
+++ b/substrate/frame/support/src/transaction_extensions.rs
@@ -46,6 +46,17 @@ where
     account: <V::Signer as IdentifyAccount>::AccountId,
 }
 
+impl<V: Verify> VerifyMultiSignature<V>
+where
+    V: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo,
+    <V::Signer as IdentifyAccount>::AccountId:
+        Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo,
+{
+    pub fn new(signature: V, account: <V::Signer as IdentifyAccount>::AccountId) -> Self {
+        Self { signature, account }
+    }
+}
+
 impl<V: Verify> TransactionExtensionBase for VerifyMultiSignature<V>
 where
     V: Codec + Debug + Sync + Send + Clone + Eq + PartialEq + StaticTypeInfo,
diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs
index 94b727e64260..94ecab1f9c6e 100644
--- a/substrate/primitives/runtime/src/traits/mod.rs
+++ b/substrate/primitives/runtime/src/traits/mod.rs
@@ -232,8 +232,14 @@ pub trait StaticLookup {
 }
 
 /// A lookup implementation returning the input value.
-#[derive(Default, Clone, Copy, PartialEq, Eq)]
+#[derive(Clone, Copy, PartialEq, Eq)]
 pub struct IdentityLookup<T>(PhantomData<T>);
+impl<T> Default for IdentityLookup<T> {
+    fn default() -> Self {
+        Self(PhantomData::<T>::default())
+    }
+}
+
 impl<T: Codec + Clone + PartialEq + Debug + TypeInfo> StaticLookup for IdentityLookup<T> {
     type Source = T;
     type Target = T;

From b2724194b05ac87e128108be42d00ab174443349 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 13 Aug 2024 19:17:22 +0300
Subject: [PATCH 33/74] Fix toml formatting

Signed-off-by: georgepisaltu
---
 Cargo.toml                          | 3 ++-
 substrate/frame/examples/Cargo.toml | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 513cf2294caf..9aff12d2b092 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -346,6 +346,7 @@ members = [
 	"substrate/frame/election-provider-support/solution-type/fuzzer",
 	"substrate/frame/elections-phragmen",
 	"substrate/frame/examples",
+	"substrate/frame/examples/authorization-tx-extension",
 	"substrate/frame/examples/basic",
 	"substrate/frame/examples/default-config",
 	"substrate/frame/examples/dev-mode",
@@ -906,6 +907,7 @@ pallet-dev-mode = { path = "substrate/frame/examples/dev-mode", default-features
 pallet-election-provider-multi-phase = { path = "substrate/frame/election-provider-multi-phase", default-features = false }
 pallet-election-provider-support-benchmarking = { path = "substrate/frame/election-provider-support/benchmarking", default-features = false }
 pallet-elections-phragmen = { path = "substrate/frame/elections-phragmen", default-features = false }
+pallet-example-authorization-tx-extension = { path = "substrate/frame/examples/authorization-tx-extension", default-features = false }
 pallet-example-basic = { path = "substrate/frame/examples/basic", default-features = false }
 pallet-example-frame-crate = { path = "substrate/frame/examples/frame-crate", default-features = false }
 pallet-example-kitchensink = { path = "substrate/frame/examples/kitchensink", default-features = false }
@@ -914,7 +916,6 @@ pallet-example-offchain-worker = { path = "substrate/frame/examples/offchain-wor
 pallet-example-single-block-migrations = { path = "substrate/frame/examples/single-block-migrations", default-features = false }
 pallet-example-split = { path = "substrate/frame/examples/split", default-features = false }
 pallet-example-tasks = { path = "substrate/frame/examples/tasks", default-features = false }
-pallet-example-authorization-tx-extension = { path = "substrate/frame/examples/authorization-tx-extension", default-features = false }
 pallet-examples = { path = "substrate/frame/examples" }
 pallet-fast-unstake = { path = "substrate/frame/fast-unstake", default-features = false }
 pallet-glutton = { path = "substrate/frame/glutton", default-features = false }
diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml
index cb04e890b56a..2f4af1cc0220 100644
--- a/substrate/frame/examples/Cargo.toml
+++ b/substrate/frame/examples/Cargo.toml
@@ -32,6 +32,7 @@ default = ["std"]
 std = [
 	"pallet-default-config-example/std",
 	"pallet-dev-mode/std",
+	"pallet-example-authorization-tx-extension/std",
 	"pallet-example-basic/std",
 	"pallet-example-frame-crate/std",
 	"pallet-example-kitchensink/std",
@@ -39,7 +40,6 @@ std = [
 	"pallet-example-single-block-migrations/std",
 	"pallet-example-split/std",
 	"pallet-example-tasks/std",
-	"pallet-example-authorization-tx-extension/std",
 ]
 try-runtime = [
 	"pallet-default-config-example/try-runtime",

From 059a2638ad2bfadfca9f9c1f637b64c7a8c63dc7 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date:
Tue, 13 Aug 2024 19:28:55 +0300
Subject: [PATCH 34/74] Fix features

Signed-off-by: georgepisaltu
---
 .../frame/examples/authorization-tx-extension/src/mock.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/substrate/frame/examples/authorization-tx-extension/src/mock.rs b/substrate/frame/examples/authorization-tx-extension/src/mock.rs
index 2074d81c48c5..80e5b998fe47 100644
--- a/substrate/frame/examples/authorization-tx-extension/src/mock.rs
+++ b/substrate/frame/examples/authorization-tx-extension/src/mock.rs
@@ -83,6 +83,11 @@ impl EnsureOrigin<RuntimeOrigin> for EnsureCoowner {
             _ => Err(o),
         }
     }
+
+    #[cfg(feature = "runtime-benchmarks")]
+    fn try_successful_origin() -> Result<RuntimeOrigin, ()> {
+        unimplemented!()
+    }
 }
 
 impl pallet_assets::Config for Runtime {

From 055eb5377da43eaced23647ed4348a816bfeb8f4 Mon Sep 17 00:00:00 2001
From: Sebastian Kunert
Date: Tue, 13 Aug 2024 21:57:23 +0200
Subject: [PATCH 35/74] StorageWeightReclaim: set to node pov size if higher
 (#5281)

This PR adds an additional defensive check to the reclaim SE.

Since it can happen that we miss some storage accesses on other SEs
pre-dispatch, we should double-check that the bookkeeping of the runtime
stays ahead of the node-side pov-size. If we discover a mismatch and the
node-side pov-size is indeed higher, we should set the runtime
bookkeeping to the node-side value.

In cases such as #5229, we would stop including extrinsics and not run
`on_idle` at least.

cc @gui1117

---------

Co-authored-by: command-bot <>
---
 .../storage-weight-reclaim/src/lib.rs | 96 ++++++++++++++++++-
 prdoc/pr_5281.prdoc                   | 17 ++++
 substrate/frame/system/src/lib.rs     |  2 +-
 3 files changed, 112 insertions(+), 3 deletions(-)
 create mode 100644 prdoc/pr_5281.prdoc

diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
index 5984fa77a2c5..a557e881e26b 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs
@@ -174,6 +174,9 @@ where
         let storage_size_diff = benchmarked_weight.abs_diff(consumed_weight as u64);
 
+        let extrinsic_len = frame_system::AllExtrinsicsLen::<T>::get().unwrap_or(0);
+        let node_side_pov_size = post_dispatch_proof_size.saturating_add(extrinsic_len.into());
+
         // This value will be reclaimed by [`frame_system::CheckWeight`], so we need to calculate
         // that in.
         frame_system::BlockWeight::<T>::mutate(|current| {
@@ -190,6 +193,19 @@ where
             );
             current.reduce(Weight::from_parts(0, storage_size_diff), info.class)
         }
+
+        // If we encounter a situation where the node-side proof size is already higher than
+        // what we have in the runtime bookkeeping, we add the difference to the `BlockWeight`.
+        // This ensures the runtime bookkeeping does not fall behind the node-side proof size.
+        let block_weight_proof_size = current.total().proof_size();
+        let missing_from_node = node_side_pov_size.saturating_sub(block_weight_proof_size);
+        if missing_from_node > 0 {
+            log::warn!(
+                target: LOG_TARGET,
+                "Node-side PoV size higher than runtime proof size weight. node-side: {node_side_pov_size} extrinsic_len: {extrinsic_len} runtime: {block_weight_proof_size}, missing: {missing_from_node}. Setting to node-side proof size."
+            );
+            current.accrue(Weight::from_parts(0, missing_from_node), info.class);
+        }
         });
         Ok(())
     }
@@ -332,6 +348,82 @@ mod tests {
         })
     }
 
+    #[test]
+    fn sets_to_node_storage_proof_if_higher() {
+        // The storage proof reported by the proof recorder is higher than what is stored on
+        // the runtime side.
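+        // In that case `post_dispatch` must bump the runtime bookkeeping up to the node-side
+        // proof size plus the extrinsic length (1005 + 150 = 1155 in the assertions below).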
+ { + let mut test_ext = setup_test_externalities(&[1000, 1005]); + + test_ext.execute_with(|| { + // Stored in BlockWeight is 5 + set_current_storage_weight(5); + + // Benchmarked storage weight: 10 + let info = DispatchInfo { weight: Weight::from_parts(0, 10), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(1000)); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof size (1005) + + // extrinsics length (150) + assert_eq!(get_storage_weight().total().proof_size(), 1155); + }) + } + + // In this second scenario the proof size on the node side is only lower + // after reclaim happened. + { + let mut test_ext = setup_test_externalities(&[175, 180]); + test_ext.execute_with(|| { + set_current_storage_weight(85); + + // Benchmarked storage weight: 100 + let info = + DispatchInfo { weight: Weight::from_parts(0, 100), ..Default::default() }; + let post_info = PostDispatchInfo::default(); + + // After this pre_dispatch, the BlockWeight proof size will be + // 85 (initial) + 100 (benched) + 150 (tx length) = 335 + assert_ok!(CheckWeight::::do_pre_dispatch(&info, LEN)); + + let pre = StorageWeightReclaim::(PhantomData) + .pre_dispatch(&ALICE, CALL, &info, LEN) + .unwrap(); + assert_eq!(pre, Some(175)); + + assert_ok!(CheckWeight::::post_dispatch(None, &info, &post_info, 0, &Ok(()))); + + // First we will reclaim 95, which leaves us with 240 BlockWeight. This is lower + // than 180 (proof size hf) + 150 (length), so we expect it to be set to 330. + assert_ok!(StorageWeightReclaim::::post_dispatch( + Some(pre), + &info, + &post_info, + LEN, + &Ok(()) + )); + + // We expect that the storage weight was set to the node-side proof weight + assert_eq!(get_storage_weight().total().proof_size(), 330); + }) + } + } + #[test] fn does_nothing_without_extension() { let mut test_ext = new_test_ext(); @@ -545,7 +637,7 @@ mod tests { #[test] fn test_nothing_relcaimed() { - let mut test_ext = setup_test_externalities(&[100, 200]); + let mut test_ext = setup_test_externalities(&[0, 100]); test_ext.execute_with(|| { set_current_storage_weight(0); @@ -568,7 +660,7 @@ mod tests { .pre_dispatch(&ALICE, CALL, &info, LEN) .unwrap(); // Should return `setup_test_externalities` proof recorder value: 100. - assert_eq!(pre, Some(100)); + assert_eq!(pre, Some(0)); // The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo` // we always need to call `post_dispatch` to verify that they interoperate correctly. diff --git a/prdoc/pr_5281.prdoc b/prdoc/pr_5281.prdoc new file mode 100644 index 000000000000..60feab412aff --- /dev/null +++ b/prdoc/pr_5281.prdoc @@ -0,0 +1,17 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: PoV-Reclaim - Set `BlockWeight` to node-side PoV size if mismatch is detected + +doc: + - audience: Runtime Dev + description: | + After this change, the `StorageWeightReclaim` `SignedExtension` will check the node-side PoV size after every + extrinsic. 
If we detect a case where the returned proof size is higher than the `BlockWeight` value of the + runtime, we set `BlockWeight` to the size returned from the node. + +crates: + - name: cumulus-primitives-storage-weight-reclaim + bump: patch + - name: frame-system + bump: minor diff --git a/substrate/frame/system/src/lib.rs b/substrate/frame/system/src/lib.rs index 0c6ff2cb8ddb..abacfa7b62cc 100644 --- a/substrate/frame/system/src/lib.rs +++ b/substrate/frame/system/src/lib.rs @@ -917,7 +917,7 @@ pub mod pallet { /// Total length (in bytes) for all extrinsics put together, for the current block. #[pallet::storage] - pub(super) type AllExtrinsicsLen = StorageValue<_, u32>; + pub type AllExtrinsicsLen = StorageValue<_, u32>; /// Map of block numbers to block hashes. #[pallet::storage] From 42eb4ec0ad9ab32385bbaefa572c79acc5fbf27d Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Tue, 13 Aug 2024 23:26:50 +0200 Subject: [PATCH 36/74] [Pools] Ensure members can always exit the pool gracefully (#4998) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Resolves https://github.com/paritytech-secops/srlabs_findings/issues/412 ## Changes - Clear any dangling delegation when member is removed. - Agents need to be killed explicitly when pools are destroyed. - Member withdraw amount is max of their locked funds and the value of their points. --------- Co-authored-by: Gonçalo Pestana Co-authored-by: command-bot <> --- prdoc/pr_4998.prdoc | 20 ++ .../frame/delegated-staking/src/impls.rs | 22 +- substrate/frame/delegated-staking/src/lib.rs | 56 ++- .../frame/delegated-staking/src/tests.rs | 4 +- .../frame/delegated-staking/src/types.rs | 19 +- .../frame/nomination-pools/src/adapter.rs | 51 ++- substrate/frame/nomination-pools/src/lib.rs | 47 ++- substrate/frame/nomination-pools/src/tests.rs | 36 +- .../test-delegate-stake/src/lib.rs | 324 +++++++++++++++++- .../test-delegate-stake/src/mock.rs | 16 +- .../test-transfer-stake/src/lib.rs | 14 +- substrate/primitives/staking/src/lib.rs | 23 +- 12 files changed, 500 insertions(+), 132 deletions(-) create mode 100644 prdoc/pr_4998.prdoc diff --git a/prdoc/pr_4998.prdoc b/prdoc/pr_4998.prdoc new file mode 100644 index 000000000000..41e3886405cc --- /dev/null +++ b/prdoc/pr_4998.prdoc @@ -0,0 +1,20 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Ensure members can always exit the pool gracefully + +doc: + - audience: Runtime Dev + description: | + Ensures when a member wants to withdraw all funds but the pool is not able to provide all their funds, the member + can receive as much as possible and exit pool. Also handles cases where some extra funds held in member's account + is released when they are removed. 
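+      The released amount is surfaced by the new `released_balance` field of the
+      `MemberRemoved` event.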
+ +crates: + - name: pallet-delegated-staking + bump: patch + - name: pallet-nomination-pools + bump: major + - name: sp-staking + bump: major + diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs index f8df9dfe7b46..8c05b0bcfc22 100644 --- a/substrate/frame/delegated-staking/src/impls.rs +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -32,28 +32,34 @@ impl DelegationInterface for Pallet { .ok() } + fn agent_transferable_balance(agent: Agent) -> Option { + AgentLedgerOuter::::get(&agent.get()) + .map(|a| a.ledger.unclaimed_withdrawals) + .ok() + } + fn delegator_balance(delegator: Delegator) -> Option { Delegation::::get(&delegator.get()).map(|d| d.amount) } /// Delegate funds to an `Agent`. - fn delegate( - who: Delegator, + fn register_agent( agent: Agent, reward_account: &Self::AccountId, - amount: Self::Balance, ) -> DispatchResult { Pallet::::register_agent( RawOrigin::Signed(agent.clone().get()).into(), reward_account.clone(), - )?; + ) + } - // Delegate the funds from who to the `Agent` account. - Pallet::::delegate_to_agent(RawOrigin::Signed(who.get()).into(), agent.get(), amount) + /// Remove `Agent` registration. + fn remove_agent(agent: Agent) -> DispatchResult { + Pallet::::remove_agent(RawOrigin::Signed(agent.clone().get()).into()) } /// Add more delegation to the `Agent` account. - fn delegate_extra( + fn delegate( who: Delegator, agent: Agent, amount: Self::Balance, @@ -118,7 +124,7 @@ impl DelegationMigrator for Pallet { /// Only used for testing. #[cfg(feature = "runtime-benchmarks")] - fn drop_agent(agent: Agent) { + fn migrate_to_direct_staker(agent: Agent) { >::remove(agent.clone().get()); >::iter() .filter(|(_, delegation)| delegation.agent == agent.clone().get()) diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 8203f7513305..1882989cfa7d 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -304,6 +304,27 @@ pub mod pallet { Ok(()) } + /// Remove an account from being an `Agent`. + /// + /// This can only be called if the agent has no delegated funds, no pending slashes and no + /// unclaimed withdrawals. + pub fn remove_agent(origin: OriginFor) -> DispatchResult { + let who = ensure_signed(origin)?; + let ledger = AgentLedger::::get(&who).ok_or(Error::::NotAgent)?; + + ensure!( + ledger.total_delegated == Zero::zero() && + ledger.pending_slash == Zero::zero() && + ledger.unclaimed_withdrawals == Zero::zero(), + Error::::NotAllowed + ); + + // remove provider reference + let _ = frame_system::Pallet::::dec_providers(&who)?; + >::remove(who); + Ok(()) + } + /// Migrate from a `Nominator` account to `Agent` account. /// /// The origin needs to @@ -599,44 +620,19 @@ impl Pallet { ensure!(delegation.agent == agent, Error::::NotAgent); ensure!(delegation.amount >= amount, Error::::NotEnoughFunds); - // if we do not already have enough funds to be claimed, try withdraw some more. - // keep track if we killed the staker in the process. - let stash_killed = if agent_ledger.ledger.unclaimed_withdrawals < amount { + // if we do not already have enough funds to be claimed, try to withdraw some more. + if agent_ledger.ledger.unclaimed_withdrawals < amount { // withdraw account. 
- let killed = T::CoreStaking::withdraw_unbonded(agent.clone(), num_slashing_spans) + let _ = T::CoreStaking::withdraw_unbonded(agent.clone(), num_slashing_spans) .map_err(|_| Error::::WithdrawFailed)?; // reload agent from storage since withdrawal might have changed the state. agent_ledger = agent_ledger.reload()?; - Some(killed) - } else { - None - }; + } // if we still do not have enough funds to release, abort. ensure!(agent_ledger.ledger.unclaimed_withdrawals >= amount, Error::::NotEnoughFunds); + agent_ledger.remove_unclaimed_withdraw(amount)?.update(); - // Claim withdraw from agent. Kill agent if no delegation left. - // TODO: Ideally if there is a register, there should be an unregister that should - // clean up the agent. Can be improved in future. - if agent_ledger.remove_unclaimed_withdraw(amount)?.update_or_kill()? { - match stash_killed { - Some(killed) => { - // this implies we did a `CoreStaking::withdraw` before release. Ensure - // we killed the staker as well. - ensure!(killed, Error::::BadState); - }, - None => { - // We did not do a `CoreStaking::withdraw` before release. Ensure staker is - // already killed in `CoreStaking`. - ensure!(T::CoreStaking::status(&agent).is_err(), Error::::BadState); - }, - } - - // Remove provider reference for `who`. - let _ = frame_system::Pallet::::dec_providers(&agent).defensive(); - } - - // book keep delegation delegation.amount = delegation .amount .checked_sub(&amount) diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index ade0872dd390..bc8bc2dccc3c 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -943,7 +943,7 @@ mod pool_integration { vec![ PoolsEvent::Withdrawn { member: 302, pool_id, balance: 100, points: 100 }, PoolsEvent::Withdrawn { member: 303, pool_id, balance: 200, points: 200 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 303 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 303, released_balance: 0 }, ] ); @@ -1055,7 +1055,7 @@ mod pool_integration { balance: creator_stake, points: creator_stake, }, - PoolsEvent::MemberRemoved { pool_id, member: creator }, + PoolsEvent::MemberRemoved { pool_id, member: creator, released_balance: 0 }, PoolsEvent::Destroyed { pool_id }, ] ); diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs index 24b457356544..aff282774c3c 100644 --- a/substrate/frame/delegated-staking/src/types.rs +++ b/substrate/frame/delegated-staking/src/types.rs @@ -251,25 +251,10 @@ impl AgentLedgerOuter { self.ledger.update(&key) } - /// Save self and remove if no delegation left. - /// - /// Returns: - /// - true if agent killed. - /// - error if the delegate is in an unexpected state. - pub(crate) fn update_or_kill(self) -> Result { + /// Update agent ledger. + pub(crate) fn update(self) { let key = self.key; - // see if delegate can be killed - if self.ledger.total_delegated == Zero::zero() { - ensure!( - self.ledger.unclaimed_withdrawals == Zero::zero() && - self.ledger.pending_slash == Zero::zero(), - Error::::BadState - ); - >::remove(key); - return Ok(true) - } self.ledger.update(&key); - Ok(false) } /// Reloads self from storage. 
diff --git a/substrate/frame/nomination-pools/src/adapter.rs b/substrate/frame/nomination-pools/src/adapter.rs index 4d571855e4fe..272b3b60612b 100644 --- a/substrate/frame/nomination-pools/src/adapter.rs +++ b/substrate/frame/nomination-pools/src/adapter.rs @@ -106,9 +106,11 @@ pub trait StakeStrategy { /// Balance that can be transferred from pool account to member. /// - /// This is part of the pool balance that is not actively staked. That is, tokens that are - /// in unbonding period or unbonded. - fn transferable_balance(pool_account: Pool) -> Self::Balance; + /// This is part of the pool balance that can be withdrawn. + fn transferable_balance( + pool_account: Pool, + member_account: Member, + ) -> Self::Balance; /// Total balance of the pool including amount that is actively staked. fn total_balance(pool_account: Pool) -> Option; @@ -181,6 +183,9 @@ pub trait StakeStrategy { num_slashing_spans: u32, ) -> DispatchResult; + /// Dissolve the pool account. + fn dissolve(pool_account: Pool) -> DispatchResult; + /// Check if there is any pending slash for the pool. fn pending_slash(pool_account: Pool) -> Self::Balance; @@ -253,12 +258,15 @@ impl, AccountId = T: StakeStrategyType::Transfer } - fn transferable_balance(pool_account: Pool) -> BalanceOf { + fn transferable_balance( + pool_account: Pool, + _: Member, + ) -> BalanceOf { T::Currency::balance(&pool_account.0).saturating_sub(Self::active_stake(pool_account)) } fn total_balance(pool_account: Pool) -> Option> { - Some(T::Currency::total_balance(&pool_account.0)) + Some(T::Currency::total_balance(&pool_account.get())) } fn member_delegation_balance( @@ -300,6 +308,17 @@ impl, AccountId = T: Ok(()) } + fn dissolve(pool_account: Pool) -> DispatchResult { + defensive_assert!( + T::Currency::total_balance(&pool_account.clone().get()).is_zero(), + "dissolving pool should not have any balance" + ); + + // Defensively force set balance to zero. + T::Currency::set_balance(&pool_account.get(), Zero::zero()); + Ok(()) + } + fn pending_slash(_: Pool) -> Self::Balance { // for transfer stake strategy, slashing is greedy and never deferred. Zero::zero() @@ -360,11 +379,14 @@ impl< StakeStrategyType::Delegate } - fn transferable_balance(pool_account: Pool) -> BalanceOf { - Delegation::agent_balance(pool_account.clone().into()) + fn transferable_balance( + pool_account: Pool, + member_account: Member, + ) -> BalanceOf { + Delegation::agent_transferable_balance(pool_account.clone().into()) // pool should always be an agent. .defensive_unwrap_or_default() - .saturating_sub(Self::active_stake(pool_account)) + .min(Delegation::delegator_balance(member_account.into()).unwrap_or_default()) } fn total_balance(pool_account: Pool) -> Option> { @@ -384,12 +406,13 @@ impl< ) -> DispatchResult { match bond_type { BondType::Create => { - // first delegation - Delegation::delegate(who.into(), pool_account.into(), reward_account, amount) + // first delegation. Register agent first. 
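+                // (Agent registration is an explicit step now: `delegate` no longer registers
+                // the agent implicitly, and the registration is cleaned up via
+                // `remove_agent`/`dissolve` when the pool is destroyed.)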
+ Delegation::register_agent(pool_account.clone().into(), reward_account)?; + Delegation::delegate(who.into(), pool_account.into(), amount) }, BondType::Extra => { // additional delegation - Delegation::delegate_extra(who.into(), pool_account.into(), amount) + Delegation::delegate(who.into(), pool_account.into(), amount) }, } } @@ -403,6 +426,10 @@ impl< Delegation::withdraw_delegation(who.into(), pool_account.into(), amount, num_slashing_spans) } + fn dissolve(pool_account: Pool) -> DispatchResult { + Delegation::remove_agent(pool_account.into()) + } + fn pending_slash(pool_account: Pool) -> Self::Balance { Delegation::pending_slash(pool_account.into()).defensive_unwrap_or_default() } @@ -433,6 +460,6 @@ impl< #[cfg(feature = "runtime-benchmarks")] fn remove_as_agent(pool: Pool) { - Delegation::drop_agent(pool.into()) + Delegation::migrate_to_direct_staker(pool.into()) } } diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 70ad06e2a4da..9108060ccb2a 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -1831,7 +1831,9 @@ pub mod pallet { /// A member has been removed from a pool. /// /// The removal can be voluntary (withdrawn all unbonded funds) or involuntary (kicked). - MemberRemoved { pool_id: PoolId, member: T::AccountId }, + /// Any funds that are still delegated (i.e. dangling delegation) are released and are + /// represented by `released_balance`. + MemberRemoved { pool_id: PoolId, member: T::AccountId, released_balance: BalanceOf }, /// The roles of a pool have been updated to the given new roles. Note that the depositor /// can never change. RolesUpdated { @@ -2342,9 +2344,10 @@ pub mod pallet { // don't exist. This check is also defensive in cases where the unbond pool does not // update its balance (e.g. a bug in the slashing hook.) We gracefully proceed in // order to ensure members can leave the pool and it can be destroyed. - .min(T::StakeAdapter::transferable_balance(Pool::from( - bonded_pool.bonded_account(), - ))); + .min(T::StakeAdapter::transferable_balance( + Pool::from(bonded_pool.bonded_account()), + Member::from(member_account.clone()), + )); // this can fail if the pool uses `DelegateStake` strategy and the member delegation // is not claimed yet. See `Call::migrate_delegation()`. @@ -2368,9 +2371,27 @@ pub mod pallet { // member being reaped. PoolMembers::::remove(&member_account); + + // Ensure any dangling delegation is withdrawn. + let dangling_withdrawal = match T::StakeAdapter::member_delegation_balance( + Member::from(member_account.clone()), + ) { + Some(dangling_delegation) => { + T::StakeAdapter::member_withdraw( + Member::from(member_account.clone()), + Pool::from(bonded_pool.bonded_account()), + dangling_delegation, + num_slashing_spans, + )?; + dangling_delegation + }, + None => Zero::zero(), + }; + Self::deposit_event(Event::::MemberRemoved { pool_id: member.pool_id, member: member_account.clone(), + released_balance: dangling_withdrawal, }); if member_account == bonded_pool.roles.depositor { @@ -3078,16 +3099,11 @@ impl Pallet { T::Currency::total_balance(&reward_account) == Zero::zero(), "could not transfer all amount to depositor while dissolving pool" ); - defensive_assert!( - T::StakeAdapter::total_balance(Pool::from(bonded_pool.bonded_account())) - .unwrap_or_default() == - Zero::zero(), - "dissolving pool should not have any balance" - ); // NOTE: Defensively force set balance to zero. 
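+        // (Only the reward account is force-cleared here; the bonded account is now torn down
+        // through `StakeAdapter::dissolve` below, which also removes the agent registration
+        // under the `DelegateStake` strategy.)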
T::Currency::set_balance(&reward_account, Zero::zero()); - // NOTE: With `DelegateStake` strategy, this won't do anything. - T::Currency::set_balance(&bonded_pool.bonded_account(), Zero::zero()); + + // dissolve pool account. + let _ = T::StakeAdapter::dissolve(Pool::from(bonded_account)).defensive(); Self::deposit_event(Event::::Destroyed { pool_id: bonded_pool.id }); // Remove bonded pool metadata. @@ -3525,13 +3541,8 @@ impl Pallet { // this is their balance in the pool let expected_balance = pool_member.total_balance(); - defensive_assert!( - actual_balance >= expected_balance, - "actual balance should always be greater or equal to the expected" - ); - // return the amount to be slashed. - Ok(actual_balance.defensive_saturating_sub(expected_balance)) + Ok(actual_balance.saturating_sub(expected_balance)) } /// Apply freeze on reward account to restrict it from going below ED. diff --git a/substrate/frame/nomination-pools/src/tests.rs b/substrate/frame/nomination-pools/src/tests.rs index 28063c2ecaec..06261699a5b2 100644 --- a/substrate/frame/nomination-pools/src/tests.rs +++ b/substrate/frame/nomination-pools/src/tests.rs @@ -2364,10 +2364,10 @@ mod claim_payout { Event::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, Event::Unbonded { member: 20, pool_id: 1, balance: 20, points: 20, era: 3 }, Event::Withdrawn { member: 20, pool_id: 1, balance: 20, points: 20 }, - Event::MemberRemoved { pool_id: 1, member: 20 }, + Event::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 }, Event::Unbonded { member: 10, pool_id: 1, balance: 10, points: 10, era: 6 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 10, points: 10 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 } ] ); @@ -2939,9 +2939,9 @@ mod unbond { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 40, pool_id: 1, points: 6, balance: 6 }, - Event::MemberRemoved { pool_id: 1, member: 40 }, + Event::MemberRemoved { pool_id: 1, member: 40, released_balance: 0 }, Event::Withdrawn { member: 550, pool_id: 1, points: 92, balance: 92 }, - Event::MemberRemoved { pool_id: 1, member: 550 }, + Event::MemberRemoved { pool_id: 1, member: 550, released_balance: 0 }, Event::PaidOut { member: 10, pool_id: 1, payout: 10 }, Event::Unbonded { member: 10, pool_id: 1, points: 2, balance: 2, era: 6 } ] @@ -3688,7 +3688,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 550, pool_id: 1, balance: 275, points: 550 }, - Event::MemberRemoved { pool_id: 1, member: 550 } + Event::MemberRemoved { pool_id: 1, member: 550, released_balance: 0 } ] ); assert_eq!( @@ -3709,7 +3709,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 40, pool_id: 1, balance: 20, points: 40 }, - Event::MemberRemoved { pool_id: 1, member: 40 } + Event::MemberRemoved { pool_id: 1, member: 40, released_balance: 0 } ] ); assert_eq!( @@ -3731,7 +3731,7 @@ mod withdraw_unbonded { vec![ Event::Unbonded { member: 10, pool_id: 1, balance: 5, points: 5, era: 9 }, Event::Withdrawn { member: 10, pool_id: 1, balance: 5, points: 5 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 } ] ); @@ -3806,7 +3806,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 40, pool_id: 1, balance: 20, points: 20 }, - Event::MemberRemoved { pool_id: 1, 
member: 40 } + Event::MemberRemoved { pool_id: 1, member: 40, released_balance: 0 } ] ); @@ -3827,7 +3827,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 550, pool_id: 1, balance: 275, points: 275 }, - Event::MemberRemoved { pool_id: 1, member: 550 } + Event::MemberRemoved { pool_id: 1, member: 550, released_balance: 0 } ] ); assert!(SubPoolsStorage::::get(1).unwrap().with_era.is_empty()); @@ -3862,7 +3862,7 @@ mod withdraw_unbonded { vec![ Event::Unbonded { member: 10, pool_id: 1, points: 5, balance: 5, era: 6 }, Event::Withdrawn { member: 10, pool_id: 1, points: 5, balance: 5 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 } ] ); @@ -4018,9 +4018,9 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 100, pool_id: 1, points: 100, balance: 100 }, - Event::MemberRemoved { pool_id: 1, member: 100 }, + Event::MemberRemoved { pool_id: 1, member: 100, released_balance: 0 }, Event::Withdrawn { member: 200, pool_id: 1, points: 200, balance: 200 }, - Event::MemberRemoved { pool_id: 1, member: 200 } + Event::MemberRemoved { pool_id: 1, member: 200, released_balance: 0 } ] ); }); @@ -4070,7 +4070,7 @@ mod withdraw_unbonded { Event::Bonded { member: 100, pool_id: 1, bonded: 100, joined: true }, Event::Unbonded { member: 100, pool_id: 1, points: 100, balance: 100, era: 3 }, Event::Withdrawn { member: 100, pool_id: 1, points: 100, balance: 100 }, - Event::MemberRemoved { pool_id: 1, member: 100 } + Event::MemberRemoved { pool_id: 1, member: 100, released_balance: 0 } ] ); }); @@ -4310,7 +4310,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 100, pool_id: 1, points: 25, balance: 25 }, - Event::MemberRemoved { pool_id: 1, member: 100 } + Event::MemberRemoved { pool_id: 1, member: 100, released_balance: 0 } ] ); }) @@ -4548,7 +4548,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 10, pool_id: 1, points: 13, balance: 13 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 }, ] ); @@ -4588,7 +4588,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 20, pool_id: 1, balance: 20, points: 20 }, - Event::MemberRemoved { pool_id: 1, member: 20 } + Event::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 } ] ); @@ -4626,7 +4626,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 }, ] ); @@ -4672,7 +4672,7 @@ mod withdraw_unbonded { pool_events_since_last_call(), vec![ Event::Withdrawn { member: 10, pool_id: 1, points: 10, balance: 10 }, - Event::MemberRemoved { pool_id: 1, member: 10 }, + Event::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, Event::Destroyed { pool_id: 1 }, ] ); diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index 51f6470f90d0..a50a6b73f5fc 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -152,9 +152,9 @@ fn pool_lifecycle_e2e() { 
pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 20, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 20 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 }, PoolsEvent::Withdrawn { member: 21, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 21 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 }, ] ); @@ -193,7 +193,7 @@ fn pool_lifecycle_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, PoolsEvent::Destroyed { pool_id: 1 } ] ); @@ -476,10 +476,10 @@ fn pool_slash_e2e() { vec![ // 20 had unbonded 10 safely, and 10 got slashed by half. PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 10 + 5, points: 20 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 20 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 }, // 21 unbonded all of it after the slash PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 5 + 5, points: 15 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 21 } + PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 } ] ); assert_eq!( @@ -525,7 +525,7 @@ fn pool_slash_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 10, pool_id: 1, balance: 10 + 15, points: 30 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, PoolsEvent::Destroyed { pool_id: 1 } ] ); @@ -1160,7 +1160,7 @@ fn pool_migration_e2e() { vec![ PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 10, points: 10 }, // 21 was fully unbonding and removed from pool. - PoolsEvent::MemberRemoved { member: 21, pool_id: 1 }, + PoolsEvent::MemberRemoved { member: 21, pool_id: 1, released_balance: 0 }, PoolsEvent::Withdrawn { member: 22, pool_id: 1, balance: 5, points: 5 }, ] ); @@ -1183,3 +1183,313 @@ fn pool_migration_e2e() { ); }) } + +#[test] +fn pool_no_dangling_delegation() { + new_test_ext().execute_with(|| { + ExistentialDeposit::set(1); + assert_eq!(Balances::minimum_balance(), 1); + assert_eq!(Staking::current_era(), None); + // pool creator + let alice = 10; + let bob = 20; + let charlie = 21; + + // create the pool, we know this has id 1. 
+ assert_ok!(Pools::create(RuntimeOrigin::signed(alice), 40, alice, alice, alice)); + assert_eq!(LastPoolId::<Runtime>::get(), 1); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 40 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: alice, pool_id: 1 }, + PoolsEvent::Bonded { member: alice, pool_id: 1, bonded: 40, joined: true }, + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: alice, + amount: 40 + },] + ); + + assert_eq!( + Payee::<Runtime>::get(POOL1_BONDED), + Some(RewardDestination::Account(POOL1_REWARD)) + ); + + // have two members join + assert_ok!(Pools::join(RuntimeOrigin::signed(bob), 20, 1)); + assert_ok!(Pools::join(RuntimeOrigin::signed(charlie), 20, 1)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 }, + StakingEvent::Bonded { stash: POOL1_BONDED, amount: 20 } + ] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Bonded { member: bob, pool_id: 1, bonded: 20, joined: true }, + PoolsEvent::Bonded { member: charlie, pool_id: 1, bonded: 20, joined: true }, + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: bob, + amount: 20 + }, + DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: charlie, + amount: 20 + }, + ] + ); + + // now let's progress a bit. + CurrentEra::<Runtime>::set(Some(1)); + + // bob is completely unbonding + assert_ok!(Pools::unbond(RuntimeOrigin::signed(bob), 20, 20)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 20 },] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Unbonded { member: bob, pool_id: 1, balance: 20, points: 20, era: 4 }] + ); + + // this era will get slashed + CurrentEra::<Runtime>::set(Some(2)); + + assert_ok!(Pools::unbond(RuntimeOrigin::signed(alice), 10, 10)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(charlie), 21, 10)); + + assert_eq!( + staking_events_since_last_call(), + vec![ + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 10 }, + ] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Unbonded { member: alice, pool_id: 1, balance: 10, points: 10, era: 5 }, + PoolsEvent::Unbonded { + member: charlie, + pool_id: 1, + balance: 10, + points: 10, + era: 5 + }, + ] + ); + + // At this point, bob's 20 that is unlocking is safe from slash, 10 (alice) + 10 (charlie) + // are also unlocking but vulnerable to slash, and another 40 are active and vulnerable to + // slash. Let's slash half of them. + pallet_staking::slashing::do_slash::<Runtime>( + &POOL1_BONDED, + 30, + &mut Default::default(), + &mut Default::default(), + 2, // slash era 2, affects chunks at era 5 onwards.
+ ); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Slashed { staker: POOL1_BONDED, amount: 30 }] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + // unbonding pool of 20 for era 5 has been slashed to 10 + PoolsEvent::UnbondingPoolSlashed { pool_id: 1, era: 5, balance: 10 }, + // active stake of 40 has been slashed to half + PoolsEvent::PoolSlashed { pool_id: 1, balance: 20 } /* no slash to era 4 + * unbonding pool */ + ] + ); + + // alice's initial stake of 40 is reduced to half + assert_eq!(PoolMembers::<Runtime>::get(alice).unwrap().total_balance(), 20); + // bob unbonded in era 1 and is safe from slash + assert_eq!(PoolMembers::<Runtime>::get(bob).unwrap().total_balance(), 20); + // charlie's initial stake of 20 is slashed to half + assert_eq!(PoolMembers::<Runtime>::get(charlie).unwrap().total_balance(), 10); + + // apply pending slash to alice. + assert_eq!(Pools::api_member_pending_slash(alice), 20); + assert_ok!(Pools::apply_slash(RuntimeOrigin::signed(10), alice)); + // apply pending slash to charlie. + assert_eq!(Pools::api_member_pending_slash(charlie), 10); + assert_ok!(Pools::apply_slash(RuntimeOrigin::signed(10), charlie)); + // no pending slash for bob + assert_eq!(Pools::api_member_pending_slash(bob), 0); + + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::Slashed { + agent: POOL1_BONDED, + delegator: alice, + amount: 20 + }, + DelegatedStakingEvent::Slashed { + agent: POOL1_BONDED, + delegator: charlie, + amount: 10 + }, + ] + ); + + // go forward to an era after PostUnbondingPoolsWindow = 10 ends for era 5. + CurrentEra::<Runtime>::set(Some(15)); + // At this point subpools will all be merged in no-era causing Bob to lose some value while + // Alice and Charlie will gain some value. + assert_ok!(Pools::unbond(RuntimeOrigin::signed(charlie), charlie, 10)); + + // Now alice and charlie have less balance locked than their contribution. + assert_eq!(Balances::total_balance_on_hold(&alice), 20); + assert_eq!(PoolMembers::<Runtime>::get(alice).unwrap().total_balance(), 22); + assert_eq!(Balances::total_balance_on_hold(&charlie), 10); + assert_eq!(PoolMembers::<Runtime>::get(charlie).unwrap().total_balance(), 12); + + // and bob has more balance locked than his contribution. + assert_eq!(Balances::total_balance_on_hold(&bob), 20); + assert_eq!(PoolMembers::<Runtime>::get(bob).unwrap().total_balance(), 15); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Unbonded { + member: charlie, + pool_id: 1, + balance: 5, + points: 5, + era: 18 + }] + ); + + // When bob withdraws all, he gets all his locked funds back.
+ let bob_pre_withdraw_balance = Balances::free_balance(&bob); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(bob), bob, 0)); + assert_eq!(Balances::free_balance(&bob), bob_pre_withdraw_balance + 20); + assert_eq!(Balances::total_balance_on_hold(&bob), 0); + assert!(!PoolMembers::<Runtime>::contains_key(bob)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 30 },] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { pool_id: 1, member: bob, balance: 15, points: 20 }, + // dangling delegation of 5 is released + PoolsEvent::MemberRemoved { pool_id: 1, member: bob, released_balance: 5 }, + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: bob, amount: 15 }, + // the second release is the dangling delegation when member is removed. + DelegatedStakingEvent::Released { agent: POOL1_BONDED, delegator: bob, amount: 5 }, + ] + ); + + // Charlie can withdraw as much as he has locked. + CurrentEra::<Runtime>::set(Some(18)); + let charlie_pre_withdraw_balance = Balances::free_balance(&charlie); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(charlie), charlie, 0)); + // Charlie's total balance was 12, but we don't have enough funds to unlock. We try the best + // effort and unlock 10. + assert_eq!(Balances::free_balance(&charlie), charlie_pre_withdraw_balance + 10); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 },] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { pool_id: 1, member: charlie, balance: 10, points: 15 }, + PoolsEvent::MemberRemoved { member: charlie, pool_id: 1, released_balance: 0 } + ] + ); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![DelegatedStakingEvent::Released { + agent: POOL1_BONDED, + delegator: charlie, + amount: 10 + },] + ); + + // Set pools to destroying so alice can withdraw + assert_ok!(Pools::set_state(RuntimeOrigin::signed(alice), 1, PoolState::Destroying)); + assert_ok!(Pools::unbond(RuntimeOrigin::signed(alice), alice, 30)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 15 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::StateChanged { pool_id: 1, new_state: PoolState::Destroying }, + PoolsEvent::Unbonded { + member: alice, + pool_id: 1, + points: 15, + balance: 15, + era: 21 + } + ] + ); + + CurrentEra::<Runtime>::set(Some(21)); + assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(alice), alice, 0)); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 15 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Withdrawn { member: alice, pool_id: 1, balance: 20, points: 25 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: alice, released_balance: 0 }, + PoolsEvent::Destroyed { pool_id: 1 } + ] + ); + + // holds for all members are released.
+ assert_eq!(Balances::total_balance_on_hold(&alice), 0); + assert_eq!(Balances::total_balance_on_hold(&bob), 0); + assert_eq!(Balances::total_balance_on_hold(&charlie), 0); + }); +} diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs index ed47932a323b..d1bc4ef8ff28 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/mock.rs @@ -154,11 +154,14 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { } DelegateStake::strategy_type() } - fn transferable_balance(pool_account: Pool<Self::AccountId>) -> Self::Balance { + fn transferable_balance( + pool_account: Pool<Self::AccountId>, + member_account: Member<Self::AccountId>, + ) -> Self::Balance { if LegacyAdapter::get() { - return TransferStake::transferable_balance(pool_account) + return TransferStake::transferable_balance(pool_account, member_account) } - DelegateStake::transferable_balance(pool_account) + DelegateStake::transferable_balance(pool_account, member_account) } fn total_balance(pool_account: Pool<Self::AccountId>) -> Option<Self::Balance> { @@ -200,6 +203,13 @@ impl pallet_nomination_pools::adapter::StakeStrategy for MockAdapter { DelegateStake::member_withdraw(who, pool_account, amount, num_slashing_spans) } + fn dissolve(pool_account: Pool<Self::AccountId>) -> DispatchResult { + if LegacyAdapter::get() { + return TransferStake::dissolve(pool_account) + } + DelegateStake::dissolve(pool_account) + } + fn pending_slash(pool_account: Pool<Self::AccountId>) -> Self::Balance { if LegacyAdapter::get() { return TransferStake::pending_slash(pool_account) diff --git a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs index aa9135025900..28e978bba0e5 100644 --- a/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-transfer-stake/src/lib.rs @@ -145,9 +145,9 @@ fn pool_lifecycle_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 20, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 20 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 }, PoolsEvent::Withdrawn { member: 21, pool_id: 1, points: 10, balance: 10 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 21 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 }, ] ); @@ -186,7 +186,7 @@ fn pool_lifecycle_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, PoolsEvent::Destroyed { pool_id: 1 } ] ); @@ -275,7 +275,7 @@ fn destroy_pool_with_erroneous_consumer() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 10, pool_id: 1, points: 50, balance: 50 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, PoolsEvent::Destroyed { pool_id: 1 } ] ); @@ -558,10 +558,10 @@ fn pool_slash_e2e() { vec![ // 20 had unbonded 10 safely, and 10 got slashed by half.
PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 10 + 5, points: 20 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 20 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 20, released_balance: 0 }, // 21 unbonded all of it after the slash PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 5 + 5, points: 15 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 21 } + PoolsEvent::MemberRemoved { pool_id: 1, member: 21, released_balance: 0 } ] ); assert_eq!( @@ -607,7 +607,7 @@ fn pool_slash_e2e() { pool_events_since_last_call(), vec![ PoolsEvent::Withdrawn { member: 10, pool_id: 1, balance: 10 + 15, points: 30 }, - PoolsEvent::MemberRemoved { pool_id: 1, member: 10 }, + PoolsEvent::MemberRemoved { pool_id: 1, member: 10, released_balance: 0 }, PoolsEvent::Destroyed { pool_id: 1 } ] ); diff --git a/substrate/primitives/staking/src/lib.rs b/substrate/primitives/staking/src/lib.rs index 03c1af50240b..5e94524816a0 100644 --- a/substrate/primitives/staking/src/lib.rs +++ b/substrate/primitives/staking/src/lib.rs @@ -525,23 +525,26 @@ pub trait DelegationInterface { /// This takes into account any pending slashes to `Agent` against the delegated balance. fn agent_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance>; + /// Returns the total amount of funds that is unbonded and can be withdrawn from the `Agent` + /// account. `None` if not an `Agent`. + fn agent_transferable_balance(agent: Agent<Self::AccountId>) -> Option<Self::Balance>; + /// Returns the total amount of funds delegated. `None` if not a `Delegator`. fn delegator_balance(delegator: Delegator<Self::AccountId>) -> Option<Self::Balance>; - /// Delegate funds to `Agent`. - /// - /// Only used for the initial delegation. Use [`Self::delegate_extra`] to add more delegation. - fn delegate( - delegator: Delegator<Self::AccountId>, + /// Register `Agent` such that it can accept delegation. + fn register_agent( agent: Agent<Self::AccountId>, reward_account: &Self::AccountId, - amount: Self::Balance, ) -> DispatchResult; - /// Add more delegation to the `Agent`. + /// Removes `Agent` registration. /// - /// If this is the first delegation, use [`Self::delegate`] instead. - fn delegate_extra( + /// This should only be allowed if the agent has no staked funds. + fn remove_agent(agent: Agent<Self::AccountId>) -> DispatchResult; + + /// Add delegation to the `Agent`. + fn delegate( delegator: Delegator<Self::AccountId>, agent: Agent<Self::AccountId>, amount: Self::Balance, @@ -616,7 +619,7 @@ pub trait DelegationMigrator { /// /// Also removed from [`StakingUnchecked`] as a Virtual Staker. Useful for testing. #[cfg(feature = "runtime-benchmarks")] - fn drop_agent(agent: Agent<Self::AccountId>); + fn migrate_to_direct_staker(agent: Agent<Self::AccountId>); } sp_core::generate_feature_enabled_macro!(runtime_benchmarks_enabled, feature = "runtime-benchmarks", $); From be74fe921add60f79e2d11548bfa5a7d17e063ff Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Wed, 14 Aug 2024 06:54:51 +0900 Subject: [PATCH 37/74] Minor clean up (#5284) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR performs minor code cleanup to reduce verbosity. Since the compiler has already optimized out indirect calls in the existing code, these changes improve readability but do not affect performance.
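For readers skimming the diff below, the cleanup boils down to two standard Rust idioms: matching on `self` instead of `*self`, so that match ergonomics bind variant payloads by reference without explicit `ref` patterns, and collapsing a `match` on a `Result` whose error arm only yields a default into `map_or`. A minimal, self-contained sketch of both transformations (toy types and helpers, not the actual `MultiSigner`/`MultiSignature` code):

enum Signer {
    Ed25519(String),
    Ecdsa(String),
}

// Before: `match *self` would force `ref` bindings to avoid moving out of
// a shared reference. After: matching on the reference itself borrows the
// payload automatically (match ergonomics).
fn label(s: &Signer) -> &str {
    match s {
        Signer::Ed25519(who) => who,
        Signer::Ecdsa(who) => who,
    }
}

// Toy stand-in for public-key recovery: fails on empty input.
fn recover(sig: &[u8]) -> Result<Vec<u8>, ()> {
    if sig.is_empty() { Err(()) } else { Ok(sig.to_vec()) }
}

// Before: nested `match` with an explicit failure arm.
fn verify_old(sig: &[u8], expected: &[u8]) -> bool {
    match recover(sig) {
        Ok(pubkey) => pubkey == expected,
        Err(()) => false,
    }
}

// After: the same logic via `map_or`; the optimizer emits identical code,
// which is why the change is readability-only.
fn verify_new(sig: &[u8], expected: &[u8]) -> bool {
    recover(sig).map_or(false, |pubkey| pubkey == expected)
}

fn main() {
    assert_eq!(label(&Signer::Ed25519("alice".into())), "alice");
    assert_eq!(label(&Signer::Ecdsa("bob".into())), "bob");
    assert!(verify_old(b"key", b"key") && verify_new(b"key", b"key"));
    assert!(!verify_new(b"", b"key"));
}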
--------- Co-authored-by: Bastian Köcher --- prdoc/pr_5284.prdoc | 7 ++++++ substrate/primitives/runtime/src/lib.rs | 31 +++++++++---------------- 2 files changed, 18 insertions(+), 20 deletions(-) create mode 100644 prdoc/pr_5284.prdoc diff --git a/prdoc/pr_5284.prdoc b/prdoc/pr_5284.prdoc new file mode 100644 index 000000000000..a3244a82c860 --- /dev/null +++ b/prdoc/pr_5284.prdoc @@ -0,0 +1,7 @@ +title: Minor clean up +author: conr2d +topic: runtime + +crates: + - name: sp-runtime + bump: none diff --git a/substrate/primitives/runtime/src/lib.rs b/substrate/primitives/runtime/src/lib.rs index d313d23395a0..fd10dee2a7c5 100644 --- a/substrate/primitives/runtime/src/lib.rs +++ b/substrate/primitives/runtime/src/lib.rs @@ -438,10 +438,10 @@ impl TryFrom<MultiSigner> for ecdsa::Public { #[cfg(feature = "std")] impl std::fmt::Display for MultiSigner { fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - match *self { - Self::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), - Self::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), - Self::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), + match self { + Self::Ed25519(who) => write!(fmt, "ed25519: {}", who), + Self::Sr25519(who) => write!(fmt, "sr25519: {}", who), + Self::Ecdsa(who) => write!(fmt, "ecdsa: {}", who), } } } @@ -449,23 +449,14 @@ impl std::fmt::Display for MultiSigner { impl Verify for MultiSignature { type Signer = MultiSigner; fn verify<L: Lazy<[u8]>>(&self, mut msg: L, signer: &AccountId32) -> bool { - match (self, signer) { - (Self::Ed25519(ref sig), who) => match ed25519::Public::from_slice(who.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Sr25519(ref sig), who) => match sr25519::Public::from_slice(who.as_ref()) { Ok(signer) => sig.verify(msg, &signer), Err(()) => false, }, - (Self::Ecdsa(ref sig), who) => { + let who: [u8; 32] = *signer.as_ref(); + match self { + Self::Ed25519(sig) => sig.verify(msg, &who.into()), + Self::Sr25519(sig) => sig.verify(msg, &who.into()), + Self::Ecdsa(sig) => { let m = sp_io::hashing::blake2_256(msg.get()); - match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { - Ok(pubkey) => - &sp_io::hashing::blake2_256(pubkey.as_ref()) == - <dyn AsRef<[u8; 32]>>::as_ref(who), - _ => false, - } + sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) .map_or(false, |pubkey| sp_io::hashing::blake2_256(&pubkey) == who) }, } } From be74fe921add60f79e2d11548bfa5a7d17e063ff Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Wed, 14 Aug 2024 10:06:16 +0200 Subject: [PATCH 38/74] Migrate foreign assets v3::Location to v4::Location (#4129) In the move from XCMv3 to XCMv4, the `AssetId` for `ForeignAssets` in `asset-hub-rococo` and `asset-hub-westend` was left as `v3::Location` to be later migrated to `v4::Location`. This is that migration PR. Because the encoding of `v3::Location` and `v4::Location` is the same, we don't need to do any data migration; the keys will still be decodable. The [original idea by Jan](https://github.com/paritytech/polkadot/pull/7236) was to make the v4 changes in v3 since the ABI (the encoding/decoding) didn't change. I corroborated that the ABI is the same by iterating over all storage; the code is on [another branch](https://github.com/paritytech/polkadot-sdk/blob/cisco-assert-v3-v4-encodings-equal/cumulus/parachains/runtimes/assets/migrations/src/foreign_assets_to_v4/mod.rs).
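The no-data-migration claim rests on a general SCALE property: storage written under one type decodes under another as long as the two encode byte-for-byte identically. A toy demonstration of that property using `parity-scale-codec` (with its `derive` feature enabled; the structs here are simplified stand-ins, not the real `v3::Location`/`v4::Location`):

use parity_scale_codec::{Decode, Encode};

// Two "versions" of a type whose SCALE layout is identical.
#[derive(Encode, Decode, Debug, PartialEq)]
struct LocationV3 {
    parents: u8,
    interior: Vec<u32>,
}

#[derive(Encode, Decode, Debug, PartialEq)]
struct LocationV4 {
    parents: u8,
    interior: Vec<u32>,
}

fn main() {
    let stored = LocationV3 { parents: 1, interior: vec![2004, 50] }.encode();
    // Bytes written as V3 decode losslessly as V4, so a storage map keyed by
    // the old type can simply be re-declared with the new one.
    let migrated = LocationV4::decode(&mut &stored[..]).expect("same layout");
    assert_eq!(migrated, LocationV4 { parents: 1, interior: vec![2004, 50] });
    // Re-encoding yields the original bytes: the equality the linked
    // `cisco-assert-v3-v4-encodings-equal` branch asserts for every key.
    assert_eq!(stored, migrated.encode());
}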
We will need a data migration when we want to update from `v4::Location` to `v5::Location` because of [the accepted RFC changing the NetworkId enum](https://github.com/polkadot-fellows/RFCs/pull/108). I'll configure MBMs (Multi-Block Migrations) then and make the actual migration. Fixes https://github.com/paritytech/polkadot-sdk/issues/4128 --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> --- Cargo.lock | 924 ++++++++++-------- .../assets/asset-hub-rococo/src/lib.rs | 2 +- .../assets/asset-hub-westend/src/lib.rs | 2 +- .../emulated/common/src/lib.rs | 10 +- .../assets/asset-hub-rococo/src/tests/send.rs | 11 +- .../assets/asset-hub-rococo/src/tests/swap.rs | 41 +- .../asset-hub-rococo/src/tests/teleport.rs | 6 +- .../tests/assets/asset-hub-westend/src/lib.rs | 5 +- .../asset-hub-westend/src/tests/send.rs | 11 +- .../asset-hub-westend/src/tests/swap.rs | 38 +- .../asset-hub-westend/src/tests/teleport.rs | 6 +- .../bridges/bridge-hub-rococo/src/lib.rs | 3 +- .../src/tests/asset_transfers.rs | 58 +- .../bridge-hub-rococo/src/tests/mod.rs | 18 +- .../bridge-hub-rococo/src/tests/send_xcm.rs | 2 +- .../bridges/bridge-hub-westend/src/lib.rs | 3 +- .../src/tests/asset_transfers.rs | 50 +- .../bridge-hub-westend/src/tests/mod.rs | 22 +- .../assets/asset-hub-rococo/src/lib.rs | 113 +-- .../assets/asset-hub-rococo/src/tests/mod.rs | 72 ++ .../assets/asset-hub-rococo/src/xcm_config.rs | 15 +- .../assets/asset-hub-rococo/tests/tests.rs | 162 ++- .../assets/asset-hub-westend/src/lib.rs | 47 +- .../asset-hub-westend/src/xcm_config.rs | 15 +- .../assets/asset-hub-westend/tests/tests.rs | 118 ++- .../assets/test-utils/src/test_cases.rs | 48 +- .../test-utils/src/test_cases_over_bridge.rs | 17 +- prdoc/pr_4129.prdoc | 17 + substrate/frame/assets/Cargo.toml | 1 - 29 files changed, 951 insertions(+), 886 deletions(-) create mode 100644 cumulus/parachains/runtimes/assets/asset-hub-rococo/src/tests/mod.rs create mode 100644 prdoc/pr_4129.prdoc diff --git a/Cargo.lock b/Cargo.lock index 60be41563299..2593452fb869 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,6 +42,15 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +[[package]] +name = "aead" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = "aead" version = "0.5.2" @@ -52,6 +61,18 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "aes" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" +dependencies = [ + "cfg-if", + "cipher 0.3.0", + "cpufeatures", + "opaque-debug 0.3.0", +] + [[package]] name = "aes" version = "0.8.3" @@ -63,17 +84,31 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "aes-gcm" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" +dependencies = [ + "aead 0.4.3", + "aes 0.7.5", + "cipher 0.3.0", + "ctr 0.7.0", + "ghash 0.4.4", + "subtle 2.5.0", +] + [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies 
= [ - "aead", - "aes", + "aead 0.5.2", + "aes 0.8.3", "cipher 0.4.4", - "ctr", - "ghash", + "ctr 0.9.2", + "ghash 0.5.0", "subtle 2.5.0", ] @@ -157,9 +192,9 @@ dependencies = [ "dunce", "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", "syn-solidity", "tiny-keccak", ] @@ -228,9 +263,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" @@ -284,9 +319,9 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -518,7 +553,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -620,7 +655,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -661,9 +696,9 @@ dependencies = [ [[package]] name = "array-bytes" -version = "6.2.2" +version = "6.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f840fb7195bcfc5e17ea40c26e5ce6d5b9ce5d584466e17703209657e459ae0" +checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" [[package]] name = "arrayref" @@ -724,7 +759,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", "synstructure 0.12.6", @@ -736,9 +771,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", "synstructure 0.13.1", ] @@ -748,7 +783,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -759,16 +794,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "assert_cmd" -version = "2.0.14" +version = "2.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" +checksum = "bc65048dd435533bb1baf2ed9956b9a278fbfdcf90301b39ee117f06c0199d37" dependencies = [ "anstyle", "bstr", @@ -1179,14 +1214,14 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ - "async-lock 3.4.0", + "async-lock 3.3.0", 
"cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.21", + "rustix 0.38.25", "slab", "tracing", "windows-sys 0.52.0", @@ -1203,13 +1238,13 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.4.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ - "event-listener 5.2.0", + "event-listener 4.0.3", "event-listener-strategy", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -1263,7 +1298,7 @@ dependencies = [ "log", "memchr", "once_cell", - "pin-project-lite", + "pin-project-lite 0.2.12", "pin-utils", "slab", "wasm-bindgen-futures", @@ -1277,7 +1312,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -1286,9 +1321,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -1303,9 +1338,9 @@ version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -1318,7 +1353,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -1362,7 +1397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -1508,12 +1543,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.12", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "regex", "rustc-hash", "shlex", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -2590,6 +2625,18 @@ dependencies = [ "keystream", ] +[[package]] +name = "chacha20" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" +dependencies = [ + "cfg-if", + "cipher 0.3.0", + "cpufeatures", + "zeroize", +] + [[package]] name = "chacha20" version = "0.9.1" @@ -2603,14 +2650,14 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.10.1" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" +checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" dependencies = [ - "aead", - "chacha20", - "cipher 0.4.4", - "poly1305", + "aead 0.4.3", + "chacha20 0.8.2", + "cipher 0.3.0", + "poly1305 0.7.2", "zeroize", ] @@ -2715,6 +2762,15 @@ dependencies = [ "generic-array 0.14.7", ] +[[package]] +name = "cipher" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" +dependencies = [ + "generic-array 0.14.7", +] + [[package]] name = 
"cipher" version = "0.4.4" @@ -2723,7 +2779,6 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", - "zeroize", ] [[package]] @@ -2790,9 +2845,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.0.2" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" +checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" dependencies = [ "num-traits", ] @@ -2827,7 +2882,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -2839,9 +2894,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -3026,7 +3081,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -3039,20 +3094,19 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.0.4" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" +checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" dependencies = [ - "is-terminal", "lazy_static", "windows-sys 0.48.0", ] [[package]] name = "combine" -version = "4.6.6" +version = "4.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", "memchr", @@ -3611,9 +3665,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2b432c56615136f8dba245fed7ec3d5518c500a31108661067e61e72fe7e6bc" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] @@ -3763,6 +3817,15 @@ dependencies = [ "subtle 2.5.0", ] +[[package]] +name = "ctr" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" +dependencies = [ + "cipher 0.3.0", +] + [[package]] name = "ctr" version = "0.9.2" @@ -4149,9 +4212,9 @@ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4648,9 +4711,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.72+curl-8.6.0" +version = "0.4.73+curl-8.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" +checksum = "450ab250ecf17227c39afb9a2dd9261dc0035cb80f2612472fc0c4aac2dcb84d" dependencies = [ "cc", "libc", @@ -4684,9 +4747,9 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4723,10 +4786,10 @@ dependencies = [ "cc", "codespan-reporting", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "scratch", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4741,9 +4804,9 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4834,12 +4897,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", -] +checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" [[package]] name = "derivative" @@ -4847,7 +4907,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -4858,9 +4918,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4869,9 +4929,9 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -4881,7 +4941,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "rustc_version 0.4.0", "syn 1.0.109", @@ -4977,9 +5037,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5037,10 +5097,10 @@ dependencies = [ "common-path", "derive-syn-parse", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "regex", - "syn 2.0.61", + "syn 2.0.58", "termcolor", "toml 0.8.8", "walkdir", @@ -5086,7 +5146,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -5232,7 +5292,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -5244,9 +5304,9 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5264,9 +5324,9 @@ version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5275,9 +5335,9 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5353,11 +5413,12 @@ dependencies = [ [[package]] name = "erased-serde" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" +checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" dependencies = [ "serde", + "typeid", ] [[package]] @@ -5440,23 +5501,23 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "5.2.0" +version = "4.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] name = "event-listener-strategy" -version = "0.5.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 5.2.0", - "pin-project-lite", + "event-listener 4.0.3", + "pin-project-lite 0.2.12", ] [[package]] @@ -5478,9 +5539,9 @@ dependencies = [ "file-guard", "fs-err", "prettyplease 0.2.12", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5550,9 +5611,9 @@ dependencies = [ "expander", "indexmap 2.2.3", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -5753,9 +5814,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" dependencies = [ "percent-encoding", ] @@ -5882,11 +5943,11 @@ dependencies = [ "frame-support", "parity-scale-codec", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "scale-info", "sp-arithmetic", - "syn 2.0.61", + "syn 2.0.58", "trybuild", ] @@ -6077,7 +6138,7 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "proc-macro-warning 1.0.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "regex", "scale-info", @@ -6087,7 +6148,7 @@ dependencies = [ "sp-metadata-ir", "sp-runtime", "static_assertions", - "syn 2.0.61", + 
"syn 2.0.58", ] [[package]] @@ -6096,18 +6157,18 @@ version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "frame-support-procedural-tools-derive" version = "11.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -6250,7 +6311,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -6336,7 +6397,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite", + "pin-project-lite 0.2.12", "waker-fn", ] @@ -6347,7 +6408,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -6356,9 +6417,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -6402,7 +6463,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite", + "pin-project-lite 0.2.12", "pin-utils", "slab", ] @@ -6480,6 +6541,16 @@ dependencies = [ "rand_core", ] +[[package]] +name = "ghash" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" +dependencies = [ + "opaque-debug 0.3.0", + "polyval 0.5.3", +] + [[package]] name = "ghash" version = "0.5.0" @@ -6487,7 +6558,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug 0.3.0", - "polyval", + "polyval 0.6.1", ] [[package]] @@ -6605,9 +6676,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" dependencies = [ "bytes", "fnv", @@ -6758,9 +6829,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-conservative" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" +checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" [[package]] name = "hex-literal" @@ -6860,7 +6931,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http 0.2.9", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -6883,7 +6954,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "pin-project-lite", + "pin-project-lite 0.2.12", ] [[package]] @@ -6906,21 +6977,21 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.24", "http 0.2.9", "http-body 0.4.5", "httparse", "httpdate", "itoa", - "pin-project-lite", + "pin-project-lite 0.2.12", "socket2 0.5.7", "tokio", "tower-service", @@ -6943,7 +7014,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite", + "pin-project-lite 0.2.12", "smallvec", "tokio", "want", @@ -6957,7 +7028,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.9", - "hyper 0.14.29", + "hyper 0.14.30", "log", "rustls 0.21.7", "rustls-native-certs 0.6.3", @@ -6995,7 +7066,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "hyper 1.3.1", - "pin-project-lite", + "pin-project-lite 0.2.12", "socket2 0.5.7", "tokio", "tower", @@ -7047,16 +7118,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "idna" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "if-addrs" version = "0.10.2" @@ -7097,7 +7158,7 @@ dependencies = [ "bytes", "futures", "http 0.2.9", - "hyper 0.14.29", + "hyper 0.14.30", "log", "rand", "tokio", @@ -7149,7 +7210,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -7169,7 +7230,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", ] @@ -7294,7 +7355,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -7513,9 +7574,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -7764,9 +7825,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnghttp2-sys" -version = "0.1.9+1.58.0" +version = "0.1.10+1.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57e858af2798e167e709b9d969325b6d8e9d50232fcbc494d7d54f976854a64" +checksum = "959c25552127d2e1fa72f0e52548ec04fc386e827ba71a7bd01db46a447dc135" dependencies = [ "cc", "libc", @@ -8101,9 +8162,9 @@ checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck 0.4.1", "proc-macro-warning 0.4.2", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -8174,9 +8235,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.42.2" +version = "0.42.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" +checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" dependencies = [ "either", "futures", @@ -8185,10 +8246,9 @@ dependencies = [ "libp2p-identity", "log", "parking_lot 0.12.3", - "pin-project-lite", + "quicksink", "rw-stream-sink", - "soketto 0.8.0", - "thiserror", + "soketto 0.7.1", "url", "webpki-roots 0.25.2", ] @@ -8328,9 +8388,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lioness" @@ -8391,7 +8451,7 @@ dependencies = [ "rand", "rcgen", "ring 0.16.20", - "rustls 0.20.9", + "rustls 0.20.8", "serde", "sha2 0.10.8", "simple-dns", @@ -8517,7 +8577,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -8529,9 +8589,9 @@ dependencies = [ "const-random", "derive-syn-parse", "macro_magic_core_macros", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -8540,9 +8600,9 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -8553,7 +8613,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -8604,9 +8664,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memfd" @@ -8915,7 +8975,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -8927,9 +8987,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -9039,7 +9099,7 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", "synstructure 0.12.6", @@ -9087,7 +9147,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -9176,9 +9236,9 @@ dependencies = [ [[package]] name = "network-interface" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae72fd9dbd7f55dda80c00d66acc3b2130436fcba9ea89118fc508eaae48dfb0" +checksum = "a4a43439bf756eed340bdf8feba761e2d50c7d47175d87545cd5cbe4a137c4d1" dependencies = [ "cc", "libc", @@ -9467,21 +9527,15 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - [[package]] name = "num-derive" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -9646,9 +9700,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -9659,9 +9713,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.2.3+3.2.1" +version = "300.3.1+3.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" +checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" dependencies = [ "cc", ] @@ -9713,7 +9767,7 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -9874,7 +9928,6 @@ dependencies = [ "sp-core", "sp-io", "sp-runtime", - "sp-std 14.0.0", ] [[package]] @@ -10417,9 +10470,9 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -10585,7 +10638,7 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-tracing 16.0.0", - "strum 0.26.2", + "strum 0.26.3", ] [[package]] @@ -11651,10 +11704,10 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "sp-runtime", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -12296,7 +12349,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -12325,7 +12378,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "syn 1.0.109", "synstructure 0.12.6", ] @@ -12710,9 +12763,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" [[package]] name = "pest" @@ -12742,9 +12795,9 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - 
"syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -12783,16 +12836,22 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" + +[[package]] +name = "pin-project-lite" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" [[package]] name = "pin-utils" @@ -13694,7 +13753,7 @@ dependencies = [ "sc-network", "sc-network-types", "sp-runtime", - "strum 0.26.2", + "strum 0.26.3", "thiserror", "tracing-gum", ] @@ -14894,7 +14953,7 @@ dependencies = [ "sp-runtime", "sp-timestamp", "sp-tracing 16.0.0", - "strum 0.26.2", + "strum 0.26.3", "substrate-prometheus-endpoint", "tokio", "tracing-gum", @@ -15130,9 +15189,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15142,7 +15201,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15178,7 +15237,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite", + "pin-project-lite 0.2.12", "windows-sys 0.48.0", ] @@ -15190,12 +15249,23 @@ checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" dependencies = [ "cfg-if", "concurrent-queue", - "pin-project-lite", - "rustix 0.38.21", + "pin-project-lite 0.2.12", + "rustix 0.38.25", "tracing", "windows-sys 0.52.0", ] +[[package]] +name = "poly1305" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" +dependencies = [ + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash 0.4.0", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -15204,7 +15274,19 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug 0.3.0", - "universal-hash", + "universal-hash 0.5.1", +] + +[[package]] +name = "polyval" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" +dependencies = [ + "cfg-if", + "cpufeatures", + "opaque-debug 0.3.0", + "universal-hash 0.4.0", ] [[package]] @@ -15216,7 +15298,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash", + "universal-hash 0.5.1", ] [[package]] @@ -15234,12 +15316,6 @@ dependencies = [ "rand", ] -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - [[package]] name = "pprof" version = "0.12.1" @@ -15324,7 +15400,7 @@ version = "0.1.25" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "syn 1.0.109", ] @@ -15334,8 +15410,8 @@ version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.82", - "syn 2.0.61", + "proc-macro2 1.0.75", + "syn 2.0.58", ] [[package]] @@ -15395,7 +15471,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", "version_check", @@ -15407,7 +15483,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "version_check", ] @@ -15424,9 +15500,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15435,9 +15511,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15451,9 +15527,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] @@ -15470,7 +15546,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.21", + "rustix 0.38.25", ] [[package]] @@ -15516,9 +15592,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15597,9 +15673,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", "heck 0.5.0", @@ -15610,9 +15686,9 @@ dependencies = [ "petgraph", "prettyplease 0.2.12", "prost 0.12.6", - "prost-types 0.12.4", + "prost-types 0.12.6", "regex", - "syn 2.0.61", + "syn 2.0.58", "tempfile", ] @@ -15624,7 +15700,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -15637,9 +15713,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", 
"quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -15653,9 +15729,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.4" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" dependencies = [ "prost 0.12.6", ] @@ -15765,6 +15841,17 @@ dependencies = [ "rand", ] +[[package]] +name = "quicksink" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" +dependencies = [ + "futures-core", + "futures-sink", + "pin-project-lite 0.1.12", +] + [[package]] name = "quinn" version = "0.9.4" @@ -15772,11 +15859,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" dependencies = [ "bytes", - "pin-project-lite", - "quinn-proto 0.9.6", + "pin-project-lite 0.2.12", + "quinn-proto 0.9.5", "quinn-udp 0.3.2", "rustc-hash", - "rustls 0.20.9", + "rustls 0.20.8", "thiserror", "tokio", "tracing", @@ -15791,7 +15878,7 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite", + "pin-project-lite 0.2.12", "quinn-proto 0.10.6", "quinn-udp 0.4.1", "rustc-hash", @@ -15803,15 +15890,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.6" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" +checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" dependencies = [ "bytes", "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.20.9", + "rustls 0.20.8", "slab", "thiserror", "tinyvec", @@ -15843,7 +15930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" dependencies = [ "libc", - "quinn-proto 0.9.6", + "quinn-proto 0.9.5", "socket2 0.4.9", "tracing", "windows-sys 0.42.0", @@ -15877,7 +15964,7 @@ version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", ] [[package]] @@ -16076,9 +16163,9 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -16254,10 +16341,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.24", "http 0.2.9", "http-body 0.4.5", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -16265,7 +16352,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite", + "pin-project-lite 0.2.12", "rustls 0.21.7", "rustls-pemfile 1.0.3", "serde", @@ -16626,12 +16713,12 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.61", + "syn 2.0.58", "unicode-ident", ] @@ -16774,22 +16861,22 @@ dependencies 
= [ [[package]] name = "rustix" -version = "0.38.21" +version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" +checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.10", + "linux-raw-sys 0.4.11", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.9" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" +checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" dependencies = [ "ring 0.16.20", "sct", @@ -16875,9 +16962,9 @@ checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-platform-verifier" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" +checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" dependencies = [ "core-foundation", "core-foundation-sys", @@ -17018,7 +17105,7 @@ dependencies = [ "multihash 0.19.1", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build 0.12.6", "quickcheck", "rand", "sc-client-api", @@ -17114,9 +17201,9 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -17764,7 +17851,7 @@ dependencies = [ "partial_sort", "pin-project", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build 0.12.6", "rand", "sc-block-builder", "sc-client-api", @@ -17809,7 +17896,7 @@ dependencies = [ "futures", "libp2p-identity", "parity-scale-codec", - "prost-build 0.12.4", + "prost-build 0.12.6", "sc-consensus", "sc-network-types", "sp-consensus", @@ -17851,7 +17938,7 @@ dependencies = [ "log", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build 0.12.6", "sc-client-api", "sc-network", "sc-network-types", @@ -17895,7 +17982,7 @@ dependencies = [ "mockall 0.11.4", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.4", + "prost-build 0.12.6", "quickcheck", "sc-block-builder", "sc-client-api", @@ -17997,7 +18084,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "lazy_static", "log", @@ -18409,9 +18496,9 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -18523,7 +18610,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -18561,7 +18648,7 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "serde_derive_internals", "syn 1.0.109", @@ -18600,7 +18687,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead", + "aead 0.5.2", "arrayref", "arrayvec 0.7.4", "curve25519-dalek", @@ -18682,18 +18769,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.2" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" +checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" dependencies = [ "cc", ] @@ -18861,9 +18948,9 @@ version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -18872,7 +18959,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -18922,9 +19009,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.34+deprecated" +version = "0.9.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" dependencies = [ "indexmap 2.2.3", "itoa", @@ -18963,9 +19050,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -19006,9 +19093,9 @@ dependencies = [ [[package]] name = "sha1-asm" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ba6947745e7f86be3b8af00b7355857085dbdf8901393c89514510eb61f4e21" +checksum = "286acebaf8b67c1130aedffad26f594eff0c1292389158135327d2e23aed582b" dependencies = [ "cc", ] @@ -19256,7 +19343,7 @@ dependencies = [ "bip39", "blake2-rfc", "bs58 0.5.1", - "chacha20", + "chacha20 0.9.1", "crossbeam-queue", "derive_more", "ed25519-zebra", @@ -19278,7 +19365,7 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "poly1305", + "poly1305 0.8.0", "rand", "rand_chacha", "ruzstd", @@ -19341,16 +19428,16 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.6" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" +checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" dependencies = [ - "aes-gcm", + "aes-gcm 0.9.2", "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring 0.17.7", + "ring 0.16.20", "rustc_version 0.4.0", "sha2 0.10.8", "subtle 2.5.0", @@ -19839,9 +19926,9 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 
1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20037,7 +20124,7 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "strum 0.26.2", + "strum 0.26.3", "w3f-bls", ] @@ -20168,7 +20255,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -20225,7 +20312,7 @@ version = "0.1.0" dependencies = [ "quote 1.0.36", "sp-crypto-hashing", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20241,18 +20328,18 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20330,7 +20417,7 @@ version = "31.0.0" dependencies = [ "sp-core", "sp-runtime", - "strum 0.26.2", + "strum 0.26.3", ] [[package]] @@ -20518,13 +20605,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" +source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20534,9 +20621,9 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20629,7 +20716,7 @@ dependencies = [ name = "sp-statement-store" version = "10.0.0" dependencies = [ - "aes-gcm", + "aes-gcm 0.10.3", "curve25519-dalek", "ed25519-dalek", "hkdf", @@ -20795,10 +20882,10 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "sp-version", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -20880,7 +20967,7 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "serde", "serde_json", @@ -20905,7 +20992,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -21098,7 +21185,7 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -21171,7 +21258,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -21193,11 +21280,11 @@ checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" [[package]] name = "strum" -version = "0.26.2" +version = 
"0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" +checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" dependencies = [ - "strum_macros 0.26.2", + "strum_macros 0.26.4", ] [[package]] @@ -21207,7 +21294,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "rustversion", "syn 1.0.109", @@ -21220,23 +21307,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "rustversion", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "strum_macros" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" +checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ - "heck 0.4.1", - "proc-macro2 1.0.82", + "heck 0.5.0", + "proc-macro2 1.0.75", "quote 1.0.36", "rustversion", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -21374,7 +21461,7 @@ dependencies = [ "sp-runtime", "sp-trie", "structopt", - "strum 0.26.2", + "strum 0.26.3", "thiserror", ] @@ -21550,7 +21637,7 @@ dependencies = [ "sp-maybe-compressed-blob", "sp-tracing 16.0.0", "sp-version", - "strum 0.26.2", + "strum 0.26.3", "tempfile", "toml 0.8.8", "walkdir", @@ -21683,18 +21770,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.61" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "unicode-ident", ] @@ -21706,9 +21793,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -21717,7 +21804,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", "unicode-xid 0.2.4", @@ -21729,16 +21816,16 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] name = "sysinfo" -version = "0.30.5" +version = "0.30.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fb4f3438c8f6389c864e61221cbc97e9bca98b4daf39a5beb7bea660f528bb2" +checksum = 
"732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" dependencies = [ "cfg-if", "core-foundation-sys", @@ -21802,7 +21889,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.0", "redox_syscall 0.4.1", - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -21821,7 +21908,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.21", + "rustix 0.38.25", "windows-sys 0.48.0", ] @@ -21848,9 +21935,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -22012,7 +22099,7 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "syn 1.0.109", ] @@ -22023,9 +22110,9 @@ version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -22099,16 +22186,14 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" dependencies = [ "deranged", "itoa", "libc", - "num-conv", "num_threads", - "powerfmt", "serde", "time-core", "time-macros", @@ -22116,17 +22201,16 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" dependencies = [ - "num-conv", "time-core", ] @@ -22166,9 +22250,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.37.0" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", @@ -22176,7 +22260,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.3", - "pin-project-lite", + "pin-project-lite 0.2.12", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", @@ -22185,13 +22269,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies 
= [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -22233,7 +22317,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", - "pin-project-lite", + "pin-project-lite 0.2.12", "tokio", "tokio-util", ] @@ -22276,7 +22360,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite", + "pin-project-lite 0.2.12", "tokio", ] @@ -22343,7 +22427,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite", + "pin-project-lite 0.2.12", "tokio", "tower-layer", "tower-service", @@ -22361,7 +22445,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "pin-project-lite", + "pin-project-lite 0.2.12", "tower-layer", "tower-service", ] @@ -22385,7 +22469,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite", + "pin-project-lite 0.2.12", "tracing-attributes", "tracing-core", ] @@ -22396,9 +22480,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -22438,9 +22522,9 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -22536,9 +22620,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ed83be775d85ebb0e272914fff6462c39b3ddd6dc67b5c1c41271aad280c69" +checksum = "0c992b4f40c234a074d48a757efeabb1a6be88af84c0c23f7ca158950cb0ae7f" dependencies = [ "hash-db", "log", @@ -22703,6 +22787,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "typeid" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf" + [[package]] name = "typenum" version = "1.16.0" @@ -22778,6 +22868,16 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +[[package]] +name = "universal-hash" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" +dependencies = [ + "generic-array 0.14.7", + "subtle 2.5.0", +] + [[package]] name = "universal-hash" version = "0.5.1" @@ -22790,9 +22890,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.11" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "unsigned-varint" @@ -22830,12 +22930,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.2" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", - "idna 0.5.0", + "idna 0.4.0", "percent-encoding", ] @@ -22865,9 +22965,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "value-bag" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec26a25bd6fca441cdd0f769fd7f891bae119f996de31f86a5eddccef54c1d" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -22875,9 +22975,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ead5b693d906686203f19a49e88c477fb8c15798b68cf72f60b4b5521b4ad891" +checksum = "ccacf50c5cb077a9abb723c5bcb5e0754c1a433f1e1de89edc328e2760b6328b" dependencies = [ "erased-serde", "serde", @@ -22886,9 +22986,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b9d0f4a816370c3a0d7d82d603b62198af17675b12fe5e91de6b47ceb505882" +checksum = "1785bae486022dfb9703915d42287dcb284c1ee37bd1080eeba78cc04721285b" dependencies = [ "sval", "sval_buffer", @@ -22989,9 +23089,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "serde", @@ -23001,16 +23101,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -23028,9 +23128,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote 1.0.36", "wasm-bindgen-macro-support", @@ -23038,22 +23138,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" 
+checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-bindgen-test" @@ -23075,7 +23175,7 @@ version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", ] @@ -24141,10 +24241,10 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", "staging-xcm", - "syn 2.0.61", + "syn 2.0.58", "trybuild", ] @@ -24307,9 +24407,9 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] @@ -24327,9 +24427,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.82", + "proc-macro2 1.0.75", "quote 1.0.36", - "syn 2.0.61", + "syn 2.0.58", ] [[package]] diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 80d2376c6811..1f98d3ba964d 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -59,5 +59,5 @@ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo); impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); impl_assets_helpers_for_parachain!(AssetHubRococo); -impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v3::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v4::Location); impl_xcm_helpers_for_parachain!(AssetHubRococo); diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index 608690218d2f..6066adec52c3 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -59,5 +59,5 @@ impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend); impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); impl_assets_helpers_for_parachain!(AssetHubWestend); -impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v3::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v4::Location); impl_xcm_helpers_for_parachain!(AssetHubWestend); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index b3dec175e112..30e66ced1fb0 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -63,11 +63,11 @@ pub const PENPAL_ID: u32 = 2000; pub const ASSETS_PALLET_ID: u8 = 50; parameter_types! 
{ - pub PenpalTeleportableAssetLocation: xcm::v3::Location - = xcm::v3::Location::new(1, [ - xcm::v3::Junction::Parachain(PENPAL_ID), - xcm::v3::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v3::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), + pub PenpalTeleportableAssetLocation: xcm::v4::Location + = xcm::v4::Location::new(1, [ + xcm::v4::Junction::Parachain(PENPAL_ID), + xcm::v4::Junction::PalletInstance(ASSETS_PALLET_ID), + xcm::v4::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index 364fbd0d439f..ab407611c736 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -36,21 +36,18 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { let para_sovereign_account = AssetHubRococo::sovereign_account_id_of( AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); - let asset_location_on_penpal = v3::Location::new( + let asset_location_on_penpal = Location::new( 0, - [ - v3::Junction::PalletInstance(ASSETS_PALLET_ID), - v3::Junction::GeneralIndex(ASSET_ID.into()), - ], + [Junction::PalletInstance(ASSETS_PALLET_ID), Junction::GeneralIndex(ASSET_ID.into())], ); let foreign_asset_at_asset_hub = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); // Encoded `create_asset` call to be executed in AssetHub let call = AssetHubRococo::create_foreign_asset_call( - foreign_asset_at_asset_hub, + foreign_asset_at_asset_hub.clone(), ASSET_MIN_BALANCE, para_sovereign_account.clone(), ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs index 16e0512da960..ac0c90ba198d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/swap.rs @@ -17,13 +17,10 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { - let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); - let asset_one = Box::new(v3::Location::new( + let asset_native = Box::new(Location::try_from(RelayLocation::get()).unwrap()); + let asset_one = Box::new(Location::new( 0, - [ - v3::Junction::PalletInstance(ASSETS_PALLET_ID), - v3::Junction::GeneralIndex(ASSET_ID.into()), - ], + [Junction::PalletInstance(ASSETS_PALLET_ID), Junction::GeneralIndex(ASSET_ID.into())], )); AssetHubRococo::execute_with(|| { @@ -112,11 +109,11 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); + let asset_native = Box::new(Location::try_from(RelayLocation::get()).unwrap()); let asset_location_on_penpal = - v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).unwrap(); + 
Location::try_from(PenpalLocalTeleportableToAssetHub::get()).unwrap(); let foreign_asset_at_asset_hub_rococo = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); @@ -141,7 +138,7 @@ fn swap_locally_on_chain_using_foreign_assets() { // 1. Mint foreign asset (in reality this should be a teleport or some such) assert_ok!(::ForeignAssets::mint( ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone().into()), - foreign_asset_at_asset_hub_rococo, + foreign_asset_at_asset_hub_rococo.clone(), sov_penpal_on_ahr.clone().into(), ASSET_HUB_ROCOCO_ED * 3_000_000_000_000, )); @@ -157,7 +154,7 @@ fn swap_locally_on_chain_using_foreign_assets() { assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), asset_native.clone(), - Box::new(foreign_asset_at_asset_hub_rococo), + Box::new(foreign_asset_at_asset_hub_rococo.clone()), )); assert_expected_events!( @@ -171,7 +168,7 @@ fn swap_locally_on_chain_using_foreign_assets() { assert_ok!(::AssetConversion::add_liquidity( ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone()), asset_native.clone(), - Box::new(foreign_asset_at_asset_hub_rococo), + Box::new(foreign_asset_at_asset_hub_rococo.clone()), 1_000_000_000_000, 2_000_000_000_000, 0, @@ -189,7 +186,7 @@ fn swap_locally_on_chain_using_foreign_assets() { ); // 4. Swap! - let path = vec![asset_native.clone(), Box::new(foreign_asset_at_asset_hub_rococo)]; + let path = vec![asset_native.clone(), Box::new(foreign_asset_at_asset_hub_rococo.clone())]; assert_ok!( ::AssetConversion::swap_exact_tokens_for_tokens( @@ -216,7 +213,7 @@ fn swap_locally_on_chain_using_foreign_assets() { assert_ok!(::AssetConversion::remove_liquidity( ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone()), asset_native.clone(), - Box::new(foreign_asset_at_asset_hub_rococo), + Box::new(foreign_asset_at_asset_hub_rococo.clone()), 1414213562273 - ASSET_HUB_ROCOCO_ED * 2, // all but the 2 EDs can't be retrieved. 
0, 0, @@ -252,8 +249,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(v3::Location::try_from(asset_native).unwrap()), - Box::new(v3::Location::try_from(asset_one).unwrap()), + Box::new(Location::try_from(asset_native).unwrap()), + Box::new(Location::try_from(asset_one).unwrap()), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -262,12 +259,12 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = v3::Location::try_from(RelayLocation::get()).unwrap(); - let asset_one = xcm::v3::Location { + let asset_native = Location::try_from(RelayLocation::get()).unwrap(); + let asset_one = Location { parents: 0, interior: [ - xcm::v3::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v3::Junction::GeneralIndex(ASSET_ID.into()), + Junction::PalletInstance(ASSETS_PALLET_ID), + Junction::GeneralIndex(ASSET_ID.into()), ] .into(), }; @@ -296,8 +293,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubRococoSender::get()), - Box::new(asset_native), - Box::new(asset_one), + Box::new(asset_native.clone()), + Box::new(asset_one.clone()), )); assert_expected_events!( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index f74378d7631a..5f9131b8c123 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -141,7 +141,6 @@ fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { ); let (expected_foreign_asset_id, expected_foreign_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); - let expected_foreign_asset_id_v3: v3::Location = expected_foreign_asset_id.try_into().unwrap(); AssetHubRococo::assert_xcmp_queue_success(None); @@ -159,7 +158,7 @@ fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { who: *who == t.receiver.account_id, }, RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { - asset_id: *asset_id == expected_foreign_asset_id_v3, + asset_id: *asset_id == expected_foreign_asset_id, owner: *owner == t.receiver.account_id, amount: *amount == expected_foreign_asset_amount, }, @@ -173,7 +172,6 @@ fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { AssetHubRococo::assert_xcm_pallet_attempted_complete(None); let (expected_foreign_asset_id, expected_foreign_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); - let expected_foreign_asset_id_v3: v3::Location = expected_foreign_asset_id.try_into().unwrap(); assert_expected_events!( AssetHubRococo, vec![ @@ -189,7 +187,7 @@ fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { }, // foreign asset is burned locally as part of teleportation RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { - asset_id: *asset_id == expected_foreign_asset_id_v3, + asset_id: *asset_id == expected_foreign_asset_id, owner: *owner == t.sender.account_id, balance: *balance == 
expected_foreign_asset_amount, }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index 437268a19e59..fe484771931f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -26,10 +26,7 @@ mod imports { }; // Polkadot - pub use xcm::{ - prelude::{AccountId32 as AccountId32Junction, *}, - v3, - }; + pub use xcm::prelude::{AccountId32 as AccountId32Junction, *}; pub use xcm_executor::traits::TransferType; // Cumulus diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index eb0e985cc0ce..ac006653ca67 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -36,21 +36,18 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( AssetHubWestend::sibling_location_of(PenpalA::para_id()), ); - let asset_location_on_penpal = v3::Location::new( + let asset_location_on_penpal = Location::new( 0, - [ - v3::Junction::PalletInstance(ASSETS_PALLET_ID), - v3::Junction::GeneralIndex(ASSET_ID.into()), - ], + [Junction::PalletInstance(ASSETS_PALLET_ID), Junction::GeneralIndex(ASSET_ID.into())], ); let foreign_asset_at_asset_hub = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); // Encoded `create_asset` call to be executed in AssetHub let call = AssetHubWestend::create_foreign_asset_call( - foreign_asset_at_asset_hub, + foreign_asset_at_asset_hub.clone(), ASSET_MIN_BALANCE, para_sovereign_account.clone(), ); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs index cf429378cf6d..1a2821452155 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/swap.rs @@ -18,12 +18,12 @@ use crate::imports::*; #[test] fn swap_locally_on_chain_using_local_assets() { let asset_native = - Box::new(v3::Location::try_from(RelayLocation::get()).expect("conversion works")); - let asset_one = Box::new(v3::Location { + Box::new(Location::try_from(RelayLocation::get()).expect("conversion works")); + let asset_one = Box::new(Location { parents: 0, interior: [ - v3::Junction::PalletInstance(ASSETS_PALLET_ID), - v3::Junction::GeneralIndex(ASSET_ID.into()), + Junction::PalletInstance(ASSETS_PALLET_ID), + Junction::GeneralIndex(ASSET_ID.into()), ] .into(), }); @@ -112,11 +112,11 @@ fn swap_locally_on_chain_using_local_assets() { #[test] fn swap_locally_on_chain_using_foreign_assets() { - let asset_native = Box::new(v3::Location::try_from(RelayLocation::get()).unwrap()); + let asset_native = Box::new(Location::try_from(RelayLocation::get()).unwrap()); let asset_location_on_penpal = - 
v3::Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion_works"); + Location::try_from(PenpalLocalTeleportableToAssetHub::get()).expect("conversion_works"); let foreign_asset_at_asset_hub_westend = - v3::Location::new(1, [v3::Junction::Parachain(PenpalA::para_id().into())]) + Location::new(1, [Junction::Parachain(PenpalA::para_id().into())]) .appended_with(asset_location_on_penpal) .unwrap(); @@ -141,7 +141,7 @@ fn swap_locally_on_chain_using_foreign_assets() { // 1. Mint foreign asset (in reality this should be a teleport or some such) assert_ok!(::ForeignAssets::mint( ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone().into()), - foreign_asset_at_asset_hub_westend, + foreign_asset_at_asset_hub_westend.clone(), sov_penpal_on_ahr.clone().into(), ASSET_HUB_WESTEND_ED * 3_000_000_000_000, )); @@ -157,7 +157,7 @@ fn swap_locally_on_chain_using_foreign_assets() { assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), asset_native.clone(), - Box::new(foreign_asset_at_asset_hub_westend), + Box::new(foreign_asset_at_asset_hub_westend.clone()), )); assert_expected_events!( @@ -171,7 +171,7 @@ fn swap_locally_on_chain_using_foreign_assets() { assert_ok!(::AssetConversion::add_liquidity( ::RuntimeOrigin::signed(sov_penpal_on_ahr.clone()), asset_native.clone(), - Box::new(foreign_asset_at_asset_hub_westend), + Box::new(foreign_asset_at_asset_hub_westend.clone()), 1_000_000_000_000_000, 2_000_000_000_000_000, 0, @@ -189,7 +189,7 @@ fn swap_locally_on_chain_using_foreign_assets() { ); // 4. Swap! - let path = vec![asset_native.clone(), Box::new(foreign_asset_at_asset_hub_westend)]; + let path = vec![asset_native.clone(), Box::new(foreign_asset_at_asset_hub_westend.clone())]; assert_ok!( ::AssetConversion::swap_exact_tokens_for_tokens( @@ -252,8 +252,8 @@ fn cannot_create_pool_from_pool_assets() { assert_matches::assert_matches!( ::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(v3::Location::try_from(asset_native).expect("conversion works")), - Box::new(v3::Location::try_from(asset_one).expect("conversion works")), + Box::new(Location::try_from(asset_native).expect("conversion works")), + Box::new(Location::try_from(asset_one).expect("conversion works")), ), Err(DispatchError::Module(ModuleError{index: _, error: _, message})) => assert_eq!(message, Some("Unknown")) ); @@ -262,12 +262,12 @@ fn cannot_create_pool_from_pool_assets() { #[test] fn pay_xcm_fee_with_some_asset_swapped_for_native() { - let asset_native = v3::Location::try_from(RelayLocation::get()).expect("conversion works"); - let asset_one = xcm::v3::Location { + let asset_native = Location::try_from(RelayLocation::get()).expect("conversion works"); + let asset_one = Location { parents: 0, interior: [ - xcm::v3::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v3::Junction::GeneralIndex(ASSET_ID.into()), + Junction::PalletInstance(ASSETS_PALLET_ID), + Junction::GeneralIndex(ASSET_ID.into()), ] .into(), }; @@ -296,8 +296,8 @@ fn pay_xcm_fee_with_some_asset_swapped_for_native() { assert_ok!(::AssetConversion::create_pool( ::RuntimeOrigin::signed(AssetHubWestendSender::get()), - Box::new(asset_native), - Box::new(asset_one), + Box::new(asset_native.clone()), + Box::new(asset_one.clone()), )); assert_expected_events!( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs 
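Before this Westend teleport hunk (which mirrors the Rococo one earlier), note what the assertion changes amount to: the `ForeignAssets` pallet instance is now keyed by `xcm::v4::Location`, so the `expected_foreign_asset_id_v3` round-trip conversion disappears and the expected id is compared directly. A sketch under assumed event shapes (the struct below is a stand-in, not the pallet's actual type):

use xcm::v4::Location;

// Stand-in for pallet_assets::Event::Issued with a Location-typed asset id.
struct Issued {
    asset_id: Location,
    amount: u128,
}

fn assert_issued(ev: &Issued, expected_id: &Location, expected_amount: u128) {
    // Pre-patch the expectation was first converted:
    //   let v3_id: xcm::v3::Location = expected_id.clone().try_into().unwrap();
    // Post-patch the v4 id is compared as-is.
    assert_eq!(&ev.asset_id, expected_id);
    assert_eq!(ev.amount, expected_amount);
}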
index a524b87b2daf..02a0bc0207bb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -141,7 +141,6 @@ fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { ); let (expected_foreign_asset_id, expected_foreign_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); - let expected_foreign_asset_id_v3: v3::Location = expected_foreign_asset_id.try_into().unwrap(); AssetHubWestend::assert_xcmp_queue_success(None); @@ -159,7 +158,7 @@ fn penpal_to_ah_foreign_assets_receiver_assertions(t: ParaToSystemParaTest) { who: *who == t.receiver.account_id, }, RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, amount }) => { - asset_id: *asset_id == expected_foreign_asset_id_v3, + asset_id: *asset_id == expected_foreign_asset_id, owner: *owner == t.receiver.account_id, amount: *amount == expected_foreign_asset_amount, }, @@ -173,7 +172,6 @@ fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { AssetHubWestend::assert_xcm_pallet_attempted_complete(None); let (expected_foreign_asset_id, expected_foreign_asset_amount) = non_fee_asset(&t.args.assets, t.args.fee_asset_item as usize).unwrap(); - let expected_foreign_asset_id_v3: v3::Location = expected_foreign_asset_id.try_into().unwrap(); assert_expected_events!( AssetHubWestend, vec![ @@ -189,7 +187,7 @@ fn ah_to_penpal_foreign_assets_sender_assertions(t: SystemParaToParaTest) { }, // foreign asset is burned locally as part of teleportation RuntimeEvent::ForeignAssets(pallet_assets::Event::Burned { asset_id, owner, balance }) => { - asset_id: *asset_id == expected_foreign_asset_id_v3, + asset_id: *asset_id == expected_foreign_asset_id, owner: *owner == t.sender.account_id, balance: *balance == expected_foreign_asset_amount, }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 3ee509389c67..0aefe5b6352c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -23,7 +23,8 @@ mod imports { pub use xcm::{ latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, - v3::{self, NetworkId::Westend as WestendId}, + v4, + v4::NetworkId::Westend as WestendId, }; pub use xcm_executor::traits::TransferType; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 6053936487b2..8a674f89c9ef 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -36,10 +36,10 @@ fn send_assets_over_bridge(send_fn: F) { fn set_up_rocs_for_penpal_rococo_through_ahr_to_ahw( sender: &AccountId, amount: u128, -) -> (Location, v3::Location) { +) -> (Location, v4::Location) { let roc_at_rococo_parachains = roc_at_ah_rococo(); - let roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); - create_foreign_on_ah_westend(roc_at_asset_hub_westend, true); + let 
roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); + create_foreign_on_ah_westend(roc_at_asset_hub_westend.clone(), true); let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); @@ -121,11 +121,11 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { let amount = ASSET_HUB_ROCOCO_ED * 1_000_000; let sender = AssetHubRococoSender::get(); let receiver = AssetHubWestendReceiver::get(); - let roc_at_asset_hub_rococo: v3::Location = roc_at_ah_rococo().try_into().unwrap(); - let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend().try_into().unwrap(); + let roc_at_asset_hub_rococo = roc_at_ah_rococo(); + let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); - create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend, true); - set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend); + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), true); + set_up_pool_with_wnd_on_ah_westend(bridged_roc_at_asset_hub_westend.clone()); //////////////////////////////////////////////////////////// // Let's first send over just some ROCs as a simple example @@ -138,12 +138,13 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { ::account_data_of(sov_ahw_on_ahr.clone()).free; let sender_rocs_before = ::account_data_of(sender.clone()).free; let receiver_rocs_before = - foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &receiver); + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), &receiver); // send ROCs, use them for fees send_assets_over_bridge(|| { let destination = asset_hub_westend_location(); - let assets: Assets = (Location::try_from(roc_at_asset_hub_rococo).unwrap(), amount).into(); + let assets: Assets = + (Location::try_from(roc_at_asset_hub_rococo.clone()).unwrap(), amount).into(); let fee_idx = 0; assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); }); @@ -185,9 +186,9 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { ///////////////////////////////////////////////////////////// let usdt_at_asset_hub_rococo = usdt_at_ah_rococo(); - let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend(); // wETH has same relative location on both Rococo and Westend AssetHubs - let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); + let bridged_weth_at_ah = weth_at_asset_hubs(); // mint USDT in sender's account (USDT already created in genesis) AssetHubRococo::mint_asset( @@ -197,19 +198,23 @@ fn send_roc_usdt_and_weth_from_asset_hub_rococo_to_asset_hub_westend() { amount * 2, ); // create wETH at src and dest and prefund sender's account - create_foreign_on_ah_rococo(bridged_weth_at_ah, true, vec![(sender.clone(), amount * 2)]); - create_foreign_on_ah_westend(bridged_weth_at_ah, true); - create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true); - set_up_pool_with_wnd_on_ah_westend(bridged_usdt_at_asset_hub_westend); + create_foreign_on_ah_rococo( + bridged_weth_at_ah.clone(), + true, + vec![(sender.clone(), amount * 2)], + ); + create_foreign_on_ah_westend(bridged_weth_at_ah.clone(), true); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), true); + set_up_pool_with_wnd_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone()); let receiver_usdts_before = - 
foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend, &receiver); - let receiver_weth_before = foreign_balance_on_ah_westend(bridged_weth_at_ah, &receiver); + foreign_balance_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), &receiver); + let receiver_weth_before = foreign_balance_on_ah_westend(bridged_weth_at_ah.clone(), &receiver); // send USDTs and wETHs let assets: Assets = vec![ (usdt_at_asset_hub_rococo.clone(), amount).into(), - (Location::try_from(bridged_weth_at_ah).unwrap(), amount).into(), + (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount).into(), ] .into(); // use USDT for fees @@ -258,9 +263,8 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { let sender = AssetHubRococoSender::get(); let receiver = AssetHubWestendReceiver::get(); let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); - let wnd_at_asset_hub_rococo_v3 = wnd_at_asset_hub_rococo.clone().try_into().unwrap(); let prefund_accounts = vec![(sender.clone(), prefund_amount)]; - create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo_v3, true, prefund_accounts); + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true, prefund_accounts); // fund the AHR's SA on AHW with the WND tokens held in reserve let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( @@ -273,14 +277,14 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { ::account_data_of(sov_ahr_on_ahw.clone()).free; assert_eq!(wnds_in_reserve_on_ahw_before, prefund_amount); - let sender_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); + let sender_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), &sender); assert_eq!(sender_wnds_before, prefund_amount); let receiver_wnds_before = ::account_data_of(receiver.clone()).free; // send back WNDs, use them for fees send_assets_over_bridge(|| { let destination = asset_hub_westend_location(); - let assets: Assets = (wnd_at_asset_hub_rococo, amount_to_send).into(); + let assets: Assets = (wnd_at_asset_hub_rococo.clone(), amount_to_send).into(); let fee_idx = 0; assert_ok!(send_assets_from_asset_hub_rococo(destination, assets, fee_idx)); }); @@ -309,7 +313,7 @@ fn send_back_wnds_from_asset_hub_rococo_to_asset_hub_westend() { ); }); - let sender_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo_v3, &sender); + let sender_wnds_after = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &sender); let receiver_wnds_after = ::account_data_of(receiver).free; let wnds_in_reserve_on_ahw_after = ::account_data_of(sov_ahr_on_ahw).free; @@ -341,7 +345,8 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() type ForeignAssets = ::ForeignAssets; >::balance(roc_at_rococo_parachains.clone(), &sender) }); - let receiver_rocs_before = foreign_balance_on_ah_westend(roc_at_asset_hub_westend, &receiver); + let receiver_rocs_before = + foreign_balance_on_ah_westend(roc_at_asset_hub_westend.clone(), &receiver); // Send ROCs over bridge { @@ -372,7 +377,7 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() vec![ // issue ROCs on AHW RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == roc_at_rococo_parachains.clone().try_into().unwrap(), + asset_id: *asset_id == roc_at_rococo_parachains.clone(), owner: owner == &receiver, }, // message processed successfully @@ -403,7 +408,6 @@ fn send_rocs_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() #[test] fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_westend() { let wnd_at_rococo_parachains = bridged_wnd_at_ah_rococo(); - let wnd_at_rococo_parachains_v3 = wnd_at_rococo_parachains.clone().try_into().unwrap(); let amount = ASSET_HUB_ROCOCO_ED * 10_000_000; let sender = PenpalASender::get(); let receiver = AssetHubWestendReceiver::get(); @@ -416,7 +420,7 @@ fn send_back_wnds_from_penpal_rococo_through_asset_hub_rococo_to_asset_hub_weste let penpal_location = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location); let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; - create_foreign_on_ah_rococo(wnd_at_rococo_parachains_v3, true, prefund_accounts); + create_foreign_on_ah_rococo(wnd_at_rococo_parachains.clone(), true, prefund_accounts); let asset_owner: AccountId = AssetHubRococo::account_id_of(ALICE); PenpalA::force_create_foreign_asset( wnd_at_rococo_parachains.clone(), diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index ceccf98a0240..6ce8ecef0df3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -69,7 +69,7 @@ pub(crate) fn weth_at_asset_hubs() -> Location { } pub(crate) fn create_foreign_on_ah_rococo( - id: v3::Location, + id: v4::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -78,18 +78,18 @@ pub(crate) fn create_foreign_on_ah_rococo( AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); } -pub(crate) fn create_foreign_on_ah_westend(id: v3::Location, sufficient: bool) { +pub(crate) fn create_foreign_on_ah_westend(id: v4::Location, sufficient: bool) { let owner = AssetHubWestend::account_id_of(ALICE); AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) @@ -97,8 +97,8 @@ pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) - } // set up pool -pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v3::Location) { - let wnd: v3::Location = v3::Parent.into(); +pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: v4::Location) { + let wnd: v4::Location = v4::Parent.into(); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; let owner = AssetHubWestendSender::get(); @@ -106,14 +106,14 @@ pub(crate) fn set_up_pool_with_wnd_on_ah_westend(foreign_asset: 
v3::Location) { assert_ok!(::ForeignAssets::mint( signed_owner.clone(), - foreign_asset.into(), + foreign_asset.clone().into(), owner.clone().into(), 3_000_000_000_000, )); assert_ok!(::AssetConversion::create_pool( signed_owner.clone(), - Box::new(wnd), - Box::new(foreign_asset), + Box::new(wnd.clone()), + Box::new(foreign_asset.clone()), )); assert_expected_events!( AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs index 652447fa5601..3f2038b4bdd1 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/send_xcm.rs @@ -29,7 +29,7 @@ fn send_xcm_from_rococo_relay_to_westend_asset_hub_should_fail_on_not_applicable let xcm = VersionedXcm::from(Xcm(vec![ UnpaidExecution { weight_limit, check_origin }, ExportMessage { - network: WestendId.into(), + network: WestendId, destination: [Parachain(AssetHubWestend::para_id().into())].into(), xcm: remote_xcm, }, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 782b83bac475..3262cc17ba56 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -23,8 +23,7 @@ mod imports { pub use xcm::{ latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, - v3, - v4::NetworkId::Rococo as RococoId, + v4::{self, NetworkId::Rococo as RococoId}, }; pub use xcm_executor::traits::TransferType; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 0c0b04cd45a9..e2c496802e2c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -35,10 +35,10 @@ fn send_assets_over_bridge(send_fn: F) { fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr( sender: &AccountId, amount: u128, -) -> (Location, v3::Location) { +) -> (Location, v4::Location) { let wnd_at_westend_parachains = wnd_at_ah_westend(); - let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); - create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo, true); + let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); + create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true); let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); let sov_penpal_on_ahw = AssetHubWestend::sovereign_account_id_of(penpal_location); @@ -116,10 +116,10 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { let sender = AssetHubWestendSender::get(); let receiver = AssetHubRococoReceiver::get(); let wnd_at_asset_hub_westend = wnd_at_ah_westend(); - let bridged_wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo().try_into().unwrap(); - create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + let bridged_wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); + 
create_foreign_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), true); - set_up_pool_with_roc_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, true); + set_up_pool_with_roc_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), true); let sov_ahr_on_ahw = AssetHubWestend::sovereign_account_of_parachain_on_other_global_consensus( Rococo, @@ -129,7 +129,7 @@ fn send_wnds_from_asset_hub_westend_to_asset_hub_rococo() { ::account_data_of(sov_ahr_on_ahw.clone()).free; let sender_wnds_before = ::account_data_of(sender.clone()).free; let receiver_wnds_before = - foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo, &receiver); + foreign_balance_on_ah_rococo(bridged_wnd_at_asset_hub_rococo.clone(), &receiver); // send WNDs, use them for fees send_assets_over_bridge(|| { @@ -187,10 +187,8 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { let sender = AssetHubWestendSender::get(); let receiver = AssetHubRococoReceiver::get(); let bridged_roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); - let bridged_roc_at_asset_hub_westend_v3 = - bridged_roc_at_asset_hub_westend.clone().try_into().unwrap(); let prefund_accounts = vec![(sender.clone(), prefund_amount)]; - create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, true, prefund_accounts); + create_foreign_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), true, prefund_accounts); //////////////////////////////////////////////////////////// // Let's first send back just some ROCs as a simple example @@ -208,14 +206,14 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { assert_eq!(rocs_in_reserve_on_ahr_before, prefund_amount); let sender_rocs_before = - foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend.clone(), &sender); assert_eq!(sender_rocs_before, prefund_amount); let receiver_rocs_before = ::account_data_of(receiver.clone()).free; // send back ROCs, use them for fees send_assets_over_bridge(|| { let destination = asset_hub_rococo_location(); - let assets: Assets = (bridged_roc_at_asset_hub_westend, amount_to_send).into(); + let assets: Assets = (bridged_roc_at_asset_hub_westend.clone(), amount_to_send).into(); let fee_idx = 0; assert_ok!(send_assets_from_asset_hub_westend(destination, assets, fee_idx)); }); @@ -245,7 +243,7 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { }); let sender_rocs_after = - foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend_v3, &sender); + foreign_balance_on_ah_westend(bridged_roc_at_asset_hub_westend, &sender); let receiver_rocs_after = ::account_data_of(receiver.clone()).free; let rocs_in_reserve_on_ahr_after = ::account_data_of(sov_ahw_on_ahr.clone()).free; @@ -262,14 +260,14 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { ////////////////////////////////////////////////////////////////// // wETH has same relative location on both Rococo and Westend AssetHubs - let bridged_weth_at_ah = weth_at_asset_hubs().try_into().unwrap(); - let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend().try_into().unwrap(); + let bridged_weth_at_ah = weth_at_asset_hubs(); + let bridged_usdt_at_asset_hub_westend = bridged_usdt_at_ah_westend(); // set up destination chain AH Rococo: // create a ROC/USDT pool to be able to pay fees with USDT (USDT created in genesis) - set_up_pool_with_roc_on_ah_rococo(usdt_at_ah_rococo().try_into().unwrap(), false); + 
set_up_pool_with_roc_on_ah_rococo(usdt_at_ah_rococo(), false); // create wETH on Rococo (IRL it's already created by Snowbridge) - create_foreign_on_ah_rococo(bridged_weth_at_ah, true); + create_foreign_on_ah_rococo(bridged_weth_at_ah.clone(), true); // prefund AHW's sovereign account on AHR to be able to withdraw USDT and wETH from reserves let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( Westend, @@ -283,7 +281,7 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { ); AssetHubRococo::mint_foreign_asset( ::RuntimeOrigin::signed(AssetHubRococo::account_id_of(ALICE)), - bridged_weth_at_ah, + bridged_weth_at_ah.clone(), sov_ahw_on_ahr, amount_to_send * 2, ); @@ -291,21 +289,21 @@ fn send_back_rocs_usdt_and_weth_from_asset_hub_westend_to_asset_hub_rococo() { // set up source chain AH Westend: // create wETH and USDT foreign assets on Westend and prefund sender's account let prefund_accounts = vec![(sender.clone(), amount_to_send * 2)]; - create_foreign_on_ah_westend(bridged_weth_at_ah, true, prefund_accounts.clone()); - create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend, true, prefund_accounts); + create_foreign_on_ah_westend(bridged_weth_at_ah.clone(), true, prefund_accounts.clone()); + create_foreign_on_ah_westend(bridged_usdt_at_asset_hub_westend.clone(), true, prefund_accounts); // check balances before let receiver_usdts_before = AssetHubRococo::execute_with(|| { type Assets = ::Assets; >::balance(USDT_ID, &receiver) }); - let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah, &receiver); + let receiver_weth_before = foreign_balance_on_ah_rococo(bridged_weth_at_ah.clone(), &receiver); let usdt_id: AssetId = Location::try_from(bridged_usdt_at_asset_hub_westend).unwrap().into(); // send USDTs and wETHs let assets: Assets = vec![ (usdt_id.clone(), amount_to_send).into(), - (Location::try_from(bridged_weth_at_ah).unwrap(), amount_to_send).into(), + (Location::try_from(bridged_weth_at_ah.clone()).unwrap(), amount_to_send).into(), ] .into(); // use USDT for fees @@ -367,7 +365,8 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() type ForeignAssets = ::ForeignAssets; >::balance(wnd_at_westend_parachains.clone(), &sender) }); - let receiver_wnds_before = foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo, &receiver); + let receiver_wnds_before = + foreign_balance_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), &receiver); // Send WNDs over bridge { @@ -398,7 +397,7 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() vec![ // issue WNDs on AHR RuntimeEvent::ForeignAssets(pallet_assets::Event::Issued { asset_id, owner, .. 
}) => { - asset_id: *asset_id == wnd_at_westend_parachains.clone().try_into().unwrap(), + asset_id: *asset_id == wnd_at_westend_parachains.clone(), owner: owner == &receiver, }, // message processed successfully @@ -429,7 +428,6 @@ fn send_wnds_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() #[test] fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_rococo() { let roc_at_westend_parachains = bridged_roc_at_ah_westend(); - let roc_at_westend_parachains_v3 = roc_at_westend_parachains.clone().try_into().unwrap(); let amount = ASSET_HUB_WESTEND_ED * 10_000_000; let sender = PenpalBSender::get(); let receiver = AssetHubRococoReceiver::get(); @@ -442,7 +440,7 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc let penpal_location = AssetHubWestend::sibling_location_of(PenpalB::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location); let prefund_accounts = vec![(sov_penpal_on_ahr, amount * 2)]; - create_foreign_on_ah_westend(roc_at_westend_parachains_v3, true, prefund_accounts); + create_foreign_on_ah_westend(roc_at_westend_parachains.clone(), true, prefund_accounts); let asset_owner: AccountId = AssetHubWestend::account_id_of(ALICE); PenpalB::force_create_foreign_asset( roc_at_westend_parachains.clone(), diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index 87ae9aedd6f1..bf894a3baf58 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -69,13 +69,13 @@ pub(crate) fn weth_at_asset_hubs() -> Location { ) } -pub(crate) fn create_foreign_on_ah_rococo(id: v3::Location, sufficient: bool) { +pub(crate) fn create_foreign_on_ah_rococo(id: v4::Location, sufficient: bool) { let owner = AssetHubRococo::account_id_of(ALICE); AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } pub(crate) fn create_foreign_on_ah_westend( - id: v3::Location, + id: v4::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -84,13 +84,13 @@ pub(crate) fn create_foreign_on_ah_westend( AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v3::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) @@ -98,8 +98,8 @@ pub(crate) fn foreign_balance_on_ah_westend(id: v3::Location, who: &AccountId) - } // set up pool -pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v3::Location, is_foreign: bool) { - let roc: v3::Location = v3::Parent.into(); +pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v4::Location, is_foreign: bool) { + let roc: v4::Location = v4::Parent.into(); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; let owner = AssetHubRococoSender::get(); @@ -108,13 +108,13 @@ pub(crate) fn 
set_up_pool_with_roc_on_ah_rococo(asset: v3::Location, is_foreign: if is_foreign { assert_ok!(::ForeignAssets::mint( signed_owner.clone(), - asset.into(), + asset.clone().into(), owner.clone().into(), 3_000_000_000_000, )); } else { - let asset_id = match asset.interior.split_last() { - (_, Some(v3::Junction::GeneralIndex(id))) => id as u32, + let asset_id = match asset.interior.last() { + Some(v4::Junction::GeneralIndex(id)) => *id as u32, _ => unreachable!(), }; assert_ok!(::Assets::mint( @@ -126,8 +126,8 @@ pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v3::Location, is_foreign: } assert_ok!(::AssetConversion::create_pool( signed_owner.clone(), - Box::new(roc), - Box::new(asset), + Box::new(roc.clone()), + Box::new(asset.clone()), )); assert_expected_events!( AssetHubRococo, diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 7414adacc448..4c7356707ab6 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -83,10 +83,13 @@ use sp_runtime::{Perbill, RuntimeDebug}; use testnet_parachains_constants::rococo::{consensus::*, currency::*, fee::WeightToFee, time::*}; use xcm_config::{ ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, GovernanceLocation, - PoolAssetsConvertedConcreteId, TokenLocation, TokenLocationV3, - TrustBackedAssetsConvertedConcreteId, TrustBackedAssetsPalletLocationV3, + PoolAssetsConvertedConcreteId, TokenLocation, TrustBackedAssetsConvertedConcreteId, + TrustBackedAssetsPalletLocation, }; +#[cfg(test)] +mod tests; + #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; @@ -324,11 +327,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v3::Location, + xcm::v4::Location, >, - xcm::v3::Location, + xcm::v4::Location, AccountId, >; @@ -336,25 +339,25 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< pub type NativeAndAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v3::Location, + TargetFromLeft, + xcm::v4::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v3::Location, xcm::v3::Location), + (xcm::v4::Location, xcm::v4::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v3::Location; + type AssetKind = xcm::v4::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< - TokenLocationV3, + TokenLocation, AccountId, Self::AssetKind, PoolIdToAccountId, @@ -362,7 +365,7 @@ impl pallet_asset_conversion::Config for Runtime { type PoolAssetId = u32; type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeAsset = TokenLocationV3; + type PoolSetupFeeAsset = TokenLocation; type PoolSetupFeeTarget = ResolveAssetTo; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; type LPFee = ConstU32<3>; @@ -372,10 +375,10 @@ impl pallet_asset_conversion::Config for Runtime { type WeightInfo = weights::pallet_asset_conversion::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type 
BenchmarkHelper = assets_common::benchmarks::AssetPairFactory< - TokenLocationV3, + TokenLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v3::Location, + xcm::v4::Location, >; } @@ -409,17 +412,17 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v3::Location; - type AssetIdParameter = xcm::v3::Location; + type AssetId = xcm::v4::Location; + type AssetIdParameter = xcm::v4::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v3::Location>, - FromNetwork, + FromSiblingParachain, xcm::v4::Location>, + FromNetwork, ), ForeignCreatorsSovereignAccountOf, AccountId, - xcm::v3::Location, + xcm::v4::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -804,9 +807,9 @@ parameter_types! { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v3::Location; + type AssetId = xcm::v4::Location; type OnChargeAssetTransaction = SwapAssetAdapter< - TokenLocationV3, + TokenLocation, NativeAndAssets, AssetConversion, ResolveAssetTo, @@ -1019,7 +1022,6 @@ pub type SignedExtra = ( pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Migrations to apply on runtime upgrade. -#[allow(deprecated)] pub type Migrations = ( InitStorageVersions, // unreleased @@ -1234,16 +1236,16 @@ impl_runtime_apis! { impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v3::Location, + xcm::v4::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v3::Location, asset2: xcm::v3::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v3::Location, asset2: xcm::v3::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v3::Location, asset2: xcm::v3::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } @@ -1781,64 +1783,3 @@ cumulus_pallet_parachain_system::register_validate_block! { Runtime = Runtime, BlockExecutor = cumulus_pallet_aura_ext::BlockExecutor::, } - -#[cfg(test)] -mod tests { - use super::*; - use crate::{CENTS, MILLICENTS}; - use sp_runtime::traits::Zero; - use sp_weights::WeightToFee; - use testnet_parachains_constants::rococo::fee; - - /// We can fit at least 1000 transfers in a block. 
-	#[test]
-	fn sane_block_weight() {
-		use pallet_balances::WeightInfo;
-		let block = RuntimeBlockWeights::get().max_block;
-		let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic;
-		let transfer =
-			base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death();
-
-		let fit = block.checked_div_per_component(&transfer).unwrap_or_default();
-		assert!(fit >= 1000, "{} should be at least 1000", fit);
-	}
-
-	/// The fee for one transfer is at most 1 CENT.
-	#[test]
-	fn sane_transfer_fee() {
-		use pallet_balances::WeightInfo;
-		let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic;
-		let transfer =
-			base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death();
-
-		let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer);
-		assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS);
-	}
-
-	/// Weight is being charged for both dimensions.
-	#[test]
-	fn weight_charged_for_both_components() {
-		let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0));
-		assert!(!fee.is_zero(), "Charges for ref time");
-
-		let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000));
-		assert_eq!(fee, CENTS, "10kb maps to CENT");
-	}
-
-	/// Filling up a block by proof size is at most 30 times more expensive than ref time.
-	///
-	/// This is just a sanity check.
-	#[test]
-	fn full_block_fee_ratio() {
-		let block = RuntimeBlockWeights::get().max_block;
-		let time_fee: Balance =
-			fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0));
-		let proof_fee: Balance =
-			fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size()));
-
-		let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default();
-		assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time);
-		let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default();
-		assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof);
-	}
-}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/tests/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/tests/mod.rs
new file mode 100644
index 000000000000..12c0bc4e1688
--- /dev/null
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/tests/mod.rs
@@ -0,0 +1,72 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: Apache-2.0
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! # Tests for the Rococo runtime.
+
+use super::*;
+use crate::{CENTS, MILLICENTS};
+use sp_runtime::traits::Zero;
+use sp_weights::WeightToFee;
+use testnet_parachains_constants::rococo::fee;
+
+/// We can fit at least 1000 transfers in a block.
+#[test]
+fn sane_block_weight() {
+	use pallet_balances::WeightInfo;
+	let block = RuntimeBlockWeights::get().max_block;
+	let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic;
+	let transfer = base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death();
+
+	let fit = block.checked_div_per_component(&transfer).unwrap_or_default();
+	assert!(fit >= 1000, "{} should be at least 1000", fit);
+}
+
+/// The fee for one transfer is at most 1 CENT.
+#[test]
+fn sane_transfer_fee() {
+	use pallet_balances::WeightInfo;
+	let base = RuntimeBlockWeights::get().get(DispatchClass::Normal).base_extrinsic;
+	let transfer = base + weights::pallet_balances::WeightInfo::<Runtime>::transfer_allow_death();
+
+	let fee: Balance = fee::WeightToFee::weight_to_fee(&transfer);
+	assert!(fee <= CENTS, "{} MILLICENTS should be at most 1000", fee / MILLICENTS);
+}
+
+/// Weight is being charged for both dimensions.
+#[test]
+fn weight_charged_for_both_components() {
+	let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(10_000, 0));
+	assert!(!fee.is_zero(), "Charges for ref time");
+
+	let fee: Balance = fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, 10_000));
+	assert_eq!(fee, CENTS, "10kb maps to CENT");
+}
+
+/// Filling up a block by proof size is at most 30 times more expensive than ref time.
+///
+/// This is just a sanity check.
+#[test]
+fn full_block_fee_ratio() {
+	let block = RuntimeBlockWeights::get().max_block;
+	let time_fee: Balance =
+		fee::WeightToFee::weight_to_fee(&Weight::from_parts(block.ref_time(), 0));
+	let proof_fee: Balance =
+		fee::WeightToFee::weight_to_fee(&Weight::from_parts(0, block.proof_size()));
+
+	let proof_o_time = proof_fee.checked_div(time_fee).unwrap_or_default();
+	assert!(proof_o_time <= 30, "{} should be at most 30", proof_o_time);
+	let time_o_proof = time_fee.checked_div(proof_fee).unwrap_or_default();
+	assert!(time_o_proof <= 30, "{} should be at most 30", time_o_proof);
+}
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
index a11dca4f6d7c..2d1914e059bf 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs
@@ -65,7 +65,6 @@ use xcm_executor::XcmExecutor;
 
 parameter_types! {
 	pub const TokenLocation: Location = Location::parent();
-	pub const TokenLocationV3: xcm::v3::Location = xcm::v3::Location::parent();
 	pub const RelayNetwork: NetworkId = NetworkId::Rococo;
 	pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into();
 	pub UniversalLocation: InteriorLocation =
@@ -74,8 +73,6 @@ parameter_types! {
 	pub TrustBackedAssetsPalletLocation: Location =
 		PalletInstance(TrustBackedAssetsPalletIndex::get()).into();
 	pub TrustBackedAssetsPalletIndex: u8 = <Assets as PalletInfoAccess>::index() as u8;
-	pub TrustBackedAssetsPalletLocationV3: xcm::v3::Location =
-		xcm::v3::Junction::PalletInstance(<Assets as PalletInfoAccess>::index() as u8).into();
 	pub ForeignAssetsPalletLocation: Location =
 		PalletInstance(<ForeignAssets as PalletInfoAccess>::index() as u8).into();
 	pub PoolAssetsPalletLocation: Location =
@@ -177,7 +174,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte
 		StartsWithExplicitGlobalConsensus<UniversalLocationNetworkId>,
 	),
 	Balance,
-	xcm::v3::Location,
+	xcm::v4::Location,
 >;
 
 /// Means for transacting foreign assets from different global consensus.
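Aside (editor's sketch, not part of the patch): the pattern driving the hunks above and below is that asset ids which used to be stored as `xcm::v3::Location` are now `xcm::v4::Location` end to end, so the fallible `try_into().unwrap()` shims at v4 call sites disappear. The trade-off is that the v4 `Location` is `Clone` but, unlike v3, not `Copy`, which is why `.clone()` now appears wherever an id is used more than once. A minimal illustration, assuming only the `xcm` crate; the variable names are illustrative:

use xcm::v4::{Junction, Location};

fn main() {
	// Before: storage was keyed by v3, so every v4 call site needed
	// `let id_v3: xcm::v3::Location = id.clone().try_into().unwrap();`.
	// After: the v4 value is passed straight through.
	let id = Location::new(1, [Junction::Parachain(1234), Junction::GeneralIndex(12345)]);

	// v4 `Location` is not `Copy`, so reusing it means cloning.
	let for_create = id.clone();
	let for_balance_check = id;
	assert_eq!(for_create, for_balance_check);
}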
@@ -361,7 +358,7 @@ impl xcm_executor::Config for XcmConfig { ResolveTo, >, cumulus_primitives_utility::SwapFirstAssetTrader< - TokenLocationV3, + TokenLocation, crate::AssetConversion, WeightToFee, crate::NativeAndAssets, @@ -369,7 +366,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v3::Location, + xcm::v4::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -502,9 +499,9 @@ pub type ForeignCreatorsSovereignAccountOf = ( /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v3::Location { - xcm::v3::Location::new(1, [xcm::v3::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v4::Location { + xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)]) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs index ee1461b7f9c8..83f4f9ec3dc5 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/tests/tests.rs @@ -85,7 +85,7 @@ fn slot_durations() -> SlotDurations { fn setup_pool_for_paying_fees_with_foreign_assets( (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( AccountId, - xcm::v3::Location, + Location, Balance, ), ) { @@ -93,7 +93,7 @@ fn setup_pool_for_paying_fees_with_foreign_assets( // setup a pool to pay fees with `foreign_asset_id_location` tokens let pool_owner: AccountId = [14u8; 32].into(); - let native_asset = xcm::v3::Location::parent(); + let native_asset = Location::parent(); let pool_liquidity: Balance = existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); @@ -105,15 +105,15 @@ fn setup_pool_for_paying_fees_with_foreign_assets( assert_ok!(ForeignAssets::mint( RuntimeOrigin::signed(foreign_asset_owner), - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), pool_owner.clone().into(), (foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(), )); assert_ok!(AssetConversion::create_pool( RuntimeOrigin::signed(pool_owner.clone()), - Box::new(native_asset.into()), - Box::new(foreign_asset_id_location.into()) + Box::new(native_asset.clone().into()), + Box::new(foreign_asset_id_location.clone().into()) )); assert_ok!(AssetConversion::add_liquidity( @@ -217,24 +217,14 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), - Box::new( - xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") - ), - Box::new( - xcm::v3::Location::try_from(asset_1_location.clone()) - .expect("conversion works") - ) + Box::new(Location::try_from(native_location.clone()).expect("conversion works")), + Box::new(Location::try_from(asset_1_location.clone()).expect("conversion works")) )); assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), - Box::new( - xcm::v3::Location::try_from(native_location.clone()).expect("conversion works") - ), - Box::new( - xcm::v3::Location::try_from(asset_1_location.clone()) - .expect("conversion works") - ), + Box::new(Location::try_from(native_location.clone()).expect("conversion works")), + 
Box::new(Location::try_from(asset_1_location.clone()).expect("conversion works")), pool_liquidity, pool_liquidity, 1, @@ -270,8 +260,8 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); let (reserve1, reserve2) = AssetConversion::get_reserves( - xcm::v3::Location::try_from(native_location).expect("conversion works"), - xcm::v3::Location::try_from(asset_1_location.clone()).expect("conversion works"), + Location::try_from(native_location).expect("conversion works"), + Location::try_from(asset_1_location.clone()).expect("conversion works"), ) .unwrap(); let asset_refund = @@ -309,14 +299,10 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); let native_location = - xcm::v3::Location::try_from(TokenLocation::get()).expect("conversion works"); - let foreign_location = xcm::v3::Location { + Location::try_from(TokenLocation::get()).expect("conversion works"); + let foreign_location = Location { parents: 1, - interior: ( - xcm::v3::Junction::Parachain(1234), - xcm::v3::Junction::GeneralIndex(12345), - ) - .into(), + interior: (Junction::Parachain(1234), Junction::GeneralIndex(12345)).into(), }; // bob's initial balance for native and `asset1` assets. let initial_balance = 200 * UNITS; @@ -325,26 +311,26 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { // init asset, balances and pool. assert_ok!(>::create( - foreign_location, + foreign_location.clone(), bob.clone(), true, 10 )); - assert_ok!(ForeignAssets::mint_into(foreign_location, &bob, initial_balance)); + assert_ok!(ForeignAssets::mint_into(foreign_location.clone(), &bob, initial_balance)); assert_ok!(Balances::mint_into(&bob, initial_balance)); assert_ok!(Balances::mint_into(&staking_pot, initial_balance)); assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(foreign_location) + Box::new(native_location.clone()), + Box::new(foreign_location.clone()) )); assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), - Box::new(native_location), - Box::new(foreign_location), + Box::new(native_location.clone()), + Box::new(foreign_location.clone()), pool_liquidity, pool_liquidity, 1, @@ -353,11 +339,9 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { )); // keep initial total issuance to assert later. - let asset_total_issuance = ForeignAssets::total_issuance(foreign_location); + let asset_total_issuance = ForeignAssets::total_issuance(foreign_location.clone()); let native_total_issuance = Balances::total_issuance(); - let foreign_location_latest: Location = foreign_location.try_into().unwrap(); - // prepare input to buy weight. let weight = Weight::from_parts(4_000_000_000, 0); let fee = WeightToFee::weight_to_fee(&weight); @@ -365,7 +349,7 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { AssetConversion::get_amount_in(&fee, &pool_liquidity, &pool_liquidity).unwrap(); let extra_amount = 100; let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; - let payment: Asset = (foreign_location_latest.clone(), asset_fee + extra_amount).into(); + let payment: Asset = (foreign_location.clone(), asset_fee + extra_amount).into(); // init trader and buy weight. 
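		// ---- Aside (editor's sketch, not part of the patch) ----
		// The assertions around this point lean on the weight-trader contract:
		// buying weight keeps exactly the computed fee and hands back the
		// overpayment, and refunding returns fee for weight that went unused.
		// A self-contained mock of that contract, assuming a linear
		// one-unit-per-ref-time fee; all names below are illustrative:
		//
		//     struct MockTrader {
		//         banked_fee: u128,
		//     }
		//
		//     impl MockTrader {
		//         fn weight_to_fee(ref_time: u64) -> u128 {
		//             ref_time as u128
		//         }
		//
		//         // Returns the unused part of `payment`, or `Err` if the fee is not covered.
		//         fn buy_weight(&mut self, ref_time: u64, payment: u128) -> Result<u128, ()> {
		//             let fee = Self::weight_to_fee(ref_time);
		//             let unused = payment.checked_sub(fee).ok_or(())?;
		//             self.banked_fee += fee;
		//             Ok(unused)
		//         }
		//
		//         // Gives back the fee for unused weight, capped by what was banked.
		//         fn refund_weight(&mut self, ref_time: u64) -> u128 {
		//             let refund = Self::weight_to_fee(ref_time).min(self.banked_fee);
		//             self.banked_fee -= refund;
		//             refund
		//         }
		//     }
		//
		//     let mut trader = MockTrader { banked_fee: 0 };
		//     // Overpay by 100, as the surrounding test does with `extra_amount`.
		//     assert_eq!(trader.buy_weight(4_000, 4_100), Ok(100));
		//     // Refund a quarter of the weight; the banked fee shrinks accordingly.
		//     assert_eq!(trader.refund_weight(1_000), 1_000);
		//     assert_eq!(trader.banked_fee, 3_000);
		// ---- end aside; the test resumes below ----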
let mut trader = ::Trader::new(); @@ -373,13 +357,11 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { trader.buy_weight(weight, payment.into(), &ctx).expect("Expected Ok"); // assert. - let unused_amount = unused_asset - .fungible - .get(&foreign_location_latest.clone().into()) - .map_or(0, |a| *a); + let unused_amount = + unused_asset.fungible.get(&foreign_location.clone().into()).map_or(0, |a| *a); assert_eq!(unused_amount, extra_amount); assert_eq!( - ForeignAssets::total_issuance(foreign_location), + ForeignAssets::total_issuance(foreign_location.clone()), asset_total_issuance + asset_fee ); @@ -387,13 +369,13 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); let (reserve1, reserve2) = - AssetConversion::get_reserves(native_location, foreign_location).unwrap(); + AssetConversion::get_reserves(native_location, foreign_location.clone()).unwrap(); let asset_refund = AssetConversion::get_amount_out(&refund, &reserve1, &reserve2).unwrap(); // refund. let actual_refund = trader.refund_weight(refund_weight, &ctx).unwrap(); - assert_eq!(actual_refund, (foreign_location_latest, asset_refund).into()); + assert_eq!(actual_refund, (foreign_location.clone(), asset_refund).into()); // assert. assert_eq!(Balances::balance(&staking_pot), initial_balance); @@ -500,17 +482,13 @@ fn test_foreign_asset_xcm_take_first_trader() { .execute_with(|| { // We need root origin to create a sufficient asset let minimum_asset_balance = 3333333_u128; - let foreign_location = xcm::v3::Location { + let foreign_location = Location { parents: 1, - interior: ( - xcm::v3::Junction::Parachain(1234), - xcm::v3::Junction::GeneralIndex(12345), - ) - .into(), + interior: (Junction::Parachain(1234), Junction::GeneralIndex(12345)).into(), }; assert_ok!(ForeignAssets::force_create( RuntimeHelper::root_origin(), - foreign_location.into(), + foreign_location.clone().into(), AccountId::from(ALICE).into(), true, minimum_asset_balance @@ -519,13 +497,11 @@ fn test_foreign_asset_xcm_take_first_trader() { // We first mint enough asset for the account to exist for assets assert_ok!(ForeignAssets::mint( RuntimeHelper::origin_of(AccountId::from(ALICE)), - foreign_location.into(), + foreign_location.clone().into(), AccountId::from(ALICE).into(), minimum_asset_balance )); - let asset_location_v4: Location = foreign_location.try_into().unwrap(); - // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -535,7 +511,7 @@ fn test_foreign_asset_xcm_take_first_trader() { // Lets calculate amount needed let asset_amount_needed = ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles( - foreign_location, + foreign_location.clone(), bought ) .expect("failed to compute"); @@ -543,7 +519,7 @@ fn test_foreign_asset_xcm_take_first_trader() { // Lets pay with: asset_amount_needed + asset_amount_extra let asset_amount_extra = 100_u128; let asset: Asset = - (asset_location_v4.clone(), asset_amount_needed + asset_amount_extra).into(); + (foreign_location.clone(), asset_amount_needed + asset_amount_extra).into(); let mut trader = ::Trader::new(); let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; @@ -551,16 +527,15 @@ fn test_foreign_asset_xcm_take_first_trader() { // Lets buy_weight and make sure buy_weight does not return an error let unused_assets = trader.buy_weight(bought, 
asset.into(), &ctx).expect("Expected Ok"); // Check whether a correct amount of unused assets is returned - assert_ok!( - unused_assets.ensure_contains(&(asset_location_v4, asset_amount_extra).into()) - ); + assert_ok!(unused_assets + .ensure_contains(&(foreign_location.clone(), asset_amount_extra).into())); // Drop trader drop(trader); // Make sure author(Alice) has received the amount assert_eq!( - ForeignAssets::balance(foreign_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_location.clone(), AccountId::from(ALICE)), minimum_asset_balance + asset_amount_needed ); @@ -841,15 +816,13 @@ fn test_assets_balances_api_works() { .build() .execute_with(|| { let local_asset_id = 1; - let foreign_asset_id_location = xcm::v3::Location::new( - 1, - [xcm::v3::Junction::Parachain(1234), xcm::v3::Junction::GeneralIndex(12345)], - ); + let foreign_asset_id_location = + Location::new(1, [Junction::Parachain(1234), Junction::GeneralIndex(12345)]); // check before assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); assert_eq!( - ForeignAssets::balance(foreign_asset_id_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_asset_id_location.clone(), AccountId::from(ALICE)), 0 ); assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); @@ -886,7 +859,7 @@ fn test_assets_balances_api_works() { let foreign_asset_minimum_asset_balance = 3333333_u128; assert_ok!(ForeignAssets::force_create( RuntimeHelper::root_origin(), - foreign_asset_id_location, + foreign_asset_id_location.clone(), AccountId::from(SOME_ASSET_ADMIN).into(), false, foreign_asset_minimum_asset_balance @@ -895,7 +868,7 @@ fn test_assets_balances_api_works() { // We first mint enough asset for the account to exist for assets assert_ok!(ForeignAssets::mint( RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_location, + foreign_asset_id_location.clone(), AccountId::from(ALICE).into(), 6 * foreign_asset_minimum_asset_balance )); @@ -906,7 +879,7 @@ fn test_assets_balances_api_works() { minimum_asset_balance ); assert_eq!( - ForeignAssets::balance(foreign_asset_id_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_asset_id_location.clone(), AccountId::from(ALICE)), 6 * minimum_asset_balance ); assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); @@ -932,10 +905,8 @@ fn test_assets_balances_api_works() { .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - WithLatestLocationConverter::::convert_back( - &foreign_asset_id_location - ) - .unwrap(), + WithLatestLocationConverter::::convert_back(&foreign_asset_id_location) + .unwrap(), 6 * foreign_asset_minimum_asset_balance ) .into()))); @@ -1025,14 +996,11 @@ asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ Runtime, XcmConfig, ForeignAssetsInstance, - xcm::v3::Location, + Location, JustTry, collator_session_keys(), ExistentialDeposit::get(), - xcm::v3::Location::new( - 1, - [xcm::v3::Junction::Parachain(1313), xcm::v3::Junction::GeneralIndex(12345)] - ), + Location::new(1, [Junction::Parachain(1313), Junction::GeneralIndex(12345)]), Box::new(|| { assert!(Assets::asset_ids().collect::>().is_empty()); }), @@ -1047,8 +1015,8 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p WeightToFee, ForeignCreatorsSovereignAccountOf, ForeignAssetsInstance, - xcm::v3::Location, - WithLatestLocationConverter, + Location, + WithLatestLocationConverter, collator_session_keys(), 
ExistentialDeposit::get(), AssetDeposit::get(), @@ -1138,16 +1106,17 @@ mod asset_hub_rococo_tests { let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = xcm::v3::Location::new( - 2, - [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)], - ); + let foreign_asset_id_location = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); - let foreign_asset_create_params = - (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + let foreign_asset_create_params = ( + foreign_asset_owner, + foreign_asset_id_location.clone(), + foreign_asset_id_minimum_balance, + ); asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, @@ -1181,7 +1150,7 @@ mod asset_hub_rococo_tests { // check now foreign asset for staking pot assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &staking_pot ), 0 @@ -1195,7 +1164,7 @@ mod asset_hub_rococo_tests { // staking pot receives no foreign assets assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &staking_pot ), 0 @@ -1211,16 +1180,17 @@ mod asset_hub_rococo_tests { let block_author_account = AccountId::from(BLOCK_AUTHOR_ACCOUNT); let staking_pot = StakingPot::get(); - let foreign_asset_id_location = xcm::v3::Location::new( - 2, - [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Westend)], - ); + let foreign_asset_id_location = + Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); - let foreign_asset_create_params = - (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + let foreign_asset_create_params = ( + foreign_asset_owner, + foreign_asset_id_location.clone(), + foreign_asset_id_minimum_balance, + ); asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, @@ -1245,7 +1215,7 @@ mod asset_hub_rococo_tests { // check block author before assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &block_author_account ), 0 @@ -1255,7 +1225,7 @@ mod asset_hub_rococo_tests { // `TakeFirstAssetTrader` puts fees to the block author assert!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &block_author_account ) > 0 ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 201698ecb7f9..ebbc000d1413 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -80,8 +80,7 @@ use testnet_parachains_constants::westend::{ use xcm_config::{ ForeignAssetsConvertedConcreteId, ForeignCreatorsSovereignAccountOf, PoolAssetsConvertedConcreteId, TrustBackedAssetsConvertedConcreteId, - 
TrustBackedAssetsPalletLocationV3, WestendLocation, WestendLocationV3, - XcmOriginToTransactDispatchOrigin, + TrustBackedAssetsPalletLocation, WestendLocation, XcmOriginToTransactDispatchOrigin, }; #[cfg(any(feature = "std", test))] @@ -326,11 +325,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v3::Location, + xcm::v4::Location, >, - xcm::v3::Location, + xcm::v4::Location, AccountId, >; @@ -338,25 +337,25 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< pub type NativeAndAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v3::Location, + TargetFromLeft, + xcm::v4::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v3::Location, xcm::v3::Location), + (xcm::v4::Location, xcm::v4::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v3::Location; + type AssetKind = xcm::v4::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< - WestendLocationV3, + WestendLocation, AccountId, Self::AssetKind, PoolIdToAccountId, @@ -364,7 +363,7 @@ impl pallet_asset_conversion::Config for Runtime { type PoolAssetId = u32; type PoolAssets = PoolAssets; type PoolSetupFee = ConstU128<0>; // Asset class deposit fees are sufficient to prevent spam - type PoolSetupFeeAsset = WestendLocationV3; + type PoolSetupFeeAsset = WestendLocation; type PoolSetupFeeTarget = ResolveAssetTo; type LiquidityWithdrawalFee = LiquidityWithdrawalFee; type LPFee = ConstU32<3>; @@ -374,10 +373,10 @@ impl pallet_asset_conversion::Config for Runtime { type WeightInfo = weights::pallet_asset_conversion::WeightInfo; #[cfg(feature = "runtime-benchmarks")] type BenchmarkHelper = assets_common::benchmarks::AssetPairFactory< - WestendLocationV3, + WestendLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v3::Location, + xcm::v4::Location, >; } @@ -411,17 +410,17 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v3::Location; - type AssetIdParameter = xcm::v3::Location; + type AssetId = xcm::v4::Location; + type AssetIdParameter = xcm::v4::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v3::Location>, - FromNetwork, + FromSiblingParachain, xcm::v4::Location>, + FromNetwork, ), ForeignCreatorsSovereignAccountOf, AccountId, - xcm::v3::Location, + xcm::v4::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -801,9 +800,9 @@ parameter_types! { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v3::Location; + type AssetId = xcm::v4::Location; type OnChargeAssetTransaction = SwapAssetAdapter< - WestendLocationV3, + WestendLocation, NativeAndAssets, AssetConversion, ResolveAssetTo, @@ -1331,18 +1330,18 @@ impl_runtime_apis! 
{ impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v3::Location, + xcm::v4::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v3::Location, asset2: xcm::v3::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v3::Location, asset2: xcm::v3::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v3::Location, asset2: xcm::v3::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index bf45e146e334..d61381d3f50b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -62,7 +62,6 @@ use xcm_executor::XcmExecutor; parameter_types! { pub const WestendLocation: Location = Location::parent(); - pub const WestendLocationV3: xcm::v3::Location = xcm::v3::Location::parent(); pub const RelayNetwork: Option = Some(NetworkId::Westend); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); pub UniversalLocation: InteriorLocation = @@ -71,8 +70,6 @@ parameter_types! { pub TrustBackedAssetsPalletLocation: Location = PalletInstance(TrustBackedAssetsPalletIndex::get()).into(); pub TrustBackedAssetsPalletIndex: u8 = ::index() as u8; - pub TrustBackedAssetsPalletLocationV3: xcm::v3::Location = - xcm::v3::Junction::PalletInstance(::index() as u8).into(); pub ForeignAssetsPalletLocation: Location = PalletInstance(::index() as u8).into(); pub PoolAssetsPalletLocation: Location = @@ -174,7 +171,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, - xcm::v3::Location, + xcm::v4::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -384,7 +381,7 @@ impl xcm_executor::Config for XcmConfig { ResolveTo, >, cumulus_primitives_utility::SwapFirstAssetTrader< - WestendLocationV3, + WestendLocation, crate::AssetConversion, WeightToFee, crate::NativeAndAssets, @@ -392,7 +389,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v3::Location, + xcm::v4::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -526,9 +523,9 @@ pub type ForeignCreatorsSovereignAccountOf = ( /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. 
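// ---- Aside (editor's sketch, not part of the patch) ----
// With `AssetKind = xcm::v4::Location`, callers of the `AssetConversionApi`
// shown above pass v4 locations directly, with no v4 -> v3 conversion step.
// A small sketch of the values involved, assuming only the `xcm` crate;
// names are illustrative:
//
//     use xcm::v4::{Junction, Location};
//
//     // Pool ids are ordered pairs of v4 locations, native side first
//     // (`WithFirstAsset<WestendLocation, ..>` above).
//     let native = Location::parent();
//     let asset = Location::new(1, [Junction::Parachain(1234), Junction::GeneralIndex(12345)]);
//     let _pool_id = (native.clone(), asset.clone());
//     // e.g. quote_price_exact_tokens_for_tokens(native, asset, amount, true)
// ---- end aside ----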
 pub struct XcmBenchmarkHelper;
 #[cfg(feature = "runtime-benchmarks")]
-impl pallet_assets::BenchmarkHelper<xcm::v3::Location> for XcmBenchmarkHelper {
-	fn create_asset_id_parameter(id: u32) -> xcm::v3::Location {
-		xcm::v3::Location::new(1, [xcm::v3::Junction::Parachain(id)])
+impl pallet_assets::BenchmarkHelper<xcm::v4::Location> for XcmBenchmarkHelper {
+	fn create_asset_id_parameter(id: u32) -> xcm::v4::Location {
+		xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)])
 	}
 }
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
index 48e6c11d268c..1c334d6f84f8 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs
@@ -86,7 +86,7 @@ fn slot_durations() -> SlotDurations {
 fn setup_pool_for_paying_fees_with_foreign_assets(
 	(foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): (
 		AccountId,
-		xcm::v3::Location,
+		xcm::v4::Location,
 		Balance,
 	),
 ) {
@@ -94,7 +94,7 @@ fn setup_pool_for_paying_fees_with_foreign_assets(
 	// setup a pool to pay fees with `foreign_asset_id_location` tokens
 	let pool_owner: AccountId = [14u8; 32].into();
-	let native_asset = xcm::v3::Location::parent();
+	let native_asset = xcm::v4::Location::parent();
 	let pool_liquidity: Balance =
 		existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000);
 
@@ -106,15 +106,15 @@ fn setup_pool_for_paying_fees_with_foreign_assets(
 
 	assert_ok!(ForeignAssets::mint(
 		RuntimeOrigin::signed(foreign_asset_owner),
-		foreign_asset_id_location.into(),
+		foreign_asset_id_location.clone().into(),
 		pool_owner.clone().into(),
 		(foreign_asset_id_minimum_balance + pool_liquidity).mul(2).into(),
 	));
 
 	assert_ok!(AssetConversion::create_pool(
 		RuntimeOrigin::signed(pool_owner.clone()),
-		Box::new(native_asset.into()),
-		Box::new(foreign_asset_id_location.into())
+		Box::new(native_asset.clone().into()),
+		Box::new(foreign_asset_id_location.clone().into())
 	));
 
 	assert_ok!(AssetConversion::add_liquidity(
@@ -219,10 +219,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() {
 		assert_ok!(AssetConversion::create_pool(
 			RuntimeHelper::origin_of(bob.clone()),
 			Box::new(
-				xcm::v3::Location::try_from(native_location.clone()).expect("conversion works")
+				xcm::v4::Location::try_from(native_location.clone()).expect("conversion works")
 			),
 			Box::new(
-				xcm::v3::Location::try_from(asset_1_location.clone())
+				xcm::v4::Location::try_from(asset_1_location.clone())
 					.expect("conversion works")
 			)
 		));
@@ -230,10 +230,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() {
 		assert_ok!(AssetConversion::add_liquidity(
 			RuntimeHelper::origin_of(bob.clone()),
 			Box::new(
-				xcm::v3::Location::try_from(native_location.clone()).expect("conversion works")
+				xcm::v4::Location::try_from(native_location.clone()).expect("conversion works")
 			),
 			Box::new(
-				xcm::v3::Location::try_from(asset_1_location.clone())
+				xcm::v4::Location::try_from(asset_1_location.clone())
 					.expect("conversion works")
 			),
 			pool_liquidity,
@@ -271,8 +271,8 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() {
 		let refund_weight = Weight::from_parts(1_000_000_000, 0);
 		let refund = WeightToFee::weight_to_fee(&refund_weight);
 		let (reserve1, reserve2) = AssetConversion::get_reserves(
-			xcm::v3::Location::try_from(native_location).expect("conversion works"),
-			xcm::v3::Location::try_from(asset_1_location.clone()).expect("conversion works"),
+			xcm::v4::Location::try_from(native_location).expect("conversion works"),
+			xcm::v4::Location::try_from(asset_1_location.clone()).expect("conversion works"),
 		)
 		.unwrap();
 		let asset_refund =
@@ -310,12 +310,12 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() {
 		let bob: AccountId = SOME_ASSET_ADMIN.into();
 		let staking_pot = CollatorSelection::account_id();
 		let native_location =
-			xcm::v3::Location::try_from(WestendLocation::get()).expect("conversion works");
-		let foreign_location = xcm::v3::Location {
+			xcm::v4::Location::try_from(WestendLocation::get()).expect("conversion works");
+		let foreign_location = xcm::v4::Location {
 			parents: 1,
 			interior: (
-				xcm::v3::Junction::Parachain(1234),
-				xcm::v3::Junction::GeneralIndex(12345),
+				xcm::v4::Junction::Parachain(1234),
+				xcm::v4::Junction::GeneralIndex(12345),
 			)
 			.into(),
 		};
@@ -326,26 +326,26 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() {
 		// init asset, balances and pool.
 		assert_ok!(<ForeignAssets as Create<_>>::create(
-			foreign_location,
+			foreign_location.clone(),
 			bob.clone(),
 			true,
 			10
 		));
 
-		assert_ok!(ForeignAssets::mint_into(foreign_location, &bob, initial_balance));
+		assert_ok!(ForeignAssets::mint_into(foreign_location.clone(), &bob, initial_balance));
 		assert_ok!(Balances::mint_into(&bob, initial_balance));
 		assert_ok!(Balances::mint_into(&staking_pot, initial_balance));
 
 		assert_ok!(AssetConversion::create_pool(
 			RuntimeHelper::origin_of(bob.clone()),
-			Box::new(native_location),
-			Box::new(foreign_location)
+			Box::new(native_location.clone()),
+			Box::new(foreign_location.clone())
 		));
 
 		assert_ok!(AssetConversion::add_liquidity(
 			RuntimeHelper::origin_of(bob.clone()),
-			Box::new(native_location),
-			Box::new(foreign_location),
+			Box::new(native_location.clone()),
+			Box::new(foreign_location.clone()),
 			pool_liquidity,
 			pool_liquidity,
 			1,
@@ -354,11 +354,9 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() {
 		));
 
 		// keep initial total issuance to assert later.
-		let asset_total_issuance = ForeignAssets::total_issuance(foreign_location);
+		let asset_total_issuance = ForeignAssets::total_issuance(foreign_location.clone());
 		let native_total_issuance = Balances::total_issuance();
 
-		let foreign_location_latest: Location = foreign_location.try_into().unwrap();
-
 		// prepare input to buy weight.
 		let weight = Weight::from_parts(4_000_000_000, 0);
 		let fee = WeightToFee::weight_to_fee(&weight);
@@ -366,7 +364,7 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() {
 			AssetConversion::get_amount_in(&fee, &pool_liquidity, &pool_liquidity).unwrap();
 		let extra_amount = 100;
 		let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None };
-		let payment: Asset = (foreign_location_latest.clone(), asset_fee + extra_amount).into();
+		let payment: Asset = (foreign_location.clone(), asset_fee + extra_amount).into();
 
 		// init trader and buy weight.
 		let mut trader = <XcmConfig as Config>::Trader::new();
@@ -374,13 +372,11 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() {
 			trader.buy_weight(weight, payment.into(), &ctx).expect("Expected Ok");
 
 		// assert.
- let unused_amount = unused_asset - .fungible - .get(&foreign_location_latest.clone().into()) - .map_or(0, |a| *a); + let unused_amount = + unused_asset.fungible.get(&foreign_location.clone().into()).map_or(0, |a| *a); assert_eq!(unused_amount, extra_amount); assert_eq!( - ForeignAssets::total_issuance(foreign_location), + ForeignAssets::total_issuance(foreign_location.clone()), asset_total_issuance + asset_fee ); @@ -388,13 +384,13 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); let (reserve1, reserve2) = - AssetConversion::get_reserves(native_location, foreign_location).unwrap(); + AssetConversion::get_reserves(native_location, foreign_location.clone()).unwrap(); let asset_refund = AssetConversion::get_amount_out(&refund, &reserve1, &reserve2).unwrap(); // refund. let actual_refund = trader.refund_weight(refund_weight, &ctx).unwrap(); - assert_eq!(actual_refund, (foreign_location_latest, asset_refund).into()); + assert_eq!(actual_refund, (foreign_location.clone(), asset_refund).into()); // assert. assert_eq!(Balances::balance(&staking_pot), initial_balance); @@ -501,17 +497,17 @@ fn test_foreign_asset_xcm_take_first_trader() { .execute_with(|| { // We need root origin to create a sufficient asset let minimum_asset_balance = 3333333_u128; - let foreign_location = xcm::v3::Location { + let foreign_location = xcm::v4::Location { parents: 1, interior: ( - xcm::v3::Junction::Parachain(1234), - xcm::v3::Junction::GeneralIndex(12345), + xcm::v4::Junction::Parachain(1234), + xcm::v4::Junction::GeneralIndex(12345), ) .into(), }; assert_ok!(ForeignAssets::force_create( RuntimeHelper::root_origin(), - foreign_location.into(), + foreign_location.clone().into(), AccountId::from(ALICE).into(), true, minimum_asset_balance @@ -520,12 +516,12 @@ fn test_foreign_asset_xcm_take_first_trader() { // We first mint enough asset for the account to exist for assets assert_ok!(ForeignAssets::mint( RuntimeHelper::origin_of(AccountId::from(ALICE)), - foreign_location.into(), + foreign_location.clone().into(), AccountId::from(ALICE).into(), minimum_asset_balance )); - let asset_location_v4: Location = foreign_location.try_into().unwrap(); + let asset_location_v4: Location = foreign_location.clone().try_into().unwrap(); // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -534,7 +530,7 @@ fn test_foreign_asset_xcm_take_first_trader() { let bought = Weight::from_parts(4_000_000_000u64, 0); // Lets calculate amount needed - let asset_amount_needed = ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles(foreign_location, bought) + let asset_amount_needed = ForeignAssetFeeAsExistentialDepositMultiplierFeeCharger::charge_weight_in_fungibles(foreign_location.clone(), bought) .expect("failed to compute"); // Lets pay with: asset_amount_needed + asset_amount_extra @@ -557,7 +553,7 @@ fn test_foreign_asset_xcm_take_first_trader() { // Make sure author(Alice) has received the amount assert_eq!( - ForeignAssets::balance(foreign_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_location.clone(), AccountId::from(ALICE)), minimum_asset_balance + asset_amount_needed ); @@ -837,11 +833,11 @@ fn test_assets_balances_api_works() { .build() .execute_with(|| { let local_asset_id = 1; - let foreign_asset_id_location = xcm::v3::Location { + let foreign_asset_id_location = xcm::v4::Location { 
parents: 1, interior: [ - xcm::v3::Junction::Parachain(1234), - xcm::v3::Junction::GeneralIndex(12345), + xcm::v4::Junction::Parachain(1234), + xcm::v4::Junction::GeneralIndex(12345), ] .into(), }; @@ -849,7 +845,7 @@ fn test_assets_balances_api_works() { // check before assert_eq!(Assets::balance(local_asset_id, AccountId::from(ALICE)), 0); assert_eq!( - ForeignAssets::balance(foreign_asset_id_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_asset_id_location.clone(), AccountId::from(ALICE)), 0 ); assert_eq!(Balances::free_balance(AccountId::from(ALICE)), 0); @@ -886,7 +882,7 @@ fn test_assets_balances_api_works() { let foreign_asset_minimum_asset_balance = 3333333_u128; assert_ok!(ForeignAssets::force_create( RuntimeHelper::root_origin(), - foreign_asset_id_location, + foreign_asset_id_location.clone(), AccountId::from(SOME_ASSET_ADMIN).into(), false, foreign_asset_minimum_asset_balance @@ -895,7 +891,7 @@ fn test_assets_balances_api_works() { // We first mint enough asset for the account to exist for assets assert_ok!(ForeignAssets::mint( RuntimeHelper::origin_of(AccountId::from(SOME_ASSET_ADMIN)), - foreign_asset_id_location, + foreign_asset_id_location.clone(), AccountId::from(ALICE).into(), 6 * foreign_asset_minimum_asset_balance )); @@ -906,7 +902,7 @@ fn test_assets_balances_api_works() { minimum_asset_balance ); assert_eq!( - ForeignAssets::balance(foreign_asset_id_location, AccountId::from(ALICE)), + ForeignAssets::balance(foreign_asset_id_location.clone(), AccountId::from(ALICE)), 6 * minimum_asset_balance ); assert_eq!(Balances::free_balance(AccountId::from(ALICE)), some_currency); @@ -932,7 +928,7 @@ fn test_assets_balances_api_works() { .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - WithLatestLocationConverter::::convert_back( + WithLatestLocationConverter::::convert_back( &foreign_asset_id_location ) .unwrap(), @@ -1025,13 +1021,13 @@ asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ Runtime, XcmConfig, ForeignAssetsInstance, - xcm::v3::Location, + xcm::v4::Location, JustTry, collator_session_keys(), ExistentialDeposit::get(), - xcm::v3::Location { + xcm::v4::Location { parents: 1, - interior: [xcm::v3::Junction::Parachain(1313), xcm::v3::Junction::GeneralIndex(12345)] + interior: [xcm::v4::Junction::Parachain(1313), xcm::v4::Junction::GeneralIndex(12345)] .into() }, Box::new(|| { @@ -1048,8 +1044,8 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p WeightToFee, ForeignCreatorsSovereignAccountOf, ForeignAssetsInstance, - xcm::v3::Location, - WithLatestLocationConverter, + xcm::v4::Location, + WithLatestLocationConverter, collator_session_keys(), ExistentialDeposit::get(), AssetDeposit::get(), @@ -1127,12 +1123,12 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s let staking_pot = StakingPot::get(); let foreign_asset_id_location = - xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]); + xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); let foreign_asset_create_params = - (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + (foreign_asset_owner, foreign_asset_id_location.clone(), 
foreign_asset_id_minimum_balance); asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, @@ -1166,7 +1162,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s // check now foreign asset for staking pot assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &staking_pot ), 0 @@ -1180,7 +1176,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s // staking pot receives no foreign assets assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &staking_pot ), 0 @@ -1196,12 +1192,12 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic let staking_pot = StakingPot::get(); let foreign_asset_id_location = - xcm::v3::Location::new(2, [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::Rococo)]); + xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); let foreign_asset_create_params = - (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance); + (foreign_asset_owner, foreign_asset_id_location.clone(), foreign_asset_id_minimum_balance); asset_test_utils::test_cases_over_bridge::receive_reserve_asset_deposited_from_different_consensus_works::< Runtime, @@ -1226,7 +1222,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic // check block author before assert_eq!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &block_author_account ), 0 @@ -1236,7 +1232,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic // `TakeFirstAssetTrader` puts fees to the block author assert!( ForeignAssets::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &block_author_account ) > 0 ); diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index 884b71369e79..67b585ecfe86 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -367,9 +367,9 @@ pub fn teleports_for_foreign_assets_works< ::Balance: From + Into, SovereignAccountOf: ConvertLocation>, >::AssetId: - From + Into, + From + Into, >::AssetIdParameter: - From + Into, + From + Into, >::Balance: From + Into, ::AccountId: @@ -381,11 +381,11 @@ pub fn teleports_for_foreign_assets_works< { // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; - let foreign_asset_id_location = xcm::v3::Location { + let foreign_asset_id_location = xcm::v4::Location { parents: 1, interior: [ - xcm::v3::Junction::Parachain(foreign_para_id), - xcm::v3::Junction::GeneralIndex(1234567), + xcm::v4::Junction::Parachain(foreign_para_id), + xcm::v4::Junction::GeneralIndex(1234567), ] .into(), }; @@ -438,14 +438,14 @@ pub fn teleports_for_foreign_assets_works< ); assert_eq!( >::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &target_account ), 0.into() ); assert_eq!( >::balance( - foreign_asset_id_location.into(), + 
foreign_asset_id_location.clone().into(), &CheckingAccount::get() ), 0.into() @@ -454,14 +454,14 @@ pub fn teleports_for_foreign_assets_works< assert_total::< pallet_assets::Pallet, AccountIdOf, - >(foreign_asset_id_location, 0, 0); + >(foreign_asset_id_location.clone(), 0, 0); // create foreign asset (0 total issuance) let asset_minimum_asset_balance = 3333333_u128; assert_ok!( >::force_create( RuntimeHelper::::root_origin(), - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), asset_owner.into(), false, asset_minimum_asset_balance.into() @@ -470,12 +470,9 @@ pub fn teleports_for_foreign_assets_works< assert_total::< pallet_assets::Pallet, AccountIdOf, - >(foreign_asset_id_location, 0, 0); + >(foreign_asset_id_location.clone(), 0, 0); assert!(teleported_foreign_asset_amount > asset_minimum_asset_balance); - let foreign_asset_id_location_latest: Location = - foreign_asset_id_location.try_into().unwrap(); - // 1. process received teleported assets from sibling parachain (foreign_para_id) let xcm = Xcm(vec![ // BuyExecution with relaychain native token @@ -489,12 +486,12 @@ pub fn teleports_for_foreign_assets_works< }, // Process teleported asset ReceiveTeleportedAsset(Assets::from(vec![Asset { - id: AssetId(foreign_asset_id_location_latest.clone()), + id: AssetId(foreign_asset_id_location.clone()), fun: Fungible(teleported_foreign_asset_amount), }])), DepositAsset { assets: Wild(AllOf { - id: AssetId(foreign_asset_id_location_latest.clone()), + id: AssetId(foreign_asset_id_location.clone()), fun: WildFungibility::Fungible, }), beneficiary: Location { @@ -526,7 +523,7 @@ pub fn teleports_for_foreign_assets_works< ); assert_eq!( >::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &target_account ), teleported_foreign_asset_amount.into() @@ -538,7 +535,7 @@ pub fn teleports_for_foreign_assets_works< ); assert_eq!( >::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &CheckingAccount::get() ), 0.into() @@ -548,7 +545,7 @@ pub fn teleports_for_foreign_assets_works< pallet_assets::Pallet, AccountIdOf, >( - foreign_asset_id_location, + foreign_asset_id_location.clone(), teleported_foreign_asset_amount, teleported_foreign_asset_amount, ); @@ -566,7 +563,7 @@ pub fn teleports_for_foreign_assets_works< let target_account_balance_before_teleport = >::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &target_account, ); let asset_to_teleport_away = asset_minimum_asset_balance * 3; @@ -580,7 +577,7 @@ pub fn teleports_for_foreign_assets_works< // Make sure the target account has enough native asset to pay for delivery fees let delivery_fees = xcm_helpers::teleport_assets_delivery_fees::( - (foreign_asset_id_location_latest.clone(), asset_to_teleport_away).into(), + (foreign_asset_id_location.clone(), asset_to_teleport_away).into(), 0, Unlimited, dest_beneficiary.clone(), @@ -596,7 +593,7 @@ pub fn teleports_for_foreign_assets_works< RuntimeHelper::::origin_of(target_account.clone()), dest, dest_beneficiary, - (foreign_asset_id_location_latest.clone(), asset_to_teleport_away), + (foreign_asset_id_location.clone(), asset_to_teleport_away), Some((runtime_para_id, foreign_para_id)), included_head, &alice, @@ -606,14 +603,14 @@ pub fn teleports_for_foreign_assets_works< // check balances assert_eq!( >::balance( - foreign_asset_id_location.into(), + foreign_asset_id_location.clone().into(), &target_account ), (target_account_balance_before_teleport - 
 					asset_to_teleport_away.into())
 			);
 			assert_eq!(
 				<pallet_assets::Pallet<Runtime, ForeignAssetsPalletInstance>>::balance(
-					foreign_asset_id_location.into(),
+					foreign_asset_id_location.clone().into(),
 					&CheckingAccount::get()
 				),
 				0.into()
 			);
@@ -623,7 +620,7 @@ pub fn teleports_for_foreign_assets_works<
 				pallet_assets::Pallet<Runtime, ForeignAssetsPalletInstance>,
 				AccountIdOf<Runtime>,
 			>(
-				foreign_asset_id_location,
+				foreign_asset_id_location.clone(),
 				teleported_foreign_asset_amount - asset_to_teleport_away,
 				teleported_foreign_asset_amount - asset_to_teleport_away,
 			);
@@ -1559,9 +1556,6 @@ pub fn reserve_transfer_native_asset_to_non_teleport_para_works<
 		)
 		.unwrap();
 
-		let v4_xcm: Xcm<()> = xcm_sent.clone().try_into().unwrap();
-		dbg!(&v4_xcm);
-
 		let delivery_fees = get_fungible_delivery_fees::<
 			<XcmConfig as xcm_executor::Config>::XcmSender,
 		>(dest.clone(), Xcm::try_from(xcm_sent.clone()).unwrap());
diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs
index 0b2364dbb8bd..e0b3f70c7546 100644
--- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs
+++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs
@@ -331,7 +331,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 	block_author_account: AccountIdOf<Runtime>,
 	(foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): (
 		AccountIdOf<Runtime>,
-		xcm::v3::Location,
+		xcm::v4::Location,
 		u128,
 	),
 	foreign_asset_id_amount_to_transfer: u128,
@@ -357,9 +357,9 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 	BalanceOf<Runtime>: From<Balance> + Into<u128>,
 	XcmConfig: xcm_executor::Config,
 	<Runtime as pallet_assets::Config<ForeignAssetsPalletInstance>>::AssetId:
-		From<xcm::v3::Location> + Into<xcm::v3::Location>,
+		From<xcm::v4::Location> + Into<xcm::v4::Location>,
 	<Runtime as pallet_assets::Config<ForeignAssetsPalletInstance>>::AssetIdParameter:
-		From<xcm::v3::Location> + Into<xcm::v3::Location>,
+		From<xcm::v4::Location> + Into<xcm::v4::Location>,
 	<Runtime as pallet_assets::Config<ForeignAssetsPalletInstance>>::Balance:
 		From<u128> + Into<u128> + From<Balance>,
 	<Runtime as frame_system::Config>::AccountId:
 		Into<<<Runtime as frame_system::Config>::RuntimeOrigin as OriginTrait>::AccountId>
@@ -390,7 +390,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 			assert_ok!(
 				<pallet_assets::Pallet<Runtime, ForeignAssetsPalletInstance>>::force_create(
 					RuntimeHelper::<Runtime, AllPalletsWithoutSystem>::root_origin(),
-					foreign_asset_id_location.into(),
+					foreign_asset_id_location.clone().into(),
 					foreign_asset_owner.into(),
 					true, // is_sufficient=true
 					foreign_asset_id_minimum_balance.into()
@@ -409,7 +409,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 			// ForeignAssets balances before
 			assert_eq!(
 				<pallet_assets::Pallet<Runtime, ForeignAssetsPalletInstance>>::balance(
-					foreign_asset_id_location.into(),
+					foreign_asset_id_location.clone().into(),
 					&target_account
 				),
 				0.into()
@@ -418,11 +418,8 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 			// additional check before
 			additional_checks_before();
 
-			let foreign_asset_id_location_latest: Location =
-				foreign_asset_id_location.try_into().unwrap();
-
 			let expected_assets = Assets::from(vec![Asset {
-				id: AssetId(foreign_asset_id_location_latest.clone()),
+				id: AssetId(foreign_asset_id_location.clone()),
 				fun: Fungible(foreign_asset_id_amount_to_transfer),
 			}]);
 			let expected_beneficiary = Location::new(
@@ -439,7 +436,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works<
 				ClearOrigin,
 				BuyExecution {
 					fees: Asset {
-						id: AssetId(foreign_asset_id_location_latest.clone()),
+						id: AssetId(foreign_asset_id_location.clone()),
 						fun: Fungible(foreign_asset_id_amount_to_transfer),
 					},
 					weight_limit: Unlimited,
diff --git a/prdoc/pr_4129.prdoc b/prdoc/pr_4129.prdoc
new file mode 100644
index 000000000000..dfcc9b9ef030
--- /dev/null
+++ b/prdoc/pr_4129.prdoc
@@ -0,0 +1,17 @@
+# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0
+# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json
+
+title: Update ForeignAssets from xcm::v3::Location to xcm::v4::Location
+
+doc:
+  - audience:
+      - Runtime Dev
+      - Runtime User
+    description: |
+      As a stepping stone for XCMv5, the foreign asset ids have been updated from v3::Location to v4::Location.
+
+crates:
+  - name: asset-hub-rococo-runtime
+    bump: minor
+  - name: asset-hub-westend-runtime
+    bump: minor
diff --git a/substrate/frame/assets/Cargo.toml b/substrate/frame/assets/Cargo.toml
index fcb151298f09..e20b576d0836 100644
--- a/substrate/frame/assets/Cargo.toml
+++ b/substrate/frame/assets/Cargo.toml
@@ -30,7 +30,6 @@ frame-benchmarking = { optional = true, workspace = true }
 sp-core = { workspace = true }
 
 [dev-dependencies]
-sp-std = { workspace = true, default-features = true }
 sp-io = { workspace = true, default-features = true }
 pallet-balances = { workspace = true, default-features = true }
 
From d944ac2f25d267b443631f891aa68a834dc24af0 Mon Sep 17 00:00:00 2001
From: Przemek Rzad
Date: Wed, 14 Aug 2024 10:46:03 +0200
Subject: [PATCH 39/74] Stop running the wishlist workflow on forks (#5297)

Addresses
https://github.com/paritytech/polkadot-sdk/pull/5085#issuecomment-2277231072
---
 .github/workflows/misc-update-wishlist-leaderboard.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/misc-update-wishlist-leaderboard.yml b/.github/workflows/misc-update-wishlist-leaderboard.yml
index 68625e5433ca..326168717674 100644
--- a/.github/workflows/misc-update-wishlist-leaderboard.yml
+++ b/.github/workflows/misc-update-wishlist-leaderboard.yml
@@ -11,6 +11,7 @@ permissions:
 
 jobs:
   update-wishlist-leaderboard:
+    if: github.repository == 'paritytech/polkadot-sdk'
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4

From e4f8a6de49b37af3c58d9414ba81f7002c911551 Mon Sep 17 00:00:00 2001
From: Adrian Catangiu
Date: Wed, 14 Aug 2024 13:58:07 +0300
Subject: [PATCH 40/74] [tests] dedup test code, add more tests, improve naming
 and docs (#5338)

This is mostly test cleanup:
- uses helper macro for generating teleport tests (see the sketch below),
- adds missing treasury tests,
- improves naming and docs for transfer tests.
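
For illustration, a runtime's teleport coverage now reduces to a macro
invocation per direction. A minimal sketch, taken from the Asset Hub Rococo
changes further down in this patch (`native_asset` and `amount` are prepared
by the enclosing `#[test]` body):

	let amount = ASSET_HUB_ROCOCO_ED * 100;
	let native_asset: Assets = (Parent, amount).into();

	test_parachain_is_trusted_teleporter!(
		AssetHubRococo,          // Origin
		AssetHubRococoXcmConfig, // XCM Configuration
		vec![BridgeHubRococo],   // Destinations
		(native_asset, amount)
	);

The relay <-> parachain direction works the same way via the
`test_relay_is_trusted_teleporter!` and
`test_parachain_is_trusted_teleporter_for_relay!` macros used below.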
- [x] does not need a PRDOC --------- Co-authored-by: command-bot <> --- Cargo.lock | 2 + .../people/people-rococo/src/genesis.rs | 7 +- .../people/people-westend/src/genesis.rs | 7 +- .../emulated/common/src/macros.rs | 5 +- .../emulated/common/src/xcm_helpers.rs | 7 +- .../tests/assets/asset-hub-rococo/src/lib.rs | 2 +- .../src/tests/reserve_transfer.rs | 37 +- .../assets/asset-hub-rococo/src/tests/send.rs | 30 +- .../asset-hub-rococo/src/tests/teleport.rs | 186 +----- .../tests/assets/asset-hub-westend/src/lib.rs | 2 +- .../src/tests/reserve_transfer.rs | 37 +- .../asset-hub-westend/src/tests/send.rs | 30 +- .../asset-hub-westend/src/tests/teleport.rs | 186 +----- .../bridges/bridge-hub-rococo/src/lib.rs | 12 +- .../bridge-hub-rococo/src/tests/snowbridge.rs | 1 - .../bridge-hub-rococo/src/tests/teleport.rs | 20 + .../bridges/bridge-hub-westend/src/lib.rs | 10 +- .../src/tests/asset_transfers.rs | 2 +- .../bridge-hub-westend/src/tests/teleport.rs | 20 + .../collectives-westend/Cargo.toml | 1 + .../src/tests/fellowship.rs | 72 +++ .../src/tests/fellowship_salary.rs | 66 +++ .../collectives-westend/src/tests/mod.rs | 2 + .../tests/people/people-rococo/src/lib.rs | 21 +- .../people/people-rococo/src/tests/mod.rs | 1 - .../people-rococo/src/tests/reap_identity.rs | 545 ----------------- .../people-rococo/src/tests/teleport.rs | 162 +----- .../tests/people/people-westend/Cargo.toml | 1 + .../tests/people/people-westend/src/lib.rs | 20 +- .../people/people-westend/src/tests/mod.rs | 1 - .../people-westend/src/tests/reap_identity.rs | 547 ------------------ .../people-westend/src/tests/teleport.rs | 162 +----- .../collectives-westend/src/xcm_config.rs | 10 + 33 files changed, 376 insertions(+), 1838 deletions(-) create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs create mode 100644 cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_salary.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs delete mode 100644 cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs diff --git a/Cargo.lock b/Cargo.lock index 2593452fb869..67cfbb5968d5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2964,6 +2964,7 @@ dependencies = [ "pallet-message-queue", "pallet-treasury", "pallet-utility", + "pallet-whitelist", "pallet-xcm", "parachains-common", "parity-scale-codec", @@ -12686,6 +12687,7 @@ dependencies = [ "pallet-balances", "pallet-identity", "pallet-message-queue", + "pallet-xcm", "parachains-common", "parity-scale-codec", "polkadot-runtime-common", diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs index 43d182facdd5..36a701d24c27 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-rococo/src/genesis.rs @@ -18,7 +18,9 @@ use sp_core::storage::Storage; // Cumulus use cumulus_primitives_core::ParaId; -use emulated_integration_tests_common::{build_genesis_storage, collators, SAFE_XCM_VERSION}; +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, +}; use parachains_common::Balance; pub const PARA_ID: u32 = 1004; @@ 
-27,6 +29,9 @@ pub const ED: Balance = testnet_parachains_constants::rococo::currency::EXISTENT pub fn genesis() -> Storage { let genesis_config = people_rococo_runtime::RuntimeGenesisConfig { system: people_rococo_runtime::SystemConfig::default(), + balances: people_rococo_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, parachain_info: people_rococo_runtime::ParachainInfoConfig { parachain_id: ParaId::from(PARA_ID), ..Default::default() diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs index 0b99f19bc130..942ec1b31d2b 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/people/people-westend/src/genesis.rs @@ -18,7 +18,9 @@ use sp_core::storage::Storage; // Cumulus use cumulus_primitives_core::ParaId; -use emulated_integration_tests_common::{build_genesis_storage, collators, SAFE_XCM_VERSION}; +use emulated_integration_tests_common::{ + accounts, build_genesis_storage, collators, SAFE_XCM_VERSION, +}; use parachains_common::Balance; pub const PARA_ID: u32 = 1004; @@ -27,6 +29,9 @@ pub const ED: Balance = testnet_parachains_constants::westend::currency::EXISTEN pub fn genesis() -> Storage { let genesis_config = people_westend_runtime::RuntimeGenesisConfig { system: people_westend_runtime::SystemConfig::default(), + balances: people_westend_runtime::BalancesConfig { + balances: accounts::init_balances().iter().cloned().map(|k| (k, ED * 4096)).collect(), + }, parachain_info: people_westend_runtime::ParachainInfoConfig { parachain_id: ParaId::from(PARA_ID), ..Default::default() diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index b11adacbde5c..578bca84ce5a 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -228,7 +228,7 @@ macro_rules! test_parachain_is_trusted_teleporter_for_relay { $crate::macros::paste::paste! { // init Origin variables let sender = [<$sender_para Sender>]::get(); - let mut para_sender_balance_before = + let para_sender_balance_before = <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; let origin = <$sender_para as $crate::macros::Chain>::RuntimeOrigin::signed(sender.clone()); let assets: Assets = (Parent, $amount).into(); @@ -302,9 +302,6 @@ macro_rules! 
test_parachain_is_trusted_teleporter_for_relay { assert_eq!(para_sender_balance_before - $amount - delivery_fees, para_sender_balance_after); assert!(relay_receiver_balance_after > relay_receiver_balance_before); - - // Update sender balance - para_sender_balance_before = <$sender_para as $crate::macros::Chain>::account_data_of(sender.clone()).free; } }; } diff --git a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs index 76179c6d82c6..7a289a3f1ac6 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/xcm_helpers.rs @@ -23,16 +23,15 @@ use xcm::{prelude::*, DoubleEncoded}; pub fn xcm_transact_paid_execution( call: DoubleEncoded<()>, origin_kind: OriginKind, - native_asset: Asset, + fees: Asset, beneficiary: AccountId, ) -> VersionedXcm<()> { let weight_limit = WeightLimit::Unlimited; let require_weight_at_most = Weight::from_parts(1000000000, 200000); - let native_assets: Assets = native_asset.clone().into(); VersionedXcm::from(Xcm(vec![ - WithdrawAsset(native_assets), - BuyExecution { fees: native_asset, weight_limit }, + WithdrawAsset(fees.clone().into()), + BuyExecution { fees, weight_limit }, Transact { require_weight_at_most, origin_kind, call }, RefundSurplus, DepositAsset { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 1a30fac6ba91..87a090bf1ae6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -37,6 +37,7 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, get_account_id_from_seed, test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, @@ -91,7 +92,6 @@ mod imports { pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; - pub type RelayToSystemParaTest = Test; pub type RelayToParaTest = Test; pub type ParaToRelayTest = Test; pub type SystemParaToRelayTest = Test; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 329dbcac4b42..70dde03d75a2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -493,9 +493,9 @@ fn para_to_para_through_relay_limited_reserve_transfer_assets( ) } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay Chain to the Asset Hub shouldn't work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { +fn reserve_transfer_native_asset_from_relay_to_asset_hub_fails() { // Init values for Relay Chain let signed_origin = ::RuntimeOrigin::signed(RococoSender::get().into()); let destination = Rococo::child_location_of(AssetHubRococo::para_id()); @@ -526,10 +526,10 @@ fn 
reserve_transfer_native_asset_from_relay_to_system_para_fails() { }); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work +/// Reserve Transfers of native asset from Asset Hub to Relay Chain shouldn't work #[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain +fn reserve_transfer_native_asset_from_asset_hub_to_relay_fails() { + // Init values for Asset Hub let signed_origin = ::RuntimeOrigin::signed(AssetHubRococoSender::get().into()); let destination = AssetHubRococo::parent_location(); @@ -691,10 +691,10 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // ========================================================================= // ======= Reserve Transfers - Native Asset - AssetHub<>Parachain ========== // ========================================================================= -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Asset Hub to Parachain should work #[test] -fn reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain +fn reserve_transfer_native_asset_from_asset_hub_to_para() { + // Init values for Asset Hub let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sender = AssetHubRococoSender::get(); let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 10000; @@ -749,9 +749,9 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { assert!(receiver_assets_after < receiver_assets_before + amount_to_send); } -/// Reserve Transfers of native asset from Parachain to System Parachain should work +/// Reserve Transfers of native asset from Parachain to Asset Hub should work #[test] -fn reserve_transfer_native_asset_from_para_to_system_para() { +fn reserve_transfer_native_asset_from_para_to_asset_hub() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); @@ -768,12 +768,12 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { amount_to_send * 2, ); - // Init values for System Parachain + // Init values for Asset Hub let receiver = AssetHubRococoReceiver::get(); let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - // fund Parachain's SA on System Parachain with the native tokens held in reserve + // fund Parachain's SA on Asset Hub with the native tokens held in reserve AssetHubRococo::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); // Init Test @@ -824,11 +824,11 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // ================================================================================== // ======= Reserve Transfers - Native + Non-system Asset - AssetHub<>Parachain ====== // ================================================================================== -/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// Reserve Transfers of a local asset and native asset from Asset Hub to Parachain should /// work #[test] -fn reserve_transfer_assets_from_system_para_to_para() { - // Init values for System Parachain +fn reserve_transfer_multiple_assets_from_asset_hub_to_para() { + // Init values for Asset Hub let destination = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = 
AssetHubRococo::sovereign_account_id_of(destination.clone()); let sender = AssetHubRococoSender::get(); @@ -939,13 +939,12 @@ fn reserve_transfer_assets_from_system_para_to_para() { ); } -/// Reserve Transfers of a random asset and native asset from Parachain to System Para should -/// work +/// Reserve Transfers of a random asset and native asset from Parachain to Asset Hub should work /// Receiver is empty account to show deposit works as long as transfer includes enough DOT for ED. /// Once we have https://github.com/paritytech/polkadot-sdk/issues/5298, /// we should do equivalent test with USDT instead of DOT. #[test] -fn reserve_transfer_assets_from_para_to_system_para() { +fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubRococo::para_id()); let sender = PenpalASender::get(); @@ -982,7 +981,7 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Beneficiary is a new (empty) account let receiver = get_account_id_from_seed::(DUMMY_EMPTY); - // Init values for System Parachain + // Init values for Asset Hub let penpal_location_as_seen_by_ahr = AssetHubRococo::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubRococo::sovereign_account_id_of(penpal_location_as_seen_by_ahr); let ah_asset_owner = AssetHubRococoAssetOwner::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs index ab407611c736..29eaa9694643 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/send.rs @@ -18,7 +18,7 @@ use crate::imports::*; /// Relay Chain should be able to execute `Transact` instructions in System Parachain /// when `OriginKind::Superuser`. 
#[test] -fn send_transact_as_superuser_from_relay_to_system_para_works() { +fn send_transact_as_superuser_from_relay_to_asset_hub_works() { AssetHubRococo::force_create_asset_from_relay_as_root( ASSET_ID, ASSET_MIN_BALANCE, @@ -29,10 +29,10 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { } /// We tests two things here: -/// - Parachain should be able to send XCM paying its fee with system asset in the System Parachain -/// - Parachain should be able to create a new Foreign Asset in the System Parachain +/// - Parachain should be able to send XCM paying its fee at Asset Hub using system asset +/// - Parachain should be able to create a new Foreign Asset at Asset Hub #[test] -fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { +fn send_xcm_from_para_to_asset_hub_paying_fee_with_system_asset() { let para_sovereign_account = AssetHubRococo::sovereign_account_id_of( AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); @@ -83,12 +83,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - AssetHubRococo::assert_xcmp_queue_success(Some(Weight::from_parts( - 15_594_564_000, - 562_893, - ))); - + AssetHubRococo::assert_xcmp_queue_success(None); assert_expected_events!( AssetHubRococo, vec![ @@ -112,15 +107,15 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { } /// We tests two things here: -/// - Parachain should be able to send XCM paying its fee with system assets in the System Parachain -/// - Parachain should be able to create a new Asset in the System Parachain +/// - Parachain should be able to send XCM paying its fee at Asset Hub using sufficient asset +/// - Parachain should be able to create a new Asset at Asset Hub #[test] -fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { +fn send_xcm_from_para_to_asset_hub_paying_fee_with_sufficient_asset() { let para_sovereign_account = AssetHubRococo::sovereign_account_id_of( AssetHubRococo::sibling_location_of(PenpalA::para_id()), ); - // Force create and mint assets for Parachain's sovereign account + // Force create and mint sufficient assets for Parachain's sovereign account AssetHubRococo::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, @@ -167,12 +162,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - AssetHubRococo::assert_xcmp_queue_success(Some(Weight::from_parts( - 15_594_564_000, - 562_893, - ))); - + AssetHubRococo::assert_xcmp_queue_success(None); assert_expected_events!( AssetHubRococo, vec![ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 5f9131b8c123..c8da801a14bf 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ -15,53 +15,6 @@ use crate::imports::*; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Rococo::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(631_531_000, 7_186))); - - assert_expected_events!( - Rococo, - vec![ - // Amount to teleport is withdrawn from Sender - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount 
}) => { - who: *who == t.sender.account_id, - amount: *amount == t.args.amount, - }, - // Amount to teleport is deposited in Relay's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn relay_dest_assertions(t: SystemParaToRelayTest) { - type RuntimeEvent = ::RuntimeEvent; - - Rococo::assert_ump_queue_processed( - true, - Some(AssetHubRococo::para_id()), - Some(Weight::from_parts(307_225_000, 7_186)), - ); - - assert_expected_events!( - Rococo, - vec![ - // Amount is withdrawn from Relay Chain's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { Rococo::assert_ump_queue_processed( false, @@ -92,22 +45,6 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { ); } -fn para_dest_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubRococo::assert_dmp_queue_complete(Some(Weight::from_parts(157_718_000, 3593))); - - assert_expected_events!( - AssetHubRococo, - vec![ - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; let system_para_native_asset_location = RelayLocation::get(); @@ -230,17 +167,6 @@ fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -274,90 +200,41 @@ fn system_para_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResul ) } -/// Limited Teleport of native asset from Relay Chain to the System Parachain should work #[test] -fn limited_teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = ROCOCO_ED * 1000; - let dest = Rococo::child_location_of(AssetHubRococo::para_id()); - let beneficiary_id = AssetHubRococoReceiver::get(); - let test_args = TestContext { - sender: RococoSender::get(), - receiver: AssetHubRococoReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_limited_teleport_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let 
sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; +fn teleport_to_other_system_parachains_works() { + let amount = ASSET_HUB_ROCOCO_ED * 100; + let native_asset: Assets = (Parent, amount).into(); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); + test_parachain_is_trusted_teleporter!( + AssetHubRococo, // Origin + AssetHubRococoXcmConfig, // XCM Configuration + vec![BridgeHubRococo], // Destinations + (native_asset, amount) + ); } -/// Limited Teleport of native asset from System Parachain to Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` #[test] -fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - limited_teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_ROCOCO_ED * 1000; - let destination = AssetHubRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_limited_teleport_assets); - test.assert(); +fn teleport_from_and_to_relay() { + let amount = ROCOCO_ED * 100; + let native_asset: Assets = (Here, amount).into(); - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubRococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); + test_relay_is_trusted_teleporter!( + Rococo, + RococoXcmConfig, + vec![AssetHubRococo], + (native_asset, amount) + ); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); + test_parachain_is_trusted_teleporter_for_relay!( + AssetHubRococo, + AssetHubRococoXcmConfig, + Rococo, + amount + ); } /// Limited Teleport of native asset from System Parachain to Relay Chain -/// should't work when there is not enough balance in Relay Chain's `CheckAccount` +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` #[test] fn limited_teleport_native_assets_from_system_para_to_relay_fails() { // Init values for Relay Chain @@ -397,19 +274,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -#[test] -fn teleport_to_other_system_parachains_works() { - let amount = ASSET_HUB_ROCOCO_ED * 100; - let native_asset: Assets = (Parent, amount).into(); - - test_parachain_is_trusted_teleporter!( - AssetHubRococo, // Origin - AssetHubRococoXcmConfig, // XCM Configuration - vec![BridgeHubRococo], // 
Destinations - (native_asset, amount) - ); -} - /// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets while paying /// fees using (reserve transferred) native asset. pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs index fe484771931f..a887ee6a532a 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/lib.rs @@ -34,6 +34,7 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::DUMMY_EMPTY, get_account_id_from_seed, test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, Test, TestArgs, TestContext, TestExt, @@ -88,7 +89,6 @@ mod imports { pub const ASSET_ID: u32 = 3; pub const ASSET_MIN_BALANCE: u128 = 1000; - pub type RelayToSystemParaTest = Test; pub type RelayToParaTest = Test; pub type ParaToRelayTest = Test; pub type SystemParaToRelayTest = Test; diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 729de65382f8..59f63d380590 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -493,9 +493,9 @@ fn para_to_para_through_relay_limited_reserve_transfer_assets( ) } -/// Reserve Transfers of native asset from Relay Chain to the System Parachain shouldn't work +/// Reserve Transfers of native asset from Relay Chain to the Asset Hub shouldn't work #[test] -fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { +fn reserve_transfer_native_asset_from_relay_to_asset_hub_fails() { // Init values for Relay Chain let signed_origin = ::RuntimeOrigin::signed(WestendSender::get().into()); let destination = Westend::child_location_of(AssetHubWestend::para_id()); @@ -526,10 +526,10 @@ fn reserve_transfer_native_asset_from_relay_to_system_para_fails() { }); } -/// Reserve Transfers of native asset from System Parachain to Relay Chain shouldn't work +/// Reserve Transfers of native asset from Asset Hub to Relay Chain shouldn't work #[test] -fn reserve_transfer_native_asset_from_system_para_to_relay_fails() { - // Init values for System Parachain +fn reserve_transfer_native_asset_from_asset_hub_to_relay_fails() { + // Init values for Asset Hub let signed_origin = ::RuntimeOrigin::signed(AssetHubWestendSender::get().into()); let destination = AssetHubWestend::parent_location(); @@ -691,10 +691,10 @@ fn reserve_transfer_native_asset_from_para_to_relay() { // ========================================================================= // ======= Reserve Transfers - Native Asset - AssetHub<>Parachain ========== // ========================================================================= -/// Reserve Transfers of native asset from System Parachain to Parachain should work +/// Reserve Transfers of native asset from Asset Hub to Parachain should work #[test] -fn 
reserve_transfer_native_asset_from_system_para_to_para() { - // Init values for System Parachain +fn reserve_transfer_native_asset_from_asset_hub_to_para() { + // Init values for Asset Hub let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sender = AssetHubWestendSender::get(); let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 2000; @@ -749,9 +749,9 @@ fn reserve_transfer_native_asset_from_system_para_to_para() { assert!(receiver_assets_after < receiver_assets_before + amount_to_send); } -/// Reserve Transfers of native asset from Parachain to System Parachain should work +/// Reserve Transfers of native asset from Parachain to Asset Hub should work #[test] -fn reserve_transfer_native_asset_from_para_to_system_para() { +fn reserve_transfer_native_asset_from_para_to_asset_hub() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()); let sender = PenpalASender::get(); @@ -768,13 +768,13 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { amount_to_send * 2, ); - // Init values for System Parachain + // Init values for Asset Hub let receiver = AssetHubWestendReceiver::get(); let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahr); - // fund Parachain's SA on System Parachain with the native tokens held in reserve + // fund Parachain's SA on Asset Hub with the native tokens held in reserve AssetHubWestend::fund_accounts(vec![(sov_penpal_on_ahr.into(), amount_to_send * 2)]); // Init Test @@ -825,11 +825,11 @@ fn reserve_transfer_native_asset_from_para_to_system_para() { // ========================================================================= // ======= Reserve Transfers - Non-system Asset - AssetHub<>Parachain ====== // ========================================================================= -/// Reserve Transfers of a local asset and native asset from System Parachain to Parachain should +/// Reserve Transfers of a local asset and native asset from Asset Hub to Parachain should /// work #[test] -fn reserve_transfer_assets_from_system_para_to_para() { - // Init values for System Parachain +fn reserve_transfer_multiple_assets_from_asset_hub_to_para() { + // Init values for Asset Hub let destination = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(destination.clone()); let sender = AssetHubWestendSender::get(); @@ -940,13 +940,12 @@ fn reserve_transfer_assets_from_system_para_to_para() { ); } -/// Reserve Transfers of a random asset and native asset from Parachain to System Para should -/// work +/// Reserve Transfers of a random asset and native asset from Parachain to Asset Hub should work /// Receiver is empty account to show deposit works as long as transfer includes enough DOT for ED. /// Once we have https://github.com/paritytech/polkadot-sdk/issues/5298, /// we should do equivalent test with USDT instead of DOT. 
#[test] -fn reserve_transfer_assets_from_para_to_system_para() { +fn reserve_transfer_multiple_assets_from_para_to_asset_hub() { // Init values for Parachain let destination = PenpalA::sibling_location_of(AssetHubWestend::para_id()); let sender = PenpalASender::get(); @@ -983,7 +982,7 @@ fn reserve_transfer_assets_from_para_to_system_para() { // Beneficiary is a new (empty) account let receiver = get_account_id_from_seed::(DUMMY_EMPTY); - // Init values for System Parachain + // Init values for Asset Hub let penpal_location_as_seen_by_ahr = AssetHubWestend::sibling_location_of(PenpalA::para_id()); let sov_penpal_on_ahr = AssetHubWestend::sovereign_account_id_of(penpal_location_as_seen_by_ahr); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs index ac006653ca67..761c7c12255c 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/send.rs @@ -18,7 +18,7 @@ use crate::imports::*; /// Relay Chain should be able to execute `Transact` instructions in System Parachain /// when `OriginKind::Superuser`. #[test] -fn send_transact_as_superuser_from_relay_to_system_para_works() { +fn send_transact_as_superuser_from_relay_to_asset_hub_works() { AssetHubWestend::force_create_asset_from_relay_as_root( ASSET_ID, ASSET_MIN_BALANCE, @@ -29,10 +29,10 @@ fn send_transact_as_superuser_from_relay_to_system_para_works() { } /// We tests two things here: -/// - Parachain should be able to send XCM paying its fee with system asset in the System Parachain -/// - Parachain should be able to create a new Foreign Asset in the System Parachain +/// - Parachain should be able to send XCM paying its fee at Asset Hub using system asset +/// - Parachain should be able to create a new Foreign Asset at Asset Hub #[test] -fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { +fn send_xcm_from_para_to_asset_hub_paying_fee_with_system_asset() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( AssetHubWestend::sibling_location_of(PenpalA::para_id()), ); @@ -83,12 +83,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_xcmp_queue_success(Some(Weight::from_parts( - 15_594_564_000, - 562_893, - ))); - + AssetHubWestend::assert_xcmp_queue_success(None); assert_expected_events!( AssetHubWestend, vec![ @@ -112,15 +107,15 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_system_assets_works() { } /// We tests two things here: -/// - Parachain should be able to send XCM paying its fee with system assets in the System Parachain -/// - Parachain should be able to create a new Asset in the System Parachain +/// - Parachain should be able to send XCM paying its fee at Asset Hub using sufficient asset +/// - Parachain should be able to create a new Asset at Asset Hub #[test] -fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { +fn send_xcm_from_para_to_asset_hub_paying_fee_with_sufficient_asset() { let para_sovereign_account = AssetHubWestend::sovereign_account_id_of( AssetHubWestend::sibling_location_of(PenpalA::para_id()), ); - // Force create and mint assets for Parachain's sovereign account + // Force create and mint sufficient assets for 
Parachain's sovereign account AssetHubWestend::force_create_and_mint_asset( ASSET_ID, ASSET_MIN_BALANCE, @@ -167,12 +162,7 @@ fn send_xcm_from_para_to_system_para_paying_fee_with_assets_works() { AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_xcmp_queue_success(Some(Weight::from_parts( - 15_594_564_000, - 562_893, - ))); - + AssetHubWestend::assert_xcmp_queue_success(None); assert_expected_events!( AssetHubWestend, vec![ diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index 02a0bc0207bb..15d39858acca 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -15,53 +15,6 @@ use crate::imports::*; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_xcm_pallet_attempted_complete(Some(Weight::from_parts(631_531_000, 7_186))); - - assert_expected_events!( - Westend, - vec![ - // Amount to teleport is withdrawn from Sender - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == t.sender.account_id, - amount: *amount == t.args.amount, - }, - // Amount to teleport is deposited in Relay's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - ] - ); -} - -fn relay_dest_assertions(t: SystemParaToRelayTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_ump_queue_processed( - true, - Some(AssetHubWestend::para_id()), - Some(Weight::from_parts(307_225_000, 7_186)), - ); - - assert_expected_events!( - Westend, - vec![ - // Amount is withdrawn from Relay Chain's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - fn relay_dest_assertions_fail(_t: SystemParaToRelayTest) { Westend::assert_ump_queue_processed( false, @@ -92,22 +45,6 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { ); } -fn para_dest_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - AssetHubWestend::assert_dmp_queue_complete(Some(Weight::from_parts(157_718_000, 3593))); - - assert_expected_events!( - AssetHubWestend, - vec![ - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - fn penpal_to_ah_foreign_assets_sender_assertions(t: ParaToSystemParaTest) { type RuntimeEvent = ::RuntimeEvent; let system_para_native_asset_location = RelayLocation::get(); @@ -230,17 +167,6 @@ fn ah_to_penpal_foreign_assets_receiver_assertions(t: SystemParaToParaTest) { ); } -fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -274,90 +200,41 @@ fn system_para_to_para_transfer_assets(t: SystemParaToParaTest) -> DispatchResul ) } -/// Limited Teleport of native asset from Relay Chain to the System Parachain should work #[test] -fn limited_teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let dest = Westend::child_location_of(AssetHubWestend::para_id()); - let beneficiary_id = AssetHubWestendReceiver::get(); - let test_args = TestContext { - sender: WestendSender::get(), - receiver: AssetHubWestendReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_limited_teleport_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; +fn teleport_to_other_system_parachains_works() { + let amount = ASSET_HUB_WESTEND_ED * 100; + let native_asset: Assets = (Parent, amount).into(); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); + test_parachain_is_trusted_teleporter!( + AssetHubWestend, // Origin + AssetHubWestendXcmConfig, // XCM Configuration + vec![BridgeHubWestend], // Destinations + (native_asset, amount) + ); } -/// Limited Teleport of native asset from System Parachain to Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` #[test] -fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - limited_teleport_native_assets_from_relay_to_system_para_works(); - - // Init values for Relay Chain - let amount_to_send: Balance = ASSET_HUB_WESTEND_ED * 1000; - let destination = AssetHubWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - let test_args = TestContext { - sender: AssetHubWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - 
let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_limited_teleport_assets); - test.assert(); +fn teleport_from_and_to_relay() { + let amount = WESTEND_ED * 100; + let native_asset: Assets = (Here, amount).into(); - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = AssetHubWestend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); + test_relay_is_trusted_teleporter!( + Westend, + WestendXcmConfig, + vec![AssetHubWestend], + (native_asset, amount) + ); - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); + test_parachain_is_trusted_teleporter_for_relay!( + AssetHubWestend, + AssetHubWestendXcmConfig, + Westend, + amount + ); } /// Limited Teleport of native asset from System Parachain to Relay Chain -/// should't work when there is not enough balance in Relay Chain's `CheckAccount` +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` #[test] fn limited_teleport_native_assets_from_system_para_to_relay_fails() { // Init values for Relay Chain @@ -397,19 +274,6 @@ fn limited_teleport_native_assets_from_system_para_to_relay_fails() { assert_eq!(receiver_balance_after, receiver_balance_before); } -#[test] -fn teleport_to_other_system_parachains_works() { - let amount = ASSET_HUB_WESTEND_ED * 100; - let native_asset: Assets = (Parent, amount).into(); - - test_parachain_is_trusted_teleporter!( - AssetHubWestend, // Origin - AssetHubWestendXcmConfig, // XCM Configuration - vec![BridgeHubWestend], // Destinations - (native_asset, amount) - ); -} - /// Bidirectional teleports of local Penpal assets to Asset Hub as foreign assets while paying /// fees using (reserve transferred) native asset. 
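The two macros used above are shared helpers from `emulated_integration_tests_common` and replace the hand-written round-trip teleport tests deleted in this hunk. A minimal sketch of the balance invariant behind both legs, assuming the macros mirror the hand-written tests they replace (a paraphrase, not the macros' literal expansion):

    /// Sketch only: the invariant checked per (sender, receiver) pair by
    /// `test_relay_is_trusted_teleporter!` and
    /// `test_parachain_is_trusted_teleporter_for_relay!`, under the
    /// assumption that they behave like the deleted manual tests.
    fn assert_teleport_moved_funds(
        sender_before: u128,
        sender_after: u128,
        receiver_before: u128,
        receiver_after: u128,
        amount: u128,
        delivery_fees: u128,
    ) {
        // Sender is debited the teleported amount plus XCM delivery fees.
        assert_eq!(sender_before - amount - delivery_fees, sender_after);
        // Receiver is credited the amount minus execution fees, which are
        // not known up front, so only bounds can be checked.
        assert!(receiver_after > receiver_before);
        assert!(receiver_after < receiver_before + amount);
    }
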
pub fn do_bidirectional_teleport_foreign_assets_between_para_and_asset_hub_using_xt( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 0aefe5b6352c..ac08e48ded68 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -32,7 +32,8 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::ALICE, impls::Inspect, - test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, + test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, @@ -60,15 +61,20 @@ mod imports { }, PenpalAParaPallet as PenpalAPallet, PenpalAssetOwner, }, - rococo_emulated_chain::{genesis::ED as ROCOCO_ED, RococoRelayPallet as RococoPallet}, + rococo_emulated_chain::{ + genesis::ED as ROCOCO_ED, rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig, + RococoRelayPallet as RococoPallet, + }, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, + BridgeHubRococoParaReceiver as BridgeHubRococoReceiver, BridgeHubRococoParaSender as BridgeHubRococoSender, BridgeHubWestendPara as BridgeHubWestend, PenpalAPara as PenpalA, PenpalAParaReceiver as PenpalAReceiver, PenpalAParaSender as PenpalASender, - RococoRelay as Rococo, + RococoRelay as Rococo, RococoRelayReceiver as RococoReceiver, + RococoRelaySender as RococoSender, }; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index 3e8d3357caa8..84328fb7c6d2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -560,7 +560,6 @@ fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u12 2, [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], ); - // (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }) // Fund asset hub sovereign on bridge hub let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( 1, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs index 1fb03748d926..8cdd9613dc52 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/teleport.rs @@ -27,3 +27,23 @@ fn teleport_to_other_system_parachains_works() { (native_asset, amount) ); } + +#[test] +fn teleport_from_and_to_relay() { + let amount = ROCOCO_ED * 100; + let native_asset: Assets = (Here, amount).into(); + + 
test_relay_is_trusted_teleporter!( + Rococo, + RococoXcmConfig, + vec![BridgeHubRococo], + (native_asset, amount) + ); + + test_parachain_is_trusted_teleporter_for_relay!( + BridgeHubRococo, + BridgeHubRococoXcmConfig, + Rococo, + amount + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 3262cc17ba56..5e0462d14882 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -31,7 +31,8 @@ mod imports { pub use emulated_integration_tests_common::{ accounts::ALICE, impls::Inspect, - test_parachain_is_trusted_teleporter, + test_parachain_is_trusted_teleporter, test_parachain_is_trusted_teleporter_for_relay, + test_relay_is_trusted_teleporter, xcm_emulator::{ assert_expected_events, bx, Chain, Parachain as Para, RelayChain as Relay, TestExt, }, @@ -54,14 +55,19 @@ mod imports { penpal_runtime::xcm_config::UniversalLocation as PenpalUniversalLocation, PenpalAssetOwner, PenpalBParaPallet as PenpalBPallet, }, - westend_emulated_chain::WestendRelayPallet as WestendPallet, + westend_emulated_chain::{ + genesis::ED as WESTEND_ED, westend_runtime::xcm_config::XcmConfig as WestendXcmConfig, + WestendRelayPallet as WestendPallet, + }, AssetHubRococoPara as AssetHubRococo, AssetHubRococoParaReceiver as AssetHubRococoReceiver, AssetHubRococoParaSender as AssetHubRococoSender, AssetHubWestendPara as AssetHubWestend, AssetHubWestendParaReceiver as AssetHubWestendReceiver, AssetHubWestendParaSender as AssetHubWestendSender, BridgeHubRococoPara as BridgeHubRococo, BridgeHubWestendPara as BridgeHubWestend, + BridgeHubWestendParaReceiver as BridgeHubWestendReceiver, BridgeHubWestendParaSender as BridgeHubWestendSender, PenpalBPara as PenpalB, PenpalBParaSender as PenpalBSender, WestendRelay as Westend, + WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; pub const ASSET_MIN_BALANCE: u128 = 1000; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index e2c496802e2c..fc8b772a9c7e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -452,7 +452,7 @@ fn send_back_rocs_from_penpal_westend_through_asset_hub_westend_to_asset_hub_roc // fund the AHW's SA on AHR with the ROC tokens held in reserve let sov_ahw_on_ahr = AssetHubRococo::sovereign_account_of_parachain_on_other_global_consensus( - NetworkId::Westend, + Westend, AssetHubWestend::para_id(), ); AssetHubRococo::fund_accounts(vec![(sov_ahw_on_ahr.clone(), amount * 2)]); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs index 64378a844f52..a5add3b82957 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/teleport.rs @@ -27,3 +27,23 @@ fn 
teleport_to_other_system_parachains_works() { (native_asset, amount) ); } + +#[test] +fn teleport_from_and_to_relay() { + let amount = WESTEND_ED * 100; + let native_asset: Assets = (Here, amount).into(); + + test_relay_is_trusted_teleporter!( + Westend, + WestendXcmConfig, + vec![BridgeHubWestend], + (native_asset, amount) + ); + + test_parachain_is_trusted_teleporter_for_relay!( + BridgeHubWestend, + BridgeHubWestendXcmConfig, + Westend, + amount + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml index 3012e2b19f53..c4d281b75a77 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/Cargo.toml @@ -23,6 +23,7 @@ pallet-assets = { workspace = true } pallet-treasury = { workspace = true } pallet-message-queue = { workspace = true } pallet-utility = { workspace = true } +pallet-whitelist = { workspace = true } # Polkadot polkadot-runtime-common = { workspace = true, default-features = true } diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs new file mode 100644 index 000000000000..f97599bda7f0 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship.rs @@ -0,0 +1,72 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use codec::Encode; +use collectives_fellowship::pallet_fellowship_origins::Origin::Fellows as FellowsOrigin; +use frame_support::{assert_ok, sp_runtime::traits::Dispatchable}; + +#[test] +fn fellows_whitelist_call() { + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + type RuntimeCall = ::RuntimeCall; + type RuntimeOrigin = ::RuntimeOrigin; + type Runtime = ::Runtime; + type WestendCall = ::RuntimeCall; + type WestendRuntime = ::Runtime; + + let call_hash = [1u8; 32].into(); + + let whitelist_call = RuntimeCall::PolkadotXcm(pallet_xcm::Call::::send { + dest: bx!(VersionedLocation::from(Location::parent())), + message: bx!(VersionedXcm::from(Xcm(vec![ + UnpaidExecution { weight_limit: Unlimited, check_origin: None }, + Transact { + origin_kind: OriginKind::Xcm, + require_weight_at_most: Weight::from_parts(5_000_000_000, 500_000), + call: WestendCall::Whitelist( + pallet_whitelist::Call::::whitelist_call { call_hash } + ) + .encode() + .into(), + } + ]))), + }); + + let fellows_origin: RuntimeOrigin = FellowsOrigin.into(); + + assert_ok!(whitelist_call.dispatch(fellows_origin)); + + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::PolkadotXcm(pallet_xcm::Event::Sent { .. 
}) => {}, + ] + ); + }); + + Westend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_expected_events!( + Westend, + vec![ + RuntimeEvent::Whitelist(pallet_whitelist::Event::CallWhitelisted { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true, .. }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_salary.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_salary.rs new file mode 100644 index 000000000000..840d2da49463 --- /dev/null +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_salary.rs @@ -0,0 +1,66 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::*; +use collectives_fellowship::FellowshipSalaryPaymaster; +use frame_support::{ + assert_ok, + traits::{fungibles::Mutate, tokens::Pay}, +}; +use xcm_executor::traits::ConvertLocation; + +const FELLOWSHIP_SALARY_PALLET_ID: u8 = 64; + +#[test] +fn pay_salary() { + let asset_id: u32 = 1984; + let fellowship_salary = ( + Parent, + Parachain(CollectivesWestend::para_id().into()), + PalletInstance(FELLOWSHIP_SALARY_PALLET_ID), + ); + let pay_from = + AssetHubLocationToAccountId::convert_location(&fellowship_salary.into()).unwrap(); + let pay_to = Westend::account_id_of(ALICE); + let pay_amount = 9_000_000_000; + + AssetHubWestend::execute_with(|| { + type AssetHubAssets = ::Assets; + assert_ok!(>::mint_into(asset_id, &pay_from, pay_amount * 2)); + }); + + CollectivesWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + + assert_ok!(FellowshipSalaryPaymaster::pay(&pay_to, (), pay_amount)); + assert_expected_events!( + CollectivesWestend, + vec![ + RuntimeEvent::XcmpQueue(cumulus_pallet_xcmp_queue::Event::XcmpMessageSent { .. }) => {}, + ] + ); + }); + + AssetHubWestend::execute_with(|| { + type RuntimeEvent = ::RuntimeEvent; + assert_expected_events!( + AssetHubWestend, + vec![ + RuntimeEvent::Assets(pallet_assets::Event::Transferred { .. }) => {}, + RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { success: true ,.. }) => {}, + ] + ); + }); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs index 40e98a8b6869..ef4e4885183d 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/mod.rs @@ -13,5 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
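The `fellows_whitelist_call` test above dispatches with a dummy `[1u8; 32]` hash, which is enough to exercise the XCM plumbing. In a real whitelisting flow the hash identifies the SCALE-encoded call that will later be dispatched; a minimal sketch, assuming `WestendCall` is in scope as in the test and using a hypothetical call:

    use sp_runtime::traits::{BlakeTwo256, Hash};

    // Hypothetical call to be whitelisted; any dispatchable works here.
    let call = WestendCall::System(frame_system::Call::remark { remark: vec![] });
    // `hash_of` SCALE-encodes the value and hashes it, matching how
    // `pallet_whitelist` identifies calls (Westend's `Hashing` is blake2-256).
    let call_hash = BlakeTwo256::hash_of(&call);
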
+mod fellowship; +mod fellowship_salary; mod fellowship_treasury; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs index b725c24fbea3..06b0b6ba6005 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/lib.rs @@ -15,14 +15,8 @@ #[cfg(test)] mod imports { - pub use codec::Encode; - // Substrate - pub use frame_support::{ - assert_ok, - sp_runtime::{AccountId32, DispatchResult}, - traits::fungibles::Inspect, - }; + pub use frame_support::{assert_ok, sp_runtime::DispatchResult, traits::fungibles::Inspect}; // Polkadot pub use xcm::prelude::*; @@ -36,20 +30,14 @@ mod imports { pub use parachains_common::Balance; pub use rococo_system_emulated_network::{ people_rococo_emulated_chain::{ - genesis::ED as PEOPLE_ROCOCO_ED, people_rococo_runtime::{ - people, xcm_config::XcmConfig as PeopleRococoXcmConfig, - ExistentialDeposit as PeopleRococoExistentialDeposit, Runtime as PeopleRuntime, + xcm_config::XcmConfig as PeopleRococoXcmConfig, + ExistentialDeposit as PeopleRococoExistentialDeposit, }, PeopleRococoParaPallet as PeopleRococoPallet, }, rococo_emulated_chain::{ - genesis::ED as ROCOCO_ED, - rococo_runtime::{ - xcm_config::XcmConfig as RococoXcmConfig, BasicDeposit, ByteDeposit, - MaxAdditionalFields, MaxSubAccounts, Runtime as RococoRuntime, - RuntimeOrigin as RococoOrigin, SubAccountDeposit, - }, + genesis::ED as ROCOCO_ED, rococo_runtime::xcm_config::XcmConfig as RococoXcmConfig, RococoRelayPallet as RococoPallet, }, PeopleRococoPara as PeopleRococo, PeopleRococoParaReceiver as PeopleRococoReceiver, @@ -57,7 +45,6 @@ mod imports { RococoRelayReceiver as RococoReceiver, RococoRelaySender as RococoSender, }; - pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/mod.rs index 3f18621224ac..08749b295dc2 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/mod.rs @@ -14,5 +14,4 @@ // limitations under the License. mod claim_assets; -mod reap_identity; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs deleted file mode 100644 index 10f0c61ed63c..000000000000 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/reap_identity.rs +++ /dev/null @@ -1,545 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -//! # OnReapIdentity Tests -//! -//! This file contains the test cases for migrating Identity data away from the Rococo Relay -//! chain and to the PeopleRococo parachain. This migration is part of the broader Minimal Relay -//! effort: -//! https://github.com/polkadot-fellows/RFCs/blob/main/text/0032-minimal-relay.md -//! -//! ## Overview -//! -//! The tests validate the robustness and correctness of the `OnReapIdentityHandler` -//! ensuring that it behaves as expected in various scenarios. Key aspects tested include: -//! -//! - **Deposit Handling**: Confirming that deposits are correctly migrated from the Relay Chain to -//! the People parachain in various scenarios (different `IdentityInfo` fields and different -//! numbers of sub-accounts). -//! -//! ### Test Scenarios -//! -//! The tests are categorized into several scenarios, each resulting in different deposits required -//! on the destination parachain. The tests ensure: -//! -//! - Reserved deposits on the Relay Chain are fully released; -//! - The freed deposit from the Relay Chain is sufficient for the parachain deposit; and -//! - The account will exist on the parachain. - -use crate::imports::*; -use frame_support::BoundedVec; -use pallet_balances::Event as BalancesEvent; -use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent, IdentityOf, SubsOf}; -use people::{ - BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, - IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, -}; -use rococo_runtime_constants::currency::*; -use rococo_system_emulated_network::{ - rococo_emulated_chain::RococoRelayPallet, RococoRelay, RococoRelaySender, -}; - -type Balance = u128; -type RococoIdentity = ::Identity; -type RococoBalances = ::Balances; -type RococoIdentityMigrator = ::IdentityMigrator; -type PeopleRococoIdentity = ::Identity; -type PeopleRococoBalances = ::Balances; - -#[derive(Clone, Debug)] -struct Identity { - relay: IdentityInfo, - para: IdentityInfoParachain, - subs: Subs, -} - -impl Identity { - fn new( - full: bool, - additional: Option>, - subs: Subs, - ) -> Self { - let pgp_fingerprint = [ - 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, - 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, - ]; - let make_data = |data: &[u8], full: bool| -> Data { - if full { - Data::Raw(data.to_vec().try_into().unwrap()) - } else { - Data::None - } - }; - let (github, discord) = additional - .as_ref() - .and_then(|vec| vec.first()) - .map(|(g, d)| (g.clone(), d.clone())) - .unwrap_or((Data::None, Data::None)); - Self { - relay: IdentityInfo { - display: make_data(b"xcm-test", full), - legal: make_data(b"The Xcm Test, Esq.", full), - web: make_data(b"https://visitme/", full), - riot: make_data(b"xcm-riot", full), - email: make_data(b"xcm-test@gmail.com", full), - pgp_fingerprint: Some(pgp_fingerprint), - image: make_data(b"xcm-test.png", full), - twitter: make_data(b"@xcm-test", full), - additional: additional.unwrap_or_default(), - }, - para: IdentityInfoParachain { - display: make_data(b"xcm-test", full), - legal: make_data(b"The Xcm Test, Esq.", full), - web: make_data(b"https://visitme/", full), - matrix: make_data(b"xcm-matrix@server", full), - email: make_data(b"xcm-test@gmail.com", full), - pgp_fingerprint: Some(pgp_fingerprint), - image: make_data(b"xcm-test.png", full), - twitter: make_data(b"@xcm-test", full), - github, - 
discord, - }, - subs, - } - } -} - -#[derive(Clone, Debug)] -enum Subs { - Zero, - Many(u32), -} - -enum IdentityOn<'a> { - Relay(&'a IdentityInfo), - Para(&'a IdentityInfoParachain), -} - -impl IdentityOn<'_> { - fn calculate_deposit(self) -> Balance { - match self { - IdentityOn::Relay(id) => { - let base_deposit = BasicDeposit::get(); - let byte_deposit = - ByteDeposit::get() * TryInto::::try_into(id.encoded_size()).unwrap(); - base_deposit + byte_deposit - }, - IdentityOn::Para(id) => { - let base_deposit = BasicDepositParachain::get(); - let byte_deposit = ByteDepositParachain::get() * - TryInto::::try_into(id.encoded_size()).unwrap(); - base_deposit + byte_deposit - }, - } - } -} - -/// Generate an `AccountId32` from a `u32`. -/// This creates a 32-byte array, initially filled with `255`, and then repeatedly fills it -/// with the 4-byte little-endian representation of the `u32` value, until the array is full. -/// -/// **Example**: -/// -/// `account_from_u32(5)` will return an `AccountId32` with the bytes -/// `[0, 5, 0, 0, 0, 0, 0, 0, 0, 5 ... ]` -fn account_from_u32(id: u32) -> AccountId32 { - let mut buffer = [255u8; 32]; - let id_bytes = id.to_le_bytes(); - let id_size = id_bytes.len(); - for chunk in buffer.chunks_mut(id_size) { - chunk.clone_from_slice(&id_bytes); - } - AccountId32::new(buffer) -} - -// Set up the Relay Chain with an identity. -fn set_id_relay(id: &Identity) -> Balance { - let mut total_deposit: Balance = 0; - - // Set identity and Subs on Relay Chain - RococoRelay::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_ok!(RococoIdentity::set_identity( - RococoOrigin::signed(RococoRelaySender::get()), - Box::new(id.relay.clone()) - )); - - if let Subs::Many(n) = id.subs { - let subs: Vec<_> = (0..n) - .map(|i| (account_from_u32(i), Data::Raw(b"name".to_vec().try_into().unwrap()))) - .collect(); - - assert_ok!(RococoIdentity::set_subs( - RococoOrigin::signed(RococoRelaySender::get()), - subs, - )); - } - - let reserved_balance = RococoBalances::reserved_balance(RococoRelaySender::get()); - let id_deposit = IdentityOn::Relay(&id.relay).calculate_deposit(); - - let total_deposit = match id.subs { - Subs::Zero => { - total_deposit = id_deposit; // No subs - assert_expected_events!( - RococoRelay, - vec![ - RuntimeEvent::Identity(IdentityEvent::IdentitySet { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == RococoRelaySender::get(), - amount: *amount == id_deposit, - }, - ] - ); - total_deposit - }, - Subs::Many(n) => { - let sub_account_deposit = n as Balance * SubAccountDeposit::get(); - total_deposit = - sub_account_deposit + IdentityOn::Relay(&id.relay).calculate_deposit(); - assert_expected_events!( - RococoRelay, - vec![ - RuntimeEvent::Identity(IdentityEvent::IdentitySet { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == RococoRelaySender::get(), - amount: *amount == id_deposit, - }, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == RococoRelaySender::get(), - amount: *amount == sub_account_deposit, - }, - ] - ); - total_deposit - }, - }; - - assert_eq!(reserved_balance, total_deposit); - }); - total_deposit -} - -// Set up the parachain with an identity and (maybe) sub accounts, but with zero deposits. 
-fn assert_set_id_parachain(id: &Identity) { - // Set identity and Subs on Parachain with zero deposit - PeopleRococo::execute_with(|| { - let free_bal = PeopleRococoBalances::free_balance(PeopleRococoSender::get()); - let reserved_balance = PeopleRococoBalances::reserved_balance(PeopleRococoSender::get()); - - // total balance at Genesis should be zero - assert_eq!(reserved_balance + free_bal, 0); - - assert_ok!(PeopleRococoIdentity::set_identity_no_deposit( - &PeopleRococoSender::get(), - id.para.clone(), - )); - - match id.subs { - Subs::Zero => {}, - Subs::Many(n) => { - let subs: Vec<_> = (0..n) - .map(|ii| { - (account_from_u32(ii), Data::Raw(b"name".to_vec().try_into().unwrap())) - }) - .collect(); - assert_ok!(PeopleRococoIdentity::set_subs_no_deposit( - &PeopleRococoSender::get(), - subs, - )); - }, - } - - // No amount should be reserved as deposit amounts are set to 0. - let reserved_balance = PeopleRococoBalances::reserved_balance(PeopleRococoSender::get()); - assert_eq!(reserved_balance, 0); - assert!(IdentityOf::::get(PeopleRococoSender::get()).is_some()); - - let (_, sub_accounts) = SubsOf::::get(PeopleRococoSender::get()); - - match id.subs { - Subs::Zero => assert_eq!(sub_accounts.len(), 0), - Subs::Many(n) => assert_eq!(sub_accounts.len(), n as usize), - } - }); -} - -// Reap the identity on the Relay Chain and assert that the correct things happen there. -fn assert_reap_id_relay(total_deposit: Balance, id: &Identity) { - RococoRelay::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - let free_bal_before_reap = RococoBalances::free_balance(RococoRelaySender::get()); - let reserved_balance = RococoBalances::reserved_balance(RococoRelaySender::get()); - - assert_eq!(reserved_balance, total_deposit); - - assert_ok!(RococoIdentityMigrator::reap_identity( - RococoOrigin::signed(RococoRelaySender::get()), - RococoRelaySender::get() - )); - - let remote_deposit = match id.subs { - Subs::Zero => calculate_remote_deposit(id.relay.encoded_size() as u32, 0), - Subs::Many(n) => calculate_remote_deposit(id.relay.encoded_size() as u32, n), - }; - - assert_expected_events!( - RococoRelay, - vec![ - // `reap_identity` sums the identity and subs deposits and unreserves them in one - // call. Therefore, we only expect one `Unreserved` event. - RuntimeEvent::Balances(BalancesEvent::Unreserved { who, amount }) => { - who: *who == RococoRelaySender::get(), - amount: *amount == total_deposit, - }, - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::IdentityReaped { - who, - }) => { - who: *who == PeopleRococoSender::get(), - }, - ] - ); - // Identity should be gone. - assert!(IdentityOf::::get(RococoRelaySender::get()).is_none()); - - // Subs should be gone. - let (_, sub_accounts) = SubsOf::::get(RococoRelaySender::get()); - assert_eq!(sub_accounts.len(), 0); - - let reserved_balance = RococoBalances::reserved_balance(RococoRelaySender::get()); - assert_eq!(reserved_balance, 0); - - // Free balance should be greater (i.e. the teleport should work even if 100% of an - // account's balance is reserved for Identity). - let free_bal_after_reap = RococoBalances::free_balance(RococoRelaySender::get()); - assert!(free_bal_after_reap > free_bal_before_reap); - - // Implicit: total_deposit > remote_deposit. As in, accounts should always have enough - // reserved for the parachain deposit. 
- assert_eq!(free_bal_after_reap, free_bal_before_reap + total_deposit - remote_deposit); - }); -} - -// Reaping the identity on the Relay Chain will have sent an XCM program to the parachain. Ensure -// that everything happens as expected. -fn assert_reap_parachain(id: &Identity) { - PeopleRococo::execute_with(|| { - let reserved_balance = PeopleRococoBalances::reserved_balance(PeopleRococoSender::get()); - let id_deposit = IdentityOn::Para(&id.para).calculate_deposit(); - let total_deposit = match id.subs { - Subs::Zero => id_deposit, - Subs::Many(n) => id_deposit + n as Balance * SubAccountDepositParachain::get(), - }; - assert_reap_events(id_deposit, id); - assert_eq!(reserved_balance, total_deposit); - - // Should have at least one ED after in free balance after the reap. - assert!(PeopleRococoBalances::free_balance(PeopleRococoSender::get()) >= PEOPLE_ROCOCO_ED); - }); -} - -// Assert the events that should happen on the parachain upon reaping an identity on the Relay -// Chain. -fn assert_reap_events(id_deposit: Balance, id: &Identity) { - type RuntimeEvent = ::RuntimeEvent; - match id.subs { - Subs::Zero => { - assert_expected_events!( - PeopleRococo, - vec![ - // Deposit and Endowed from teleport - RuntimeEvent::Balances(BalancesEvent::Minted { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Endowed { .. }) => {}, - // Amount reserved for identity info - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleRococoSender::get(), - amount: *amount == id_deposit, - }, - // Confirmation from Migrator with individual identity and subs deposits - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::DepositUpdated { - who, identity, subs - }) => { - who: *who == PeopleRococoSender::get(), - identity: *identity == id_deposit, - subs: *subs == 0, - }, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { .. }) => {}, - ] - ); - }, - Subs::Many(n) => { - let subs_deposit = n as Balance * SubAccountDepositParachain::get(); - assert_expected_events!( - PeopleRococo, - vec![ - // Deposit and Endowed from teleport - RuntimeEvent::Balances(BalancesEvent::Minted { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Endowed { .. }) => {}, - // Amount reserved for identity info - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleRococoSender::get(), - amount: *amount == id_deposit, - }, - // Amount reserved for subs - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleRococoSender::get(), - amount: *amount == subs_deposit, - }, - // Confirmation from Migrator with individual identity and subs deposits - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::DepositUpdated { - who, identity, subs - }) => { - who: *who == PeopleRococoSender::get(), - identity: *identity == id_deposit, - subs: *subs == subs_deposit, - }, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { .. }) => {}, - ] - ); - }, - }; -} - -/// Duplicate of the impl of `ToParachainIdentityReaper` in the Rococo runtime. -fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { - // Note: These `deposit` functions and `EXISTENTIAL_DEPOSIT` correspond to the Relay Chain's. 
- // Pulled in: use rococo_runtime_constants::currency::*; - let para_basic_deposit = deposit(1, 17) / 100; - let para_byte_deposit = deposit(0, 1) / 100; - let para_sub_account_deposit = deposit(1, 53) / 100; - let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; - - // pallet deposits - let id_deposit = - para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); - let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); - - id_deposit - .saturating_add(subs_deposit) - .saturating_add(para_existential_deposit.saturating_mul(2)) -} - -// Represent some `additional` data that would not be migrated to the parachain. The encoded size, -// and thus the byte deposit, should decrease. -fn nonsensical_additional() -> BoundedVec<(Data, Data), MaxAdditionalFields> { - BoundedVec::try_from(vec![( - Data::Raw(b"fOo".to_vec().try_into().unwrap()), - Data::Raw(b"baR".to_vec().try_into().unwrap()), - )]) - .unwrap() -} - -// Represent some `additional` data that will be migrated to the parachain as first-class fields. -fn meaningful_additional() -> BoundedVec<(Data, Data), MaxAdditionalFields> { - BoundedVec::try_from(vec![ - ( - Data::Raw(b"github".to_vec().try_into().unwrap()), - Data::Raw(b"niels-username".to_vec().try_into().unwrap()), - ), - ( - Data::Raw(b"discord".to_vec().try_into().unwrap()), - Data::Raw(b"bohr-username".to_vec().try_into().unwrap()), - ), - ]) - .unwrap() -} - -// Execute a single test case. -fn assert_relay_para_flow(id: &Identity) { - let total_deposit = set_id_relay(id); - assert_set_id_parachain(id); - assert_reap_id_relay(total_deposit, id); - assert_reap_parachain(id); -} - -// Tests with empty `IdentityInfo`. - -#[test] -fn on_reap_identity_works_for_minimal_identity_with_zero_subs() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_minimal_identity() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_minimal_identity_with_max_subs() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Many(MaxSubAccounts::get()))); -} - -// Tests with full `IdentityInfo`. - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional_max_subs() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Many(MaxSubAccounts::get()))); -} - -// Tests with full `IdentityInfo` and `additional` fields that will _not_ be migrated. - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, Some(nonsensical_additional()), Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional() { - assert_relay_para_flow(&Identity::new(true, Some(nonsensical_additional()), Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional_max_subs() { - assert_relay_para_flow(&Identity::new( - true, - Some(nonsensical_additional()), - Subs::Many(MaxSubAccounts::get()), - )); -} - -// Tests with full `IdentityInfo` and `additional` fields that will be migrated. 
- -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, Some(meaningful_additional()), Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional() { - assert_relay_para_flow(&Identity::new(true, Some(meaningful_additional()), Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional_max_subs() { - assert_relay_para_flow(&Identity::new( - true, - Some(meaningful_additional()), - Subs::Many(MaxSubAccounts::get()), - )); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs index c28b305b2c94..44e6b3934f0e 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-rococo/src/tests/teleport.rs @@ -14,46 +14,27 @@ // limitations under the License. use crate::imports::*; +use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, +}; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - Rococo::assert_xcm_pallet_attempted_complete(None); +#[test] +fn teleport_from_and_to_relay() { + let amount = ROCOCO_ED * 100; + let native_asset: Assets = (Here, amount).into(); - assert_expected_events!( + test_relay_is_trusted_teleporter!( Rococo, - vec![ - // Amount to teleport is withdrawn from Sender - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == t.sender.account_id, - amount: *amount == t.args.amount, - }, - // Amount to teleport is deposited in Relay's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - ] + RococoXcmConfig, + vec![PeopleRococo], + (native_asset, amount) ); -} -fn relay_dest_assertions(t: SystemParaToRelayTest) { - type RuntimeEvent = ::RuntimeEvent; - - Rococo::assert_ump_queue_processed(true, Some(PeopleRococo::para_id()), None); - - assert_expected_events!( + test_parachain_is_trusted_teleporter_for_relay!( + PeopleRococo, + PeopleRococoXcmConfig, Rococo, - vec![ - // Amount is withdrawn from Relay Chain's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == t.receiver.account_id, - }, - ] + amount ); } @@ -80,33 +61,6 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { ); } -fn para_dest_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - PeopleRococo::assert_dmp_queue_complete(None); - - assert_expected_events!( - PeopleRococo, - vec![ - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - -fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -118,92 +72,8 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -/// Limited Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn limited_teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = ROCOCO_ED * 1000; - let dest = Rococo::child_location_of(PeopleRococo::para_id()); - let beneficiary_id = PeopleRococoReceiver::get(); - let test_args = TestContext { - sender: RococoSender::get(), - receiver: PeopleRococoReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_limited_teleport_assets); - test.assert(); - - let delivery_fees = Rococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Limited Teleport of native asset from System Parachain to Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - limited_teleport_native_assets_from_relay_to_system_para_works(); - - let amount_to_send: Balance = PEOPLE_ROCOCO_ED * 1000; - let destination = PeopleRococo::parent_location(); - let beneficiary_id = RococoReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - // Fund a sender - PeopleRococo::fund_accounts(vec![(PeopleRococoSender::get(), ROCOCO_ED * 2_000u128)]); - - let test_args = TestContext { - sender: PeopleRococoSender::get(), - receiver: RococoReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_limited_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = PeopleRococo::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, 
test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - /// Limited Teleport of native asset from System Parachain to Relay Chain -/// should't work when there is not enough balance in Relay Chain's `CheckAccount` +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` #[test] fn limited_teleport_native_assets_from_system_para_to_relay_fails() { // Init values for Relay Chain diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml index f7e1cce85a2c..aa6eebc5458f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/Cargo.toml @@ -15,6 +15,7 @@ frame-support = { workspace = true } pallet-balances = { workspace = true } pallet-message-queue = { workspace = true } pallet-identity = { workspace = true } +pallet-xcm = { workspace = true } sp-runtime = { workspace = true } # Polkadot diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs index 386a1b91c85b..418cfea07ddc 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/lib.rs @@ -15,13 +15,8 @@ #[cfg(test)] mod imports { - pub use codec::Encode; // Substrate - pub use frame_support::{ - assert_ok, - sp_runtime::{AccountId32, DispatchResult}, - traits::fungibles::Inspect, - }; + pub use frame_support::{assert_ok, sp_runtime::DispatchResult, traits::fungibles::Inspect}; // Polkadot pub use xcm::prelude::*; @@ -36,20 +31,14 @@ mod imports { pub use westend_system_emulated_network::{ self, people_westend_emulated_chain::{ - genesis::ED as PEOPLE_WESTEND_ED, people_westend_runtime::{ - people, xcm_config::XcmConfig as PeopleWestendXcmConfig, - ExistentialDeposit as PeopleWestendExistentialDeposit, Runtime as PeopleRuntime, + xcm_config::XcmConfig as PeopleWestendXcmConfig, + ExistentialDeposit as PeopleWestendExistentialDeposit, }, PeopleWestendParaPallet as PeopleWestendPallet, }, westend_emulated_chain::{ - genesis::ED as WESTEND_ED, - westend_runtime::{ - xcm_config::XcmConfig as WestendXcmConfig, BasicDeposit, ByteDeposit, - MaxAdditionalFields, MaxSubAccounts, Runtime as WestendRuntime, - RuntimeOrigin as WestendOrigin, SubAccountDeposit, - }, + genesis::ED as WESTEND_ED, westend_runtime::xcm_config::XcmConfig as WestendXcmConfig, WestendRelayPallet as WestendPallet, }, PeopleWestendPara as PeopleWestend, PeopleWestendParaReceiver as PeopleWestendReceiver, @@ -57,7 +46,6 @@ mod imports { WestendRelayReceiver as WestendReceiver, WestendRelaySender as WestendSender, }; - pub type RelayToSystemParaTest = Test; pub type SystemParaToRelayTest = Test; } diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs index 3f18621224ac..08749b295dc2 100644 --- 
a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/mod.rs @@ -14,5 +14,4 @@ // limitations under the License. mod claim_assets; -mod reap_identity; mod teleport; diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs deleted file mode 100644 index cfbf4d7d9580..000000000000 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/reap_identity.rs +++ /dev/null @@ -1,547 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! # OnReapIdentity Tests -//! -//! This file contains the test cases for migrating Identity data away from the Westend Relay -//! chain and to the PeopleWestend parachain. This migration is part of the broader Minimal Relay -//! effort: -//! https://github.com/polkadot-fellows/RFCs/blob/main/text/0032-minimal-relay.md -//! -//! ## Overview -//! -//! The tests validate the robustness and correctness of the `OnReapIdentityHandler` -//! ensuring that it behaves as expected in various scenarios. Key aspects tested include: -//! -//! - **Deposit Handling**: Confirming that deposits are correctly migrated from the Relay Chain to -//! the People parachain in various scenarios (different `IdentityInfo` fields and different -//! numbers of sub-accounts). -//! -//! ### Test Scenarios -//! -//! The tests are categorized into several scenarios, each resulting in different deposits required -//! on the destination parachain. The tests ensure: -//! -//! - Reserved deposits on the Relay Chain are fully released; -//! - The freed deposit from the Relay Chain is sufficient for the parachain deposit; and -//! - The account will exist on the parachain. 
- -use crate::imports::*; -use frame_support::BoundedVec; -use pallet_balances::Event as BalancesEvent; -use pallet_identity::{legacy::IdentityInfo, Data, Event as IdentityEvent, IdentityOf, SubsOf}; -use people::{ - BasicDeposit as BasicDepositParachain, ByteDeposit as ByteDepositParachain, - IdentityInfo as IdentityInfoParachain, SubAccountDeposit as SubAccountDepositParachain, -}; -use westend_runtime_constants::currency::*; -use westend_system_emulated_network::{ - westend_emulated_chain::WestendRelayPallet, WestendRelay, WestendRelaySender, -}; - -type Balance = u128; -type WestendIdentity = ::Identity; -type WestendBalances = ::Balances; -type WestendIdentityMigrator = ::IdentityMigrator; -type PeopleWestendIdentity = ::Identity; -type PeopleWestendBalances = ::Balances; - -#[derive(Clone, Debug)] -struct Identity { - relay: IdentityInfo, - para: IdentityInfoParachain, - subs: Subs, -} - -impl Identity { - fn new( - full: bool, - additional: Option>, - subs: Subs, - ) -> Self { - let pgp_fingerprint = [ - 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, - 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, - ]; - let make_data = |data: &[u8], full: bool| -> Data { - if full { - Data::Raw(data.to_vec().try_into().unwrap()) - } else { - Data::None - } - }; - let (github, discord) = additional - .as_ref() - .and_then(|vec| vec.first()) - .map(|(g, d)| (g.clone(), d.clone())) - .unwrap_or((Data::None, Data::None)); - Self { - relay: IdentityInfo { - display: make_data(b"xcm-test", full), - legal: make_data(b"The Xcm Test, Esq.", full), - web: make_data(b"https://visitme/", full), - riot: make_data(b"xcm-riot", full), - email: make_data(b"xcm-test@gmail.com", full), - pgp_fingerprint: Some(pgp_fingerprint), - image: make_data(b"xcm-test.png", full), - twitter: make_data(b"@xcm-test", full), - additional: additional.unwrap_or_default(), - }, - para: IdentityInfoParachain { - display: make_data(b"xcm-test", full), - legal: make_data(b"The Xcm Test, Esq.", full), - web: make_data(b"https://visitme/", full), - matrix: make_data(b"xcm-matrix@server", full), - email: make_data(b"xcm-test@gmail.com", full), - pgp_fingerprint: Some(pgp_fingerprint), - image: make_data(b"xcm-test.png", full), - twitter: make_data(b"@xcm-test", full), - github, - discord, - }, - subs, - } - } -} - -#[derive(Clone, Debug)] -enum Subs { - Zero, - Many(u32), -} - -enum IdentityOn<'a> { - Relay(&'a IdentityInfo), - Para(&'a IdentityInfoParachain), -} - -impl IdentityOn<'_> { - fn calculate_deposit(self) -> Balance { - match self { - IdentityOn::Relay(id) => { - let base_deposit = BasicDeposit::get(); - let byte_deposit = - ByteDeposit::get() * TryInto::::try_into(id.encoded_size()).unwrap(); - base_deposit + byte_deposit - }, - IdentityOn::Para(id) => { - let base_deposit = BasicDepositParachain::get(); - let byte_deposit = ByteDepositParachain::get() * - TryInto::::try_into(id.encoded_size()).unwrap(); - base_deposit + byte_deposit - }, - } - } -} - -/// Generate an `AccountId32` from a `u32`. -/// This creates a 32-byte array, initially filled with `255`, and then repeatedly fills it -/// with the 4-byte little-endian representation of the `u32` value, until the array is full. -/// -/// **Example**: -/// -/// `account_from_u32(5)` will return an `AccountId32` with the bytes -/// `[0, 5, 0, 0, 0, 0, 0, 0, 0, 5 ... 
]` -fn account_from_u32(id: u32) -> AccountId32 { - let mut buffer = [255u8; 32]; - let id_bytes = id.to_le_bytes(); - let id_size = id_bytes.len(); - for chunk in buffer.chunks_mut(id_size) { - chunk.clone_from_slice(&id_bytes); - } - AccountId32::new(buffer) -} - -// Set up the Relay Chain with an identity. -fn set_id_relay(id: &Identity) -> Balance { - let mut total_deposit: Balance = 0; - - // Set identity and Subs on Relay Chain - WestendRelay::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - - assert_ok!(WestendIdentity::set_identity( - WestendOrigin::signed(WestendRelaySender::get()), - Box::new(id.relay.clone()) - )); - - if let Subs::Many(n) = id.subs { - let subs: Vec<_> = (0..n) - .map(|i| (account_from_u32(i), Data::Raw(b"name".to_vec().try_into().unwrap()))) - .collect(); - - assert_ok!(WestendIdentity::set_subs( - WestendOrigin::signed(WestendRelaySender::get()), - subs, - )); - } - - let reserved_balance = WestendBalances::reserved_balance(WestendRelaySender::get()); - let id_deposit = IdentityOn::Relay(&id.relay).calculate_deposit(); - - let total_deposit = match id.subs { - Subs::Zero => { - total_deposit = id_deposit; // No subs - assert_expected_events!( - WestendRelay, - vec![ - RuntimeEvent::Identity(IdentityEvent::IdentitySet { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == WestendRelaySender::get(), - amount: *amount == id_deposit, - }, - ] - ); - total_deposit - }, - Subs::Many(n) => { - let sub_account_deposit = n as Balance * SubAccountDeposit::get(); - total_deposit = - sub_account_deposit + IdentityOn::Relay(&id.relay).calculate_deposit(); - assert_expected_events!( - WestendRelay, - vec![ - RuntimeEvent::Identity(IdentityEvent::IdentitySet { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == WestendRelaySender::get(), - amount: *amount == id_deposit, - }, - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == WestendRelaySender::get(), - amount: *amount == sub_account_deposit, - }, - ] - ); - total_deposit - }, - }; - - assert_eq!(reserved_balance, total_deposit); - }); - total_deposit -} - -// Set up the parachain with an identity and (maybe) sub accounts, but with zero deposits. -fn assert_set_id_parachain(id: &Identity) { - // Set identity and Subs on Parachain with zero deposit - PeopleWestend::execute_with(|| { - let free_bal = PeopleWestendBalances::free_balance(PeopleWestendSender::get()); - let reserved_balance = PeopleWestendBalances::reserved_balance(PeopleWestendSender::get()); - - // total balance at Genesis should be zero - assert_eq!(reserved_balance + free_bal, 0); - - assert_ok!(PeopleWestendIdentity::set_identity_no_deposit( - &PeopleWestendSender::get(), - id.para.clone(), - )); - - match id.subs { - Subs::Zero => {}, - Subs::Many(n) => { - let subs: Vec<_> = (0..n) - .map(|ii| { - (account_from_u32(ii), Data::Raw(b"name".to_vec().try_into().unwrap())) - }) - .collect(); - assert_ok!(PeopleWestendIdentity::set_subs_no_deposit( - &PeopleWestendSender::get(), - subs, - )); - }, - } - - // No amount should be reserved as deposit amounts are set to 0. 
- let reserved_balance = PeopleWestendBalances::reserved_balance(PeopleWestendSender::get()); - assert_eq!(reserved_balance, 0); - assert!(IdentityOf::::get(PeopleWestendSender::get()).is_some()); - - let (_, sub_accounts) = SubsOf::::get(PeopleWestendSender::get()); - - match id.subs { - Subs::Zero => assert_eq!(sub_accounts.len(), 0), - Subs::Many(n) => assert_eq!(sub_accounts.len(), n as usize), - } - }); -} - -// Reap the identity on the Relay Chain and assert that the correct things happen there. -fn assert_reap_id_relay(total_deposit: Balance, id: &Identity) { - WestendRelay::execute_with(|| { - type RuntimeEvent = ::RuntimeEvent; - let free_bal_before_reap = WestendBalances::free_balance(WestendRelaySender::get()); - let reserved_balance = WestendBalances::reserved_balance(WestendRelaySender::get()); - - assert_eq!(reserved_balance, total_deposit); - - assert_ok!(WestendIdentityMigrator::reap_identity( - WestendOrigin::signed(WestendRelaySender::get()), - WestendRelaySender::get() - )); - - let remote_deposit = match id.subs { - Subs::Zero => calculate_remote_deposit(id.relay.encoded_size() as u32, 0), - Subs::Many(n) => calculate_remote_deposit(id.relay.encoded_size() as u32, n), - }; - - assert_expected_events!( - WestendRelay, - vec![ - // `reap_identity` sums the identity and subs deposits and unreserves them in one - // call. Therefore, we only expect one `Unreserved` event. - RuntimeEvent::Balances(BalancesEvent::Unreserved { who, amount }) => { - who: *who == WestendRelaySender::get(), - amount: *amount == total_deposit, - }, - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::IdentityReaped { - who, - }) => { - who: *who == PeopleWestendSender::get(), - }, - ] - ); - // Identity should be gone. - assert!(IdentityOf::::get(WestendRelaySender::get()).is_none()); - - // Subs should be gone. - let (_, sub_accounts) = SubsOf::::get(WestendRelaySender::get()); - assert_eq!(sub_accounts.len(), 0); - - let reserved_balance = WestendBalances::reserved_balance(WestendRelaySender::get()); - assert_eq!(reserved_balance, 0); - - // Free balance should be greater (i.e. the teleport should work even if 100% of an - // account's balance is reserved for Identity). - let free_bal_after_reap = WestendBalances::free_balance(WestendRelaySender::get()); - assert!(free_bal_after_reap > free_bal_before_reap); - - // Implicit: total_deposit > remote_deposit. As in, accounts should always have enough - // reserved for the parachain deposit. - assert_eq!(free_bal_after_reap, free_bal_before_reap + total_deposit - remote_deposit); - }); -} - -// Reaping the identity on the Relay Chain will have sent an XCM program to the parachain. Ensure -// that everything happens as expected. -fn assert_reap_parachain(id: &Identity) { - PeopleWestend::execute_with(|| { - let reserved_balance = PeopleWestendBalances::reserved_balance(PeopleWestendSender::get()); - let id_deposit = IdentityOn::Para(&id.para).calculate_deposit(); - let total_deposit = match id.subs { - Subs::Zero => id_deposit, - Subs::Many(n) => id_deposit + n as Balance * SubAccountDepositParachain::get(), - }; - assert_reap_events(id_deposit, id); - assert_eq!(reserved_balance, total_deposit); - - // Should have at least one ED after in free balance after the reap. - assert!( - PeopleWestendBalances::free_balance(PeopleWestendSender::get()) >= PEOPLE_WESTEND_ED - ); - }); -} - -// Assert the events that should happen on the parachain upon reaping an identity on the Relay -// Chain. 
-fn assert_reap_events(id_deposit: Balance, id: &Identity) { - type RuntimeEvent = ::RuntimeEvent; - match id.subs { - Subs::Zero => { - assert_expected_events!( - PeopleWestend, - vec![ - // Deposit and Endowed from teleport - RuntimeEvent::Balances(BalancesEvent::Minted { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Endowed { .. }) => {}, - // Amount reserved for identity info - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleWestendSender::get(), - amount: *amount == id_deposit, - }, - // Confirmation from Migrator with individual identity and subs deposits - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::DepositUpdated { - who, identity, subs - }) => { - who: *who == PeopleWestendSender::get(), - identity: *identity == id_deposit, - subs: *subs == 0, - }, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { .. }) => {}, - ] - ); - }, - Subs::Many(n) => { - let subs_deposit = n as Balance * SubAccountDepositParachain::get(); - assert_expected_events!( - PeopleWestend, - vec![ - // Deposit and Endowed from teleport - RuntimeEvent::Balances(BalancesEvent::Minted { .. }) => {}, - RuntimeEvent::Balances(BalancesEvent::Endowed { .. }) => {}, - // Amount reserved for identity info - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleWestendSender::get(), - amount: *amount == id_deposit, - }, - // Amount reserved for subs - RuntimeEvent::Balances(BalancesEvent::Reserved { who, amount }) => { - who: *who == PeopleWestendSender::get(), - amount: *amount == subs_deposit, - }, - // Confirmation from Migrator with individual identity and subs deposits - RuntimeEvent::IdentityMigrator( - polkadot_runtime_common::identity_migrator::Event::DepositUpdated { - who, identity, subs - }) => { - who: *who == PeopleWestendSender::get(), - identity: *identity == id_deposit, - subs: *subs == subs_deposit, - }, - RuntimeEvent::MessageQueue(pallet_message_queue::Event::Processed { .. }) => {}, - ] - ); - }, - }; -} - -/// Duplicate of the impl of `ToParachainIdentityReaper` in the Westend runtime. -fn calculate_remote_deposit(bytes: u32, subs: u32) -> Balance { - // Note: These `deposit` functions and `EXISTENTIAL_DEPOSIT` correspond to the Relay Chain's. - // Pulled in: use westend_runtime_constants::currency::*; - let para_basic_deposit = deposit(1, 17) / 100; - let para_byte_deposit = deposit(0, 1) / 100; - let para_sub_account_deposit = deposit(1, 53) / 100; - let para_existential_deposit = EXISTENTIAL_DEPOSIT / 10; - - // pallet deposits - let id_deposit = - para_basic_deposit.saturating_add(para_byte_deposit.saturating_mul(bytes as Balance)); - let subs_deposit = para_sub_account_deposit.saturating_mul(subs as Balance); - - id_deposit - .saturating_add(subs_deposit) - .saturating_add(para_existential_deposit.saturating_mul(2)) -} - -// Represent some `additional` data that would not be migrated to the parachain. The encoded size, -// and thus the byte deposit, should decrease. -fn nonsensical_additional() -> BoundedVec<(Data, Data), MaxAdditionalFields> { - BoundedVec::try_from(vec![( - Data::Raw(b"fOo".to_vec().try_into().unwrap()), - Data::Raw(b"baR".to_vec().try_into().unwrap()), - )]) - .unwrap() -} - -// Represent some `additional` data that will be migrated to the parachain as first-class fields. 
-fn meaningful_additional() -> BoundedVec<(Data, Data), MaxAdditionalFields> { - BoundedVec::try_from(vec![ - ( - Data::Raw(b"github".to_vec().try_into().unwrap()), - Data::Raw(b"niels-username".to_vec().try_into().unwrap()), - ), - ( - Data::Raw(b"discord".to_vec().try_into().unwrap()), - Data::Raw(b"bohr-username".to_vec().try_into().unwrap()), - ), - ]) - .unwrap() -} - -// Execute a single test case. -fn assert_relay_para_flow(id: &Identity) { - let total_deposit = set_id_relay(id); - assert_set_id_parachain(id); - assert_reap_id_relay(total_deposit, id); - assert_reap_parachain(id); -} - -// Tests with empty `IdentityInfo`. - -#[test] -fn on_reap_identity_works_for_minimal_identity_with_zero_subs() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_minimal_identity() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_minimal_identity_with_max_subs() { - assert_relay_para_flow(&Identity::new(false, None, Subs::Many(MaxSubAccounts::get()))); -} - -// Tests with full `IdentityInfo`. - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_no_additional_max_subs() { - assert_relay_para_flow(&Identity::new(true, None, Subs::Many(MaxSubAccounts::get()))); -} - -// Tests with full `IdentityInfo` and `additional` fields that will _not_ be migrated. - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, Some(nonsensical_additional()), Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional() { - assert_relay_para_flow(&Identity::new(true, Some(nonsensical_additional()), Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_nonsense_additional_max_subs() { - assert_relay_para_flow(&Identity::new( - true, - Some(nonsensical_additional()), - Subs::Many(MaxSubAccounts::get()), - )); -} - -// Tests with full `IdentityInfo` and `additional` fields that will be migrated. - -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional_zero_subs() { - assert_relay_para_flow(&Identity::new(true, Some(meaningful_additional()), Subs::Zero)); -} - -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional() { - assert_relay_para_flow(&Identity::new(true, Some(meaningful_additional()), Subs::Many(1))); -} - -#[test] -fn on_reap_identity_works_for_full_identity_meaningful_additional_max_subs() { - assert_relay_para_flow(&Identity::new( - true, - Some(meaningful_additional()), - Subs::Many(MaxSubAccounts::get()), - )); -} diff --git a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs index 294583399011..83888031723f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/people/people-westend/src/tests/teleport.rs @@ -14,46 +14,27 @@ // limitations under the License. 
use crate::imports::*; +use emulated_integration_tests_common::{ + test_parachain_is_trusted_teleporter_for_relay, test_relay_is_trusted_teleporter, +}; -fn relay_origin_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - Westend::assert_xcm_pallet_attempted_complete(None); +#[test] +fn teleport_from_and_to_relay() { + let amount = WESTEND_ED * 100; + let native_asset: Assets = (Here, amount).into(); - assert_expected_events!( + test_relay_is_trusted_teleporter!( Westend, - vec![ - // Amount to teleport is withdrawn from Sender - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == t.sender.account_id, - amount: *amount == t.args.amount, - }, - // Amount to teleport is deposited in Relay's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - ] + WestendXcmConfig, + vec![PeopleWestend], + (native_asset, amount) ); -} -fn relay_dest_assertions(t: SystemParaToRelayTest) { - type RuntimeEvent = ::RuntimeEvent; - - Westend::assert_ump_queue_processed(true, Some(PeopleWestend::para_id()), None); - - assert_expected_events!( + test_parachain_is_trusted_teleporter_for_relay!( + PeopleWestend, + PeopleWestendXcmConfig, Westend, - vec![ - // Amount is withdrawn from Relay Chain's `CheckAccount` - RuntimeEvent::Balances(pallet_balances::Event::Burned { who, amount }) => { - who: *who == ::XcmPallet::check_account(), - amount: *amount == t.args.amount, - }, - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. }) => { - who: *who == t.receiver.account_id, - }, - ] + amount ); } @@ -80,33 +61,6 @@ fn para_origin_assertions(t: SystemParaToRelayTest) { ); } -fn para_dest_assertions(t: RelayToSystemParaTest) { - type RuntimeEvent = ::RuntimeEvent; - - PeopleWestend::assert_dmp_queue_complete(None); - - assert_expected_events!( - PeopleWestend, - vec![ - // Amount minus fees are deposited in Receiver's account - RuntimeEvent::Balances(pallet_balances::Event::Minted { who, .. 
}) => { - who: *who == t.receiver.account_id, - }, - ] - ); -} - -fn relay_limited_teleport_assets(t: RelayToSystemParaTest) -> DispatchResult { - ::XcmPallet::limited_teleport_assets( - t.signed_origin, - bx!(t.args.dest.into()), - bx!(t.args.beneficiary.into()), - bx!(t.args.assets.into()), - t.args.fee_asset_item, - t.args.weight_limit, - ) -} - fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResult { ::PolkadotXcm::limited_teleport_assets( t.signed_origin, @@ -118,92 +72,8 @@ fn system_para_limited_teleport_assets(t: SystemParaToRelayTest) -> DispatchResu ) } -/// Limited Teleport of native asset from Relay Chain to the System Parachain should work -#[test] -fn limited_teleport_native_assets_from_relay_to_system_para_works() { - // Init values for Relay Chain - let amount_to_send: Balance = WESTEND_ED * 1000; - let dest = Westend::child_location_of(PeopleWestend::para_id()); - let beneficiary_id = PeopleWestendReceiver::get(); - let test_args = TestContext { - sender: WestendSender::get(), - receiver: PeopleWestendReceiver::get(), - args: TestArgs::new_relay(dest, beneficiary_id, amount_to_send), - }; - - let mut test = RelayToSystemParaTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(relay_origin_assertions); - test.set_assertion::(para_dest_assertions); - test.set_dispatchable::(relay_limited_teleport_assets); - test.assert(); - - let delivery_fees = Westend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - -/// Limited Teleport of native asset from System Parachain to Relay Chain -/// should work when there is enough balance in Relay Chain's `CheckAccount` -#[test] -fn limited_teleport_native_assets_back_from_system_para_to_relay_works() { - // Dependency - Relay Chain's `CheckAccount` should have enough balance - limited_teleport_native_assets_from_relay_to_system_para_works(); - - let amount_to_send: Balance = PEOPLE_WESTEND_ED * 1000; - let destination = PeopleWestend::parent_location(); - let beneficiary_id = WestendReceiver::get(); - let assets = (Parent, amount_to_send).into(); - - // Fund a sender - PeopleWestend::fund_accounts(vec![(PeopleWestendSender::get(), WESTEND_ED * 2_000u128)]); - - let test_args = TestContext { - sender: PeopleWestendSender::get(), - receiver: WestendReceiver::get(), - args: TestArgs::new_para(destination, beneficiary_id, amount_to_send, assets, None, 0), - }; - - let mut test = SystemParaToRelayTest::new(test_args); - - let sender_balance_before = test.sender.balance; - let receiver_balance_before = test.receiver.balance; - - test.set_assertion::(para_origin_assertions); - test.set_assertion::(relay_dest_assertions); - test.set_dispatchable::(system_para_limited_teleport_assets); - test.assert(); - - let sender_balance_after = test.sender.balance; - let receiver_balance_after = test.receiver.balance; - - let delivery_fees = PeopleWestend::execute_with(|| { - xcm_helpers::teleport_assets_delivery_fees::< - ::XcmSender, - >(test.args.assets.clone(), 0, 
test.args.weight_limit, test.args.beneficiary, test.args.dest) - }); - - // Sender's balance is reduced - assert_eq!(sender_balance_before - amount_to_send - delivery_fees, sender_balance_after); - // Receiver's balance is increased - assert!(receiver_balance_after > receiver_balance_before); -} - /// Limited Teleport of native asset from System Parachain to Relay Chain -/// should't work when there is not enough balance in Relay Chain's `CheckAccount` +/// shouldn't work when there is not enough balance in Relay Chain's `CheckAccount` #[test] fn limited_teleport_native_assets_from_system_para_to_relay_fails() { // Init values for Relay Chain diff --git a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs index ae4fe9e84337..08b1d192b0be 100644 --- a/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/collectives/collectives-westend/src/xcm_config.rs @@ -48,6 +48,7 @@ use xcm_builder::{ use xcm_executor::XcmExecutor; parameter_types! { + pub const RootLocation: Location = Location::here(); pub const WndLocation: Location = Location::parent(); pub const RelayNetwork: Option = Some(NetworkId::Westend); pub RelayChainOrigin: RuntimeOrigin = cumulus_pallet_xcm::Origin::Relay.into(); @@ -139,6 +140,13 @@ impl Contains for ParentOrParentsPlurality { } } +pub struct LocalPlurality; +impl Contains for LocalPlurality { + fn contains(loc: &Location) -> bool { + matches!(loc.unpack(), (0, [Plurality { .. }])) + } +} + pub type Barrier = TrailingSetTopicAsId< DenyThenTry< DenyReserveTransferToRelayChain, @@ -173,6 +181,8 @@ pub type Barrier = TrailingSetTopicAsId< pub type WaivedLocations = ( RelayOrOtherSystemParachains, Equals, + Equals, + LocalPlurality, ); /// Cases where a remote origin is accepted as trusted Teleporter for a given asset: From 00946b10ab18331f959f5cbced7c433b6132b1cb Mon Sep 17 00:00:00 2001 From: Muharem Date: Wed, 14 Aug 2024 15:35:34 +0200 Subject: [PATCH 41/74] Make ticket non-optional and add ensure_successful method to Consideration trait (#5359) Make ticket non-optional and add ensure_successful method to Consideration trait. Reverts the optional return ticket type for the new function introduced in [polkadot-sdk/4596](https://github.com/paritytech/polkadot-sdk/pull/4596) and adds a helper `ensure_successful` function for the runtime benchmarks. Since the existing FRAME pallet represents zero cost with a zero balance rather than `None` in an option, maintaining the ticket type as a non-optional balance is beneficial for backward compatibility and helps avoid unnecessary migrations. --- prdoc/pr_5359.prdoc | 21 ++++ .../balances/src/tests/fungible_tests.rs | 41 ++++--- substrate/frame/preimage/src/benchmarking.rs | 2 +- substrate/frame/preimage/src/lib.rs | 17 ++- substrate/frame/support/src/traits/storage.rs | 26 ++-- .../support/src/traits/tokens/fungible/mod.rs | 112 ++++++++---------- 6 files changed, 115 insertions(+), 104 deletions(-) create mode 100644 prdoc/pr_5359.prdoc diff --git a/prdoc/pr_5359.prdoc b/prdoc/pr_5359.prdoc new file mode 100644 index 000000000000..bf059129a436 --- /dev/null +++ b/prdoc/pr_5359.prdoc @@ -0,0 +1,21 @@ +title: Make ticket non-optional and add ensure_successful method to Consideration trait + +doc: + - audience: Runtime Dev + description: | + Make ticket non-optional and add ensure_successful method to Consideration trait. 
+ + Reverts the optional return ticket type for the new function introduced in + [polkadot-sdk/4596](https://github.com/paritytech/polkadot-sdk/pull/4596) and adds a helper + `ensure_successful` function for the runtime benchmarks. + Since the existing FRAME pallet represents zero cost with a zero balance type rather than + `None` in an option, maintaining the ticket type as a non-optional balance is beneficial + for backward compatibility and helps avoid unnecessary migrations. + +crates: + - name: frame-support + bump: major + - name: pallet-preimage + bump: major + - name: pallet-balances + bump: patch diff --git a/substrate/frame/balances/src/tests/fungible_tests.rs b/substrate/frame/balances/src/tests/fungible_tests.rs index 1a09303a6590..e1c1be9b1335 100644 --- a/substrate/frame/balances/src/tests/fungible_tests.rs +++ b/substrate/frame/balances/src/tests/fungible_tests.rs @@ -518,24 +518,25 @@ fn freeze_consideration_works() { let who = 4; // freeze amount taken somewhere outside of our (Consideration) scope. let extend_freeze = 15; + let zero_ticket = Consideration::new(&who, Footprint::from_parts(0, 0)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); - let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, extend_freeze)); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4 + extend_freeze); - let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 8 + extend_freeze); - assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), zero_ticket); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0 + extend_freeze); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10 + extend_freeze); let _ = ticket.drop(&who).unwrap(); @@ -560,24 +561,26 @@ fn hold_consideration_works() { let who = 4; // hold amount taken somewhere outside of our (Consideration) scope. 
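// With the non-optional `Consideration` API, a zero footprint yields a real
// zero-cost ticket (the `zero_ticket` created below) rather than `None`.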
let extend_hold = 15; + + let zero_ticket = Consideration::new(&who, Footprint::from_parts(0, 0)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); - let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); assert_ok!(Balances::hold(&TestId::Foo, &who, extend_hold)); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4 + extend_hold); - let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(8, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 8 + extend_hold); - assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), zero_ticket); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0 + extend_hold); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10 + extend_hold); let _ = ticket.drop(&who).unwrap(); @@ -600,21 +603,22 @@ fn lone_freeze_consideration_works() { >; let who = 4; + let zero_ticket = Consideration::new(&who, Footprint::from_parts(0, 0)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); assert_ok!(Balances::increase_frozen(&TestId::Foo, &who, 5)); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 15); - let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 4); - assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), zero_ticket); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_frozen(&TestId::Foo, &who), 10); let _ = ticket.drop(&who).unwrap(); @@ -637,21 +641,22 @@ fn lone_hold_consideration_works() { >; let who = 4; + let zero_ticket = Consideration::new(&who, Footprint::from_parts(0, 0)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); assert_ok!(Balances::hold(&TestId::Foo, &who, 5)); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 15); - let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap().unwrap(); + let ticket = ticket.update(&who, Footprint::from_parts(4, 1)).unwrap(); 
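// `update` on a lone consideration sets the absolute amount on hold, so the
// extra 5 held above is folded into the new total of 4 asserted below.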
assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 4); - assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), None); + assert_eq!(ticket.update(&who, Footprint::from_parts(0, 0)).unwrap(), zero_ticket); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 0); - let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap().unwrap(); + let ticket = Consideration::new(&who, Footprint::from_parts(10, 1)).unwrap(); assert_eq!(Balances::balance_on_hold(&TestId::Foo, &who), 10); let _ = ticket.drop(&who).unwrap(); diff --git a/substrate/frame/preimage/src/benchmarking.rs b/substrate/frame/preimage/src/benchmarking.rs index 2d3bec16b818..3d0c5b900579 100644 --- a/substrate/frame/preimage/src/benchmarking.rs +++ b/substrate/frame/preimage/src/benchmarking.rs @@ -116,7 +116,7 @@ benchmarks! { T::ManagerOrigin::try_successful_origin().map_err(|_| BenchmarkError::Weightless)?, hash ) verify { - let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap().unwrap(); + let ticket = TicketOf::::new(¬er, Footprint { count: 1, size: MAX_SIZE as u64 }).unwrap(); let s = RequestStatus::Requested { maybe_ticket: Some((noter, ticket)), count: 1, maybe_len: Some(MAX_SIZE) }; assert_eq!(RequestStatusFor::::get(&hash), Some(s)); } diff --git a/substrate/frame/preimage/src/lib.rs b/substrate/frame/preimage/src/lib.rs index 30056fc6d9a4..658e7fec5348 100644 --- a/substrate/frame/preimage/src/lib.rs +++ b/substrate/frame/preimage/src/lib.rs @@ -124,8 +124,6 @@ pub mod pallet { type ManagerOrigin: EnsureOrigin; /// A means of providing some cost while data is stored on-chain. - /// - /// Should never return a `None`, implying no cost for a non-empty preimage. type Consideration: Consideration; } @@ -162,8 +160,6 @@ pub mod pallet { TooMany, /// Too few hashes were requested to be upgraded (i.e. zero). TooFew, - /// No ticket with a cost was returned by [`Config::Consideration`] to store the preimage. - NoCost, } /// A reason for this pallet placing a hold on funds. @@ -274,10 +270,10 @@ impl Pallet { // unreserve deposit T::Currency::unreserve(&who, amount); // take consideration - let Ok(Some(ticket)) = + let Ok(ticket) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) + .defensive_proof("Unexpected inability to take deposit after unreserved") else { - defensive!("None ticket or inability to take deposit after unreserved"); return true }; RequestStatus::Unrequested { ticket: (who, ticket), len } @@ -288,10 +284,12 @@ impl Pallet { T::Currency::unreserve(&who, deposit); // take consideration if let Some(len) = maybe_len { - let Ok(Some(ticket)) = + let Ok(ticket) = T::Consideration::new(&who, Footprint::from_parts(1, len as usize)) + .defensive_proof( + "Unexpected inability to take deposit after unreserved", + ) else { - defensive!("None ticket or inability to take deposit after unreserved"); return true }; Some((who, ticket)) @@ -351,8 +349,7 @@ impl Pallet { RequestStatus::Requested { maybe_ticket: None, count: 1, maybe_len: Some(len) }, (None, Some(depositor)) => { let ticket = - T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))? 
- .ok_or(Error::::NoCost)?; + T::Consideration::new(depositor, Footprint::from_parts(1, len as usize))?; RequestStatus::Unrequested { ticket: (depositor.clone(), ticket), len } }, }; diff --git a/substrate/frame/support/src/traits/storage.rs b/substrate/frame/support/src/traits/storage.rs index a954af14d259..eb63ea59f6cb 100644 --- a/substrate/frame/support/src/traits/storage.rs +++ b/substrate/frame/support/src/traits/storage.rs @@ -201,7 +201,7 @@ where } /// Some sort of cost taken from account temporarily in order to offset the cost to the chain of -/// holding some data `Footprint` (e.g. [`Footprint`]) in state. +/// holding some data [`Footprint`] in state. /// /// The cost may be increased, reduced or dropped entirely as the footprint changes. /// @@ -217,16 +217,14 @@ pub trait Consideration: Member + FullCodec + TypeInfo + MaxEncodedLen { /// Create a ticket for the `new` footprint attributable to `who`. This ticket *must* ultimately - /// be consumed through `update` or `drop` once the footprint changes or is removed. `None` - /// implies no cost for a given footprint. - fn new(who: &AccountId, new: Footprint) -> Result, DispatchError>; + /// be consumed through `update` or `drop` once the footprint changes or is removed. + fn new(who: &AccountId, new: Footprint) -> Result; /// Optionally consume an old ticket and alter the footprint, enforcing the new cost to `who` - /// and returning the new ticket (or an error if there was an issue). `None` implies no cost for - /// a given footprint. + /// and returning the new ticket (or an error if there was an issue). /// /// For creating tickets and dropping them, you can use the simpler `new` and `drop` instead. - fn update(self, who: &AccountId, new: Footprint) -> Result, DispatchError>; + fn update(self, who: &AccountId, new: Footprint) -> Result; /// Consume a ticket for some `old` footprint attributable to `who` which should now been freed. fn drop(self, who: &AccountId) -> Result<(), DispatchError>; @@ -239,18 +237,24 @@ pub trait Consideration: fn burn(self, _: &AccountId) { let _ = self; } + /// Ensure that creating a ticket for a given account and footprint will be successful if done + /// immediately after this call. + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &AccountId, new: Footprint); } impl Consideration for () { - fn new(_: &A, _: F) -> Result, DispatchError> { - Ok(Some(())) + fn new(_: &A, _: F) -> Result { + Ok(()) } - fn update(self, _: &A, _: F) -> Result, DispatchError> { - Ok(Some(())) + fn update(self, _: &A, _: F) -> Result<(), DispatchError> { + Ok(()) } fn drop(self, _: &A) -> Result<(), DispatchError> { Ok(()) } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(_: &A, _: F) {} } macro_rules! 
impl_incrementable { diff --git a/substrate/frame/support/src/traits/tokens/fungible/mod.rs b/substrate/frame/support/src/traits/tokens/fungible/mod.rs index f40e494b930d..9b7c86971bb8 100644 --- a/substrate/frame/support/src/traits/tokens/fungible/mod.rs +++ b/substrate/frame/support/src/traits/tokens/fungible/mod.rs @@ -164,6 +164,8 @@ use codec::{Decode, Encode, MaxEncodedLen}; use core::marker::PhantomData; use frame_support_procedural::{CloneNoBound, EqNoBound, PartialEqNoBound, RuntimeDebugNoBound}; use scale_info::TypeInfo; +#[cfg(feature = "runtime-benchmarks")] +use sp_runtime::Saturating; use super::{ Fortitude::{Force, Polite}, @@ -209,38 +211,35 @@ pub struct FreezeConsideration(F::Balance, PhantomData ( where F: MutateFreeze; impl< - A: 'static, - F: 'static + MutateFreeze, + A: 'static + Eq, + #[cfg(not(feature = "runtime-benchmarks"))] F: 'static + MutateFreeze, + #[cfg(feature = "runtime-benchmarks")] F: 'static + MutateFreeze + Mutate, R: 'static + Get, D: 'static + Convert, Fp: 'static, > Consideration for FreezeConsideration { - fn new(who: &A, footprint: Fp) -> Result, DispatchError> { + fn new(who: &A, footprint: Fp) -> Result { let new = D::convert(footprint); - if new.is_zero() { - Ok(None) - } else { - F::increase_frozen(&R::get(), who, new)?; - Ok(Some(Self(new, PhantomData))) - } + F::increase_frozen(&R::get(), who, new)?; + Ok(Self(new, PhantomData)) } - fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + fn update(self, who: &A, footprint: Fp) -> Result { let new = D::convert(footprint); if self.0 > new { F::decrease_frozen(&R::get(), who, self.0 - new)?; } else if new > self.0 { F::increase_frozen(&R::get(), who, new - self.0)?; } - if new.is_zero() { - Ok(None) - } else { - Ok(Some(Self(new, PhantomData))) - } + Ok(Self(new, PhantomData)) } fn drop(self, who: &A) -> Result<(), DispatchError> { F::decrease_frozen(&R::get(), who, self.0).map(|_| ()) } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &A, fp: Fp) { + let _ = F::mint_into(who, F::minimum_balance().saturating_add(D::convert(fp))); + } } /// Consideration method using a `fungible` balance frozen as the cost exacted for the footprint. 
@@ -263,34 +262,27 @@ pub struct HoldConsideration( where F: MutateHold; impl< - A: 'static, - F: 'static + MutateHold, + A: 'static + Eq, + #[cfg(not(feature = "runtime-benchmarks"))] F: 'static + MutateHold, + #[cfg(feature = "runtime-benchmarks")] F: 'static + MutateHold + Mutate, R: 'static + Get, D: 'static + Convert, Fp: 'static, > Consideration for HoldConsideration { - fn new(who: &A, footprint: Fp) -> Result, DispatchError> { + fn new(who: &A, footprint: Fp) -> Result { let new = D::convert(footprint); - if new.is_zero() { - Ok(None) - } else { - F::hold(&R::get(), who, new)?; - Ok(Some(Self(new, PhantomData))) - } + F::hold(&R::get(), who, new)?; + Ok(Self(new, PhantomData)) } - fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { + fn update(self, who: &A, footprint: Fp) -> Result { let new = D::convert(footprint); if self.0 > new { F::release(&R::get(), who, self.0 - new, BestEffort)?; } else if new > self.0 { F::hold(&R::get(), who, new - self.0)?; } - if new.is_zero() { - Ok(None) - } else { - Ok(Some(Self(new, PhantomData))) - } + Ok(Self(new, PhantomData)) } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release(&R::get(), who, self.0, BestEffort).map(|_| ()) @@ -298,6 +290,10 @@ impl< fn burn(self, who: &A) { let _ = F::burn_held(&R::get(), who, self.0, BestEffort, Force); } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &A, fp: Fp) { + let _ = F::mint_into(who, F::minimum_balance().saturating_add(D::convert(fp))); + } } /// Basic consideration method using a `fungible` balance frozen as the cost exacted for the @@ -321,34 +317,28 @@ impl< #[codec(mel_bound())] pub struct LoneFreezeConsideration(PhantomData (A, Fx, Rx, D, Fp)>); impl< - A: 'static, - Fx: 'static + MutateFreeze, + A: 'static + Eq, + #[cfg(not(feature = "runtime-benchmarks"))] Fx: 'static + MutateFreeze, + #[cfg(feature = "runtime-benchmarks")] Fx: 'static + MutateFreeze + Mutate, Rx: 'static + Get, D: 'static + Convert, Fp: 'static, > Consideration for LoneFreezeConsideration { - fn new(who: &A, footprint: Fp) -> Result, DispatchError> { + fn new(who: &A, footprint: Fp) -> Result { ensure!(Fx::balance_frozen(&Rx::get(), who).is_zero(), DispatchError::Unavailable); - let new = D::convert(footprint); - if new.is_zero() { - Ok(None) - } else { - Fx::set_frozen(&Rx::get(), who, new, Polite).map(|_| Some(Self(PhantomData))) - } + Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) } - fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { - let new = D::convert(footprint); - let _ = Fx::set_frozen(&Rx::get(), who, new, Polite)?; - if new.is_zero() { - Ok(None) - } else { - Ok(Some(Self(PhantomData))) - } + fn update(self, who: &A, footprint: Fp) -> Result { + Fx::set_frozen(&Rx::get(), who, D::convert(footprint), Polite).map(|_| Self(PhantomData)) } fn drop(self, who: &A) -> Result<(), DispatchError> { Fx::thaw(&Rx::get(), who).map(|_| ()) } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &A, fp: Fp) { + let _ = Fx::mint_into(who, Fx::minimum_balance().saturating_add(D::convert(fp))); + } } /// Basic consideration method using a `fungible` balance placed on hold as the cost exacted for the @@ -372,30 +362,20 @@ impl< #[codec(mel_bound())] pub struct LoneHoldConsideration(PhantomData (A, Fx, Rx, D, Fp)>); impl< - A: 'static, - F: 'static + MutateHold, + A: 'static + Eq, + #[cfg(not(feature = "runtime-benchmarks"))] F: 'static + MutateHold, + #[cfg(feature = "runtime-benchmarks")] F: 'static + 
MutateHold + Mutate, R: 'static + Get, D: 'static + Convert, Fp: 'static, > Consideration for LoneHoldConsideration { - fn new(who: &A, footprint: Fp) -> Result, DispatchError> { + fn new(who: &A, footprint: Fp) -> Result { ensure!(F::balance_on_hold(&R::get(), who).is_zero(), DispatchError::Unavailable); - let new = D::convert(footprint); - if new.is_zero() { - Ok(None) - } else { - F::set_on_hold(&R::get(), who, new).map(|_| Some(Self(PhantomData))) - } + F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) } - fn update(self, who: &A, footprint: Fp) -> Result, DispatchError> { - let new = D::convert(footprint); - let _ = F::set_on_hold(&R::get(), who, new)?; - if new.is_zero() { - Ok(None) - } else { - Ok(Some(Self(PhantomData))) - } + fn update(self, who: &A, footprint: Fp) -> Result { + F::set_on_hold(&R::get(), who, D::convert(footprint)).map(|_| Self(PhantomData)) } fn drop(self, who: &A) -> Result<(), DispatchError> { F::release_all(&R::get(), who, BestEffort).map(|_| ()) @@ -403,4 +383,8 @@ impl< fn burn(self, who: &A) { let _ = F::burn_all_held(&R::get(), who, BestEffort, Force); } + #[cfg(feature = "runtime-benchmarks")] + fn ensure_successful(who: &A, fp: Fp) { + let _ = F::mint_into(who, F::minimum_balance().saturating_add(D::convert(fp))); + } }
From 05a8ba662f0afdd4a4e6e2f4e61e4ca2458d666c Mon Sep 17 00:00:00 2001
From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com>
Date: Wed, 14 Aug 2024 16:55:29 +0300
Subject: [PATCH 42/74] Fix OurViewChange small race (#5356)

Always queue the OurViewChange event before we send view changes to our peers,
because otherwise we risk the peers sending us a message that can be processed
by our subsystems before OurViewChange. Normally, this is not really a problem,
because the latency of the ViewChange we send to our peers is way higher than
that of our subsystems processing OurViewChange. However, on testnets like
Versi, where CPUs are sometimes overcommitted, this race gets triggered
occasionally, so let's fix it by sending the messages in the right order.
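For reference, the intended ordering can be shown as a minimal, self-contained
sketch (the `Event` enum and the `update_our_view` signature below are made up
for illustration; they are not the actual network-bridge API):

```rust
// Illustrative model of the fix, not real network-bridge code; the only
// point is the order of the two effects in `update_our_view`.
use std::collections::VecDeque;

#[derive(Debug)]
enum Event {
    // Our own subsystems must observe this first...
    OurViewChange(u64),
    // ...before any message a peer sends in reaction to our broadcast.
    PeerMessage(u64),
}

fn update_our_view(queue: &mut VecDeque<Event>, view: u64) {
    // Queue OurViewChange for our own subsystems *before* notifying peers.
    queue.push_back(Event::OurViewChange(view));
    // The broadcast to peers would happen here; even the earliest possible
    // peer reply can only be enqueued after OurViewChange.
    queue.push_back(Event::PeerMessage(view));
}

fn main() {
    let mut queue = VecDeque::new();
    update_our_view(&mut queue, 42);
    while let Some(event) = queue.pop_front() {
        println!("{event:?}"); // OurViewChange(42), then PeerMessage(42)
    }
}
```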
--------- Signed-off-by: Alexandru Gheorghe --- polkadot/node/network/bridge/src/rx/mod.rs | 30 +++++++++++----------- prdoc/pr_5356.prdoc | 18 +++++++++++++ 2 files changed, 33 insertions(+), 15 deletions(-) create mode 100644 prdoc/pr_5356.prdoc diff --git a/polkadot/node/network/bridge/src/rx/mod.rs b/polkadot/node/network/bridge/src/rx/mod.rs index 56965ce6ba40..7745c42f78a1 100644 --- a/polkadot/node/network/bridge/src/rx/mod.rs +++ b/polkadot/node/network/bridge/src/rx/mod.rs @@ -962,6 +962,21 @@ fn update_our_view( ) }; + let our_view = OurView::new( + live_heads.iter().take(MAX_VIEW_HEADS).cloned().map(|a| (a.hash, a.span)), + finalized_number, + ); + + dispatch_validation_event_to_all_unbounded( + NetworkBridgeEvent::OurViewChange(our_view.clone()), + ctx.sender(), + ); + + dispatch_collation_event_to_all_unbounded( + NetworkBridgeEvent::OurViewChange(our_view), + ctx.sender(), + ); + let v1_validation_peers = filter_by_peer_version(&validation_peers, ValidationVersion::V1.into()); let v1_collation_peers = filter_by_peer_version(&collation_peers, CollationVersion::V1.into()); @@ -1007,21 +1022,6 @@ fn update_our_view( metrics, notification_sinks, ); - - let our_view = OurView::new( - live_heads.iter().take(MAX_VIEW_HEADS).cloned().map(|a| (a.hash, a.span)), - finalized_number, - ); - - dispatch_validation_event_to_all_unbounded( - NetworkBridgeEvent::OurViewChange(our_view.clone()), - ctx.sender(), - ); - - dispatch_collation_event_to_all_unbounded( - NetworkBridgeEvent::OurViewChange(our_view), - ctx.sender(), - ); } // Handle messages on a specific v1 peer-set. The peer is expected to be connected on that diff --git a/prdoc/pr_5356.prdoc b/prdoc/pr_5356.prdoc new file mode 100644 index 000000000000..a306be335440 --- /dev/null +++ b/prdoc/pr_5356.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix OurViewChange small race + +doc: + - audience: Node Dev + description: | + Always queue OurViewChange event before we send view changes to our peers, because otherwise we risk + the peers sending us a message that can be processed by our subsystems before OurViewChange. + Normally, this is not really a problem because the latency of the ViewChange we send to our peers + is way higher than that of our subsystem processing OurViewChange, however on testnets like versi + where CPUs are sometimes overcommitted this race gets triggered occasionally, so let's fix it by + sending the messages in the right order. + +crates: + - name: polkadot-network-bridge + bump: minor From 81d8f0c0fa0be50dcd2c0291840a1b5b7ace8b98 Mon Sep 17 00:00:00 2001 From: Serban Iorga Date: Wed, 14 Aug 2024 21:28:18 +0300 Subject: [PATCH 43/74] Beefy: add benchmarks for `report_fork_voting()` (#5188) Related to #4523 This PR adds benchmarks for `report_fork_voting()`. **Important: Even though the benchmarks are now available, we still use `Weight::MAX`. That's because I realized while working on this PR that there's still one missing piece. We should also check that the ancestry proof is optimal. 
I plan to do this in a future PR, hopefully the last one related to #4523.** --------- Co-authored-by: Branislav Kontur Co-authored-by: command-bot <> --- Cargo.lock | 925 ++++++++---------- polkadot/runtime/rococo/src/lib.rs | 82 +- polkadot/runtime/rococo/src/weights/mod.rs | 1 + .../rococo/src/weights/pallet_beefy_mmr.rs | 89 ++ polkadot/runtime/westend/src/lib.rs | 18 +- polkadot/runtime/westend/src/weights/mod.rs | 1 + .../westend/src/weights/pallet_beefy_mmr.rs | 89 ++ prdoc/pr_5188.prdoc | 32 + substrate/bin/node/runtime/src/lib.rs | 2 + substrate/frame/beefy-mmr/Cargo.toml | 3 + substrate/frame/beefy-mmr/src/benchmarking.rs | 129 +++ substrate/frame/beefy-mmr/src/lib.rs | 43 +- substrate/frame/beefy-mmr/src/mock.rs | 9 +- substrate/frame/beefy-mmr/src/tests.rs | 21 +- substrate/frame/beefy-mmr/src/weights.rs | 134 +++ substrate/frame/beefy/src/default_weights.rs | 5 - substrate/frame/beefy/src/lib.rs | 88 +- substrate/frame/beefy/src/mock.rs | 15 +- substrate/frame/beefy/src/tests.rs | 10 +- .../frame/merkle-mountain-range/src/lib.rs | 13 + .../merkle-mountain-range/src/mmr/mmr.rs | 46 +- .../merkle-mountain-range/src/mmr/storage.rs | 29 +- .../primitives/consensus/beefy/Cargo.toml | 2 + .../primitives/consensus/beefy/src/lib.rs | 10 + .../merkle-mountain-range/src/utils.rs | 11 +- 25 files changed, 1177 insertions(+), 630 deletions(-) create mode 100644 polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs create mode 100644 polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs create mode 100644 prdoc/pr_5188.prdoc create mode 100644 substrate/frame/beefy-mmr/src/benchmarking.rs create mode 100644 substrate/frame/beefy-mmr/src/weights.rs diff --git a/Cargo.lock b/Cargo.lock index 67cfbb5968d5..9e3afe21c447 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -42,15 +42,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" -[[package]] -name = "aead" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "aead" version = "0.5.2" @@ -61,18 +52,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "aes" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "opaque-debug 0.3.0", -] - [[package]] name = "aes" version = "0.8.3" @@ -84,31 +63,17 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "aes-gcm" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc3be92e19a7ef47457b8e6f90707e12b6ac5d20c6f3866584fa3be0787d839f" -dependencies = [ - "aead 0.4.3", - "aes 0.7.5", - "cipher 0.3.0", - "ctr 0.7.0", - "ghash 0.4.4", - "subtle 2.5.0", -] - [[package]] name = "aes-gcm" version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "831010a0f742e1209b3bcea8fab6a8e149051ba6099432c8cb2cc117dec3ead1" dependencies = [ - "aead 0.5.2", - "aes 0.8.3", + "aead", + "aes", "cipher 0.4.4", - "ctr 0.9.2", - "ghash 0.5.0", + "ctr", + "ghash", "subtle 2.5.0", ] @@ -192,9 +157,9 @@ dependencies = [ "dunce", "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", "syn-solidity", 
"tiny-keccak", ] @@ -263,9 +228,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" [[package]] name = "anstyle-parse" @@ -319,9 +284,9 @@ dependencies = [ "include_dir", "itertools 0.10.5", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -553,7 +518,7 @@ checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" dependencies = [ "num-bigint", "num-traits", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -655,7 +620,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae3281bc6d0fd7e549af32b52511e1302185bd688fd3359fa36423346ff682ea" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -696,9 +661,9 @@ dependencies = [ [[package]] name = "array-bytes" -version = "6.2.3" +version = "6.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" +checksum = "6f840fb7195bcfc5e17ea40c26e5ce6d5b9ce5d584466e17703209657e459ae0" [[package]] name = "arrayref" @@ -759,7 +724,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", "synstructure 0.12.6", @@ -771,9 +736,9 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7378575ff571966e99a744addeff0bff98b8ada0dedf1956d59e634db95eaac1" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", "synstructure 0.13.1", ] @@ -783,7 +748,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -794,16 +759,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "assert_cmd" -version = "2.0.15" +version = "2.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc65048dd435533bb1baf2ed9956b9a278fbfdcf90301b39ee117f06c0199d37" +checksum = "ed72493ac66d5804837f480ab3766c72bdfab91a65e565fc54fa9e42db0073a8" dependencies = [ "anstyle", "bstr", @@ -1214,14 +1179,14 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d6baa8f0178795da0e71bc42c9e5d13261aac7ee549853162e66a241ba17964" dependencies = [ - "async-lock 3.3.0", + "async-lock 3.4.0", "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.3.0", "parking", "polling 3.4.0", - "rustix 0.38.25", + "rustix 0.38.21", "slab", "tracing", "windows-sys 0.52.0", @@ -1238,13 +1203,13 @@ dependencies = [ [[package]] name = "async-lock" -version = "3.3.0" +version = "3.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" dependencies = [ - "event-listener 4.0.3", + "event-listener 5.2.0", "event-listener-strategy", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -1298,7 +1263,7 @@ dependencies = [ "log", "memchr", "once_cell", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", "wasm-bindgen-futures", @@ -1312,7 +1277,7 @@ checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -1321,9 +1286,9 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -1338,9 +1303,9 @@ version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -1353,7 +1318,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -1397,7 +1362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fee3da8ef1276b0bee5dd1c7258010d8fffd31801447323115a25560e1327b89" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -1543,12 +1508,12 @@ dependencies = [ "lazycell", "peeking_take_while", "prettyplease 0.2.12", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -2625,18 +2590,6 @@ dependencies = [ "keystream", ] -[[package]] -name = "chacha20" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" -dependencies = [ - "cfg-if", - "cipher 0.3.0", - "cpufeatures", - "zeroize", -] - [[package]] name = "chacha20" version = "0.9.1" @@ -2650,14 +2603,14 @@ dependencies = [ [[package]] name = "chacha20poly1305" -version = "0.9.1" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18446b09be63d457bbec447509e85f662f32952b035ce892290396bc0b0cff5" +checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35" dependencies = [ - "aead 0.4.3", - "chacha20 0.8.2", - "cipher 0.3.0", - "poly1305 0.7.2", + "aead", + "chacha20", + "cipher 0.4.4", + "poly1305", "zeroize", ] @@ -2762,15 +2715,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "cipher" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" -dependencies = [ - "generic-array 0.14.7", -] - [[package]] name = "cipher" version = "0.4.4" @@ -2779,6 +2723,7 @@ checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad" dependencies = [ "crypto-common", "inout", + "zeroize", ] [[package]] @@ -2845,9 +2790,9 @@ dependencies = [ [[package]] name = "clap-num" -version = "1.1.1" +version 
= "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e063d263364859dc54fb064cedb7c122740cd4733644b14b176c097f51e8ab7" +checksum = "488557e97528174edaa2ee268b23a809e0c598213a4bbcb4f34575a46fda147e" dependencies = [ "num-traits", ] @@ -2882,7 +2827,7 @@ checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008" dependencies = [ "heck 0.4.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -2894,9 +2839,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -3082,7 +3027,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d51beaa537d73d2d1ff34ee70bc095f170420ab2ec5d687ecd3ec2b0d092514b" dependencies = [ "nom", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -3095,19 +3040,20 @@ checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "colored" -version = "2.1.0" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbf2150cce219b664a8a70df7a1f933836724b503f8a413af9365b4dcc4d90b8" +checksum = "2674ec482fbc38012cf31e6c42ba0177b431a0cb6f15fe40efa5aab1bda516f6" dependencies = [ + "is-terminal", "lazy_static", "windows-sys 0.48.0", ] [[package]] name = "combine" -version = "4.6.7" +version = "4.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" dependencies = [ "bytes", "memchr", @@ -3666,9 +3612,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "c2b432c56615136f8dba245fed7ec3d5518c500a31108661067e61e72fe7e6bc" dependencies = [ "crc-catalog", ] @@ -3818,15 +3764,6 @@ dependencies = [ "subtle 2.5.0", ] -[[package]] -name = "ctr" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a232f92a03f37dd7d7dd2adc67166c77e9cd88de5b019b9a9eecfaeaf7bfd481" -dependencies = [ - "cipher 0.3.0", -] - [[package]] name = "ctr" version = "0.9.2" @@ -4213,9 +4150,9 @@ name = "cumulus-pallet-parachain-system-proc-macro" version = "0.6.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4712,9 +4649,9 @@ dependencies = [ [[package]] name = "curl-sys" -version = "0.4.73+curl-8.8.0" +version = "0.4.72+curl-8.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "450ab250ecf17227c39afb9a2dd9261dc0035cb80f2612472fc0c4aac2dcb84d" +checksum = "29cbdc8314c447d11e8fd156dcdd031d9e02a7a976163e396b548c03153bc9ea" dependencies = [ "cc", "libc", @@ -4748,9 +4685,9 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "83fdaf97f4804dcebfa5862639bc9ce4121e82140bec2a987ac5140294865b5b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4787,10 +4724,10 @@ dependencies = [ "cc", 
"codespan-reporting", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "scratch", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4805,9 +4742,9 @@ version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c49547d73ba8dcfd4ad7325d64c6d5391ff4224d498fc39a6f3f49825a530d" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4898,9 +4835,12 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", +] [[package]] name = "derivative" @@ -4908,7 +4848,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -4919,9 +4859,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d65d7ce8132b7c0e54497a4d9a55a1c2a0912a0d786cf894472ba818fba45762" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4930,9 +4870,9 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67e77553c4162a157adbf834ebae5b415acbecbeafc7a74b0e886657506a7611" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -4942,7 +4882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ "convert_case", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "rustc_version 0.4.0", "syn 1.0.109", @@ -5038,9 +4978,9 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5098,10 +5038,10 @@ dependencies = [ "common-path", "derive-syn-parse", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "regex", - "syn 2.0.58", + "syn 2.0.61", "termcolor", "toml 0.8.8", "walkdir", @@ -5147,7 +5087,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "558e40ea573c374cf53507fd240b7ee2f5477df7cfebdb97323ec61c719399c5" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -5293,7 +5233,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -5305,9 +5245,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5325,9 +5265,9 @@ version = "0.7.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "5e9a1f9f7d83e59740248a6e14ecf93929ade55027844dfcea78beafccc15745" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5336,9 +5276,9 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fd000fd6988e73bbe993ea3db9b1aa64906ab88766d654973924340c8cddb42" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5414,12 +5354,11 @@ dependencies = [ [[package]] name = "erased-serde" -version = "0.4.5" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" +checksum = "2b73807008a3c7f171cc40312f37d95ef0396e048b5848d775f54b1a4dd4a0d3" dependencies = [ "serde", - "typeid", ] [[package]] @@ -5502,23 +5441,23 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.3" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] name = "event-listener-strategy" -version = "0.4.0" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" dependencies = [ - "event-listener 4.0.3", - "pin-project-lite 0.2.12", + "event-listener 5.2.0", + "pin-project-lite", ] [[package]] @@ -5540,9 +5479,9 @@ dependencies = [ "file-guard", "fs-err", "prettyplease 0.2.12", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5612,9 +5551,9 @@ dependencies = [ "expander", "indexmap 2.2.3", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -5815,9 +5754,9 @@ dependencies = [ [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -5944,11 +5883,11 @@ dependencies = [ "frame-support", "parity-scale-codec", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "scale-info", "sp-arithmetic", - "syn 2.0.58", + "syn 2.0.61", "trybuild", ] @@ -6139,7 +6078,7 @@ dependencies = [ "parity-scale-codec", "pretty_assertions", "proc-macro-warning 1.0.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "regex", "scale-info", @@ -6149,7 +6088,7 @@ dependencies = [ "sp-metadata-ir", "sp-runtime", "static_assertions", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -6158,18 +6097,18 @@ version = "10.0.0" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "frame-support-procedural-tools-derive" version = 
"11.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -6312,7 +6251,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29f9df8a11882c4e3335eb2d18a0137c505d9ca927470b0cac9c6f0ae07d28f7" dependencies = [ - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -6398,7 +6337,7 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.12", + "pin-project-lite", "waker-fn", ] @@ -6409,7 +6348,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6418,9 +6357,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -6464,7 +6403,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.12", + "pin-project-lite", "pin-utils", "slab", ] @@ -6542,16 +6481,6 @@ dependencies = [ "rand_core", ] -[[package]] -name = "ghash" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1583cc1656d7839fd3732b80cf4f38850336cdb9b8ded1cd399ca62958de3c99" -dependencies = [ - "opaque-debug 0.3.0", - "polyval 0.5.3", -] - [[package]] name = "ghash" version = "0.5.0" @@ -6559,7 +6488,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" dependencies = [ "opaque-debug 0.3.0", - "polyval 0.6.1", + "polyval", ] [[package]] @@ -6677,9 +6606,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -6830,9 +6759,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-conservative" -version = "0.1.2" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212ab92002354b4819390025006c897e8140934349e8635c9b077f47b4dcbd20" +checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" [[package]] name = "hex-literal" @@ -6932,7 +6861,7 @@ checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http 0.2.9", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6955,7 +6884,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "pin-project-lite 0.2.12", + "pin-project-lite", ] [[package]] @@ -6978,21 +6907,21 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", "http-body 
0.4.5", "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite", "socket2 0.5.7", "tokio", "tower-service", @@ -7015,7 +6944,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.12", + "pin-project-lite", "smallvec", "tokio", "want", @@ -7029,7 +6958,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.9", - "hyper 0.14.30", + "hyper 0.14.29", "log", "rustls 0.21.7", "rustls-native-certs 0.6.3", @@ -7067,7 +6996,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "hyper 1.3.1", - "pin-project-lite 0.2.12", + "pin-project-lite", "socket2 0.5.7", "tokio", "tower", @@ -7119,6 +7048,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -7159,7 +7098,7 @@ dependencies = [ "bytes", "futures", "http 0.2.9", - "hyper 0.14.30", + "hyper 0.14.29", "log", "rand", "tokio", @@ -7211,7 +7150,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -7231,7 +7170,7 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", ] @@ -7356,7 +7295,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" dependencies = [ "hermit-abi 0.3.2", - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -7575,9 +7514,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -7826,9 +7765,9 @@ checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libnghttp2-sys" -version = "0.1.10+1.61.0" +version = "0.1.9+1.58.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "959c25552127d2e1fa72f0e52548ec04fc386e827ba71a7bd01db46a447dc135" +checksum = "b57e858af2798e167e709b9d969325b6d8e9d50232fcbc494d7d54f976854a64" dependencies = [ "cc", "libc", @@ -8163,9 +8102,9 @@ checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ "heck 0.4.1", "proc-macro-warning 0.4.2", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -8236,9 +8175,9 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.42.1" +version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3facf0691bab65f571bc97c6c65ffa836248ca631d631b7691ac91deb7fceb5f" +checksum = "004ee9c4a4631435169aee6aad2f62e3984dc031c43b6d29731e8e82a016c538" dependencies = [ "either", "futures", @@ -8247,9 +8186,10 @@ dependencies = [ "libp2p-identity", "log", "parking_lot 0.12.3", - "quicksink", + "pin-project-lite", "rw-stream-sink", - "soketto 0.7.1", + "soketto 
0.8.0", + "thiserror", "url", "webpki-roots 0.25.2", ] @@ -8389,9 +8329,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.11" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" +checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f" [[package]] name = "lioness" @@ -8452,7 +8392,7 @@ dependencies = [ "rand", "rcgen", "ring 0.16.20", - "rustls 0.20.8", + "rustls 0.20.9", "serde", "sha2 0.10.8", "simple-dns", @@ -8578,7 +8518,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -8590,9 +8530,9 @@ dependencies = [ "const-random", "derive-syn-parse", "macro_magic_core_macros", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -8601,9 +8541,9 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b02abfe41815b5bd98dbd4260173db2c116dda171dc0fe7838cb206333b83308" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -8614,7 +8554,7 @@ checksum = "73ea28ee64b88876bf45277ed9a5817c1817df061a74f2b988971a12570e5869" dependencies = [ "macro_magic_core", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -8665,9 +8605,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "memfd" @@ -8976,7 +8916,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" dependencies = [ "cfg-if", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -8988,9 +8928,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" dependencies = [ "cfg-if", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -9100,7 +9040,7 @@ checksum = "fc076939022111618a5026d3be019fd8b366e76314538ff9a1b59ffbcbf98bcd" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", "synstructure 0.12.6", @@ -9148,7 +9088,7 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91761aed67d03ad966ef783ae962ef9bbaca728d2dd7ceb7939ec110fffad998" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -9237,9 +9177,9 @@ dependencies = [ [[package]] name = "network-interface" -version = "1.1.4" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a43439bf756eed340bdf8feba761e2d50c7d47175d87545cd5cbe4a137c4d1" +checksum = "ae72fd9dbd7f55dda80c00d66acc3b2130436fcba9ea89118fc508eaae48dfb0" dependencies = [ "cc", "libc", @@ -9528,15 +9468,21 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-derive" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -9701,9 +9647,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -9714,9 +9660,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-src" -version = "300.3.1+3.3.1" +version = "300.2.3+3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7259953d42a81bf137fbbd73bd30a8e1914d6dce43c2b90ed575783a22608b91" +checksum = "5cff92b6f71555b61bb9315f7c64da3ca43d87531622120fea0195fc761b4843" dependencies = [ "cc", ] @@ -9768,7 +9714,7 @@ dependencies = [ "itertools 0.11.0", "petgraph", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -10135,6 +10081,7 @@ version = "28.0.0" dependencies = [ "array-bytes", "binary-merkle-tree", + "frame-benchmarking", "frame-support", "frame-system", "log", @@ -10471,9 +10418,9 @@ dependencies = [ name = "pallet-contracts-proc-macro" version = "18.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -10639,7 +10586,7 @@ dependencies = [ "sp-npos-elections", "sp-runtime", "sp-tracing 16.0.0", - "strum 0.26.3", + "strum 0.26.2", ] [[package]] @@ -11705,10 +11652,10 @@ name = "pallet-staking-reward-curve" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "sp-runtime", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -12350,7 +12297,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -12379,7 +12326,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "syn 1.0.109", "synstructure 0.12.6", ] @@ -12765,9 +12712,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -12797,9 +12744,9 @@ checksum = "68ca01446f50dbda87c1786af8770d535423fa8a53aec03b8f4e3d7eb10e0929" dependencies = [ "pest", "pest_meta", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -12838,22 +12785,16 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 
1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "pin-project-lite" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" - -[[package]] -name = "pin-project-lite" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12cc1b0bf1727a77a54b6654e7b5f1af8604923edc8b81885f8ec92f9e3f0a05" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -13755,7 +13696,7 @@ dependencies = [ "sc-network", "sc-network-types", "sp-runtime", - "strum 0.26.3", + "strum 0.26.2", "thiserror", "tracing-gum", ] @@ -14955,7 +14896,7 @@ dependencies = [ "sp-runtime", "sp-timestamp", "sp-tracing 16.0.0", - "strum 0.26.3", + "strum 0.26.2", "substrate-prometheus-endpoint", "tokio", "tracing-gum", @@ -15191,9 +15132,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c4fdfc49717fb9a196e74a5d28e0bc764eb394a2c803eb11133a31ac996c60c" dependencies = [ "polkavm-common", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15203,7 +15144,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ba81f7b5faac81e528eb6158a6f3c9e0bb1008e0ffa19653bc8dea925ecb429" dependencies = [ "polkavm-derive-impl", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15239,7 +15180,7 @@ dependencies = [ "concurrent-queue", "libc", "log", - "pin-project-lite 0.2.12", + "pin-project-lite", "windows-sys 0.48.0", ] @@ -15251,23 +15192,12 @@ checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" dependencies = [ "cfg-if", "concurrent-queue", - "pin-project-lite 0.2.12", - "rustix 0.38.25", + "pin-project-lite", + "rustix 0.38.21", "tracing", "windows-sys 0.52.0", ] -[[package]] -name = "poly1305" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048aeb476be11a4b6ca432ca569e375810de9294ae78f4774e78ea98a9246ede" -dependencies = [ - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", -] - [[package]] name = "poly1305" version = "0.8.0" @@ -15276,19 +15206,7 @@ checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf" dependencies = [ "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", -] - -[[package]] -name = "polyval" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" -dependencies = [ - "cfg-if", - "cpufeatures", - "opaque-debug 0.3.0", - "universal-hash 0.4.0", + "universal-hash", ] [[package]] @@ -15300,7 +15218,7 @@ dependencies = [ "cfg-if", "cpufeatures", "opaque-debug 0.3.0", - "universal-hash 0.5.1", + "universal-hash", ] [[package]] @@ -15318,6 +15236,12 @@ dependencies = [ "rand", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "pprof" version = "0.12.1" @@ -15402,7 +15326,7 @@ version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "syn 1.0.109", ] @@ -15412,8 +15336,8 @@ version = "0.2.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6c64d9ba0963cdcea2e1b2230fbae2bab30eb25a174be395c41e764bfb65dd62" dependencies = [ - "proc-macro2 1.0.75", - "syn 2.0.58", + "proc-macro2 1.0.82", + "syn 2.0.61", ] [[package]] @@ -15473,7 +15397,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", "version_check", @@ -15485,7 +15409,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "version_check", ] @@ -15502,9 +15426,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15513,9 +15437,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b698b0b09d40e9b7c1a47b132d66a8b54bcd20583d9b6d06e4535e383b4405c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15529,9 +15453,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.75" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" +checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" dependencies = [ "unicode-ident", ] @@ -15548,7 +15472,7 @@ dependencies = [ "hex", "lazy_static", "procfs-core", - "rustix 0.38.25", + "rustix 0.38.21", ] [[package]] @@ -15594,9 +15518,9 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15675,9 +15599,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "80b776a1b2dc779f5ee0641f8ade0125bc1298dd41a9a0c16d8bd57b42d222b1" dependencies = [ "bytes", "heck 0.5.0", @@ -15688,9 +15612,9 @@ dependencies = [ "petgraph", "prettyplease 0.2.12", "prost 0.12.6", - "prost-types 0.12.6", + "prost-types 0.12.4", "regex", - "syn 2.0.58", + "syn 2.0.61", "tempfile", ] @@ -15702,7 +15626,7 @@ checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -15715,9 +15639,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.11.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -15731,9 +15655,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.6" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "3235c33eb02c1f1e212abdbe34c78b264b038fb58ca612664343271e36e55ffe" dependencies = [ "prost 0.12.6", ] @@ -15843,17 +15767,6 @@ dependencies = [ "rand", ] -[[package]] -name = "quicksink" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" -dependencies = [ - "futures-core", - "futures-sink", - "pin-project-lite 0.1.12", -] - [[package]] name = "quinn" version = "0.9.4" @@ -15861,11 +15774,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e8b432585672228923edbbf64b8b12c14e1112f62e88737655b4a083dbcd78e" dependencies = [ "bytes", - "pin-project-lite 0.2.12", - "quinn-proto 0.9.5", + "pin-project-lite", + "quinn-proto 0.9.6", "quinn-udp 0.3.2", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "thiserror", "tokio", "tracing", @@ -15880,7 +15793,7 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite 0.2.12", + "pin-project-lite", "quinn-proto 0.10.6", "quinn-udp 0.4.1", "rustc-hash", @@ -15892,15 +15805,15 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c956be1b23f4261676aed05a0046e204e8a6836e50203902683a718af0797989" +checksum = "94b0b33c13a79f669c85defaf4c275dc86a0c0372807d0ca3d78e0bb87274863" dependencies = [ "bytes", "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.20.8", + "rustls 0.20.9", "slab", "thiserror", "tinyvec", @@ -15932,7 +15845,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "641538578b21f5e5c8ea733b736895576d0fe329bb883b937db6f4d163dbaaf4" dependencies = [ "libc", - "quinn-proto 0.9.5", + "quinn-proto 0.9.6", "socket2 0.4.9", "tracing", "windows-sys 0.42.0", @@ -15966,7 +15879,7 @@ version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", ] [[package]] @@ -16165,9 +16078,9 @@ version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -16343,10 +16256,10 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.9", "http-body 0.4.5", - "hyper 0.14.30", + "hyper 0.14.29", "hyper-rustls 0.24.2", "ipnet", "js-sys", @@ -16354,7 +16267,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.12", + "pin-project-lite", "rustls 0.21.7", "rustls-pemfile 1.0.3", "serde", @@ -16715,12 +16628,12 @@ checksum = "d428f8247852f894ee1be110b375111b586d4fa431f6c46e64ba5a0dcccbe605" dependencies = [ "cfg-if", "glob", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "regex", "relative-path", "rustc_version 0.4.0", - "syn 2.0.58", + "syn 2.0.61", "unicode-ident", ] @@ -16863,22 +16776,22 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.25" +version = "0.38.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" +checksum = 
"2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3" dependencies = [ "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys 0.4.11", + "linux-raw-sys 0.4.10", "windows-sys 0.48.0", ] [[package]] name = "rustls" -version = "0.20.8" +version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fff78fc74d175294f4e83b28343315ffcfb114b156f0185e9741cb5570f50e2f" +checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ "ring 0.16.20", "sct", @@ -16964,9 +16877,9 @@ checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-platform-verifier" -version = "0.3.2" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" +checksum = "b5f0d26fa1ce3c790f9590868f0109289a044acb954525f933e2aa3b871c157d" dependencies = [ "core-foundation", "core-foundation-sys", @@ -17107,7 +17020,7 @@ dependencies = [ "multihash 0.19.1", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.6", + "prost-build 0.12.4", "quickcheck", "rand", "sc-client-api", @@ -17203,9 +17116,9 @@ name = "sc-chain-spec-derive" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -17853,7 +17766,7 @@ dependencies = [ "partial_sort", "pin-project", "prost 0.12.6", - "prost-build 0.12.6", + "prost-build 0.12.4", "rand", "sc-block-builder", "sc-client-api", @@ -17898,7 +17811,7 @@ dependencies = [ "futures", "libp2p-identity", "parity-scale-codec", - "prost-build 0.12.6", + "prost-build 0.12.4", "sc-consensus", "sc-network-types", "sp-consensus", @@ -17940,7 +17853,7 @@ dependencies = [ "log", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.6", + "prost-build 0.12.4", "sc-client-api", "sc-network", "sc-network-types", @@ -17984,7 +17897,7 @@ dependencies = [ "mockall 0.11.4", "parity-scale-codec", "prost 0.12.6", - "prost-build 0.12.6", + "prost-build 0.12.4", "quickcheck", "sc-block-builder", "sc-client-api", @@ -18086,7 +17999,7 @@ dependencies = [ "fnv", "futures", "futures-timer", - "hyper 0.14.30", + "hyper 0.14.29", "hyper-rustls 0.24.2", "lazy_static", "log", @@ -18498,9 +18411,9 @@ name = "sc-tracing-proc-macro" version = "11.0.0" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -18612,7 +18525,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -18650,7 +18563,7 @@ version = "0.8.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0f696e21e10fa546b7ffb1c9672c6de8fbc7a81acf59524386d8639bf12737" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "serde_derive_internals", "syn 1.0.109", @@ -18689,7 +18602,7 @@ version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ - "aead 0.5.2", + "aead", "arrayref", "arrayvec 0.7.4", "curve25519-dalek", @@ -18771,18 +18684,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.0" +version = "0.28.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" +checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] @@ -18950,9 +18863,9 @@ version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -18961,7 +18874,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85bf8229e7920a9f636479437026331ce11aa132b4dde37d121944a44d6e5f3c" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -19011,9 +18924,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.32" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fd075d994154d4a774f95b51fb96bdc2832b0ea48425c92546073816cda1f2f" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ "indexmap 2.2.3", "itoa", @@ -19052,9 +18965,9 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -19095,9 +19008,9 @@ dependencies = [ [[package]] name = "sha1-asm" -version = "0.5.3" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "286acebaf8b67c1130aedffad26f594eff0c1292389158135327d2e23aed582b" +checksum = "2ba6947745e7f86be3b8af00b7355857085dbdf8901393c89514510eb61f4e21" dependencies = [ "cc", ] @@ -19345,7 +19258,7 @@ dependencies = [ "bip39", "blake2-rfc", "bs58 0.5.1", - "chacha20 0.9.1", + "chacha20", "crossbeam-queue", "derive_more", "ed25519-zebra", @@ -19367,7 +19280,7 @@ dependencies = [ "num-traits", "pbkdf2", "pin-project", - "poly1305 0.8.0", + "poly1305", "rand", "rand_chacha", "ruzstd", @@ -19430,16 +19343,16 @@ checksum = "5e9f0ab6ef7eb7353d9119c170a436d1bf248eea575ac42d19d12f4e34130831" [[package]] name = "snow" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9d1425eb528a21de2755c75af4c9b5d57f50a0d4c3b7f1828a4cd03f8ba155" +checksum = "850948bee068e713b8ab860fe1adc4d109676ab4c3b621fd8147f06b261f2f85" dependencies = [ - "aes-gcm 0.9.2", + "aes-gcm", "blake2 0.10.6", "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring 0.16.20", + "ring 0.17.7", "rustc_version 0.4.0", "sha2 0.10.8", "subtle 2.5.0", @@ -19928,9 +19841,9 @@ dependencies = [ "blake2 0.10.6", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20126,7 +20039,8 @@ dependencies = [ "sp-keystore", "sp-mmr-primitives", "sp-runtime", - "strum 0.26.3", + "sp-weights", + "strum 0.26.2", "w3f-bls", ] @@ -20257,7 +20171,7 @@ dependencies = [ [[package]] 
name = "sp-crypto-ec-utils" version = "0.4.1" -source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -20314,7 +20228,7 @@ version = "0.1.0" dependencies = [ "quote 1.0.36", "sp-crypto-hashing", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20330,18 +20244,18 @@ name = "sp-debug-derive" version = "8.0.0" source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "sp-debug-derive" version = "14.0.0" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20419,7 +20333,7 @@ version = "31.0.0" dependencies = [ "sp-core", "sp-runtime", - "strum 0.26.3", + "strum 0.26.2", ] [[package]] @@ -20607,13 +20521,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#838a534da874cf6071fba1df07643c6c5b033ae0" +source = "git+https://github.com/paritytech/polkadot-sdk#82912acb33a9030c0ef3bf590a34fca09b72dc5f" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20623,9 +20537,9 @@ dependencies = [ "Inflector", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20718,7 +20632,7 @@ dependencies = [ name = "sp-statement-store" version = "10.0.0" dependencies = [ - "aes-gcm 0.10.3", + "aes-gcm", "curve25519-dalek", "ed25519-dalek", "hkdf", @@ -20884,10 +20798,10 @@ name = "sp-version-proc-macro" version = "13.0.0" dependencies = [ "parity-scale-codec", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "sp-version", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -20969,7 +20883,7 @@ checksum = "5e6915280e2d0db8911e5032a5c275571af6bdded2916abd691a659be25d3439" dependencies = [ "Inflector", "num-format", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "serde", "serde_json", @@ -20994,7 +20908,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f07d54c4d01a1713eb363b55ba51595da15f6f1211435b71466460da022aa140" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -21187,7 +21101,7 @@ checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ "cfg_aliases", "memchr", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -21260,7 +21174,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -21282,11 +21196,11 @@ checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" [[package]] name = "strum" -version = "0.26.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fec0f0aef304996cf250b31b5a10dee7980c85da9d759361292b8bca5a18f06" +checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" dependencies = [ - "strum_macros 
0.26.4", + "strum_macros 0.26.2", ] [[package]] @@ -21296,7 +21210,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "rustversion", "syn 1.0.109", @@ -21309,23 +21223,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck 0.4.1", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "rustversion", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "strum_macros" -version = "0.26.4" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" +checksum = "c6cf59daf282c0a494ba14fd21610a0325f9f90ec9d1231dea26bcb1d696c946" dependencies = [ - "heck 0.5.0", - "proc-macro2 1.0.75", + "heck 0.4.1", + "proc-macro2 1.0.82", "quote 1.0.36", "rustversion", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -21463,7 +21377,7 @@ dependencies = [ "sp-runtime", "sp-trie", "structopt", - "strum 0.26.3", + "strum 0.26.2", "thiserror", ] @@ -21639,7 +21553,7 @@ dependencies = [ "sp-maybe-compressed-blob", "sp-tracing 16.0.0", "sp-version", - "strum 0.26.3", + "strum 0.26.2", "tempfile", "toml 0.8.8", "walkdir", @@ -21772,18 +21686,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.58" +version = "2.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "unicode-ident", ] @@ -21795,9 +21709,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86b837ef12ab88835251726eb12237655e61ec8dc8a280085d1961cdc3dfd047" dependencies = [ "paste", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -21806,7 +21720,7 @@ version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", "unicode-xid 0.2.4", @@ -21818,16 +21732,16 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] name = "sysinfo" -version = "0.30.12" +version = "0.30.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "732ffa00f53e6b2af46208fba5718d9662a421049204e156328b66791ffa15ae" +checksum = "1fb4f3438c8f6389c864e61221cbc97e9bca98b4daf39a5beb7bea660f528bb2" dependencies = [ "cfg-if", "core-foundation-sys", @@ -21891,7 +21805,7 @@ dependencies = [ "cfg-if", "fastrand 2.1.0", "redox_syscall 0.4.1", - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -21910,7 +21824,7 @@ version = 
"0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.25", + "rustix 0.38.21", "windows-sys 0.48.0", ] @@ -21937,9 +21851,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -22101,7 +22015,7 @@ version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10ac1c5050e43014d16b2f94d0d2ce79e65ffdd8b38d8048f9c8f6a8a6da62ac" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "syn 1.0.109", ] @@ -22112,9 +22026,9 @@ version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -22188,14 +22102,16 @@ dependencies = [ [[package]] name = "time" -version = "0.3.27" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bb39ee79a6d8de55f48f2293a830e040392f1c5f16e336bdd1788cd0aadce07" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", "libc", + "num-conv", "num_threads", + "powerfmt", "serde", "time-core", "time-macros", @@ -22203,16 +22119,17 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.13" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "733d258752e9303d392b94b75230d07b0b9c489350c69b851fc6c065fde3e8f9" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -22252,9 +22169,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -22262,7 +22179,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.3", - "pin-project-lite 0.2.12", + "pin-project-lite", "signal-hook-registry", "socket2 0.5.7", "tokio-macros", @@ -22271,13 +22188,13 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -22319,7 +22236,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ 
"futures-core", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tokio-util", ] @@ -22362,7 +22279,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", ] @@ -22429,7 +22346,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.12", + "pin-project-lite", "tokio", "tower-layer", "tower-service", @@ -22447,7 +22364,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "pin-project-lite 0.2.12", + "pin-project-lite", "tower-layer", "tower-service", ] @@ -22471,7 +22388,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.12", + "pin-project-lite", "tracing-attributes", "tracing-core", ] @@ -22482,9 +22399,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -22524,9 +22441,9 @@ dependencies = [ "assert_matches", "expander", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -22622,9 +22539,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.29.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c992b4f40c234a074d48a757efeabb1a6be88af84c0c23f7ca158950cb0ae7f" +checksum = "65ed83be775d85ebb0e272914fff6462c39b3ddd6dc67b5c1c41271aad280c69" dependencies = [ "hash-db", "log", @@ -22789,12 +22706,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "typeid" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "059d83cc991e7a42fc37bd50941885db0888e34209f8cfd9aab07ddec03bc9cf" - [[package]] name = "typenum" version = "1.16.0" @@ -22870,16 +22781,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "universal-hash" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b2c654932e3e4f9196e69d08fdf7cfd718e1dc6f66b347e6024a0c961402" -dependencies = [ - "generic-array 0.14.7", - "subtle 2.5.0", -] - [[package]] name = "universal-hash" version = "0.5.1" @@ -22892,9 +22793,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "unsigned-varint" @@ -22932,12 +22833,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -22967,9 +22868,9 @@ checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" 
[[package]] name = "value-bag" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" +checksum = "8fec26a25bd6fca441cdd0f769fd7f891bae119f996de31f86a5eddccef54c1d" dependencies = [ "value-bag-serde1", "value-bag-sval2", @@ -22977,9 +22878,9 @@ dependencies = [ [[package]] name = "value-bag-serde1" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccacf50c5cb077a9abb723c5bcb5e0754c1a433f1e1de89edc328e2760b6328b" +checksum = "ead5b693d906686203f19a49e88c477fb8c15798b68cf72f60b4b5521b4ad891" dependencies = [ "erased-serde", "serde", @@ -22988,9 +22889,9 @@ dependencies = [ [[package]] name = "value-bag-sval2" -version = "1.9.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1785bae486022dfb9703915d42287dcb284c1ee37bd1080eeba78cc04721285b" +checksum = "3b9d0f4a816370c3a0d7d82d603b62198af17675b12fe5e91de6b47ceb505882" dependencies = [ "sval", "sval_buffer", @@ -23091,9 +22992,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "serde", @@ -23103,16 +23004,16 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", "wasm-bindgen-shared", ] @@ -23130,9 +23031,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote 1.0.36", "wasm-bindgen-macro-support", @@ -23140,22 +23041,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-bindgen-test" @@ -23177,7 +23078,7 @@ version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecb993dd8c836930ed130e020e77d9b2e65dd0fbab1b67c790b0f5d80b11a575" dependencies = [ - "proc-macro2 1.0.75", 
+ "proc-macro2 1.0.82", "quote 1.0.36", ] @@ -24243,10 +24144,10 @@ name = "xcm-procedural" version = "7.0.0" dependencies = [ "Inflector", - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", "staging-xcm", - "syn 2.0.58", + "syn 2.0.61", "trybuild", ] @@ -24409,9 +24310,9 @@ version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] @@ -24429,9 +24330,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.75", + "proc-macro2 1.0.82", "quote 1.0.36", - "syn 2.0.58", + "syn 2.0.61", ] [[package]] diff --git a/polkadot/runtime/rococo/src/lib.rs b/polkadot/runtime/rococo/src/lib.rs index 2f23b889916b..30b915d32deb 100644 --- a/polkadot/runtime/rococo/src/lib.rs +++ b/polkadot/runtime/rococo/src/lib.rs @@ -1335,6 +1335,7 @@ impl pallet_beefy_mmr::Config for Runtime { type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; type LeafExtra = H256; type BeefyDataProvider = ParaHeadsRootProvider; + type WeightInfo = weights::pallet_beefy_mmr::WeightInfo; } impl paras_sudo_wrapper::Config for Runtime {} @@ -1629,47 +1630,45 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( - pallet_society::migrations::MigrateToV2, - parachains_configuration::migration::v7::MigrateToV7, - assigned_slots::migration::v1::MigrateToV1, - parachains_scheduler::migration::MigrateV1ToV2, - parachains_configuration::migration::v8::MigrateToV8, - parachains_configuration::migration::v9::MigrateToV9, - paras_registrar::migration::MigrateToV1, - pallet_referenda::migration::v1::MigrateV0ToV1, - pallet_referenda::migration::v1::MigrateV0ToV1, - - // Unlock & unreserve Gov1 funds - - pallet_elections_phragmen::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, - pallet_democracy::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, - pallet_tips::migrations::unreserve_deposits::UnreserveDeposits, - - // Delete all Gov v1 pallet storage key/values. - - frame_support::migrations::RemovePallet::DbWeight>, - frame_support::migrations::RemovePallet::DbWeight>, - frame_support::migrations::RemovePallet::DbWeight>, - frame_support::migrations::RemovePallet::DbWeight>, - frame_support::migrations::RemovePallet::DbWeight>, - frame_support::migrations::RemovePallet::DbWeight>, - - pallet_grandpa::migrations::MigrateV4ToV5, - parachains_configuration::migration::v10::MigrateToV10, - - // Migrate Identity pallet for Usernames - pallet_identity::migration::versioned::V0ToV1, - parachains_configuration::migration::v11::MigrateToV11, - // This needs to come after the `parachains_configuration` above as we are reading the configuration. 
- coretime::migration::MigrateToCoretime, - parachains_configuration::migration::v12::MigrateToV12, - parachains_on_demand::migration::MigrateV0ToV1, - - // permanent - pallet_xcm::migration::MigrateToLatestXcmVersion, - - parachains_inclusion::migration::MigrateToV1, - ); + pallet_society::migrations::MigrateToV2, + parachains_configuration::migration::v7::MigrateToV7, + assigned_slots::migration::v1::MigrateToV1, + parachains_scheduler::migration::MigrateV1ToV2, + parachains_configuration::migration::v8::MigrateToV8, + parachains_configuration::migration::v9::MigrateToV9, + paras_registrar::migration::MigrateToV1, + pallet_referenda::migration::v1::MigrateV0ToV1, + pallet_referenda::migration::v1::MigrateV0ToV1, + + // Unlock & unreserve Gov1 funds + + pallet_elections_phragmen::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, + pallet_democracy::migrations::unlock_and_unreserve_all_funds::UnlockAndUnreserveAllFunds, + pallet_tips::migrations::unreserve_deposits::UnreserveDeposits, + + // Delete all Gov v1 pallet storage key/values. + + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + frame_support::migrations::RemovePallet::DbWeight>, + pallet_grandpa::migrations::MigrateV4ToV5, + parachains_configuration::migration::v10::MigrateToV10, + + // Migrate Identity pallet for Usernames + pallet_identity::migration::versioned::V0ToV1, + parachains_configuration::migration::v11::MigrateToV11, + // This needs to come after the `parachains_configuration` above as we are reading the configuration. + coretime::migration::MigrateToCoretime, + parachains_configuration::migration::v12::MigrateToV12, + parachains_on_demand::migration::MigrateV0ToV1, + + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, + parachains_inclusion::migration::MigrateToV1, + ); } /// Executive: handles dispatch to the various modules. @@ -1735,6 +1734,7 @@ mod benches { // Substrate [pallet_balances, Balances] [pallet_balances, NisCounterpartBalances] + [pallet_beefy_mmr, MmrLeaf] [frame_benchmarking::baseline, Baseline::] [pallet_bounties, Bounties] [pallet_child_bounties, ChildBounties] diff --git a/polkadot/runtime/rococo/src/weights/mod.rs b/polkadot/runtime/rococo/src/weights/mod.rs index 0512a393a6c4..cd3f4689f562 100644 --- a/polkadot/runtime/rococo/src/weights/mod.rs +++ b/polkadot/runtime/rococo/src/weights/mod.rs @@ -19,6 +19,7 @@ pub mod frame_system; pub mod pallet_asset_rate; pub mod pallet_balances_balances; pub mod pallet_balances_nis_counterpart_balances; +pub mod pallet_beefy_mmr; pub mod pallet_bounties; pub mod pallet_child_bounties; pub mod pallet_conviction_voting; diff --git a/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs new file mode 100644 index 000000000000..317c9149ec6c --- /dev/null +++ b/polkadot/runtime/rococo/src/weights/pallet_beefy_mmr.rs @@ -0,0 +1,89 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_beefy_mmr` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("rococo-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_beefy_mmr +// --chain=rococo-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/rococo/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_beefy_mmr`. +pub struct WeightInfo(PhantomData); +impl pallet_beefy_mmr::WeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn extract_validation_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 7_116_000 picoseconds. + Weight::from_parts(7_343_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::Nodes` (r:1 w:0) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + fn read_peak() -> Weight { + // Proof Size summary in bytes: + // Measured: `234` + // Estimated: `3505` + // Minimum execution time: 5_652_000 picoseconds. + Weight::from_parts(5_963_000, 0) + .saturating_add(Weight::from_parts(0, 3505)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::RootHash` (r:1 w:0) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:0) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 512]`. + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `226` + // Estimated: `1517` + // Minimum execution time: 11_953_000 picoseconds. 
+ Weight::from_parts(15_978_891, 0) + .saturating_add(Weight::from_parts(0, 1517)) + // Standard Error: 1_780 + .saturating_add(Weight::from_parts(1_480_582, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + } +} diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index 838ba17e5613..aa446b03368d 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -461,6 +461,7 @@ impl pallet_beefy_mmr::Config for Runtime { type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; type LeafExtra = H256; type BeefyDataProvider = ParaHeadsRootProvider; + type WeightInfo = weights::pallet_beefy_mmr::WeightInfo; } parameter_types! { @@ -601,20 +602,20 @@ impl pallet_election_provider_multi_phase::MinerConfig for Runtime { type MaxWeight = OffchainSolutionWeightLimit; type Solution = NposCompactSolution16; type MaxVotesPerVoter = < - ::DataProvider - as - frame_election_provider_support::ElectionDataProvider - >::MaxVotesPerVoter; + ::DataProvider + as + frame_election_provider_support::ElectionDataProvider + >::MaxVotesPerVoter; type MaxWinners = MaxActiveValidators; // The unsigned submissions have to respect the weight of the submit_unsigned call, thus their // weight estimate function is wired to this call's weight. fn solution_weight(v: u32, t: u32, a: u32, d: u32) -> Weight { < - ::WeightInfo - as - pallet_election_provider_multi_phase::WeightInfo - >::submit_unsigned(v, t, a, d) + ::WeightInfo + as + pallet_election_provider_multi_phase::WeightInfo + >::submit_unsigned(v, t, a, d) } } @@ -1847,6 +1848,7 @@ mod benches { // Substrate [pallet_bags_list, VoterList] [pallet_balances, Balances] + [pallet_beefy_mmr, BeefyMmrLeaf] [pallet_conviction_voting, ConvictionVoting] [pallet_election_provider_multi_phase, ElectionProviderMultiPhase] [frame_election_provider_support, ElectionProviderBench::] diff --git a/polkadot/runtime/westend/src/weights/mod.rs b/polkadot/runtime/westend/src/weights/mod.rs index 2248e421e639..cb6e2c85e363 100644 --- a/polkadot/runtime/westend/src/weights/mod.rs +++ b/polkadot/runtime/westend/src/weights/mod.rs @@ -20,6 +20,7 @@ pub mod frame_system; pub mod pallet_asset_rate; pub mod pallet_bags_list; pub mod pallet_balances; +pub mod pallet_beefy_mmr; pub mod pallet_conviction_voting; pub mod pallet_election_provider_multi_phase; pub mod pallet_fast_unstake; diff --git a/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs new file mode 100644 index 000000000000..5be207e3fcff --- /dev/null +++ b/polkadot/runtime/westend/src/weights/pallet_beefy_mmr.rs @@ -0,0 +1,89 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Autogenerated weights for `pallet_beefy_mmr` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! 
DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("westend-dev")`, DB CACHE: 1024 + +// Executed Command: +// target/production/polkadot +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_beefy_mmr +// --chain=westend-dev +// --header=./polkadot/file_header.txt +// --output=./polkadot/runtime/westend/src/weights/ + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::Weight}; +use core::marker::PhantomData; + +/// Weight functions for `pallet_beefy_mmr`. +pub struct WeightInfo(PhantomData); +impl pallet_beefy_mmr::WeightInfo for WeightInfo { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn extract_validation_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 7_850_000 picoseconds. + Weight::from_parts(8_169_000, 0) + .saturating_add(Weight::from_parts(0, 3509)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::Nodes` (r:1 w:0) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + fn read_peak() -> Weight { + // Proof Size summary in bytes: + // Measured: `201` + // Estimated: `3505` + // Minimum execution time: 6_852_000 picoseconds. + Weight::from_parts(7_448_000, 0) + .saturating_add(Weight::from_parts(0, 3505)) + .saturating_add(T::DbWeight::get().reads(1)) + } + /// Storage: `Mmr::RootHash` (r:1 w:0) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:0) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 512]`. + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `193` + // Estimated: `1517` + // Minimum execution time: 12_860_000 picoseconds. + Weight::from_parts(17_158_162, 0) + .saturating_add(Weight::from_parts(0, 1517)) + // Standard Error: 1_732 + .saturating_add(Weight::from_parts(1_489_410, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2)) + } +} diff --git a/prdoc/pr_5188.prdoc b/prdoc/pr_5188.prdoc new file mode 100644 index 000000000000..b2ab9ff6653b --- /dev/null +++ b/prdoc/pr_5188.prdoc @@ -0,0 +1,32 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Added benchmarks for BEEFY fork voting + +doc: + - audience: + - Runtime Dev + - Runtime User + description: | + This PR adds benchmarks for `report_fork_voting` and `report_future_voting` extrinsics to `pallet-beefy`. + `report_future_voting` can be called now. `report_fork_voting` can't be called yet. Even though we have added + the formula for computing its weight, we still use `Weight::MAX`. We will set the proper weight in a future PR. 
+ In order to do this we need to also check that the ancestry proof is optimal. + The PR adds a `WeightInfo` associated trait to the `pallet_beefy_mmr::Config` and defines benchmarks for + `pallet_beefy_mmr`. + +crates: + - name: pallet-mmr + bump: minor + - name: sp-mmr-primitives + bump: minor + - name: sp-consensus-beefy + bump: minor + - name: rococo-runtime + bump: minor + - name: pallet-beefy + bump: major + - name: pallet-beefy-mmr + bump: major + - name: westend-runtime + bump: minor diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index a94838cf20c0..ff3f7e26baf2 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -1612,6 +1612,7 @@ impl pallet_beefy_mmr::Config for Runtime { type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; type LeafExtra = Vec; type BeefyDataProvider = (); + type WeightInfo = (); } parameter_types! { @@ -2585,6 +2586,7 @@ mod benches { [pallet_babe, Babe] [pallet_bags_list, VoterList] [pallet_balances, Balances] + [pallet_beefy_mmr, MmrLeaf] [pallet_bounties, Bounties] [pallet_broker, Broker] [pallet_child_bounties, ChildBounties] diff --git a/substrate/frame/beefy-mmr/Cargo.toml b/substrate/frame/beefy-mmr/Cargo.toml index 672a7a68bc7e..d67ac20ee922 100644 --- a/substrate/frame/beefy-mmr/Cargo.toml +++ b/substrate/frame/beefy-mmr/Cargo.toml @@ -18,6 +18,7 @@ log = { workspace = true } scale-info = { features = ["derive"], workspace = true } serde = { optional = true, workspace = true, default-features = true } binary-merkle-tree = { workspace = true } +frame-benchmarking = { optional = true, workspace = true } frame-support = { workspace = true } frame-system = { workspace = true } pallet-beefy = { workspace = true } @@ -40,6 +41,7 @@ std = [ "array-bytes", "binary-merkle-tree/std", "codec/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "log/std", @@ -65,6 +67,7 @@ try-runtime = [ "sp-runtime/try-runtime", ] runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", "pallet-mmr/runtime-benchmarks", diff --git a/substrate/frame/beefy-mmr/src/benchmarking.rs b/substrate/frame/beefy-mmr/src/benchmarking.rs new file mode 100644 index 000000000000..135f95eabb99 --- /dev/null +++ b/substrate/frame/beefy-mmr/src/benchmarking.rs @@ -0,0 +1,129 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Beefy pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use crate::Pallet as BeefyMmr; +use codec::Encode; +use frame_benchmarking::v2::*; +use frame_support::traits::Hooks; +use frame_system::{Config as SystemConfig, Pallet as System}; +use pallet_mmr::{Nodes, Pallet as Mmr}; +use sp_consensus_beefy::Payload; +use sp_runtime::traits::One; + +pub trait Config: + pallet_mmr::Config + crate::Config +{ +} + +impl Config for T where + T: pallet_mmr::Config + crate::Config +{ +} + +fn init_block(block_num: u32) { + let block_num = block_num.into(); + System::::initialize(&block_num, &::Hash::default(), &Default::default()); + Mmr::::on_initialize(block_num); +} + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn extract_validation_context() { + if !cfg!(feature = "test") { + pallet_mmr::UseLocalStorage::::set(true); + } + + init_block::(1); + let header = System::::finalize(); + frame_system::BlockHash::::insert(BlockNumberFor::::one(), header.hash()); + + let validation_context; + #[block] + { + validation_context = + as AncestryHelper>>::extract_validation_context(header); + } + + assert!(validation_context.is_some()); + } + + #[benchmark] + fn read_peak() { + if !cfg!(feature = "test") { + pallet_mmr::UseLocalStorage::::set(true); + } + + init_block::(1); + + let peak; + #[block] + { + peak = Nodes::::get(0) + } + + assert!(peak.is_some()); + } + + /// Generate ancestry proofs with `n` nodes and benchmark the verification logic. + /// These proofs are inflated, containing all the leafs, so we won't read any peak during + /// the verification. We need to account for the peaks separately. + #[benchmark] + fn n_items_proof_is_non_canonical(n: Linear<2, 512>) { + if !cfg!(feature = "test") { + pallet_mmr::UseLocalStorage::::set(true); + } + + for block_num in 1..=n { + init_block::(block_num); + } + let proof = Mmr::::generate_mock_ancestry_proof().unwrap(); + assert_eq!(proof.items.len(), n as usize); + + let is_non_canonical; + #[block] + { + is_non_canonical = as AncestryHelper>>::is_non_canonical( + &Commitment { + payload: Payload::from_single_entry( + known_payloads::MMR_ROOT_ID, + MerkleRootOf::::default().encode(), + ), + block_number: n.into(), + validator_set_id: 0, + }, + proof, + Mmr::::mmr_root(), + ); + }; + + assert_eq!(is_non_canonical, true); + } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(Default::default()), + crate::mock::Test + ); +} diff --git a/substrate/frame/beefy-mmr/src/lib.rs b/substrate/frame/beefy-mmr/src/lib.rs index 195bbfbf2f29..73119c3faa9b 100644 --- a/substrate/frame/beefy-mmr/src/lib.rs +++ b/substrate/frame/beefy-mmr/src/lib.rs @@ -35,27 +35,34 @@ extern crate alloc; -use sp_runtime::traits::{Convert, Header, Member}; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Convert, Header, Member}, + SaturatedConversion, +}; use alloc::vec::Vec; use codec::Decode; -use pallet_mmr::{primitives::AncestryProof, LeafDataProvider, ParentNumberAndHash}; +use pallet_mmr::{primitives::AncestryProof, LeafDataProvider, NodesUtils, ParentNumberAndHash}; use sp_consensus_beefy::{ known_payloads, mmr::{BeefyAuthoritySet, BeefyDataProvider, BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}, - AncestryHelper, Commitment, ConsensusLog, ValidatorSet as BeefyValidatorSet, + AncestryHelper, AncestryHelperWeightInfo, Commitment, ConsensusLog, + ValidatorSet as BeefyValidatorSet, }; -use frame_support::{crypto::ecdsa::ECDSAExt, traits::Get}; +use frame_support::{crypto::ecdsa::ECDSAExt, pallet_prelude::Weight, traits::Get}; 
use frame_system::pallet_prelude::{BlockNumberFor, HeaderFor}; pub use pallet::*; -use sp_runtime::generic::OpaqueDigestItemId; +pub use weights::WeightInfo; +mod benchmarking; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +mod weights; /// A BEEFY consensus digest item with MMR root hash. pub struct DepositBeefyDigest(core::marker::PhantomData); @@ -126,6 +133,8 @@ pub mod pallet { /// Retrieve arbitrary data that should be added to the mmr leaf type BeefyDataProvider: BeefyDataProvider; + + type WeightInfo: WeightInfo; } /// Details of current BEEFY authority set. @@ -263,6 +272,30 @@ where } } +impl AncestryHelperWeightInfo> for Pallet +where + T: pallet_mmr::Config, +{ + fn extract_validation_context() -> Weight { + ::WeightInfo::extract_validation_context() + } + + fn is_non_canonical(proof: &>>::Proof) -> Weight { + let mmr_utils = NodesUtils::new(proof.leaf_count); + let num_peaks = mmr_utils.number_of_peaks(); + + // The approximated cost of verifying an ancestry proof with `n` nodes. + // We add the previous peaks to the total number of nodes, + // since they have to be processed as well. + ::WeightInfo::n_items_proof_is_non_canonical( + proof.items.len().saturating_add(proof.prev_peaks.len()).saturated_into(), + ) + // `n_items_proof_is_non_canonical()` uses inflated proofs that contain all the leafs, + // where no peak needs to be read. So we need to also add the cost of reading the peaks. + .saturating_add(::WeightInfo::read_peak().saturating_mul(num_peaks)) + } +} + impl Pallet { /// Return the currently active BEEFY authority set proof. pub fn authority_set_proof() -> BeefyAuthoritySet> { diff --git a/substrate/frame/beefy-mmr/src/mock.rs b/substrate/frame/beefy-mmr/src/mock.rs index 1102f9677aaa..6756c618d706 100644 --- a/substrate/frame/beefy-mmr/src/mock.rs +++ b/substrate/frame/beefy-mmr/src/mock.rs @@ -37,6 +37,7 @@ use crate as pallet_beefy_mmr; pub use sp_consensus_beefy::{ ecdsa_crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider, ConsensusLog, BEEFY_ENGINE_ID, }; +use sp_core::offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}; impl_opaque_keys! 
{ pub struct MockSessionKeys { @@ -122,6 +123,7 @@ impl pallet_beefy_mmr::Config for Test { type LeafExtra = Vec; type BeefyDataProvider = DummyDataProvider; + type WeightInfo = (); } pub struct DummyDataProvider; @@ -191,5 +193,10 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExt .assimilate_storage(&mut t) .unwrap(); - t.into() + let mut ext: TestExternalities = t.into(); + let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); + ext.register_extension(OffchainDbExt::new(offchain.clone())); + ext.register_extension(OffchainWorkerExt::new(offchain)); + + ext } diff --git a/substrate/frame/beefy-mmr/src/tests.rs b/substrate/frame/beefy-mmr/src/tests.rs index f99835a1dc0a..b126a01012b4 100644 --- a/substrate/frame/beefy-mmr/src/tests.rs +++ b/substrate/frame/beefy-mmr/src/tests.rs @@ -24,10 +24,7 @@ use sp_consensus_beefy::{ AncestryHelper, Commitment, Payload, ValidatorSet, }; -use sp_core::{ - offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt}, - H256, -}; +use sp_core::H256; use sp_io::TestExternalities; use sp_runtime::{traits::Keccak256, DigestItem}; @@ -40,8 +37,6 @@ fn init_block(block: u64, maybe_parent_hash: Option) { System::initialize(&block, &parent_hash, &Default::default()); Session::on_initialize(block); Mmr::on_initialize(block); - Beefy::on_initialize(block); - BeefyMmr::on_initialize(block); } pub fn beefy_log(log: ConsensusLog) -> DigestItem { @@ -211,11 +206,6 @@ fn should_update_authorities() { fn extract_validation_context_should_work_correctly() { let mut ext = new_test_ext(vec![1, 2]); - // Register offchain ext. - let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.execute_with(|| { init_block(1, None); let h1 = System::finalize(); @@ -262,13 +252,8 @@ fn is_non_canonical_should_work_correctly() { }); ext.persist_offchain_overlay(); - // Register offchain ext. - let (offchain, _offchain_state) = TestOffchainExt::with_offchain_db(ext.offchain_db()); - ext.register_extension(OffchainDbExt::new(offchain.clone())); - ext.register_extension(OffchainWorkerExt::new(offchain)); - ext.execute_with(|| { - let valid_proof = Mmr::generate_ancestry_proof(250, None).unwrap(); + let valid_proof = BeefyMmr::generate_proof(250, None).unwrap(); let mut invalid_proof = valid_proof.clone(); invalid_proof.items.push((300, Default::default())); @@ -343,7 +328,7 @@ fn is_non_canonical_should_work_correctly() { // - should return false, if the commitment is targeting the canonical chain // - should return true if the commitment is NOT targeting the canonical chain for prev_block_number in 1usize..=500 { - let proof = Mmr::generate_ancestry_proof(prev_block_number as u64, None).unwrap(); + let proof = BeefyMmr::generate_proof(prev_block_number as u64, None).unwrap(); assert_eq!( BeefyMmr::is_non_canonical( diff --git a/substrate/frame/beefy-mmr/src/weights.rs b/substrate/frame/beefy-mmr/src/weights.rs new file mode 100644 index 000000000000..c292f25400cc --- /dev/null +++ b/substrate/frame/beefy-mmr/src/weights.rs @@ -0,0 +1,134 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for `pallet_beefy_mmr` +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-13, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! WORST CASE MAP SIZE: `1000000` +//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: `Compiled`, CHAIN: `Some("dev")`, DB CACHE: `1024` + +// Executed Command: +// target/production/substrate-node +// benchmark +// pallet +// --steps=50 +// --repeat=20 +// --extrinsic=* +// --wasm-execution=compiled +// --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json +// --pallet=pallet_beefy_mmr +// --chain=dev +// --header=./substrate/HEADER-APACHE2 +// --output=./substrate/frame/beefy-mmr/src/weights.rs +// --template=./substrate/.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] +#![allow(missing_docs)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use core::marker::PhantomData; + +/// Weight functions needed for `pallet_beefy_mmr`. +pub trait WeightInfo { + fn extract_validation_context() -> Weight; + fn read_peak() -> Weight; + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight; +} + +/// Weights for `pallet_beefy_mmr` using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn extract_validation_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 7_461_000 picoseconds. + Weight::from_parts(7_669_000, 3509) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Mmr::Nodes` (r:1 w:0) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + fn read_peak() -> Weight { + // Proof Size summary in bytes: + // Measured: `333` + // Estimated: `3505` + // Minimum execution time: 6_137_000 picoseconds. + Weight::from_parts(6_423_000, 3505) + .saturating_add(T::DbWeight::get().reads(1_u64)) + } + /// Storage: `Mmr::RootHash` (r:1 w:0) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:0) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 512]`. + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `325` + // Estimated: `1517` + // Minimum execution time: 10_687_000 picoseconds. + Weight::from_parts(14_851_626, 1517) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) + .saturating_add(T::DbWeight::get().reads(2_u64)) + } +} + +// For backwards compatibility and tests. 
+impl WeightInfo for () { + /// Storage: `System::BlockHash` (r:1 w:0) + /// Proof: `System::BlockHash` (`max_values`: None, `max_size`: Some(44), added: 2519, mode: `MaxEncodedLen`) + fn extract_validation_context() -> Weight { + // Proof Size summary in bytes: + // Measured: `92` + // Estimated: `3509` + // Minimum execution time: 7_461_000 picoseconds. + Weight::from_parts(7_669_000, 3509) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Mmr::Nodes` (r:1 w:0) + /// Proof: `Mmr::Nodes` (`max_values`: None, `max_size`: Some(40), added: 2515, mode: `MaxEncodedLen`) + fn read_peak() -> Weight { + // Proof Size summary in bytes: + // Measured: `333` + // Estimated: `3505` + // Minimum execution time: 6_137_000 picoseconds. + Weight::from_parts(6_423_000, 3505) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + } + /// Storage: `Mmr::RootHash` (r:1 w:0) + /// Proof: `Mmr::RootHash` (`max_values`: Some(1), `max_size`: Some(32), added: 527, mode: `MaxEncodedLen`) + /// Storage: `Mmr::NumberOfLeaves` (r:1 w:0) + /// Proof: `Mmr::NumberOfLeaves` (`max_values`: Some(1), `max_size`: Some(8), added: 503, mode: `MaxEncodedLen`) + /// The range of component `n` is `[2, 512]`. + fn n_items_proof_is_non_canonical(n: u32, ) -> Weight { + // Proof Size summary in bytes: + // Measured: `325` + // Estimated: `1517` + // Minimum execution time: 10_687_000 picoseconds. + Weight::from_parts(14_851_626, 1517) + // Standard Error: 1_455 + .saturating_add(Weight::from_parts(961_703, 0).saturating_mul(n.into())) + .saturating_add(RocksDbWeight::get().reads(2_u64)) + } +} diff --git a/substrate/frame/beefy/src/default_weights.rs b/substrate/frame/beefy/src/default_weights.rs index 70dd3bb02bf1..6b83015459d6 100644 --- a/substrate/frame/beefy/src/default_weights.rs +++ b/substrate/frame/beefy/src/default_weights.rs @@ -57,11 +57,6 @@ impl crate::WeightInfo for () { .saturating_add(DbWeight::get().reads(2)) } - // TODO: Calculate - fn report_fork_voting(_validator_count: u32, _max_nominators_per_validator: u32) -> Weight { - Weight::MAX - } - fn set_new_genesis() -> Weight { DbWeight::get().writes(1) } diff --git a/substrate/frame/beefy/src/lib.rs b/substrate/frame/beefy/src/lib.rs index 353ba876c7ed..cf690a9df339 100644 --- a/substrate/frame/beefy/src/lib.rs +++ b/substrate/frame/beefy/src/lib.rs @@ -19,21 +19,33 @@ extern crate alloc; +mod default_weights; +mod equivocation; +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + use alloc::{boxed::Box, vec::Vec}; use codec::{Encode, MaxEncodedLen}; +use log; use frame_support::{ dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::*, traits::{Get, OneSessionHandler}, - weights::Weight, + weights::{constants::RocksDbWeight as DbWeight, Weight}, BoundedSlice, BoundedVec, Parameter, }; use frame_system::{ ensure_none, ensure_signed, pallet_prelude::{BlockNumberFor, HeaderFor, OriginFor}, }; -use log; +use sp_consensus_beefy::{ + AncestryHelper, AncestryHelperWeightInfo, AuthorityIndex, BeefyAuthorityId, ConsensusLog, + DoubleVotingProof, ForkVotingProof, FutureBlockVotingProof, OnNewValidatorSet, ValidatorSet, + BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, +}; use sp_runtime::{ generic::DigestItem, traits::{IsMember, Member, One}, @@ -42,24 +54,10 @@ use sp_runtime::{ use sp_session::{GetSessionNumber, GetValidatorCount}; use sp_staking::{offence::OffenceReportSystem, SessionIndex}; -use sp_consensus_beefy::{ - AncestryHelper, AuthorityIndex, BeefyAuthorityId, ConsensusLog, DoubleVotingProof, - ForkVotingProof, FutureBlockVotingProof, 
OnNewValidatorSet, ValidatorSet, BEEFY_ENGINE_ID, - GENESIS_AUTHORITY_SET_ID, -}; - -mod default_weights; -mod equivocation; -#[cfg(test)] -mod mock; -#[cfg(test)] -mod tests; - +use crate::equivocation::EquivocationEvidenceFor; pub use crate::equivocation::{EquivocationOffence, EquivocationReportSystem, TimeSlot}; pub use pallet::*; -use crate::equivocation::EquivocationEvidenceFor; - const LOG_TARGET: &str = "runtime::beefy"; #[frame_support::pallet] @@ -102,7 +100,8 @@ pub mod pallet { type OnNewValidatorSet: OnNewValidatorSet<::BeefyId>; /// Hook for checking commitment canonicity. - type AncestryHelper: AncestryHelper>; + type AncestryHelper: AncestryHelper> + + AncestryHelperWeightInfo>; /// Weights for this pallet. type WeightInfo: WeightInfo; @@ -295,9 +294,10 @@ pub mod pallet { /// and validate the given key ownership proof against the extracted offender. /// If both are valid, the offence will be reported. #[pallet::call_index(3)] - #[pallet::weight(T::WeightInfo::report_fork_voting( + #[pallet::weight(T::WeightInfo::report_fork_voting::( key_owner_proof.validator_count(), T::MaxNominators::get(), + &equivocation_proof.ancestry_proof ))] pub fn report_fork_voting( origin: OriginFor, @@ -329,9 +329,10 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. #[pallet::call_index(4)] - #[pallet::weight(T::WeightInfo::report_fork_voting( + #[pallet::weight(T::WeightInfo::report_fork_voting::( key_owner_proof.validator_count(), T::MaxNominators::get(), + &equivocation_proof.ancestry_proof ))] pub fn report_fork_voting_unsigned( origin: OriginFor, @@ -358,7 +359,7 @@ pub mod pallet { /// and validate the given key ownership proof against the extracted offender. /// If both are valid, the offence will be reported. #[pallet::call_index(5)] - #[pallet::weight(T::WeightInfo::report_fork_voting( + #[pallet::weight(T::WeightInfo::report_future_block_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] @@ -389,7 +390,7 @@ pub mod pallet { /// if the block author is defined it will be defined as the equivocation /// reporter. #[pallet::call_index(6)] - #[pallet::weight(T::WeightInfo::report_fork_voting( + #[pallet::weight(T::WeightInfo::report_future_block_voting( key_owner_proof.validator_count(), T::MaxNominators::get(), ))] @@ -740,15 +741,52 @@ pub trait WeightInfo { validator_count: u32, max_nominators_per_validator: u32, ) -> Weight; + + fn set_new_genesis() -> Weight; +} + +pub(crate) trait WeightInfoExt: WeightInfo { fn report_double_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight { Self::report_voting_equivocation(2, validator_count, max_nominators_per_validator) } - fn report_fork_voting(validator_count: u32, max_nominators_per_validator: u32) -> Weight; + + fn report_fork_voting( + validator_count: u32, + max_nominators_per_validator: u32, + ancestry_proof: &>>::Proof, + ) -> Weight { + let _weight = >>::extract_validation_context() + .saturating_add( + >>::is_non_canonical( + ancestry_proof, + ), + ) + .saturating_add(Self::report_voting_equivocation( + 1, + validator_count, + max_nominators_per_validator, + )); + + // TODO: https://github.com/paritytech/polkadot-sdk/issues/4523 - return `_weight` here. + // We return `Weight::MAX` currently in order to disallow this extrinsic for the moment. + // We need to check that the proof is optimal. 
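+		// For reference, once that check is in place the intention is to return the
+		// `_weight` computed above, i.e.:
+		//   extract_validation_context()
+		//     + is_non_canonical(ancestry_proof)
+		//     + report_voting_equivocation(1, validator_count, max_nominators_per_validator)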
+ Weight::MAX + } + fn report_future_block_voting( validator_count: u32, max_nominators_per_validator: u32, ) -> Weight { - Self::report_voting_equivocation(1, validator_count, max_nominators_per_validator) + // checking if the report is for a future block + DbWeight::get() + .reads(1) + // check and report the equivocated vote + .saturating_add(Self::report_voting_equivocation( + 1, + validator_count, + max_nominators_per_validator, + )) } - fn set_new_genesis() -> Weight; } + +impl WeightInfoExt for T where T: WeightInfo {} diff --git a/substrate/frame/beefy/src/mock.rs b/substrate/frame/beefy/src/mock.rs index b423fa0bda89..5c79d8f7d7d7 100644 --- a/substrate/frame/beefy/src/mock.rs +++ b/substrate/frame/beefy/src/mock.rs @@ -21,12 +21,13 @@ use std::vec; use frame_election_provider_support::{ bounds::{ElectionBounds, ElectionBoundsBuilder}, - onchain, SequentialPhragmen, + onchain, SequentialPhragmen, Weight, }; use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ConstU32, ConstU64, KeyOwnerProofSystem, OnFinalize, OnInitialize}, }; +use frame_system::pallet_prelude::HeaderFor; use pallet_session::historical as pallet_session_historical; use sp_core::{crypto::KeyTypeId, ConstU128}; use sp_runtime::{ @@ -43,7 +44,7 @@ use sp_state_machine::BasicExternalities; use crate as pallet_beefy; pub use sp_consensus_beefy::{ecdsa_crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; -use sp_consensus_beefy::{AncestryHelper, Commitment}; +use sp_consensus_beefy::{AncestryHelper, AncestryHelperWeightInfo, Commitment}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -131,6 +132,16 @@ impl AncestryHelper
<HeaderFor<Test>> for MockAncestryHelper {
 	}
 }

+impl AncestryHelperWeightInfo<HeaderFor<Test>>
for MockAncestryHelper { + fn extract_validation_context() -> Weight { + unimplemented!() + } + + fn is_non_canonical(_proof: &>>::Proof) -> Weight { + unimplemented!() + } +} + impl pallet_beefy::Config for Test { type BeefyId = BeefyId; type MaxAuthorities = ConstU32<100>; diff --git a/substrate/frame/beefy/src/tests.rs b/substrate/frame/beefy/src/tests.rs index a63b3532b698..d75237205cac 100644 --- a/substrate/frame/beefy/src/tests.rs +++ b/substrate/frame/beefy/src/tests.rs @@ -35,7 +35,7 @@ use sp_consensus_beefy::{ use sp_runtime::DigestItem; use sp_session::MembershipProof; -use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfo}; +use crate::{self as beefy, mock::*, Call, Config, Error, WeightInfoExt}; fn init_block(block: u64) { System::set_block_number(block); @@ -765,7 +765,9 @@ fn report_double_voting_has_valid_weight() { // the weight depends on the size of the validator set, // but there's a lower bound of 100 validators. assert!((1..=100) - .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) + .map(|validators| <::WeightInfo as WeightInfoExt>::report_double_voting( + validators, 1000 + )) .collect::>() .windows(2) .all(|w| w[0] == w[1])); @@ -773,7 +775,9 @@ fn report_double_voting_has_valid_weight() { // after 100 validators the weight should keep increasing // with every extra validator. assert!((100..=1000) - .map(|validators| ::WeightInfo::report_double_voting(validators, 1000)) + .map(|validators| <::WeightInfo as WeightInfoExt>::report_double_voting( + validators, 1000 + )) .collect::>() .windows(2) .all(|w| w[0].ref_time() < w[1].ref_time())); diff --git a/substrate/frame/merkle-mountain-range/src/lib.rs b/substrate/frame/merkle-mountain-range/src/lib.rs index 0ab44711bcf5..7dfe95c83361 100644 --- a/substrate/frame/merkle-mountain-range/src/lib.rs +++ b/substrate/frame/merkle-mountain-range/src/lib.rs @@ -240,6 +240,11 @@ pub mod pallet { pub type Nodes, I: 'static = ()> = StorageMap<_, Identity, NodeIndex, HashOf, OptionQuery>; + /// Helper flag used in the runtime benchmarks for the initial setup. + #[cfg(feature = "runtime-benchmarks")] + #[pallet::storage] + pub type UseLocalStorage = StorageValue<_, bool, ValueQuery>; + #[pallet::hooks] impl, I: 'static> Hooks> for Pallet { fn on_initialize(_n: BlockNumberFor) -> Weight { @@ -439,6 +444,14 @@ impl, I: 'static> Pallet { mmr.generate_ancestry_proof(prev_leaf_count) } + #[cfg(feature = "runtime-benchmarks")] + pub fn generate_mock_ancestry_proof() -> Result>, Error> + { + let leaf_count = Self::block_num_to_leaf_count(>::block_number())?; + let mmr: ModuleMmr = mmr::Mmr::new(leaf_count); + mmr.generate_mock_ancestry_proof() + } + pub fn verify_ancestry_proof( root: HashOf, ancestry_proof: primitives::AncestryProof>, diff --git a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs index 2b46357c5072..f9a4580b9bb3 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/mmr.rs @@ -156,7 +156,7 @@ where } /// Return the internal size of the MMR (number of nodes). - #[cfg(test)] + #[cfg(any(test, feature = "runtime-benchmarks"))] pub fn size(&self) -> NodeIndex { self.mmr.mmr_size() } @@ -252,4 +252,48 @@ where .collect(), }) } + + /// Generate an inflated ancestry proof for the latest leaf in the MMR. + /// + /// The generated proof contains all the leafs in the MMR, so this way we can generate a proof + /// with exactly `leaf_count` items. 
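+	///
+	/// For example, for an MMR with 8 leaves the returned proof carries all 8 leaves in
+	/// `items` and the current peaks in `prev_peaks`, with `prev_leaf_count` and
+	/// `leaf_count` both set to 8; a minimal ancestry proof would be much smaller.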
+ #[cfg(feature = "runtime-benchmarks")] + pub fn generate_mock_ancestry_proof( + &self, + ) -> Result>, Error> { + use crate::ModuleMmr; + use alloc::vec; + use sp_mmr_primitives::mmr_lib::helper; + + let mmr: ModuleMmr = Mmr::new(self.leaves); + let store = >::default(); + + let mut prev_peaks = vec![]; + for peak_pos in helper::get_peaks(mmr.size()) { + let peak = store + .get_elem(peak_pos) + .map_err(|_| Error::GenerateProof)? + .ok_or(Error::GenerateProof)? + .hash(); + prev_peaks.push(peak); + } + + let mut proof_items = vec![]; + for leaf_idx in 0..self.leaves { + let leaf_pos = NodesUtils::leaf_index_to_leaf_node_index(leaf_idx); + let leaf = store + .get_elem(leaf_pos) + .map_err(|_| Error::GenerateProof)? + .ok_or(Error::GenerateProof)? + .hash(); + proof_items.push((leaf_pos, leaf)); + } + + Ok(sp_mmr_primitives::AncestryProof { + prev_peaks, + prev_leaf_count: self.leaves, + leaf_count: self.leaves, + items: proof_items, + }) + } } diff --git a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs index a39089801484..02852388b417 100644 --- a/substrate/frame/merkle-mountain-range/src/mmr/storage.rs +++ b/substrate/frame/merkle-mountain-range/src/mmr/storage.rs @@ -22,7 +22,6 @@ use codec::Encode; use core::iter::Peekable; use log::{debug, trace}; use sp_core::offchain::StorageKind; -use sp_io::offchain_index; use sp_mmr_primitives::{mmr_lib, mmr_lib::helper, utils::NodesUtils}; use crate::{ @@ -47,6 +46,26 @@ pub struct RuntimeStorage; /// DOES NOT support adding new items to the MMR. pub struct OffchainStorage; +impl OffchainStorage { + fn get(key: &[u8]) -> Option> { + sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) + } + + #[cfg(not(feature = "runtime-benchmarks"))] + fn set, I: 'static>(key: &[u8], value: &[u8]) { + sp_io::offchain_index::set(key, value); + } + + #[cfg(feature = "runtime-benchmarks")] + fn set, I: 'static>(key: &[u8], value: &[u8]) { + if crate::pallet::UseLocalStorage::::get() { + sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, key, value); + } else { + sp_io::offchain_index::set(key, value); + } + } +} + /// A storage layer for MMR. /// /// There are two different implementations depending on the use case. @@ -78,7 +97,7 @@ where pos, ancestor_leaf_idx, key ); // Try to retrieve the element from Off-chain DB. - if let Some(elem) = sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + if let Some(elem) = OffchainStorage::get(&key) { return Ok(codec::Decode::decode(&mut &*elem).ok()) } @@ -93,8 +112,7 @@ where pos, ancestor_leaf_idx, ancestor_parent_hash, temp_key ); // Retrieve the element from Off-chain DB. - Ok(sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &temp_key) - .and_then(|v| codec::Decode::decode(&mut &*v).ok())) + Ok(OffchainStorage::get(&temp_key).and_then(|v| codec::Decode::decode(&mut &*v).ok())) } } @@ -203,8 +221,7 @@ where target: "runtime::mmr::offchain", "offchain db set: pos {} parent_hash {:?} key {:?}", pos, parent_hash, temp_key ); - // Indexing API is used to store the full node content. 
- offchain_index::set(&temp_key, &encoded_node); + OffchainStorage::set::(&temp_key, &encoded_node); } } diff --git a/substrate/primitives/consensus/beefy/Cargo.toml b/substrate/primitives/consensus/beefy/Cargo.toml index f31aa5756ba2..57ddab9a70ce 100644 --- a/substrate/primitives/consensus/beefy/Cargo.toml +++ b/substrate/primitives/consensus/beefy/Cargo.toml @@ -26,6 +26,7 @@ sp-io = { workspace = true } sp-mmr-primitives = { workspace = true } sp-runtime = { workspace = true } sp-keystore = { workspace = true } +sp-weights = { workspace = true } strum = { features = ["derive"], workspace = true } lazy_static = { optional = true, workspace = true } @@ -48,6 +49,7 @@ std = [ "sp-keystore/std", "sp-mmr-primitives/std", "sp-runtime/std", + "sp-weights/std", "strum/std", ] diff --git a/substrate/primitives/consensus/beefy/src/lib.rs b/substrate/primitives/consensus/beefy/src/lib.rs index 6ec4a727e276..e977fb0ea25f 100644 --- a/substrate/primitives/consensus/beefy/src/lib.rs +++ b/substrate/primitives/consensus/beefy/src/lib.rs @@ -56,6 +56,7 @@ use sp_runtime::{ traits::{Hash, Header as HeaderT, Keccak256, NumberFor}, OpaqueValue, }; +use sp_weights::Weight; /// Key type for BEEFY module. pub const KEY_TYPE: sp_core::crypto::KeyTypeId = sp_application_crypto::key_types::BEEFY; @@ -460,6 +461,15 @@ pub trait AncestryHelper { ) -> bool; } +/// Weight information for the logic in `AncestryHelper`. +pub trait AncestryHelperWeightInfo: AncestryHelper
{
+	/// Weight info for the `AncestryHelper::extract_validation_context()` method.
+	fn extract_validation_context() -> Weight;
+
+	/// Weight info for the `AncestryHelper::is_non_canonical()` method.
+	fn is_non_canonical(proof: &<Self as AncestryHelper<Header>>::Proof) -> Weight;
+}
+
 /// An opaque type used to represent the key ownership proof at the runtime API
 /// boundary. The inner value is an encoded representation of the actual key
 /// ownership proof which will be parameterized when defining the runtime. At
diff --git a/substrate/primitives/merkle-mountain-range/src/utils.rs b/substrate/primitives/merkle-mountain-range/src/utils.rs
index 72674e24a272..2460af4b800f 100644
--- a/substrate/primitives/merkle-mountain-range/src/utils.rs
+++ b/substrate/primitives/merkle-mountain-range/src/utils.rs
@@ -91,7 +91,7 @@ impl NodesUtils {
 		Self::leaf_node_index_to_leaf_index(rightmost_leaf_pos)
 	}

-	// Translate a _leaf_ `NodeIndex` to its `LeafIndex`.
+	/// Translate a _leaf_ `NodeIndex` to its `LeafIndex`.
 	fn leaf_node_index_to_leaf_index(pos: NodeIndex) -> LeafIndex {
 		if pos == 0 {
 			return 0
@@ -100,8 +100,13 @@ impl NodesUtils {
 		(pos + peaks.len() as u64) >> 1
 	}

-	// Starting from any node position get position of rightmost leaf; this is the leaf
-	// responsible for the addition of node `pos`.
+	/// Translate a `LeafIndex` to its _leaf_ `NodeIndex`.
+	pub fn leaf_index_to_leaf_node_index(leaf_index: NodeIndex) -> LeafIndex {
+		helper::leaf_index_to_pos(leaf_index)
+	}
+
+	/// Starting from any node position get position of rightmost leaf; this is the leaf
+	/// responsible for the addition of node `pos`.
 	fn rightmost_leaf_node_index_from_pos(pos: NodeIndex) -> NodeIndex {
 		pos - (helper::pos_height_in_tree(pos) as u64)
 	}

From d8c2944741b1f1b43fa37951fb311d363c753e4e Mon Sep 17 00:00:00 2001
From: Oliver Tale-Yazdi
Date: Wed, 14 Aug 2024 21:22:08 +0200
Subject: [PATCH 44/74] [CI] Fix prdoc command (#5358)

Changes:
- Run the prdoc command in a docker container since otherwise the set-up-gh script won't work.
- Take the try-runtime snapshot at night to avoid spamming the node with snapshot jobs during the day.

---------

Signed-off-by: Oliver Tale-Yazdi
---
 .github/workflows/check-runtime-migration.yml |  3 +++
 .github/workflows/command-prdoc.yml           | 12 ++++++++++++
 2 files changed, 15 insertions(+)

diff --git a/.github/workflows/check-runtime-migration.yml b/.github/workflows/check-runtime-migration.yml
index 2b963b2230fb..9b7a6fafcd15 100644
--- a/.github/workflows/check-runtime-migration.yml
+++ b/.github/workflows/check-runtime-migration.yml
@@ -6,6 +6,9 @@ on:
       - master
   pull_request:
     types: [opened, synchronize, reopened, ready_for_review]
+  # Take a snapshot at 5am when most SDK devs are not working.
+  schedule:
+    - cron: '0 5 * * *'
   merge_group:
   workflow_dispatch:

diff --git a/.github/workflows/command-prdoc.yml b/.github/workflows/command-prdoc.yml
index da8f14089cb8..3a08b9a5fb28 100644
--- a/.github/workflows/command-prdoc.yml
+++ b/.github/workflows/command-prdoc.yml
@@ -43,9 +43,21 @@ concurrency:
   cancel-in-progress: true

 jobs:
+  set-image:
+    runs-on: ubuntu-latest
+    outputs:
+      IMAGE: ${{ steps.set_image.outputs.IMAGE }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - id: set_image
+        run: cat .github/env >> $GITHUB_OUTPUT
   cmd-prdoc:
+    needs: [set-image]
     runs-on: ubuntu-latest
     timeout-minutes: 20
+    container:
+      image: ${{ needs.set-image.outputs.IMAGE }}
     permissions:
       contents: write
       pull-requests: write

From 5a9396f41e3cb7d94da3a9305512783229bc278b Mon Sep 17 00:00:00 2001
From: Nazar Mokrynskyi
Date: Wed, 14 Aug 2024 22:44:59 +0300
Subject: [PATCH 45/74] Unify `no_genesis` check (#5360)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The exact same `matches!()` expression was duplicated in the
`Configuration::no_genesis()` method and inline in the full node parts
creation. Since the logic and the reasoning behind it are identical, it makes
sense to de-duplicate them.

---------

Co-authored-by: Bastian Köcher
---
 prdoc/pr_5360.prdoc                     | 3 +++
 substrate/client/service/src/builder.rs | 5 +----
 2 files changed, 4 insertions(+), 4 deletions(-)
 create mode 100644 prdoc/pr_5360.prdoc

diff --git a/prdoc/pr_5360.prdoc b/prdoc/pr_5360.prdoc
new file mode 100644
index 000000000000..4b07f30bfd09
--- /dev/null
+++ b/prdoc/pr_5360.prdoc
@@ -0,0 +1,3 @@
+crates:
+  - name: sc-service
+    bump: none
diff --git a/substrate/client/service/src/builder.rs b/substrate/client/service/src/builder.rs
index d7fb7918481c..90bfd9ec27f7 100644
--- a/substrate/client/service/src/builder.rs
+++ b/substrate/client/service/src/builder.rs
@@ -248,10 +248,7 @@ where
 			offchain_worker_enabled: config.offchain_worker.enabled,
 			offchain_indexing_api: config.offchain_worker.indexing_enabled,
 			wasm_runtime_overrides: config.wasm_runtime_overrides.clone(),
-			no_genesis: matches!(
-				config.network.sync_mode,
-				SyncMode::LightState { .. } | SyncMode::Warp { .. }
-			),
+			no_genesis: config.no_genesis(),
 			wasm_runtime_substitutes,
 			enable_import_proof_recording,
 		},

From feacf2f3f9f39c1dbf0ecbee983c4488f948fd61 Mon Sep 17 00:00:00 2001
From: Ankan <10196091+Ank4n@users.noreply.github.com>
Date: Wed, 14 Aug 2024 21:48:13 +0200
Subject: [PATCH 46/74] [Pools] Fix issues with member migration to
 `DelegateStake` (#4822)

## Context

Pool members using the old `TransferStake` strategy were able to transfer all
their funds to the pool. With the `DelegateStake` changes, we want to keep
this behaviour by allowing members to delegate all their stake to the pool.

## Changes

- Ensure the whole balance of an account, including its ED, can be delegated
  (and used in the pool) by adding a provider for delegators.
- Gate calls that mutate a pool or a pool member while they are in an
  unmigrated state. Closes
  https://github.com/paritytech-secops/srlabs_findings/issues/409.
- Add a remote test that migrates all pools and members to `DelegateStake`,
  which can be used with `Kusama` and `Polkadot` runtime state. Closes
  https://github.com/paritytech/polkadot-sdk/issues/4629.
- Add new runtime APIs to read pool and member balances (a usage sketch
  follows below).
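The migration is two-step and permissionless: the pool's funds are moved to
`DelegateStake` first, then each member's share is converted into a delegation.
A minimal sketch of driving it from a runtime test, in the style of the
`test-delegate-stake` suite (the `new_test_ext` scaffolding and the
account/pool ids are illustrative, not part of this PR):

```rust
use frame_support::assert_ok;

#[test]
fn migrate_pool_then_member_to_delegate_stake() {
	new_test_ext().execute_with(|| {
		let (pool_id, member, anyone) = (1, 20, 999);

		// Step 1: migrate the pool itself. Any signed origin can do this.
		if NominationPools::api_pool_needs_delegate_migration(pool_id) {
			assert_ok!(NominationPools::migrate_pool_to_delegate_stake(
				RuntimeOrigin::signed(anyone),
				pool_id,
			));
		}

		// Step 2: migrate the member's funds into a delegation. This is the step
		// that can fail if the member is already a direct staker or their pool
		// contribution is below the ED (see the next section).
		if NominationPools::api_member_needs_delegate_migration(member) {
			assert_ok!(NominationPools::migrate_delegation(
				RuntimeOrigin::signed(anyone),
				member,
			));
		}

		// The new balance APIs can be used to sanity-check the result.
		assert!(NominationPools::api_member_total_balance(member) > 0);
		assert!(NominationPools::api_pool_balance(pool_id) > 0);
	});
}
```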
## Addressing possible migration errors
Pool members migrating can run into two types of errors:
- Already Staking: If the pool member is already staking directly, we
cannot migrate them to `DelegateStake`, since that could let them use the
same staked funds both directly and in the pool. These users need to
withdraw all their funds from staking before they can migrate their pool
funds.
- Pool contribution below ED: In these cases the transfer from the pool
account to the member account would fail. The affected users can top up
their accounts and redo the migration.

Another error that was possible earlier occurred when a member's free
balance was below the ED. This PR adds a provider to the delegator so the
whole balance, including the ED, can be contributed towards the pool. This
lets `1095` accounts in Polkadot and `41` accounts in Kusama migrate now,
where they would previously have failed.

## Results from RemoteExternalities Tests

### Kusama
`Migration stats: success: 3017, direct_stakers: 361, unexpected_errors: 0`

### Polkadot
`Migration stats: success: 42859, direct_stakers: 643, unexpected_errors: 0`

## TODO
- [x] Add runtime api for member total balance.
- [x] New [issue](https://github.com/paritytech/polkadot-sdk/issues/5009) to
reap pool members with contribution below ED.
- [x] Add provider for delegators so whole balance including ED can be held
while contributing to pools.
- [x] Gate all pool extrinsics if pool/member is in non-migrated state.

---------

Co-authored-by: Gonçalo Pestana
---
 polkadot/runtime/westend/src/lib.rs           |  49 +---
 polkadot/runtime/westend/src/tests.rs         | 137 +++++++++++
 prdoc/pr_4822.prdoc                           |  25 ++
 substrate/bin/node/runtime/src/lib.rs         |   8 +
 .../frame/delegated-staking/src/impls.rs      |   2 +-
 substrate/frame/delegated-staking/src/lib.rs  | 112 ++++-----
 .../frame/delegated-staking/src/tests.rs      | 186 +++++++++++++--
 .../frame/delegated-staking/src/types.rs      |  38 ++-
 .../nomination-pools/runtime-api/src/lib.rs   |   6 +
 substrate/frame/nomination-pools/src/lib.rs   | 112 ++++++++-
 .../test-delegate-stake/src/lib.rs            | 221 ++++++++++++++++--
 11 files changed, 733 insertions(+), 163 deletions(-)
 create mode 100644 prdoc/pr_4822.prdoc

diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs
index aa446b03368d..a9fbbb4f33da 100644
--- a/polkadot/runtime/westend/src/lib.rs
+++ b/polkadot/runtime/westend/src/lib.rs
@@ -99,7 +99,7 @@ use sp_runtime::{
 		IdentityLookup, Keccak256, OpaqueKeys, SaturatedConversion, Verify,
 	},
 	transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity},
-	ApplyExtrinsicResult, FixedU128, KeyTypeId, Perbill, Percent, Permill,
+	ApplyExtrinsicResult, FixedU128, KeyTypeId, Percent, Permill,
 };
 use sp_staking::SessionIndex;
 #[cfg(any(feature = "std", test))]
@@ -2476,6 +2476,14 @@ sp_api::impl_runtime_apis! {
 		fn member_needs_delegate_migration(member: AccountId) -> bool {
 			NominationPools::api_member_needs_delegate_migration(member)
 		}
+
+		fn member_total_balance(member: AccountId) -> Balance {
+			NominationPools::api_member_total_balance(member)
+		}
+
+		fn pool_balance(pool_id: pallet_nomination_pools::PoolId) -> Balance {
+			NominationPools::api_pool_balance(pool_id)
+		}
 	}
 
 	impl pallet_staking_runtime_api::StakingApi<Block, Balance, AccountId> for Runtime {
@@ -2776,45 +2784,6 @@ sp_api::impl_runtime_apis!
{ } } -#[cfg(all(test, feature = "try-runtime"))] -mod remote_tests { - use super::*; - use frame_try_runtime::{runtime_decl_for_try_runtime::TryRuntime, UpgradeCheckSelect}; - use remote_externalities::{ - Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, Transport, - }; - use std::env::var; - - #[tokio::test] - async fn run_migrations() { - if var("RUN_MIGRATION_TESTS").is_err() { - return; - } - - sp_tracing::try_init_simple(); - let transport: Transport = - var("WS").unwrap_or("wss://westend-rpc.polkadot.io:443".to_string()).into(); - let maybe_state_snapshot: Option = var("SNAP").map(|s| s.into()).ok(); - let mut ext = Builder::::default() - .mode(if let Some(state_snapshot) = maybe_state_snapshot { - Mode::OfflineOrElseOnline( - OfflineConfig { state_snapshot: state_snapshot.clone() }, - OnlineConfig { - transport, - state_snapshot: Some(state_snapshot), - ..Default::default() - }, - ) - } else { - Mode::Online(OnlineConfig { transport, ..Default::default() }) - }) - .build() - .await - .unwrap(); - ext.execute_with(|| Runtime::on_runtime_upgrade(UpgradeCheckSelect::PreAndPost)); - } -} - mod clean_state_migration { use super::Runtime; #[cfg(feature = "try-runtime")] diff --git a/polkadot/runtime/westend/src/tests.rs b/polkadot/runtime/westend/src/tests.rs index 4d5e2e946bce..dc8103ab52c4 100644 --- a/polkadot/runtime/westend/src/tests.rs +++ b/polkadot/runtime/westend/src/tests.rs @@ -99,3 +99,140 @@ fn check_treasury_pallet_id() { westend_runtime_constants::TREASURY_PALLET_ID ); } + +#[cfg(all(test, feature = "try-runtime"))] +mod remote_tests { + use super::*; + use frame_try_runtime::{runtime_decl_for_try_runtime::TryRuntime, UpgradeCheckSelect}; + use remote_externalities::{ + Builder, Mode, OfflineConfig, OnlineConfig, SnapshotConfig, Transport, + }; + use std::env::var; + + #[tokio::test] + async fn run_migrations() { + if var("RUN_MIGRATION_TESTS").is_err() { + return; + } + + sp_tracing::try_init_simple(); + let transport: Transport = + var("WS").unwrap_or("wss://westend-rpc.polkadot.io:443".to_string()).into(); + let maybe_state_snapshot: Option = var("SNAP").map(|s| s.into()).ok(); + let mut ext = Builder::::default() + .mode(if let Some(state_snapshot) = maybe_state_snapshot { + Mode::OfflineOrElseOnline( + OfflineConfig { state_snapshot: state_snapshot.clone() }, + OnlineConfig { + transport, + state_snapshot: Some(state_snapshot), + ..Default::default() + }, + ) + } else { + Mode::Online(OnlineConfig { transport, ..Default::default() }) + }) + .build() + .await + .unwrap(); + ext.execute_with(|| Runtime::on_runtime_upgrade(UpgradeCheckSelect::PreAndPost)); + } + + #[tokio::test] + async fn delegate_stake_migration() { + // Intended to be run only manually. 
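+		// (Assumed usage, inferred from the env vars read below: set
+		// `RUN_MIGRATION_TESTS`, point `WS` at a node endpoint, and optionally
+		// point `SNAP` at a cached snapshot.)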
+ if var("RUN_MIGRATION_TESTS").is_err() { + return; + } + use frame_support::assert_ok; + sp_tracing::try_init_simple(); + + let transport: Transport = var("WS").unwrap_or("ws://127.0.0.1:9900".to_string()).into(); + let maybe_state_snapshot: Option = var("SNAP").map(|s| s.into()).ok(); + let mut ext = Builder::::default() + .mode(if let Some(state_snapshot) = maybe_state_snapshot { + Mode::OfflineOrElseOnline( + OfflineConfig { state_snapshot: state_snapshot.clone() }, + OnlineConfig { + transport, + state_snapshot: Some(state_snapshot), + pallets: vec![ + "staking".into(), + "system".into(), + "balances".into(), + "nomination-pools".into(), + "delegated-staking".into(), + ], + ..Default::default() + }, + ) + } else { + Mode::Online(OnlineConfig { transport, ..Default::default() }) + }) + .build() + .await + .unwrap(); + ext.execute_with(|| { + // create an account with some balance + let alice = AccountId::from([1u8; 32]); + use frame_support::traits::Currency; + let _ = Balances::deposit_creating(&alice, 100_000 * UNITS); + + // iterate over all pools + pallet_nomination_pools::BondedPools::::iter_keys().for_each(|k| { + if pallet_nomination_pools::Pallet::::api_pool_needs_delegate_migration(k) + { + assert_ok!( + pallet_nomination_pools::Pallet::::migrate_pool_to_delegate_stake( + RuntimeOrigin::signed(alice.clone()).into(), + k, + ) + ); + } + }); + + // member migration stats + let mut success = 0; + let mut direct_stakers = 0; + let mut unexpected_errors = 0; + + // iterate over all pool members + pallet_nomination_pools::PoolMembers::::iter_keys().for_each(|k| { + if pallet_nomination_pools::Pallet::::api_member_needs_delegate_migration( + k.clone(), + ) { + // reasons migrations can fail: + let is_direct_staker = pallet_staking::Bonded::::contains_key(&k); + + let migration = pallet_nomination_pools::Pallet::::migrate_delegation( + RuntimeOrigin::signed(alice.clone()).into(), + sp_runtime::MultiAddress::Id(k.clone()), + ); + + if is_direct_staker { + // if the member is a direct staker, the migration should fail until pool + // member unstakes all funds from pallet-staking. + direct_stakers += 1; + assert_eq!( + migration.unwrap_err(), + pallet_delegated_staking::Error::::AlreadyStaking.into() + ); + } else if migration.is_err() { + unexpected_errors += 1; + log::error!(target: "remote_test", "Unexpected error {:?} while migrating {:?}", migration.unwrap_err(), k); + } else { + success += 1; + } + } + }); + + log::info!( + target: "remote_test", + "Migration stats: success: {}, direct_stakers: {}, unexpected_errors: {}", + success, + direct_stakers, + unexpected_errors + ); + }); + } +} diff --git a/prdoc/pr_4822.prdoc b/prdoc/pr_4822.prdoc new file mode 100644 index 000000000000..44f3e41d8d5a --- /dev/null +++ b/prdoc/pr_4822.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Ensure as many as possible pool members can migrate to `DelegateStake` + +doc: + - audience: Runtime Dev + description: | + 1. Allows pool members to use their total balance while joining pool with `DelegateStake`. + 2. Gates call mutating pool or member in unmigrated state. + 3. Runtime apis for reading pool and member balance. 
+ +crates: + - name: westend-runtime + bump: minor + - name: kitchensink-runtime + bump: patch + - name: pallet-delegated-staking + bump: patch + - name: pallet-nomination-pools + bump: minor + - name: sp-staking + bump: patch + - name: pallet-nomination-pools-runtime-api + bump: minor diff --git a/substrate/bin/node/runtime/src/lib.rs b/substrate/bin/node/runtime/src/lib.rs index ff3f7e26baf2..ea9a66862d64 100644 --- a/substrate/bin/node/runtime/src/lib.rs +++ b/substrate/bin/node/runtime/src/lib.rs @@ -2784,6 +2784,14 @@ impl_runtime_apis! { fn member_needs_delegate_migration(member: AccountId) -> bool { NominationPools::api_member_needs_delegate_migration(member) } + + fn member_total_balance(member: AccountId) -> Balance { + NominationPools::api_member_total_balance(member) + } + + fn pool_balance(pool_id: pallet_nomination_pools::PoolId) -> Balance { + NominationPools::api_pool_balance(pool_id) + } } impl pallet_staking_runtime_api::StakingApi for Runtime { diff --git a/substrate/frame/delegated-staking/src/impls.rs b/substrate/frame/delegated-staking/src/impls.rs index 8c05b0bcfc22..4e6812dee249 100644 --- a/substrate/frame/delegated-staking/src/impls.rs +++ b/substrate/frame/delegated-staking/src/impls.rs @@ -19,7 +19,7 @@ //! Implementations of public traits, namely [`DelegationInterface`] and [`OnStakingUpdate`]. use super::*; -use sp_staking::{Agent, DelegationInterface, DelegationMigrator, Delegator, OnStakingUpdate}; +use sp_staking::{DelegationInterface, DelegationMigrator, OnStakingUpdate}; impl DelegationInterface for Pallet { type Balance = BalanceOf; diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 1882989cfa7d..08ce747ec0c0 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -148,7 +148,7 @@ use frame_support::{ }, Balanced, Inspect as FunInspect, Mutate as FunMutate, }, - tokens::{fungible::Credit, Fortitude, Precision, Preservation}, + tokens::{fungible::Credit, Fortitude, Precision, Preservation, Restriction}, Defensive, DefensiveOption, Imbalance, OnUnbalanced, }, }; @@ -319,9 +319,7 @@ pub mod pallet { Error::::NotAllowed ); - // remove provider reference - let _ = frame_system::Pallet::::dec_providers(&who)?; - >::remove(who); + AgentLedger::::remove(&who); Ok(()) } @@ -392,9 +390,6 @@ pub mod pallet { ) -> DispatchResult { let agent = ensure_signed(origin)?; - // Ensure they have minimum delegation. - ensure!(amount >= T::Currency::minimum_balance(), Error::::NotEnoughFunds); - // Ensure delegator is sane. ensure!(!Self::is_agent(&delegator), Error::::NotAllowed); ensure!(!Self::is_delegator(&delegator), Error::::NotAllowed); @@ -493,13 +488,8 @@ impl Pallet { /// Registers a new agent in the system. fn do_register_agent(who: &T::AccountId, reward_account: &T::AccountId) { + // TODO: Consider taking a deposit for being an agent. AgentLedger::::new(reward_account).update(who); - - // Agent does not hold balance of its own but this pallet will provide for this to exist. - // This is expected to be a keyless account and not created by any user directly so safe. - // TODO: Someday if we allow anyone to be an agent, we should take a deposit for - // being a delegator. - frame_system::Pallet::::inc_providers(who); } /// Migrate existing staker account `who` to an `Agent` account. @@ -510,9 +500,6 @@ impl Pallet { // transferred to actual delegator. 
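 		// (The proxy delegator is a pallet-derived, keyless account: it temporarily
 		// holds the migrated funds until each member claims their own delegation.)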
let proxy_delegator = Self::generate_proxy_delegator(Agent::from(who.clone())); - // Keep proxy delegator alive until all funds are migrated. - frame_system::Pallet::::inc_providers(&proxy_delegator.clone().get()); - // Get current stake let stake = T::CoreStaking::stake(who)?; @@ -534,7 +521,6 @@ impl Pallet { T::CoreStaking::set_payee(who, reward_account)?; // delegate all transferred funds back to agent. Self::do_delegate(proxy_delegator, Agent::from(who.clone()), amount_to_transfer)?; - // if the transferred/delegated amount was greater than the stake, mark the extra as // unclaimed withdrawal. let unclaimed_withdraws = amount_to_transfer @@ -578,21 +564,23 @@ impl Pallet { let delegator = delegator.get(); let mut ledger = AgentLedger::::get(&agent).ok_or(Error::::NotAgent)?; + + if let Some(mut existing_delegation) = Delegation::::get(&delegator) { + ensure!(existing_delegation.agent == agent, Error::::InvalidDelegation); + // update amount and return the updated delegation. + existing_delegation.amount = existing_delegation + .amount + .checked_add(&amount) + .ok_or(ArithmeticError::Overflow)?; + existing_delegation + } else { + Delegation::::new(&agent, amount) + } + .update(&delegator); + // try to hold the funds. T::Currency::hold(&HoldReason::StakingDelegation.into(), &delegator, amount)?; - let new_delegation_amount = - if let Some(existing_delegation) = Delegation::::get(&delegator) { - ensure!(existing_delegation.agent == agent, Error::::InvalidDelegation); - existing_delegation - .amount - .checked_add(&amount) - .ok_or(ArithmeticError::Overflow)? - } else { - amount - }; - - Delegation::::new(&agent, new_delegation_amount).update_or_kill(&delegator); ledger.total_delegated = ledger.total_delegated.checked_add(&amount).ok_or(ArithmeticError::Overflow)?; ledger.update(&agent); @@ -638,9 +626,6 @@ impl Pallet { .checked_sub(&amount) .defensive_ok_or(ArithmeticError::Overflow)?; - // remove delegator if nothing delegated anymore - delegation.update_or_kill(&delegator); - let released = T::Currency::release( &HoldReason::StakingDelegation.into(), &delegator, @@ -650,6 +635,9 @@ impl Pallet { defensive_assert!(released == amount, "hold should have been released fully"); + // update delegation. + delegation.update(&delegator); + Self::deposit_event(Event::::Released { agent, delegator, amount }); Ok(()) @@ -668,49 +656,34 @@ impl Pallet { let mut source_delegation = Delegators::::get(&source_delegator).defensive_ok_or(Error::::BadState)?; - // some checks that must have already been checked before. + // ensure source has enough funds to migrate. ensure!(source_delegation.amount >= amount, Error::::NotEnoughFunds); debug_assert!( !Self::is_delegator(&destination_delegator) && !Self::is_agent(&destination_delegator) ); let agent = source_delegation.agent.clone(); - // update delegations - Delegation::::new(&agent, amount).update_or_kill(&destination_delegator); + // create a new delegation for destination delegator. + Delegation::::new(&agent, amount).update(&destination_delegator); source_delegation.amount = source_delegation .amount .checked_sub(&amount) .defensive_ok_or(Error::::BadState)?; - source_delegation.update_or_kill(&source_delegator); - - // release funds from source - let released = T::Currency::release( + // transfer the held amount in `source_delegator` to `destination_delegator`. 
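+		// (`Restriction::OnHold` keeps the amount on hold at the destination during
+		// the transfer, so the funds are never released and re-held in between.)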
+ let _ = T::Currency::transfer_on_hold( &HoldReason::StakingDelegation.into(), - &source_delegator, - amount, - Precision::BestEffort, - )?; - - defensive_assert!(released == amount, "hold should have been released fully"); - - // transfer the released amount to `destination_delegator`. - let post_balance = T::Currency::transfer( &source_delegator, &destination_delegator, amount, - Preservation::Expendable, - ) - .map_err(|_| Error::::BadState)?; - - // if balance is zero, clear provider for source (proxy) delegator. - if post_balance == Zero::zero() { - let _ = frame_system::Pallet::::dec_providers(&source_delegator).defensive(); - } + Precision::Exact, + Restriction::OnHold, + Fortitude::Polite, + )?; - // hold the funds again in the new delegator account. - T::Currency::hold(&HoldReason::StakingDelegation.into(), &destination_delegator, amount)?; + // update source delegation. + source_delegation.update(&source_delegator); Self::deposit_event(Event::::MigratedDelegation { agent, @@ -752,7 +725,7 @@ impl Pallet { agent_ledger.remove_slash(actual_slash).save(); delegation.amount = delegation.amount.checked_sub(&actual_slash).ok_or(ArithmeticError::Overflow)?; - delegation.update_or_kill(&delegator); + delegation.update(&delegator); if let Some(reporter) = maybe_reporter { let reward_payout: BalanceOf = T::SlashRewardFraction::get() * actual_slash; @@ -801,18 +774,21 @@ impl Pallet { ledgers: BTreeMap>, ) -> Result<(), sp_runtime::TryRuntimeError> { for (agent, ledger) in ledgers { - ensure!( - matches!( - T::CoreStaking::status(&agent).expect("agent should be bonded"), - sp_staking::StakerStatus::Nominator(_) | sp_staking::StakerStatus::Idle - ), - "agent should be bonded and not validator" - ); + let staked_value = ledger.stakeable_balance(); + + if !staked_value.is_zero() { + ensure!( + matches!( + T::CoreStaking::status(&agent).expect("agent should be bonded"), + sp_staking::StakerStatus::Nominator(_) | sp_staking::StakerStatus::Idle + ), + "agent should be bonded and not validator" + ); + } ensure!( ledger.stakeable_balance() >= - T::CoreStaking::total_stake(&agent) - .expect("agent should exist as a nominator"), + T::CoreStaking::total_stake(&agent).unwrap_or_default(), "Cannot stake more than balance" ); } diff --git a/substrate/frame/delegated-staking/src/tests.rs b/substrate/frame/delegated-staking/src/tests.rs index bc8bc2dccc3c..2c965e18b1b3 100644 --- a/substrate/frame/delegated-staking/src/tests.rs +++ b/substrate/frame/delegated-staking/src/tests.rs @@ -334,6 +334,36 @@ fn apply_pending_slash() { }); } +#[test] +fn allow_full_amount_to_be_delegated() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegator: AccountId = 300; + + // set intention to accept delegation. + fund(&agent, 1000); + assert_ok!(DelegatedStaking::register_agent(RawOrigin::Signed(agent).into(), reward_acc)); + + // delegate to this account + fund(&delegator, 1000); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 1000 + )); + + // verify + assert!(DelegatedStaking::is_agent(&agent)); + assert_eq!(DelegatedStaking::stakeable_balance(Agent::from(agent)), 1000); + assert_eq!( + Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), + 1000 + ); + assert_eq!(DelegatedStaking::held_balance_of(Delegator::from(delegator)), 1000); + }); +} + /// Integration tests with pallet-staking. 
mod staking_integration { use super::*; @@ -625,32 +655,42 @@ mod staking_integration { #[test] fn migration_works() { ExtBuilder::default().build_and_execute(|| { + // initial era + start_era(1); + // add a nominator let staked_amount = 4000; let agent_amount = 5000; - fund(&200, agent_amount); + let agent = 200; + fund(&agent, agent_amount); assert_ok!(Staking::bond( - RuntimeOrigin::signed(200), + RuntimeOrigin::signed(agent), staked_amount, RewardDestination::Account(201) )); - assert_ok!(Staking::nominate(RuntimeOrigin::signed(200), vec![GENESIS_VALIDATOR],)); - let init_stake = Staking::stake(&200).unwrap(); + assert_ok!(Staking::nominate(RuntimeOrigin::signed(agent), vec![GENESIS_VALIDATOR],)); + let init_stake = Staking::stake(&agent).unwrap(); // scenario: 200 is a pool account, and the stake comes from its 4 delegators (300..304) // in equal parts. lets try to migrate this nominator into delegate based stake. // all balance currently is in 200 - assert_eq!(Balances::free_balance(200), agent_amount); + assert_eq!(Balances::free_balance(agent), agent_amount); // to migrate, nominator needs to set an account as a proxy delegator where staked funds // will be moved and delegated back to this old nominator account. This should be funded // with at least ED. let proxy_delegator = - DelegatedStaking::generate_proxy_delegator(Agent::from(200)).get(); + DelegatedStaking::generate_proxy_delegator(Agent::from(agent)).get(); - assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(200).into(), 201)); + assert_ok!(DelegatedStaking::migrate_to_agent(RawOrigin::Signed(agent).into(), 201)); + // after migration, funds are moved to proxy delegator, still a provider exists. + assert_eq!(System::providers(&agent), 1); + assert_eq!(Balances::free_balance(agent), 0); + // proxy delegator has one provider as well with no free balance. + assert_eq!(System::providers(&proxy_delegator), 1); + assert_eq!(Balances::free_balance(proxy_delegator), 0); // verify all went well let mut expected_proxy_delegated_amount = agent_amount; @@ -659,12 +699,12 @@ mod staking_integration { expected_proxy_delegated_amount ); // stake amount is transferred from delegate to proxy delegator account. - assert_eq!(Balances::free_balance(200), 0); - assert_eq!(Staking::stake(&200).unwrap(), init_stake); - assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); - assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!(Balances::free_balance(agent), 0); + assert_eq!(Staking::stake(&agent).unwrap(), init_stake); + assert_eq!(get_agent_ledger(&agent).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&agent).available_to_bond(), 0); assert_eq!( - get_agent_ledger(&200).ledger.unclaimed_withdrawals, + get_agent_ledger(&agent).ledger.unclaimed_withdrawals, agent_amount - staked_amount ); @@ -672,14 +712,15 @@ mod staking_integration { let delegator_share = agent_amount / 4; for delegator in 300..304 { assert_eq!(Balances::free_balance(delegator), 0); - // fund them with ED - fund(&delegator, ExistentialDeposit::get()); - // migrate 1/4th amount into each delegator + assert_eq!(System::providers(&delegator), 0); + + // No pre-balance needed to migrate delegator. 
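+			// (Before this change each delegator had to be funded with the ED first;
+			// the provider reference added for delegators removes that requirement.)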
assert_ok!(DelegatedStaking::migrate_delegation( - RawOrigin::Signed(200).into(), + RawOrigin::Signed(agent).into(), delegator, delegator_share )); + assert_eq!(System::providers(&delegator), 1); assert_eq!( Balances::balance_on_hold(&HoldReason::StakingDelegation.into(), &delegator), delegator_share @@ -694,20 +735,123 @@ mod staking_integration { ); // delegate stake is unchanged. - assert_eq!(Staking::stake(&200).unwrap(), init_stake); - assert_eq!(get_agent_ledger(&200).ledger.effective_balance(), agent_amount); - assert_eq!(get_agent_ledger(&200).available_to_bond(), 0); + assert_eq!(Staking::stake(&agent).unwrap(), init_stake); + assert_eq!(get_agent_ledger(&agent).ledger.effective_balance(), agent_amount); + assert_eq!(get_agent_ledger(&agent).available_to_bond(), 0); assert_eq!( - get_agent_ledger(&200).ledger.unclaimed_withdrawals, + get_agent_ledger(&agent).ledger.unclaimed_withdrawals, agent_amount - staked_amount ); } // cannot use migrate delegator anymore assert_noop!( - DelegatedStaking::migrate_delegation(RawOrigin::Signed(200).into(), 305, 1), + DelegatedStaking::migrate_delegation(RawOrigin::Signed(agent).into(), 305, 1), Error::::NotEnoughFunds ); + + // no provider left on proxy delegator since all funds are migrated + assert_eq!(System::providers(&proxy_delegator), 0); + + // withdraw all delegations from delegators + assert_ok!(Staking::chill(RuntimeOrigin::signed(agent))); + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), staked_amount)); + start_era(4); + assert_ok!(Staking::withdraw_unbonded(RawOrigin::Signed(agent).into(), 0)); + for delegator in 300..304 { + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + delegator, + delegator_share, + 0 + )); + // delegator is cleaned up from storage. + assert!(!Delegators::::contains_key(delegator)); + // has free balance now + assert_eq!(Balances::free_balance(delegator), delegator_share); + // and only one provider as delegator_share > ED + assert_eq!(System::providers(&delegator), 1); + } + + // Agent can be removed now. + assert_ok!(DelegatedStaking::remove_agent(RawOrigin::Signed(agent).into())); + // agent is correctly removed. + assert!(!Agents::::contains_key(agent)); + // and no provider left. + assert_eq!(System::providers(&agent), 0); + }); + } + + #[test] + fn accounts_are_cleaned_up() { + ExtBuilder::default().build_and_execute(|| { + let agent: AccountId = 200; + let reward_acc: AccountId = 201; + let delegator: AccountId = 300; + + // set intention to accept delegation. + fund(&agent, 1000); + + // Agent is provided since it has balance > ED. + assert_eq!(System::providers(&agent), 1); + assert_ok!(DelegatedStaking::register_agent( + RawOrigin::Signed(agent).into(), + reward_acc + )); + // becoming an agent adds another provider. + assert_eq!(System::providers(&agent), 2); + + // delegate to this account + fund(&delegator, 1000); + // account has one provider since its funded. + assert_eq!(System::providers(&delegator), 1); + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 500 + )); + // delegator has an extra provider now. + assert_eq!(System::providers(&delegator), 2); + // all 1000 tokens including ED can be held. + assert_ok!(DelegatedStaking::delegate_to_agent( + RawOrigin::Signed(delegator).into(), + agent, + 500 + )); + // free balance dropping below ED will reduce a provider, but it still has one provider + // left. 
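+			// (The provider that remains is the delegation reference added by the
+			// pallet, which keeps the account alive while its balance is on hold.)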
+ assert_eq!(System::providers(&delegator), 1); + + // withdraw all delegation + assert_ok!(Staking::unbond(RawOrigin::Signed(agent).into(), 1000)); + start_era(4); + assert_ok!(Staking::withdraw_unbonded(RawOrigin::Signed(agent).into(), 0)); + + // Since delegations are still left, agents cannot be removed yet from storage. + assert_noop!( + DelegatedStaking::remove_agent(RawOrigin::Signed(agent).into()), + Error::::NotAllowed + ); + + assert_ok!(DelegatedStaking::release_delegation( + RawOrigin::Signed(agent).into(), + delegator, + 1000, + 0 + )); + + // now agents can be removed. + assert_ok!(DelegatedStaking::remove_agent(RawOrigin::Signed(agent).into())); + + // agent and delegator provider is decremented. + assert_eq!(System::providers(&delegator), 1); + assert_eq!(System::providers(&agent), 1); + + // if we transfer all funds, providers are removed. + assert_ok!(Balances::transfer_all(RawOrigin::Signed(delegator).into(), 1337, false)); + assert_ok!(Balances::transfer_all(RawOrigin::Signed(agent).into(), 1337, false)); + assert_eq!(System::providers(&delegator), 0); + assert_eq!(System::providers(&agent), 0); }); } } diff --git a/substrate/frame/delegated-staking/src/types.rs b/substrate/frame/delegated-staking/src/types.rs index aff282774c3c..a78aa3f55906 100644 --- a/substrate/frame/delegated-staking/src/types.rs +++ b/substrate/frame/delegated-staking/src/types.rs @@ -64,15 +64,25 @@ impl Delegation { ) } - /// Save self to storage. If the delegation amount is zero, remove the delegation. - pub(crate) fn update_or_kill(self, key: &T::AccountId) { - // Clean up if no delegation left. - if self.amount == Zero::zero() { - >::remove(key); - return + /// Save self to storage. + /// + /// If the delegation amount is zero, remove the delegation. Also adds and removes provider + /// reference as needed. + pub(crate) fn update(self, key: &T::AccountId) { + if >::contains_key(key) { + // Clean up if no delegation left. + if self.amount == Zero::zero() { + >::remove(key); + // Remove provider if no delegation left. + let _ = frame_system::Pallet::::dec_providers(key).defensive(); + return + } + } else { + // this is a new delegation. Provide for this account. + frame_system::Pallet::::inc_providers(key); } - >::insert(key, self) + >::insert(key, self); } } @@ -118,10 +128,24 @@ impl AgentLedger { } /// Save self to storage with the given key. + /// + /// Increments provider count if this is a new agent. pub(crate) fn update(self, key: &T::AccountId) { + if !>::contains_key(key) { + // This is a new agent. Provide for this account. + frame_system::Pallet::::inc_providers(key); + } >::insert(key, self) } + /// Remove self from storage. + pub(crate) fn remove(key: &T::AccountId) { + debug_assert!(>::contains_key(key), "Agent should exist in storage"); + >::remove(key); + // Remove provider reference. + let _ = frame_system::Pallet::::dec_providers(key).defensive(); + } + /// Effective total balance of the `Agent`. /// /// This takes into account any slashes reported to `Agent` but unapplied. diff --git a/substrate/frame/nomination-pools/runtime-api/src/lib.rs b/substrate/frame/nomination-pools/runtime-api/src/lib.rs index 67627e0acb13..d81ad1dd4954 100644 --- a/substrate/frame/nomination-pools/runtime-api/src/lib.rs +++ b/substrate/frame/nomination-pools/runtime-api/src/lib.rs @@ -63,5 +63,11 @@ sp_api::decl_runtime_apis! { /// [`migrate_delegation`](pallet_nomination_pools::Call::migrate_delegation) /// to migrate the funds of the pool member. 
fn member_needs_delegate_migration(member: AccountId) -> bool; + + /// Returns the total contribution of a pool member including any balance that is unbonding. + fn member_total_balance(who: AccountId) -> Balance; + + /// Total balance contributed to the pool. + fn pool_balance(pool_id: PoolId) -> Balance; } } diff --git a/substrate/frame/nomination-pools/src/lib.rs b/substrate/frame/nomination-pools/src/lib.rs index 9108060ccb2a..44e3463dc9f2 100644 --- a/substrate/frame/nomination-pools/src/lib.rs +++ b/substrate/frame/nomination-pools/src/lib.rs @@ -2010,6 +2010,8 @@ pub mod pallet { pool_id: PoolId, ) -> DispatchResult { let who = ensure_signed(origin)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); ensure!(amount >= MinJoinBond::::get(), Error::::MinimumBondNotMet); // If a member already exists that means they already belong to a pool @@ -2072,6 +2074,13 @@ pub mod pallet { )] pub fn bond_extra(origin: OriginFor, extra: BondExtra>) -> DispatchResult { let who = ensure_signed(origin)?; + + // ensure who is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(who.clone()), + Error::::NotMigrated + ); + Self::do_bond_extra(who.clone(), who, extra) } @@ -2087,6 +2096,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout(origin: OriginFor) -> DispatchResult { let signer = ensure_signed(origin)?; + // ensure signer is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(signer.clone()), + Error::::NotMigrated + ); + Self::do_claim_payout(signer.clone(), signer) } @@ -2130,6 +2145,12 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; + // ensure member is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(member_account.clone()), + Error::::NotMigrated + ); + let (mut member, mut bonded_pool, mut reward_pool) = Self::get_member_with_pools(&member_account)?; @@ -2213,6 +2234,9 @@ pub mod pallet { num_slashing_spans: u32, ) -> DispatchResult { let _ = ensure_signed(origin)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + let pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; // For now we only allow a pool to withdraw unbonded if its not destroying. If the pool @@ -2259,6 +2283,12 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let caller = ensure_signed(origin)?; let member_account = T::Lookup::lookup(member_account)?; + // ensure member is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(member_account.clone()), + Error::::NotMigrated + ); + let mut member = PoolMembers::::get(&member_account).ok_or(Error::::PoolMemberNotFound)?; let current_era = T::StakeAdapter::current_era(); @@ -2493,6 +2523,8 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. 
+ ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); ensure!(bonded_pool.can_nominate(&who), Error::::NotNominator); let depositor_points = PoolMembers::::get(&bonded_pool.roles.depositor) @@ -2527,6 +2559,8 @@ pub mod pallet { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; ensure!(bonded_pool.state != PoolState::Destroying, Error::::CanNotChangeState); + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); if bonded_pool.can_toggle_state(&who) { bonded_pool.set_state(state); @@ -2562,6 +2596,8 @@ pub mod pallet { .can_set_metadata(&who), Error::::DoesNotHavePermission ); + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); Metadata::::mutate(pool_id, |pool_meta| *pool_meta = metadata); @@ -2638,6 +2674,9 @@ pub mod pallet { }, }; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + match new_root { ConfigOp::Noop => (), ConfigOp::Remove => bonded_pool.roles.root = None, @@ -2684,8 +2723,9 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::chill())] pub fn chill(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; - let bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); let depositor_points = PoolMembers::::get(&bonded_pool.roles.depositor) .ok_or(Error::::PoolMemberNotFound)? @@ -2720,7 +2760,14 @@ pub mod pallet { extra: BondExtra>, ) -> DispatchResult { let who = ensure_signed(origin)?; - Self::do_bond_extra(who, T::Lookup::lookup(member)?, extra) + let member_account = T::Lookup::lookup(member)?; + // ensure member is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(member_account.clone()), + Error::::NotMigrated + ); + + Self::do_bond_extra(who, member_account, extra) } /// Allows a pool member to set a claim permission to allow or disallow permissionless @@ -2737,9 +2784,14 @@ pub mod pallet { permission: ClaimPermission, ) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!(PoolMembers::::contains_key(&who), Error::::PoolMemberNotFound); + // ensure member is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(who.clone()), + Error::::NotMigrated + ); + ClaimPermissions::::mutate(who, |source| { *source = permission; }); @@ -2755,6 +2807,12 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::claim_payout())] pub fn claim_payout_other(origin: OriginFor, other: T::AccountId) -> DispatchResult { let signer = ensure_signed(origin)?; + // ensure member is not in an un-migrated state. + ensure!( + !Self::api_member_needs_delegate_migration(other.clone()), + Error::::NotMigrated + ); + Self::do_claim_payout(signer, other) } @@ -2773,6 +2831,9 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. 
+ ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); let mut reward_pool = RewardPools::::get(pool_id) @@ -2809,6 +2870,9 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); bonded_pool.commission.try_update_max(pool_id, max_commission)?; @@ -2831,6 +2895,8 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); bonded_pool.commission.try_update_change_rate(change_rate)?; @@ -2852,6 +2918,9 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::claim_commission())] pub fn claim_commission(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + Self::do_claim_commission(who, pool_id) } @@ -2866,6 +2935,9 @@ pub mod pallet { #[pallet::weight(T::WeightInfo::adjust_pool_deposit())] pub fn adjust_pool_deposit(origin: OriginFor, pool_id: PoolId) -> DispatchResult { let who = ensure_signed(origin)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); + Self::do_adjust_pool_deposit(who, pool_id) } @@ -2882,6 +2954,8 @@ pub mod pallet { ) -> DispatchResult { let who = ensure_signed(origin)?; let mut bonded_pool = BondedPool::::get(pool_id).ok_or(Error::::PoolNotFound)?; + // ensure pool is not in an un-migrated state. + ensure!(!Self::api_pool_needs_delegate_migration(pool_id), Error::::NotMigrated); ensure!(bonded_pool.can_manage_commission(&who), Error::::DoesNotHavePermission); bonded_pool.commission.claim_permission = permission.clone(); @@ -2956,9 +3030,12 @@ pub mod pallet { ); let pool_contribution = member.total_balance(); - ensure!(pool_contribution >= MinJoinBond::::get(), Error::::MinimumBondNotMet); - // the member must have some contribution to be migrated. - ensure!(pool_contribution > Zero::zero(), Error::::AlreadyMigrated); + // ensure the pool contribution is greater than the existential deposit otherwise we + // cannot transfer funds to member account. + ensure!( + pool_contribution >= T::Currency::minimum_balance(), + Error::::MinimumBondNotMet + ); let delegation = T::StakeAdapter::member_delegation_balance(Member::from(member_account.clone())); @@ -3459,6 +3536,7 @@ impl Pallet { fn do_adjust_pool_deposit(who: T::AccountId, pool: PoolId) -> DispatchResult { let bonded_pool = BondedPool::::get(pool).ok_or(Error::::PoolNotFound)?; + let reward_acc = &bonded_pool.reward_account(); let pre_frozen_balance = T::Currency::balance_frozen(&FreezeReason::PoolMinBalance.into(), reward_acc); @@ -3880,7 +3958,13 @@ impl Pallet { return false } + // if pool does not exist, return false. 
+ if !BondedPools::::contains_key(pool_id) { + return false + } + let pool_account = Self::generate_bonded_account(pool_id); + // true if pool is still not migrated to `DelegateStake`. T::StakeAdapter::pool_strategy(Pool::from(pool_account)) != adapter::StakeStrategyType::Delegate @@ -3914,6 +3998,22 @@ impl Pallet { }) .unwrap_or_default() } + + /// Contribution of the member in the pool. + /// + /// Includes balance that is unbonded from staking but not claimed yet from the pool, therefore + /// this balance can be higher than the staked funds. + pub fn api_member_total_balance(who: T::AccountId) -> BalanceOf { + PoolMembers::::get(who.clone()) + .map(|m| m.total_balance()) + .unwrap_or_default() + } + + /// Total balance contributed to the pool. + pub fn api_pool_balance(pool_id: PoolId) -> BalanceOf { + T::StakeAdapter::total_balance(Pool::from(Self::generate_bonded_account(pool_id))) + .unwrap_or_default() + } } impl sp_staking::OnStakingUpdate> for Pallet { diff --git a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs index a50a6b73f5fc..7fee2a0bdb23 100644 --- a/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs +++ b/substrate/frame/nomination-pools/test-delegate-stake/src/lib.rs @@ -25,16 +25,16 @@ use frame_support::{ }; use mock::*; use pallet_nomination_pools::{ - BondExtra, BondedPools, Error as PoolsError, Event as PoolsEvent, LastPoolId, PoolMember, - PoolMembers, PoolState, + BondExtra, BondedPools, CommissionChangeRate, ConfigOp, Error as PoolsError, + Event as PoolsEvent, LastPoolId, PoolMember, PoolMembers, PoolState, }; use pallet_staking::{ CurrentEra, Error as StakingError, Event as StakingEvent, Payee, RewardDestination, }; -use pallet_delegated_staking::{Error as DelegatedStakingError, Event as DelegatedStakingEvent}; +use pallet_delegated_staking::Event as DelegatedStakingEvent; -use sp_runtime::{bounded_btree_map, traits::Zero}; +use sp_runtime::{bounded_btree_map, traits::Zero, Perbill}; use sp_staking::Agent; #[test] @@ -999,7 +999,6 @@ fn pool_migration_e2e() { LegacyAdapter::set(false); // cannot migrate the member delegation unless pool is migrated first. - assert!(!Pools::api_member_needs_delegate_migration(20)); assert_noop!( Pools::migrate_delegation(RuntimeOrigin::signed(10), 20), PoolsError::::NotMigrated @@ -1031,13 +1030,17 @@ fn pool_migration_e2e() { // move to era 5 when 20 can withdraw unbonded funds. CurrentEra::::set(Some(5)); - // Unbond works even without claiming delegation. Lets unbond 22. - assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, 5)); + + // Cannot unbond without claiming delegation. Lets unbond 22. 
+ assert_noop!( + Pools::unbond(RuntimeOrigin::signed(22), 22, 5), + PoolsError::::NotMigrated + ); // withdraw fails for 20 before claiming delegation assert_noop!( Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 10), - DelegatedStakingError::::NotDelegator + PoolsError::::NotMigrated ); let pre_claim_balance_20 = Balances::total_balance(&20); @@ -1060,17 +1063,11 @@ fn pool_migration_e2e() { assert_eq!( staking_events_since_last_call(), - vec![ - StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }, - StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 } - ] + vec![StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 }] ); assert_eq!( pool_events_since_last_call(), - vec![ - PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: 5, points: 5, era: 8 }, - PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 5, points: 5 }, - ] + vec![PoolsEvent::Withdrawn { member: 20, pool_id: 1, balance: 5, points: 5 },] ); assert_eq!( delegated_staking_events_since_last_call(), @@ -1113,8 +1110,9 @@ fn pool_migration_e2e() { assert_eq!(Balances::total_balance(&21), pre_migrate_balance_21 + 10); // MIGRATE 22 - let pre_migrate_balance_22 = Balances::total_balance(&22); assert_eq!(Balances::total_balance_on_hold(&22), 0); + // make balance of 22 as 0. + let _ = Balances::make_free_balance_be(&22, 0); // migrate delegation for 22. assert!(Pools::api_member_needs_delegate_migration(22)); @@ -1128,17 +1126,20 @@ fn pool_migration_e2e() { ); // tokens moved to 22's account and held there. - assert_eq!(Balances::total_balance(&22), pre_migrate_balance_22 + 10); + assert_eq!(Balances::total_balance(&22), 10); assert_eq!(Balances::total_balance_on_hold(&22), 10); - // withdraw fails since 22 only unbonds at era 8. + // unbond 22 should work now + assert_ok!(Pools::unbond(RuntimeOrigin::signed(22), 22, 5)); + + // withdraw fails since 22 only unbonds after era 9. assert_noop!( Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 5), PoolsError::::CannotWithdrawAny ); // go to era when 22 can unbond - CurrentEra::::set(Some(10)); + CurrentEra::::set(Some(9)); // withdraw works now assert_ok!(Pools::withdraw_unbonded(RuntimeOrigin::signed(22), 22, 10)); @@ -1151,6 +1152,7 @@ fn pool_migration_e2e() { staking_events_since_last_call(), vec![ StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 10 }, + StakingEvent::Unbonded { stash: POOL1_BONDED, amount: 5 }, StakingEvent::Withdrawn { stash: POOL1_BONDED, amount: 5 } ] ); @@ -1161,6 +1163,7 @@ fn pool_migration_e2e() { PoolsEvent::Withdrawn { member: 21, pool_id: 1, balance: 10, points: 10 }, // 21 was fully unbonding and removed from pool. PoolsEvent::MemberRemoved { member: 21, pool_id: 1, released_balance: 0 }, + PoolsEvent::Unbonded { member: 22, pool_id: 1, balance: 5, points: 5, era: 9 }, PoolsEvent::Withdrawn { member: 22, pool_id: 1, balance: 5, points: 5 }, ] ); @@ -1184,6 +1187,183 @@ fn pool_migration_e2e() { }) } +#[test] +fn disable_pool_operations_on_non_migrated() { + new_test_ext().execute_with(|| { + LegacyAdapter::set(true); + assert_eq!(Balances::minimum_balance(), 5); + assert_eq!(Staking::current_era(), None); + + // create the pool with TransferStake strategy. + assert_ok!(Pools::create(RuntimeOrigin::signed(10), 50, 10, 10, 10)); + assert_eq!(LastPoolId::::get(), 1); + + // have the pool nominate. 
+ assert_ok!(Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3])); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 50 }] + ); + assert_eq!( + pool_events_since_last_call(), + vec![ + PoolsEvent::Created { depositor: 10, pool_id: 1 }, + PoolsEvent::Bonded { member: 10, pool_id: 1, bonded: 50, joined: true }, + ] + ); + + let pre_20 = Balances::free_balance(20); + assert_ok!(Pools::join(RuntimeOrigin::signed(20), 10, 1)); + + // verify members balance is moved to pool. + assert_eq!(Balances::free_balance(20), pre_20 - 10); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 10 },] + ); + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 10, joined: true },] + ); + + // we reset the adapter to `DelegateStake`. + LegacyAdapter::set(false); + + // pool is pending migration. + assert!(Pools::api_pool_needs_delegate_migration(1)); + + // ensure pool mutation is not allowed until pool is migrated. + assert_noop!( + Pools::join(RuntimeOrigin::signed(21), 10, 1), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::pool_withdraw_unbonded(RuntimeOrigin::signed(10), 1, 0), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::nominate(RuntimeOrigin::signed(10), 1, vec![1, 2, 3]), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_state(RuntimeOrigin::signed(10), 1, PoolState::Blocked), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_metadata(RuntimeOrigin::signed(10), 1, vec![1, 1]), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::update_roles( + RuntimeOrigin::signed(10), + 1, + ConfigOp::Set(5), + ConfigOp::Set(6), + ConfigOp::Set(7) + ), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::chill(RuntimeOrigin::signed(10), 1), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_commission(RuntimeOrigin::signed(10), 1, None), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_commission_max(RuntimeOrigin::signed(10), 1, Zero::zero()), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_commission_change_rate( + RuntimeOrigin::signed(10), + 1, + CommissionChangeRate { max_increase: Perbill::from_percent(1), min_delay: 2_u64 } + ), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::claim_commission(RuntimeOrigin::signed(10), 1), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::adjust_pool_deposit(RuntimeOrigin::signed(10), 1), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::set_commission_claim_permission(RuntimeOrigin::signed(10), 1, None), + PoolsError::::NotMigrated + ); + + // migrate the pool. + assert_ok!(Pools::migrate_pool_to_delegate_stake(RuntimeOrigin::signed(10), 1)); + assert_eq!( + delegated_staking_events_since_last_call(), + vec![DelegatedStakingEvent::Delegated { + agent: POOL1_BONDED, + delegator: DelegatedStaking::generate_proxy_delegator(Agent::from(POOL1_BONDED)) + .get(), + amount: 50 + 10 + },] + ); + + // member is pending migration. + assert!(Pools::api_member_needs_delegate_migration(20)); + + // ensure member mutation is not allowed until member's delegation is migrated. 
+ assert_noop!( + Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::FreeBalance(5)), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::bond_extra_other(RuntimeOrigin::signed(10), 20, BondExtra::Rewards), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::claim_payout(RuntimeOrigin::signed(20)), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::unbond(RuntimeOrigin::signed(20), 20, 5), + PoolsError::::NotMigrated + ); + assert_noop!( + Pools::withdraw_unbonded(RuntimeOrigin::signed(20), 20, 0), + PoolsError::::NotMigrated + ); + + // migrate 20 + assert_ok!(Pools::migrate_delegation(RuntimeOrigin::signed(10), 20)); + // now `bond_extra` for 20 works. + assert_ok!(Pools::bond_extra(RuntimeOrigin::signed(20), BondExtra::FreeBalance(5))); + + assert_eq!( + staking_events_since_last_call(), + vec![StakingEvent::Bonded { stash: POOL1_BONDED, amount: 5 },] + ); + + assert_eq!( + pool_events_since_last_call(), + vec![PoolsEvent::Bonded { member: 20, pool_id: 1, bonded: 5, joined: false },] + ); + + assert_eq!( + delegated_staking_events_since_last_call(), + vec![ + DelegatedStakingEvent::MigratedDelegation { + agent: POOL1_BONDED, + delegator: 20, + amount: 10 + }, + DelegatedStakingEvent::Delegated { agent: POOL1_BONDED, delegator: 20, amount: 5 }, + ] + ); + }) +} + #[test] fn pool_no_dangling_delegation() { new_test_ext().execute_with(|| { @@ -1214,6 +1394,7 @@ fn pool_no_dangling_delegation() { delegated_staking_events_since_last_call(), vec![DelegatedStakingEvent::Delegated { agent: POOL1_BONDED, + delegator: alice, amount: 40 },] From 53f42749ca1a84a9b391388eadbc3a98004708ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Wed, 14 Aug 2024 22:09:08 +0200 Subject: [PATCH 47/74] Upgrade accidentally downgraded deps (#5365) --- Cargo.lock | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e3afe21c447..cc8b64457a79 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22992,11 +22992,12 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "serde", "serde_json", "wasm-bindgen-macro", @@ -23004,9 +23005,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", @@ -23031,9 +23032,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote 1.0.36", "wasm-bindgen-macro-support", @@ -23041,9 +23042,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" 
+checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2 1.0.82", "quote 1.0.36", @@ -23054,9 +23055,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-bindgen-test" From ebf4f8d2d590f41817d5d38b2d9b5812a46f2342 Mon Sep 17 00:00:00 2001 From: Ankan <10196091+Ank4n@users.noreply.github.com> Date: Thu, 15 Aug 2024 02:10:31 +0200 Subject: [PATCH 48/74] [Pools] fix derivation of pool account (#4999) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit closes https://github.com/paritytech-secops/srlabs_findings/issues/408. This fixes how ProxyDelegator accounts are derived but may cause issues in Westend since it would use the old derivative accounts. Does not affect Polkadot/Kusama as this pallet is not deployed to them yet. --------- Co-authored-by: Gonçalo Pestana --- Cargo.lock | 1 + polkadot/runtime/westend/src/lib.rs | 15 +-- prdoc/pr_4999.prdoc | 18 +++ substrate/frame/delegated-staking/Cargo.toml | 3 + substrate/frame/delegated-staking/src/lib.rs | 20 +++- .../frame/delegated-staking/src/migration.rs | 107 ++++++++++++++++++ 6 files changed, 152 insertions(+), 12 deletions(-) create mode 100644 prdoc/pr_4999.prdoc create mode 100644 substrate/frame/delegated-staking/src/migration.rs diff --git a/Cargo.lock b/Cargo.lock index cc8b64457a79..76973e3397b3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10489,6 +10489,7 @@ dependencies = [ "frame-election-provider-support", "frame-support", "frame-system", + "log", "pallet-balances", "pallet-nomination-pools", "pallet-staking", diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index a9fbbb4f33da..5f8d5d424938 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1760,11 +1760,8 @@ pub type SignedExtra = ( ); parameter_types! { - // This is the max pools that will be migrated in the runtime upgrade. Westend has more pools - // than this, but we want to emulate some non migrated pools. In prod runtimes, if weight is not - // a concern, it is recommended to set to (existing pools + 10) to also account for any new - // pools getting created before the migration is actually executed. - pub const MaxPoolsToMigrate: u32 = 250; + /// Bounding number of agent pot accounts to be migrated in a single block. + pub const MaxAgentsToMigrate: u32 = 300; } /// All migrations that will run on the next runtime upgrade. @@ -1798,13 +1795,11 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( - // Migrate NominationPools to `DelegateStake` adapter. This is unversioned upgrade and - // should not be applied yet in Kusama/Polkadot. - pallet_nomination_pools::migration::unversioned::DelegationStakeMigration< + // This is only needed for Westend. 
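+		// (Kusama and Polkadot never deployed pallet-delegated-staking, so they
+		// have no old-style proxy-delegator accounts to migrate.)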
+ pallet_delegated_staking::migration::unversioned::ProxyDelegatorMigration< Runtime, - MaxPoolsToMigrate, + MaxAgentsToMigrate, >, - pallet_staking::migrations::v15::MigrateV14ToV15, ); } diff --git a/prdoc/pr_4999.prdoc b/prdoc/pr_4999.prdoc new file mode 100644 index 000000000000..d396fcdbe8b3 --- /dev/null +++ b/prdoc/pr_4999.prdoc @@ -0,0 +1,18 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fixes entropy for derivation of proxy delegator account. + +doc: + - audience: Runtime Dev + description: | + This fixes how ProxyDelegator accounts are derived but may cause issues in Westend since it would use the old + derivative accounts. Does not affect Polkadot/Kusama as this pallet is not deployed to them yet. + +crates: + - name: westend-runtime + bump: patch + - name: pallet-delegated-staking + bump: patch + - name: pallet-nomination-pools + bump: patch \ No newline at end of file diff --git a/substrate/frame/delegated-staking/Cargo.toml b/substrate/frame/delegated-staking/Cargo.toml index 1bd17c59f1e7..8d5ccd342b6b 100644 --- a/substrate/frame/delegated-staking/Cargo.toml +++ b/substrate/frame/delegated-staking/Cargo.toml @@ -18,6 +18,8 @@ frame-system = { workspace = true } scale-info = { features = ["derive"], workspace = true } sp-runtime = { workspace = true } sp-staking = { workspace = true } +sp-io = { workspace = true } +log = { workspace = true } [dev-dependencies] sp-core = { workspace = true, default-features = true } @@ -38,6 +40,7 @@ std = [ "frame-election-provider-support/std", "frame-support/std", "frame-system/std", + "log/std", "pallet-balances/std", "pallet-nomination-pools/std", "pallet-staking/std", diff --git a/substrate/frame/delegated-staking/src/lib.rs b/substrate/frame/delegated-staking/src/lib.rs index 08ce747ec0c0..7b8d14b0a611 100644 --- a/substrate/frame/delegated-staking/src/lib.rs +++ b/substrate/frame/delegated-staking/src/lib.rs @@ -126,6 +126,7 @@ #![deny(rustdoc::broken_intra_doc_links)] mod impls; +pub mod migration; #[cfg(test)] mod mock; #[cfg(test)] @@ -152,12 +153,25 @@ use frame_support::{ Defensive, DefensiveOption, Imbalance, OnUnbalanced, }, }; +use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{AccountIdConversion, CheckedAdd, CheckedSub, Zero}, + traits::{CheckedAdd, CheckedSub, TrailingZeroInput, Zero}, ArithmeticError, DispatchResult, Perbill, RuntimeDebug, Saturating, }; use sp_staking::{Agent, Delegator, EraIndex, StakingInterface, StakingUnchecked}; +/// The log target of this pallet. +pub const LOG_TARGET: &str = "runtime::delegated-staking"; +// syntactic sugar for logging. +#[macro_export] +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_TARGET, + concat!("[{:?}] 🏊‍♂️ ", $patter), >::block_number() $(, $values)* + ) + }; +} pub type BalanceOf = <::Currency as FunInspect<::AccountId>>::Balance; @@ -463,7 +477,9 @@ impl Pallet { /// Derive a (keyless) pot account from the given agent account and account type. fn sub_account(account_type: AccountType, acc: T::AccountId) -> T::AccountId { - T::PalletId::get().into_sub_account_truncating((account_type, acc.clone())) + let entropy = (T::PalletId::get(), acc, account_type).using_encoded(blake2_256); + Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref())) + .expect("infinite length input; no invalid inputs for type; qed") } /// Held balance of a delegator. 
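The `sub_account` hunk above is the heart of this fix: the old
`into_sub_account_truncating((account_type, acc))` encodes the pallet ID plus
the tuple and truncates the result to the account-ID length, so the tail bytes
of `acc` were dropped and distinct agents could map to the same pot account.
Hashing first preserves every input byte. A minimal standalone sketch of the
new derivation, assuming `AccountId32` and the `codec`/`sp-io`/`sp-runtime`
primitives used in the hunk (the pallet's own types are omitted):

```rust
use codec::{Decode, Encode};
use sp_core::crypto::AccountId32;
use sp_io::hashing::blake2_256;
use sp_runtime::traits::TrailingZeroInput;

/// Hash-based pot-account derivation: every byte of `acc` influences the
/// resulting ID, so two different agents can no longer collide.
fn derive_sub_account(pallet_id: [u8; 8], acc: &AccountId32, account_type: u8) -> AccountId32 {
	let entropy = (pallet_id, acc, account_type).using_encoded(blake2_256);
	Decode::decode(&mut TrailingZeroInput::new(entropy.as_ref()))
		.expect("32 bytes of entropy always decode into an AccountId32; qed")
}
```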
diff --git a/substrate/frame/delegated-staking/src/migration.rs b/substrate/frame/delegated-staking/src/migration.rs new file mode 100644 index 000000000000..8bc7312c4eab --- /dev/null +++ b/substrate/frame/delegated-staking/src/migration.rs @@ -0,0 +1,107 @@ +// This file is part of Substrate. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use frame_support::traits::OnRuntimeUpgrade; + +#[cfg(feature = "try-runtime")] +use sp_runtime::TryRuntimeError; + +pub mod unversioned { + use super::*; + #[cfg(feature = "try-runtime")] + use alloc::vec::Vec; + use sp_runtime::traits::AccountIdConversion; + + /// Migrates `ProxyDelegator` accounts with better entropy than the old logic which didn't take + /// into account all the bytes of the agent account ID. + pub struct ProxyDelegatorMigration(PhantomData<(T, MaxAgents)>); + + impl> OnRuntimeUpgrade for ProxyDelegatorMigration { + fn on_runtime_upgrade() -> Weight { + let mut weight = Weight::zero(); + let old_proxy_delegator = |agent: T::AccountId| { + T::PalletId::get() + .into_sub_account_truncating((AccountType::ProxyDelegator, agent.clone())) + }; + + Agents::::iter_keys().take(MaxAgents::get() as usize).for_each(|agent| { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0)); + let old_proxy = old_proxy_delegator(agent.clone()); + + // if delegation does not exist, it does not need to be migrated. 
+ if let Some(delegation) = Delegation::::get(&old_proxy) { + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 0)); + + let new_proxy = + Pallet::::generate_proxy_delegator(Agent::from(agent.clone())); + + // accrue read writes for `do_migrate_delegation` + weight.saturating_accrue(T::DbWeight::get().reads_writes(8, 8)); + let _ = Pallet::::do_migrate_delegation( + Delegator::from(old_proxy.clone()), + new_proxy.clone(), + delegation.amount, + ) + .map_err(|e| { + log!( + error, + "Failed to migrate old proxy delegator {:?} to new proxy {:?} for agent {:?} with error: {:?}", + old_proxy, + new_proxy, + agent, + e, + ); + }); + }; + }); + + log!(info, "Finished migrating old proxy delegator accounts to new ones"); + weight + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(_data: Vec) -> Result<(), TryRuntimeError> { + let mut unmigrated_count = 0; + let old_proxy_delegator = |agent: T::AccountId| { + T::PalletId::get() + .into_sub_account_truncating((AccountType::ProxyDelegator, agent.clone())) + }; + + Agents::::iter_keys().take(MaxAgents::get() as usize).for_each(|agent| { + let old_proxy: T::AccountId = old_proxy_delegator(agent.clone()); + let held_balance = Pallet::::held_balance_of(Delegator::from(old_proxy.clone())); + let delegation = Delegation::::get(&old_proxy); + if delegation.is_some() || !held_balance.is_zero() { + log!( + error, + "Old proxy delegator {:?} for agent {:?} is not migrated.", + old_proxy, + agent, + ); + unmigrated_count += 1; + } + }); + + if unmigrated_count > 0 { + Err(TryRuntimeError::Other("Some old proxy delegator accounts are not migrated.")) + } else { + Ok(()) + } + } + } +} From 78c3daabd97367f70c1ebc0d7fe55abef4d76952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?D=C3=B3nal=20Murray?= Date: Thu, 15 Aug 2024 10:02:56 +0100 Subject: [PATCH 49/74] [Coretime] Always include UnpaidExecution, not just when revenue is > 0 (#5369) The NotifyRevenue XCM from relay to coretime chain fails to pass the barrier when revenue is 0. https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/coretime/mod.rs#L401 pushes notifyrevenue onto an [empty vec](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/coretime/mod.rs#L361) when `revenue == 0`, so it never explicitly requests unpaid execution, because that happens only in [the block where revenue is `> 0`](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/runtime/parachains/src/coretime/mod.rs#L387). This will need to be backported to 1.14 when merged. 
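For context on why the ordering matters: XCM barriers such as `AllowExplicitUnpaidExecutionFrom` only admit a message that *leads* with `UnpaidExecution`, so appending it after the asset instructions (or, when revenue was zero, not at all) made the message fail on the Coretime chain. A minimal sketch of the fixed construction, assuming the XCM v4 prelude (crate `staging-xcm`, usually imported as `xcm`); `notify_revenue_prefix` is a hypothetical stand-in for the body of the real `do_notify_revenue`:

```rust
use staging_xcm::v4::prelude::*;

// Sketch: lead with `UnpaidExecution` unconditionally so the barrier on the
// receiving chain passes even when `raw_revenue == 0` and no asset-handling
// instructions are appended afterwards.
fn notify_revenue_prefix(raw_revenue: u128) -> Vec<Instruction<()>> {
	let mut message = vec![Instruction::UnpaidExecution {
		weight_limit: WeightLimit::Unlimited,
		check_origin: None,
	}];
	if raw_revenue > 0 {
		let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) };
		// The real code reanchors the asset and then adds `DepositAsset` plus a
		// `Transact` carrying `notify_revenue`; elided here.
		message.push(Instruction::ReceiveTeleportedAsset(asset.into()));
	}
	message
}
```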
--- polkadot/runtime/parachains/src/coretime/mod.rs | 9 ++++----- prdoc/pr_5369.prdoc | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) create mode 100644 prdoc/pr_5369.prdoc diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index fbd8935f1990..9b9bdb86878f 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -358,7 +358,10 @@ fn mk_coretime_call(call: crate::coretime::CoretimeCalls) -> Instruct fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Result<(), XcmError> { let dest = Junction::Parachain(T::BrokerId::get()).into_location(); - let mut message = Vec::new(); + let mut message = vec![Instruction::UnpaidExecution { + weight_limit: WeightLimit::Unlimited, + check_origin: None, + }]; let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) }; let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; @@ -384,10 +387,6 @@ fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Resu message.extend( [ - Instruction::UnpaidExecution { - weight_limit: WeightLimit::Unlimited, - check_origin: None, - }, ReceiveTeleportedAsset(assets_reanchored), DepositAsset { assets: Wild(AllCounted(1)), diff --git a/prdoc/pr_5369.prdoc b/prdoc/pr_5369.prdoc new file mode 100644 index 000000000000..1baa5e1cbe7d --- /dev/null +++ b/prdoc/pr_5369.prdoc @@ -0,0 +1,15 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Fix failing XCM from relay to Coretime Chain when revenue is zero + +doc: + - audience: Runtime Dev + description: | + The coretime assigner now always includes UnpaidExecution when calling `notify_revenue` via a + `Transact`, not just when revenue is nonzero. This fixes an issue where the XCM would fail to + process on the receiving side. + +crates: + - name: polkadot-runtime-parachains + bump: patch From e91f1463884946fa1c11b1994fc6bb121de57091 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 15 Aug 2024 11:21:18 +0200 Subject: [PATCH 50/74] Bump trie-db from 0.29.0 to 0.29.1 (#5231) Bumps [trie-db](https://github.com/paritytech/trie) from 0.29.0 to 0.29.1.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=trie-db&package-manager=cargo&previous-version=0.29.0&new-version=0.29.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 76973e3397b3..2bad3947edfb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -22540,9 +22540,9 @@ dependencies = [ [[package]] name = "trie-db" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65ed83be775d85ebb0e272914fff6462c39b3ddd6dc67b5c1c41271aad280c69" +checksum = "0c992b4f40c234a074d48a757efeabb1a6be88af84c0c23f7ca158950cb0ae7f" dependencies = [ "hash-db", "log", diff --git a/Cargo.toml b/Cargo.toml index 74b12f96603a..edb425e619c1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1319,7 +1319,7 @@ tracing-log = { version = "0.2.0" } tracing-subscriber = { version = "0.3.18" } tracking-allocator = { path = "polkadot/node/tracking-allocator", default-features = false, package = "staging-tracking-allocator" } trie-bench = { version = "0.39.0" } -trie-db = { version = "0.29.0", default-features = false } +trie-db = { version = "0.29.1", default-features = false } trie-root = { version = "0.18.0", default-features = false } trie-standardmap = { version = "0.16.0" } trybuild = { version = "1.0.89" } From 048f4b8e95d4fe75c9562acc8dfa3cfc0fd33d31 Mon Sep 17 00:00:00 2001 From: Iker <34474035+IkerAlus@users.noreply.github.com> Date: Thu, 15 Aug 2024 11:04:31 +0100 Subject: [PATCH 51/74] Update Identity pallet README.md (#5183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update Identity pallet README.md according to the up-to-date docs, particularly to explain the _username_ concept of the pallet. --------- Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Bastian Köcher --- substrate/frame/identity/README.md | 43 +++++++++++++++++++++++------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/substrate/frame/identity/README.md b/substrate/frame/identity/README.md index 0203656eff46..94b2ae0231d7 100644 --- a/substrate/frame/identity/README.md +++ b/substrate/frame/identity/README.md @@ -1,9 +1,11 @@ -# Identity Module +# pallet-identity -- [`identity::Config`](https://docs.rs/pallet-identity/latest/pallet_identity/trait.Config.html) -- [`Call`](https://docs.rs/pallet-identity/latest/pallet_identity/enum.Call.html) +## Identity Pallet -## Overview +- [`Config`] +- [`Call`] + +### Overview A federated naming system, allowing for multiple registrars to be added from a specified origin. Registrars can set a fee to provide identity-verification service. Anyone can put forth a @@ -23,32 +25,53 @@ by definition, these have equivalent ownership and each has an individual name. The number of registrars should be limited, and the deposit made sufficiently large, to ensure no state-bloat attack is viable. -## Interface +#### Usernames + +The pallet provides functionality for username authorities to issue usernames. When an account +receives a username, they get a default instance of `IdentityInfo`. Usernames also serve as a +reverse lookup from username to account. + +Username authorities are given an allocation by governance to prevent state bloat. Usernames +impose no cost or deposit on the user. -### Dispatchable Functions +Users can have multiple usernames that map to the same `AccountId`, however one `AccountId` can +only map to a single username, known as the *primary*. 
-#### For general users +### Interface + +#### Dispatchable Functions + +##### For General Users - `set_identity` - Set the associated identity of an account; a small deposit is reserved if not already taken. - `clear_identity` - Remove an account's associated identity; the deposit is returned. - `request_judgement` - Request a judgement from a registrar, paying a fee. - `cancel_request` - Cancel the previous request for a judgement. +- `accept_username` - Accept a username issued by a username authority. +- `remove_expired_approval` - Remove a username that was issued but never accepted. +- `set_primary_username` - Set a given username as an account's primary. +- `remove_dangling_username` - Remove a username that maps to an account without an identity. -#### For general users with sub-identities +##### For General Users with Sub-Identities - `set_subs` - Set the sub-accounts of an identity. - `add_sub` - Add a sub-identity to an identity. - `remove_sub` - Remove a sub-identity of an identity. - `rename_sub` - Rename a sub-identity of an identity. - `quit_sub` - Remove a sub-identity of an identity (called by the sub-identity). -#### For registrars +##### For Registrars - `set_fee` - Set the fee required to be paid for a judgement to be given by the registrar. - `set_fields` - Set the fields that a registrar cares about in their judgements. - `provide_judgement` - Provide a judgement to an identity. -#### For super-users +##### For Username Authorities +- `set_username_for` - Set a username for a given account. The account must approve it. + +##### For Superusers - `add_registrar` - Add a new registrar to the system. - `kill_identity` - Forcibly remove the associated identity; the deposit is lost. +- `add_username_authority` - Add an account with the ability to issue usernames. +- `remove_username_authority` - Remove an account with the ability to issue usernames. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html From 63bf73d5c5a0f89f7eeedfc268fe4be2c14123de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Thu, 15 Aug 2024 12:45:19 +0200 Subject: [PATCH 52/74] Aura: Ensure we are building on each relay chain fork (#5352) We only want to build one block per slot for Aura on parachains. However, we still need to build on each relay chain fork, which is using the same slot. Closes: https://github.com/paritytech/polkadot-sdk/issues/5349 --------- Co-authored-by: Davide Galassi Co-authored-by: Sebastian Kunert --- cumulus/client/consensus/aura/src/collators/basic.rs | 8 ++++++-- prdoc/pr_5352.prdoc | 10 ++++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 prdoc/pr_5352.prdoc diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index 4efd50a04ec6..0f9583cd0eb0 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -138,6 +138,7 @@ where }; let mut last_processed_slot = 0; + let mut last_relay_chain_block = Default::default(); while let Some(request) = collation_requests.next().await { macro_rules! reject_with_error { @@ -215,11 +216,13 @@ where // // Most parachains currently run with 12 seconds slots and thus, they would try to // produce multiple blocks per slot which very likely would fail on chain. Thus, we have - // this "hack" to only produce on block per slot. + // this "hack" to only produce one block per slot per relay chain fork. 
// // With https://github.com/paritytech/polkadot-sdk/issues/3168 this implementation will be // obsolete and also the underlying issue will be fixed. - if last_processed_slot >= *claim.slot() { + if last_processed_slot >= *claim.slot() && + last_relay_chain_block < *relay_parent_header.number() + { continue } @@ -261,6 +264,7 @@ where } last_processed_slot = *claim.slot(); + last_relay_chain_block = *relay_parent_header.number(); } } } diff --git a/prdoc/pr_5352.prdoc b/prdoc/pr_5352.prdoc new file mode 100644 index 000000000000..4b055d81cb51 --- /dev/null +++ b/prdoc/pr_5352.prdoc @@ -0,0 +1,10 @@ +title: "Aura: Ensure parachains are building on all relay chain forks" + +doc: + - audience: Node Dev + description: | + Ensure that parachains using the `basic` collator are building on all relay chain forks. + +crates: + - name: cumulus-client-consensus-aura + bump: patch From a6dffe390c24011fcf3fabab83c46d82f9410400 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Thu, 15 Aug 2024 13:48:17 +0200 Subject: [PATCH 53/74] Correct some typos in crates' descriptions (#5262) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- cumulus/client/pov-recovery/Cargo.toml | 2 +- cumulus/pallets/aura-ext/src/lib.rs | 2 +- .../bridge-hubs/bridge-hub-rococo/Cargo.toml | 2 +- prdoc/pr_5262.prdoc | 25 +++++++++++++++++++ substrate/frame/try-runtime/Cargo.toml | 2 +- substrate/frame/whitelist/Cargo.toml | 2 +- umbrella/src/lib.rs | 6 ++--- 7 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 prdoc/pr_5262.prdoc diff --git a/cumulus/client/pov-recovery/Cargo.toml b/cumulus/client/pov-recovery/Cargo.toml index a95b24bc2933..3127dd26fcaa 100644 --- a/cumulus/client/pov-recovery/Cargo.toml +++ b/cumulus/client/pov-recovery/Cargo.toml @@ -2,7 +2,7 @@ name = "cumulus-client-pov-recovery" version = "0.7.0" authors.workspace = true -description = "Cumulus-specific networking protocol" +description = "Parachain PoV recovery" edition.workspace = true license = "GPL-3.0-or-later WITH Classpath-exception-2.0" diff --git a/cumulus/pallets/aura-ext/src/lib.rs b/cumulus/pallets/aura-ext/src/lib.rs index 4c9e61458a87..dc854eb82018 100644 --- a/cumulus/pallets/aura-ext/src/lib.rs +++ b/cumulus/pallets/aura-ext/src/lib.rs @@ -16,7 +16,7 @@ //! Cumulus extension pallet for AuRa //! -//! This pallets extends the Substrate AuRa pallet to make it compatible with parachains. It +//! This pallet extends the Substrate AuRa pallet to make it compatible with parachains. It //! provides the [`Pallet`], the [`Config`] and the [`GenesisConfig`]. //! //! 
It is also required that the parachain runtime uses the provided [`BlockExecutor`] to properly diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml index f3e03aa74300..f85861bdaa54 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/Cargo.toml @@ -3,7 +3,7 @@ name = "bridge-hub-rococo-runtime" version = "0.5.0" authors.workspace = true edition.workspace = true -description = "Rococo's BridgeHub parachain runtime" +description = "Rococo's BridgeHub parachain runtime" license = "Apache-2.0" [lints] diff --git a/prdoc/pr_5262.prdoc b/prdoc/pr_5262.prdoc new file mode 100644 index 000000000000..828f0ffeb1bc --- /dev/null +++ b/prdoc/pr_5262.prdoc @@ -0,0 +1,25 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Correct some typos in crates' descriptions + +doc: + - audience: Runtime Dev + description: | + Corrected typos and copy-paste errors in crates' descriptions. + +crates: + - name: cumulus-client-pov-recovery + bump: patch + - name: cumulus-pallet-aura-ext + bump: patch + - name: bridge-hub-rococo-runtime + bump: patch + - name: frame-try-runtime + bump: patch + - name: pallet-whitelist + bump: patch + - name: polkadot-sdk + bump: patch + - name: polkadot-runtime-parachains + bump: none diff --git a/substrate/frame/try-runtime/Cargo.toml b/substrate/frame/try-runtime/Cargo.toml index 2bd791f52385..7f7d1f2b50e0 100644 --- a/substrate/frame/try-runtime/Cargo.toml +++ b/substrate/frame/try-runtime/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage.workspace = true repository.workspace = true -description = "FRAME pallet for democracy" +description = "Supporting types for try-runtime, testing and dry-running commands." [lints] workspace = true diff --git a/substrate/frame/whitelist/Cargo.toml b/substrate/frame/whitelist/Cargo.toml index e910fc323714..a347174ed2eb 100644 --- a/substrate/frame/whitelist/Cargo.toml +++ b/substrate/frame/whitelist/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" homepage.workspace = true repository.workspace = true -description = "FRAME pallet for whitelisting call, and dispatch from specific origin" +description = "FRAME pallet for whitelisting calls, and dispatching from a specific origin" [lints] workspace = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 07f1cbad1262..7bdfcd0b03ae 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -109,7 +109,7 @@ pub use cumulus_client_network; #[cfg(feature = "cumulus-client-parachain-inherent")] pub use cumulus_client_parachain_inherent; -/// Cumulus-specific networking protocol. +/// Parachain PoV recovery. #[cfg(feature = "cumulus-client-pov-recovery")] pub use cumulus_client_pov_recovery; @@ -272,7 +272,7 @@ pub use frame_system_benchmarking; #[cfg(feature = "frame-system-rpc-runtime-api")] pub use frame_system_rpc_runtime_api; -/// FRAME pallet for democracy. +/// Supporting types for try-runtime, testing and dry-running commands. #[cfg(feature = "frame-try-runtime")] pub use frame_try_runtime; @@ -689,7 +689,7 @@ pub use pallet_utility; #[cfg(feature = "pallet-vesting")] pub use pallet_vesting; -/// FRAME pallet for whitelisting call, and dispatch from specific origin. 
+/// FRAME pallet for whitelisting calls, and dispatching from a specific origin. #[cfg(feature = "pallet-whitelist")] pub use pallet_whitelist; From 069f8a6a374108affa4e240685c7504a1fbd1397 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lo=C3=AFs?= <49660929+SailorSnoW@users.noreply.github.com> Date: Thu, 15 Aug 2024 14:33:32 +0200 Subject: [PATCH 54/74] fix visibility for `pallet_nfts` types used as call arguments (#3634) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix #3631 Types which are impacted and fixed here are `ItemTip`, `PriceWithDirection`, `PreSignedMint`, `PreSignedAttributes`. Co-authored-by: Bastian Köcher --- substrate/frame/nfts/src/types.rs | 36 +++++++++++++++---------------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/substrate/frame/nfts/src/types.rs b/substrate/frame/nfts/src/types.rs index 1687a03520af..60d7c639c88c 100644 --- a/substrate/frame/nfts/src/types.rs +++ b/substrate/frame/nfts/src/types.rs @@ -193,13 +193,13 @@ pub struct ItemMetadata> { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct ItemTip { /// The collection of the item. - pub(super) collection: CollectionId, + pub collection: CollectionId, /// An item of which the tip is sent for. - pub(super) item: ItemId, + pub item: ItemId, /// A sender of the tip. - pub(super) receiver: AccountId, + pub receiver: AccountId, /// An amount the sender is willing to tip. - pub(super) amount: Amount, + pub amount: Amount, } /// Information about the pending swap. @@ -246,9 +246,9 @@ pub enum PriceDirection { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug, TypeInfo, MaxEncodedLen)] pub struct PriceWithDirection { /// An amount. - pub(super) amount: Amount, + pub amount: Amount, /// A direction (send or receive). - pub(super) direction: PriceDirection, + pub direction: PriceDirection, } /// Support for up to 64 user-enabled features on a collection. @@ -518,31 +518,31 @@ impl_codec_bitflags!(CollectionRoles, u8, CollectionRole); #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct PreSignedMint { /// A collection of the item to be minted. - pub(super) collection: CollectionId, + pub collection: CollectionId, /// Item's ID. - pub(super) item: ItemId, + pub item: ItemId, /// Additional item's key-value attributes. - pub(super) attributes: Vec<(Vec, Vec)>, + pub attributes: Vec<(Vec, Vec)>, /// Additional item's metadata. - pub(super) metadata: Vec, + pub metadata: Vec, /// Restrict the claim to a particular account. - pub(super) only_account: Option, + pub only_account: Option, /// A deadline for the signature. - pub(super) deadline: Deadline, + pub deadline: Deadline, /// An optional price the claimer would need to pay for the mint. - pub(super) mint_price: Option, + pub mint_price: Option, } #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, TypeInfo)] pub struct PreSignedAttributes { /// Collection's ID. - pub(super) collection: CollectionId, + pub collection: CollectionId, /// Item's ID. - pub(super) item: ItemId, + pub item: ItemId, /// Key-value attributes. - pub(super) attributes: Vec<(Vec, Vec)>, + pub attributes: Vec<(Vec, Vec)>, /// Attributes' namespace. - pub(super) namespace: AttributeNamespace, + pub namespace: AttributeNamespace, /// A deadline for the signature. 
- pub(super) deadline: Deadline, + pub deadline: Deadline, } From b5029eb4fd6c7ffd8164b2fe12b71bad0c59c9f2 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Thu, 15 Aug 2024 16:55:49 +0200 Subject: [PATCH 55/74] Update links in the documentation (#5175) - Where applicable, use a regular [`reference`] instead of `../../../reference/index.html`. - Typos. - Update a link to `polkadot-evm` which has moved out of the monorepo. - ~~The link specification for `chain_spec_builder` is invalid~~ (actually it was valid) - it works fine without it. Part of https://github.com/paritytech/eng-automation/issues/10 --- Cargo.lock | 1 + docs/sdk/Cargo.toml | 1 + docs/sdk/src/guides/your_first_pallet/mod.rs | 18 +++++++++--------- .../src/reference_docs/chain_spec_genesis.rs | 1 - .../runtime_vs_smart_contract.rs | 6 +++--- 5 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bad3947edfb..df01a143efb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14565,6 +14565,7 @@ dependencies = [ "pallet-balances", "pallet-broker", "pallet-collective", + "pallet-contracts", "pallet-default-config-example", "pallet-democracy", "pallet-example-offchain-worker", diff --git a/docs/sdk/Cargo.toml b/docs/sdk/Cargo.toml index 2f85171bb93d..424758c32b34 100644 --- a/docs/sdk/Cargo.toml +++ b/docs/sdk/Cargo.toml @@ -22,6 +22,7 @@ frame = { features = [ "runtime", ], workspace = true, default-features = true } pallet-examples = { workspace = true } +pallet-contracts = { workspace = true } pallet-default-config-example = { workspace = true, default-features = true } pallet-example-offchain-worker = { workspace = true, default-features = true } diff --git a/docs/sdk/src/guides/your_first_pallet/mod.rs b/docs/sdk/src/guides/your_first_pallet/mod.rs index 3c74469e1768..006d0be7ded3 100644 --- a/docs/sdk/src/guides/your_first_pallet/mod.rs +++ b/docs/sdk/src/guides/your_first_pallet/mod.rs @@ -21,7 +21,7 @@ //! this guide, namely the crate/package names, based on which template you use. //! //! > Be aware that you can read the entire source code backing this tutorial by clicking on the -//! > [`source`](./mod.rs.html) button at the top right of the page. +//! > `source` button at the top right of the page. //! //! You should have studied the following modules as a prelude to this guide: //! @@ -45,7 +45,7 @@ //! Consider the following as a "shell pallet". We continue building the rest of this pallet based //! on this template. //! -//! [`pallet::config`] and [`pallet::pallet`](frame_support::pallet) are both mandatory parts of any +//! [`pallet::config`] and [`pallet::pallet`] are both mandatory parts of any //! pallet. Refer to the documentation of each to get an overview of what they do. #![doc = docify::embed!("./src/guides/your_first_pallet/mod.rs", shell_pallet)] //! @@ -319,13 +319,13 @@ //! - Learn more about the individual pallet items/macros, such as event and errors and call, in //! [`frame::pallet_macros`]. //! -//! [`pallet::storage`]: ../../../frame_support/pallet_macros/attr.config.html -//! [`pallet::call`]: ../../../frame_support/pallet_macros/attr.call.html -//! [`pallet::event`]: ../../../frame_support/pallet_macros/attr.event.html -//! [`pallet::error`]: ../../../frame_support/pallet_macros/attr.error.html -//! [`pallet::pallet`]: ../../../frame_support/pallet_macros/attr.pallet.html -//! [`pallet::config`]: ../../../frame_support/pallet_macros/attr.config.html -//! [`pallet::generate_deposit`]: ../../../frame_support/pallet_macros/attr.generate_deposit.html +//! 
[`pallet::storage`]: frame_support::pallet_macros::storage +//! [`pallet::call`]: frame_support::pallet_macros::call +//! [`pallet::event`]: frame_support::pallet_macros::event +//! [`pallet::error`]: frame_support::pallet_macros::error +//! [`pallet::pallet`]: frame_support::pallet +//! [`pallet::config`]: frame_support::pallet_macros::config +//! [`pallet::generate_deposit`]: frame_support::pallet_macros::generate_deposit #[docify::export] #[frame::pallet(dev_mode)] diff --git a/docs/sdk/src/reference_docs/chain_spec_genesis.rs b/docs/sdk/src/reference_docs/chain_spec_genesis.rs index 557795cb410c..39e5993d020f 100644 --- a/docs/sdk/src/reference_docs/chain_spec_genesis.rs +++ b/docs/sdk/src/reference_docs/chain_spec_genesis.rs @@ -178,7 +178,6 @@ //! [`pallet::genesis_build`]: frame_support::pallet_macros::genesis_build //! [`pallet::genesis_config`]: frame_support::pallet_macros::genesis_config //! [`BuildGenesisConfig`]: frame_support::traits::BuildGenesisConfig -//! [`chain_spec_builder`]: ../../../staging_chain_spec_builder/index.html //! [`serde`]: https://serde.rs/field-attrs.html //! [`get_storage_for_patch`]: sc_chain_spec::GenesisConfigBuilderRuntimeCaller::get_storage_for_patch //! [`GenesisBuilder::get_preset`]: sp_genesis_builder::GenesisBuilder::get_preset diff --git a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs index 4a0ba3ca48f5..c91b66b944c6 100644 --- a/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs +++ b/docs/sdk/src/reference_docs/runtime_vs_smart_contract.rs @@ -20,8 +20,8 @@ //! #### Smart Contracts in Substrate //! Smart Contracts are autonomous, programmable constructs deployed on the blockchain. //! In [FRAME](frame), Smart Contracts infrastructure is implemented by the -//! [`pallet_contracts`](../../../pallet_contracts/index.html) for WASM-based contracts or the -//! [`pallet_evm`](../../../pallet_evm/index.html) for EVM-compatible contracts. These pallets +//! [`pallet_contracts`] for WASM-based contracts or the +//! [`pallet_evm`](https://github.com/polkadot-evm/frontier/tree/master/frame/evm) for EVM-compatible contracts. These pallets //! enable Smart Contract developers to build applications and systems on top of a Substrate-based //! blockchain. //! @@ -108,7 +108,7 @@ //! - **Deployment and Iteration**: Smart Contracts, by nature, are designed for more //! straightforward deployment and iteration. Developers can quickly deploy contracts. //! - **Contract Code Updates**: Once deployed, although typically immutable, Smart Contracts can be -//! upgraded, but lack of migration logic. The [pallet_contracts](../../../pallet_contracts/index.html) +//! upgraded, but lack of migration logic. The [`pallet_contracts`] //! allows for contracts to be upgraded by exposing the `set_code` dispatchable. More details on this //! can be found in [Ink! documentation on upgradeable contracts](https://use.ink/basics/upgradeable-contracts). //! 
- **Isolated Impact**: Upgrades or changes to a smart contract generally impact only that From 41a679c466d2da1c5760405198c09bb2d320a106 Mon Sep 17 00:00:00 2001 From: Yuri Volkov <0@mcornholio.ru> Date: Thu, 15 Aug 2024 17:30:29 +0200 Subject: [PATCH 56/74] Moving cargo check for runtimes to GHA (#5340) Fixes https://github.com/paritytech/ci_cd/issues/1015 --- .../actions/cargo-check-runtimes/action.yml | 22 +++ .../workflows/check-cargo-check-runtimes.yml | 136 ++++++++++++++++++ 2 files changed, 158 insertions(+) create mode 100644 .github/actions/cargo-check-runtimes/action.yml create mode 100644 .github/workflows/check-cargo-check-runtimes.yml diff --git a/.github/actions/cargo-check-runtimes/action.yml b/.github/actions/cargo-check-runtimes/action.yml new file mode 100644 index 000000000000..869f17661e4a --- /dev/null +++ b/.github/actions/cargo-check-runtimes/action.yml @@ -0,0 +1,22 @@ +name: 'cargo check runtimes' +description: 'Runs `cargo check` for every directory in provided root.' +inputs: + root: + description: "Root directory. Expected to contain several cargo packages inside." + required: true +runs: + using: "composite" + steps: + - name: Check + shell: bash + run: | + mkdir -p ~/.forklift + cp .forklift/config.toml ~/.forklift/config.toml + cd ${{ inputs.root }} + for directory in $(echo */); do + echo "_____Running cargo check for ${directory} ______"; + cd ${directory}; + pwd; + SKIP_WASM_BUILD=1 forklift cargo check --locked; + cd ..; + done diff --git a/.github/workflows/check-cargo-check-runtimes.yml b/.github/workflows/check-cargo-check-runtimes.yml new file mode 100644 index 000000000000..ebcf6c5fc9bd --- /dev/null +++ b/.github/workflows/check-cargo-check-runtimes.yml @@ -0,0 +1,136 @@ +name: Check Cargo Check Runtimes + +on: + pull_request: + types: [ opened, synchronize, reopened, ready_for_review, labeled ] + + +# Jobs in this workflow depend on each other, only for limiting peak amount of spawned workers + +jobs: + # GitHub Actions allows using 'env' in a container context. + # However, env variables don't work for forks: https://github.com/orgs/community/discussions/44322 + # This workaround sets the container image for each job using 'set-image' job output. 
+ set-image: + if: contains(github.event.label.name, 'GHA-migration') || contains(github.event.pull_request.labels.*.name, 'GHA-migration') + runs-on: ubuntu-latest + timeout-minutes: 20 + outputs: + IMAGE: ${{ steps.set_image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - id: set_image + run: cat .github/env >> $GITHUB_OUTPUT + check-runtime-assets: + runs-on: arc-runners-polkadot-sdk-beefy + needs: [set-image] + timeout-minutes: 20 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/assets + + check-runtime-collectives: + runs-on: arc-runners-polkadot-sdk-beefy + needs: [check-runtime-assets, set-image] + timeout-minutes: 20 + container: + image: ${{ needs.set-image.outputs.IMAGE }} + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/collectives + + check-runtime-coretime: + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + needs: [check-runtime-assets, set-image] + timeout-minutes: 20 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/coretime + + check-runtime-bridge-hubs: + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + needs: [set-image] + timeout-minutes: 20 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/bridge-hubs + + check-runtime-contracts: + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + needs: [check-runtime-collectives, set-image] + timeout-minutes: 20 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/contracts + + check-runtime-starters: + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + needs: [check-runtime-assets, set-image] + timeout-minutes: 20 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/starters + + check-runtime-testing: + runs-on: arc-runners-polkadot-sdk-beefy + container: + image: ${{ needs.set-image.outputs.IMAGE }} + needs: [check-runtime-starters, set-image] + timeout-minutes: 20 + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Run cargo check + uses: ./.github/actions/cargo-check-runtimes + with: + root: cumulus/parachains/runtimes/testing + + confirm-required-jobs-passed: + runs-on: ubuntu-latest + name: All check-runtime-* tests passed + # If any new job gets added, be sure to add it to this array + needs: + - check-runtime-assets + - check-runtime-collectives + - check-runtime-coretime + - check-runtime-bridge-hubs + - check-runtime-contracts + - check-runtime-starters + - check-runtime-testing + steps: + - run: echo '### Good job! 
All the tests passed 🚀' >> $GITHUB_STEP_SUMMARY From 90c91b1ec0a90f563283b8c26144d0839528a711 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Thu, 15 Aug 2024 18:28:31 +0100 Subject: [PATCH 57/74] allow for `u8` to be used as hold/freeze reason (#5348) ..without needing to provide your own `newtype` around it. This will allow `type Reason = u8` to be used as `FreezeReason` and `HoldReason`, which I think is a nice simplification if one doesn't want to deal with the complications. At the same time, it is a bit of an anti-pattern. Putting it out there to check people's vibes. --- prdoc/pr_5348.prdoc | 13 +++++++++++++ substrate/frame/support/src/traits/misc.rs | 4 ++++ 2 files changed, 17 insertions(+) create mode 100644 prdoc/pr_5348.prdoc diff --git a/prdoc/pr_5348.prdoc b/prdoc/pr_5348.prdoc new file mode 100644 index 000000000000..c2282c4c74c4 --- /dev/null +++ b/prdoc/pr_5348.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: allow for u8 to be used as hold/freeze reason + +doc: + - audience: Runtime Dev + description: | + Allows for `u8` type to be configured as `HoldReason` and `FreezeReason` + +crates: + - name: frame-support + bump: patch diff --git a/substrate/frame/support/src/traits/misc.rs b/substrate/frame/support/src/traits/misc.rs index 7c8c22d1ae5a..492475d6f63c 100644 --- a/substrate/frame/support/src/traits/misc.rs +++ b/substrate/frame/support/src/traits/misc.rs @@ -48,6 +48,10 @@ impl VariantCount for () { const VARIANT_COUNT: u32 = 0; } +impl VariantCount for u8 { + const VARIANT_COUNT: u32 = 256; +} + /// Adapter for `Get` to access `VARIANT_COUNT` from `trait pub trait VariantCount {`. pub struct VariantCountOf(core::marker::PhantomData); impl Get for VariantCountOf { From 843c4db78f0ca260376c5a3abf78c95782ebdc64 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Thu, 15 Aug 2024 19:40:08 +0200 Subject: [PATCH 58/74] Update Readme of the `polkadot` crate (#5326) - Typos. - Those telemetry links like https://telemetry.polkadot.io/#list/Kusama didn't seem to point to a proper list (anymore?) - updated them. - Also looks like it was trying to use rust-style linking instead of markdown linking, changed that. - Relative links do not work on crates.io - updated to absolute, similarly to some already existing links, such as contribution guidelines. --- polkadot/README.md | 25 +++++++++++-------------- prdoc/pr_5326.prdoc | 13 +++++++++++++ 2 files changed, 24 insertions(+), 14 deletions(-) create mode 100644 prdoc/pr_5326.prdoc diff --git a/polkadot/README.md b/polkadot/README.md index 47af79a3aa92..fa14995e9af3 100644 --- a/polkadot/README.md +++ b/polkadot/README.md @@ -103,9 +103,8 @@ Connect to the global Polkadot Mainnet network by running: ../target/release/polkadot --chain=polkadot ``` -You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). - -[telemetry](https://telemetry.polkadot.io/#list/Polkadot): https://telemetry.polkadot.io/#list/Polkadot +You can see your node on [Polkadot telemetry](https://telemetry.polkadot.io/#list/0x91b171bb158e2d3848fa23a9f1c25182fb8e20313b2c1eb49219da7a70ce90c3) +(set a custom name with `--name "my custom name"`).
### Connect to the "Kusama" Canary Network @@ -115,9 +114,8 @@ Connect to the global Kusama canary network by running: ../target/release/polkadot --chain=kusama ``` -You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). - -[telemetry](https://telemetry.polkadot.io/#list/Kusama): https://telemetry.polkadot.io/#list/Kusama +You can see your node on [Kusama telemetry](https://telemetry.polkadot.io/#list/0xb0a8d493285c2df73290dfb7e61f870f17b41801197a149ca93654499ea3dafe) +(set a custom name with `--name "my custom name"`). ### Connect to the Westend Testnet @@ -127,9 +125,8 @@ Connect to the global Westend testnet by running: ../target/release/polkadot --chain=westend ``` -You can see your node on [telemetry] (set a custom name with `--name "my custom name"`). - -[telemetry](https://telemetry.polkadot.io/#list/Westend): https://telemetry.polkadot.io/#list/Westend +You can see your node on [Westend telemetry](https://telemetry.polkadot.io/#list/0xe143f23803ac50e8f6f8e62695d1ce9e4e1d68aa36c1cd2cfd15340213f3423e) +(set a custom name with `--name "my custom name"`). ### Obtaining DOTs @@ -147,7 +144,7 @@ Then, grab the Polkadot source code: ```bash git clone https://github.com/paritytech/polkadot-sdk.git -cd polkadot +cd polkadot-sdk ``` Then build the code. You will need to build in release mode (`--release`) to start a network. Only @@ -185,7 +182,7 @@ You can run a simple single-node development "network" on your machine by runnin cargo run --bin polkadot --release -- --dev ``` -You can muck around by heading to and choose "Local Node" from the +You can muck around by heading to and choosing "Local Node" from the Settings menu. ### Local Two-node Testnet @@ -214,11 +211,11 @@ that we currently maintain. ### Using Docker -[Using Docker](../docs/contributor/docker.md) +[Using Docker](https://github.com/paritytech/polkadot-sdk/blob/master/docs/contributor/docker.md) ### Shell Completion -[Shell Completion](doc/shell-completion.md) +[Shell Completion](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/doc/shell-completion.md) ## Contributing @@ -232,4 +229,4 @@ that we currently maintain. ## License -Polkadot is [GPL 3.0 licensed](LICENSE). +Polkadot is [GPL 3.0 licensed](https://github.com/paritytech/polkadot-sdk/blob/master/polkadot/LICENSE). diff --git a/prdoc/pr_5326.prdoc b/prdoc/pr_5326.prdoc new file mode 100644 index 000000000000..0301b8c17a30 --- /dev/null +++ b/prdoc/pr_5326.prdoc @@ -0,0 +1,13 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: Update Readme of the `polkadot` crate + +doc: + - audience: Node Operator + description: | + Updated Readme of the `polkadot` crate. + +crates: + - name: polkadot + bump: patch From 80c3c1fd712ce9f1dd0c539e384dbeab699485ed Mon Sep 17 00:00:00 2001 From: Francisco Aguirre Date: Fri, 16 Aug 2024 08:33:31 +0200 Subject: [PATCH 59/74] Fix zombienet bridges test (#5373) After https://github.com/paritytech/polkadot-sdk/pull/4129, a zombienet bridge test was broken. This is because XCMv4 locations have an array in the `interior` field, which also has to appear in PJS. 
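For reference, the shape change behind the failure, as a hedged sketch (assuming the `staging-xcm` v4 prelude, usually imported as `xcm`; not part of the patch itself):

```rust
use staging_xcm::v4::prelude::*;

fn main() {
	// XCM v4 `X1`..`X8` junctions wrap *arrays*, so the JSON form that PJS and
	// the test helpers see is
	// { "parents": 2, "interior": { "X1": [ { "GlobalConsensus": "Westend" } ] } }
	// -- note the brackets around the single junction.
	let location = Location::new(2, [Junction::GlobalConsensus(NetworkId::Westend)]);
	println!("{location:?}");
}
```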
--------- Co-authored-by: Serban Iorga --- .../rococo-westend/bridges_rococo_westend.sh | 4 ++-- .../js-helpers/wrapped-assets-balance.js | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh index ef4a5597902f..54633449134b 100755 --- a/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh +++ b/bridges/testing/environments/rococo-westend/bridges_rococo_westend.sh @@ -270,7 +270,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9910" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Westend" } } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Westend" }] } }')" \ "$GLOBAL_CONSENSUS_WESTEND_SOVEREIGN_ACCOUNT" \ 10000000000 \ true @@ -329,7 +329,7 @@ case "$1" in "//Alice" \ 1000 \ "ws://127.0.0.1:9010" \ - "$(jq --null-input '{ "parents": 2, "interior": { "X1": { "GlobalConsensus": "Rococo" } } }')" \ + "$(jq --null-input '{ "parents": 2, "interior": { "X1": [{ "GlobalConsensus": "Rococo" }] } }')" \ "$GLOBAL_CONSENSUS_ROCOCO_SOVEREIGN_ACCOUNT" \ 10000000000 \ true diff --git a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js index 27287118547f..7b343ed97a88 100644 --- a/bridges/testing/framework/js-helpers/wrapped-assets-balance.js +++ b/bridges/testing/framework/js-helpers/wrapped-assets-balance.js @@ -8,7 +8,7 @@ async function run(nodeName, networkInfo, args) { const bridgedNetworkName = args[2]; while (true) { const foreignAssetAccount = await api.query.foreignAssets.account( - { parents: 2, interior: { X1: { GlobalConsensus: bridgedNetworkName } } }, + { parents: 2, interior: { X1: [{ GlobalConsensus: bridgedNetworkName }] } }, accountAddress ); if (foreignAssetAccount.isSome) { From dbd194aad4b20ff03ed74e64cf06cb59d4396788 Mon Sep 17 00:00:00 2001 From: Tsvetomir Dimitrov Date: Fri, 16 Aug 2024 10:05:40 +0300 Subject: [PATCH 60/74] More logs in `is_potential_spam` from `dispute-coordinator` (#5252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add more logs in `is_potential_spam` revealing why a statement was marked as spam.
--------- Co-authored-by: Bastian Köcher --- .../core/dispute-coordinator/src/initialized.rs | 13 +++++++++++++ polkadot/node/core/dispute-coordinator/src/lib.rs | 12 ++++++++++++ prdoc/pr_5252.prdoc | 11 +++++++++++ 3 files changed, 36 insertions(+) create mode 100644 prdoc/pr_5252.prdoc diff --git a/polkadot/node/core/dispute-coordinator/src/initialized.rs b/polkadot/node/core/dispute-coordinator/src/initialized.rs index 5f86da87f21c..5096fe5e6891 100644 --- a/polkadot/node/core/dispute-coordinator/src/initialized.rs +++ b/polkadot/node/core/dispute-coordinator/src/initialized.rs @@ -1351,6 +1351,12 @@ impl Initialized { } } for validator_index in new_state.votes().invalid.keys() { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?validator_index, + "Disabled offchain for voting invalid against a valid candidate", + ); self.offchain_disabled_validators .insert_against_valid(session, *validator_index); } @@ -1375,6 +1381,13 @@ impl Initialized { } for (validator_index, (kind, _sig)) in new_state.votes().valid.raw() { let is_backer = kind.is_backing(); + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + ?validator_index, + ?is_backer, + "Disabled offchain for voting valid for an invalid candidate", + ); self.offchain_disabled_validators.insert_for_invalid( session, *validator_index, diff --git a/polkadot/node/core/dispute-coordinator/src/lib.rs b/polkadot/node/core/dispute-coordinator/src/lib.rs index daa384b36ffb..34d9ddf3a97c 100644 --- a/polkadot/node/core/dispute-coordinator/src/lib.rs +++ b/polkadot/node/core/dispute-coordinator/src/lib.rs @@ -478,6 +478,18 @@ pub fn is_potential_spam( let all_invalid_votes_disabled = vote_state.invalid_votes_all_disabled(is_disabled); let ignore_disabled = !is_confirmed && all_invalid_votes_disabled; + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?is_disputed, + ?is_included, + ?is_backed, + ?is_confirmed, + ?all_invalid_votes_disabled, + ?ignore_disabled, + "Checking for potential spam" + ); + (is_disputed && !is_included && !is_backed && !is_confirmed) || ignore_disabled } diff --git a/prdoc/pr_5252.prdoc b/prdoc/pr_5252.prdoc new file mode 100644 index 000000000000..fd4454ac3b9d --- /dev/null +++ b/prdoc/pr_5252.prdoc @@ -0,0 +1,11 @@ +title: Additional logging in `dispute-coordinator` subsystem + +doc: + - audience: Node Dev + description: | + Additional logging in `dispute-coordinator` subsystem tracing the list of offchain disabled + validators and the reason why an import statement is considered spam. + +crates: + - name: polkadot-node-core-dispute-coordinator + bump: patch \ No newline at end of file From 4780e3d07ff23a49e8f0a508138f83eb6e0d36c6 Mon Sep 17 00:00:00 2001 From: Alexandru Gheorghe <49718502+alexggh@users.noreply.github.com> Date: Fri, 16 Aug 2024 10:38:25 +0300 Subject: [PATCH 61/74] approval-distribution: Fix handling of conclude (#5375) After https://github.com/paritytech/polkadot-sdk/commit/0636ffdc3dfea52e90102403527ff99d2f2d6e7c approval-distribution did not terminate anymore if Conclude signal was received. This should have been caught by the subsystem tests, but it wasn't, because the subsystem is also exiting on error when the channels are dropped, so the test overseer was dropped, which made the subsystem exit and masked the problem. This PR fixes both the test and the subsystem.
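The control-flow pattern the fix restores, rendered as a toy synchronous example (all names here are hypothetical stand-ins for the real overseer types; the actual subsystem is async): the handler reports whether `Conclude` was seen, and the outer run loop, not the helper, performs the exit.

```rust
use std::sync::mpsc;

enum FromOrchestra {
	Communication(String),
	Conclude,
}

// Returns `true` when the subsystem should stop, mirroring the new signature
// of `handle_from_orchestra` in the patch below.
fn handle_from_orchestra(message: FromOrchestra) -> bool {
	match message {
		FromOrchestra::Communication(msg) => {
			println!("handling: {msg}");
			false
		},
		// A plain `return` here would only leave this helper, which is exactly
		// the bug: the run loop kept spinning after `Conclude`.
		FromOrchestra::Conclude => true,
	}
}

fn run(rx: mpsc::Receiver<FromOrchestra>) {
	while let Ok(message) = rx.recv() {
		if handle_from_orchestra(message) {
			return;
		}
	}
}

fn main() {
	let (tx, rx) = mpsc::channel();
	tx.send(FromOrchestra::Communication("assignment".into())).unwrap();
	tx.send(FromOrchestra::Conclude).unwrap();
	run(rx);
	println!("subsystem concluded");
}
```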
Signed-off-by: Alexandru Gheorghe --- .../node/network/approval-distribution/src/lib.rs | 11 ++++++++--- .../node/network/approval-distribution/src/tests.rs | 12 +++++++++--- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/polkadot/node/network/approval-distribution/src/lib.rs b/polkadot/node/network/approval-distribution/src/lib.rs index 3462aaef1f69..134586253ce1 100644 --- a/polkadot/node/network/approval-distribution/src/lib.rs +++ b/polkadot/node/network/approval-distribution/src/lib.rs @@ -2508,7 +2508,9 @@ impl ApprovalDistribution { }; - self.handle_from_orchestra(message, &mut approval_voting_sender, &mut network_sender, state, rng).await; + if self.handle_from_orchestra(message, &mut approval_voting_sender, &mut network_sender, state, rng).await { + return; + } }, } @@ -2516,6 +2518,8 @@ impl ApprovalDistribution { } /// Handles a from orchestra message received by approval distribution subystem. + /// + /// Returns `true` if the subsystem should be stopped. pub async fn handle_from_orchestra< N: overseer::SubsystemSender, A: overseer::SubsystemSender, @@ -2526,7 +2530,7 @@ impl ApprovalDistribution { network_sender: &mut N, state: &mut State, rng: &mut (impl CryptoRng + Rng), - ) { + ) -> bool { match message { FromOrchestra::Communication { msg } => Self::handle_incoming( @@ -2555,8 +2559,9 @@ impl ApprovalDistribution { gum::trace!(target: LOG_TARGET, number = %number, "finalized signal"); state.handle_block_finalized(network_sender, &self.metrics, number).await; }, - FromOrchestra::Signal(OverseerSignal::Conclude) => return, + FromOrchestra::Signal(OverseerSignal::Conclude) => return true, } + false } async fn handle_incoming< diff --git a/polkadot/node/network/approval-distribution/src/tests.rs b/polkadot/node/network/approval-distribution/src/tests.rs index 3ea722c51a92..1ca571721ea9 100644 --- a/polkadot/node/network/approval-distribution/src/tests.rs +++ b/polkadot/node/network/approval-distribution/src/tests.rs @@ -59,9 +59,13 @@ fn test_harness>( let subsystem = ApprovalDistribution::new(Default::default()); { let mut rng = rand_chacha::ChaCha12Rng::seed_from_u64(12345); - - let subsystem = - subsystem.run_inner(context, &mut state, REPUTATION_CHANGE_TEST_INTERVAL, &mut rng); + let (tx, rx) = oneshot::channel(); + let subsystem = async { + subsystem + .run_inner(context, &mut state, REPUTATION_CHANGE_TEST_INTERVAL, &mut rng) + .await; + tx.send(()).expect("Fail to notify subystem is done"); + }; + let test_fut = test_fn(virtual_overseer); @@ -76,6 +80,8 @@ fn test_harness>( .timeout(TIMEOUT) .await .expect("Conclude send timeout"); + let _ = + rx.timeout(Duration::from_secs(2)).await.expect("Subsystem did not conclude"); }, subsystem, )); From 74267881e765a01b1c7b3114c21b80dbe7686940 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Fri, 16 Aug 2024 12:29:04 +0200 Subject: [PATCH 62/74] Remove redundant minimal template workspace (#5330) This removes the workspace of the minimal template, which (I think) is redundant. The other two templates do not have such a workspace. The synchronized template created [its own workspace](https://github.com/paritytech/polkadot-sdk-minimal-template/blob/master/Cargo.toml) anyway, and the new readme replaced the old docs contained in `lib.rs`. Closes https://github.com/paritytech/polkadot-sdk-minimal-template/issues/11 Silent because the crate was private.
--- Cargo.lock | 13 ------- Cargo.toml | 1 - templates/minimal/Cargo.toml | 22 ----------- templates/minimal/src/lib.rs | 75 ------------------------------------ 4 files changed, 111 deletions(-) delete mode 100644 templates/minimal/Cargo.toml delete mode 100644 templates/minimal/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index df01a143efb6..4148b660cd3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8729,19 +8729,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "minimal-template" -version = "0.0.0" -dependencies = [ - "docify", - "minimal-template-node", - "minimal-template-runtime", - "pallet-minimal-template", - "polkadot-sdk-docs", - "polkadot-sdk-frame", - "simple-mermaid 0.1.1", -] - [[package]] name = "minimal-template-node" version = "0.0.0" diff --git a/Cargo.toml b/Cargo.toml index edb425e619c1..ebe9dc0dab3e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -522,7 +522,6 @@ members = [ "substrate/utils/prometheus", "substrate/utils/substrate-bip39", "substrate/utils/wasm-builder", - "templates/minimal", "templates/minimal/node", "templates/minimal/pallets/template", "templates/minimal/runtime", diff --git a/templates/minimal/Cargo.toml b/templates/minimal/Cargo.toml deleted file mode 100644 index ba96e139bcf1..000000000000 --- a/templates/minimal/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "minimal-template" -description = "A minimal template built with Substrate, part of Polkadot Sdk." -version = "0.0.0" -license = "Unlicense" -authors.workspace = true -homepage.workspace = true -repository.workspace = true -edition.workspace = true -publish = false - -[dependencies] -minimal-template-node = { workspace = true } -minimal-template-runtime = { workspace = true } -pallet-minimal-template = { workspace = true, default-features = true } -polkadot-sdk-docs = { workspace = true } - -frame = { workspace = true, default-features = true } - -# How we build docs in rust-docs -simple-mermaid = "0.1.1" -docify = { workspace = true } diff --git a/templates/minimal/src/lib.rs b/templates/minimal/src/lib.rs deleted file mode 100644 index 68825d190bb2..000000000000 --- a/templates/minimal/src/lib.rs +++ /dev/null @@ -1,75 +0,0 @@ -//! # Minimal Template -//! -//! This is a minimal template for creating a blockchain using the Polkadot SDK. -//! -//! ## Components -//! -//! The template consists of the following components: -//! -//! ### Node -//! -//! A minimal blockchain [`node`](`minimal_template_node`) that is capable of running a -//! runtime. It uses a simple chain specification, provides an option to choose Manual or -//! InstantSeal for consensus and exposes a few commands to interact with the node. -//! -//! ### Runtime -//! -//! A minimal [`runtime`](`minimal_template_runtime`) (or a state transition function) that -//! is capable of being run on the node. It is built using the [`FRAME`](`frame`) framework -//! that enables the composition of the core logic via separate modules called "pallets". -//! FRAME defines a complete DSL for building such pallets and the runtime itself. -//! -//! #### Transaction Fees -//! -//! The runtime charges a transaction fee for every transaction that is executed. The fee is -//! calculated based on the weight of the transaction (accouting for the execution time) and -//! length of the call data. Please refer to -//! 
[`benchmarking docs`](`polkadot_sdk_docs::reference_docs::frame_benchmarking_weight`) for -//! more information on how the weight is calculated. -//! -//! This template sets the fee as independent of the weight of the extrinsic and fixed for any -//! length of the call data for demo purposes. -//! -//! ### Pallet -//! -//! A minimal [`pallet`](`pallet_minimal_template`) that is built using FRAME. It is a unit of -//! encapsulated logic that has a clearly defined responsibility and can be linked to other pallets. -//! -//! ## Getting Started -//! -//! To get started with the template, follow the steps below: -//! -//! ### Build the Node -//! -//! Build the node using the following command: -//! -//! ```bash -//! cargo build -p minimal-template-node --release -//! ``` -//! -//! ### Run the Node -//! -//! Run the node using the following command: -//! -//! ```bash -//! ./target/release/minimal-template-node --dev -//! ``` -//! -//! ### CLI Options -//! -//! The node exposes a few options that can be used to interact with the node. To see the list of -//! available options, run the following command: -//! -//! ```bash -//! ./target/release/minimal-template-node --help -//! ``` -//! -//! #### Consensus Algorithm -//! -//! In order to run the node with a specific consensus algorithm, use the `--consensus` flag. For -//! example, to run the node with ManualSeal consensus with a block time of 5000ms, use the -//! following command: -//! -//! ```bash -//! ./target/release/minimal-template-node --dev --consensus manual-seal-5000 -//! ``` From fd522b818693965b6ec9e4cfdb4182c114d6105c Mon Sep 17 00:00:00 2001 From: Guillaume Thiolliere Date: Fri, 16 Aug 2024 23:09:06 +0900 Subject: [PATCH 63/74] Fix doc: start_destroy doesn't need asset to be frozen (#5204) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix https://github.com/paritytech/polkadot-sdk/issues/5184 `owner` can set himself as a `freezer` and freeze the asset so requirement is not really needed. And requirement is not implemented. --------- Co-authored-by: Bastian Köcher Co-authored-by: Oliver Tale-Yazdi --- prdoc/pr_5204.prdoc | 13 +++++++++++++ substrate/frame/assets/src/lib.rs | 2 -- substrate/frame/support/src/traits/tokens/misc.rs | 2 +- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 prdoc/pr_5204.prdoc diff --git a/prdoc/pr_5204.prdoc b/prdoc/pr_5204.prdoc new file mode 100644 index 000000000000..38a73b6b00ef --- /dev/null +++ b/prdoc/pr_5204.prdoc @@ -0,0 +1,13 @@ +title: "Pallet assets: fix doc: start_destroy never required asset to be frozen" + +doc: + - audience: Runtime Dev + description: | + In pallet assets calling `start_destroy` doesn't require the asset to be frozen. Doc is fixed. + + +crates: + - name: pallet-assets + bump: patch + - name: frame-support + bump: patch diff --git a/substrate/frame/assets/src/lib.rs b/substrate/frame/assets/src/lib.rs index 6f8ad0c29393..e909932bfc82 100644 --- a/substrate/frame/assets/src/lib.rs +++ b/substrate/frame/assets/src/lib.rs @@ -801,8 +801,6 @@ pub mod pallet { /// /// - `id`: The identifier of the asset to be destroyed. This must identify an existing /// asset. - /// - /// The asset class must be frozen before calling `start_destroy`. 
#[pallet::call_index(2)] pub fn start_destroy(origin: OriginFor, id: T::AssetIdParameter) -> DispatchResult { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { diff --git a/substrate/frame/support/src/traits/tokens/misc.rs b/substrate/frame/support/src/traits/tokens/misc.rs index 9fa1df862097..52d3e8c014b3 100644 --- a/substrate/frame/support/src/traits/tokens/misc.rs +++ b/substrate/frame/support/src/traits/tokens/misc.rs @@ -98,7 +98,7 @@ pub enum WithdrawConsequence { /// There has been an overflow in the system. This is indicative of a corrupt state and /// likely unrecoverable. Overflow, - /// Not enough of the funds in the account are unavailable for withdrawal. + /// Not enough of the funds in the account are available for withdrawal. Frozen, /// Account balance would reduce to zero, potentially destroying it. The parameter is the /// amount of balance which is destroyed. From feac7a521092c599d47df3e49084e6bff732c7db Mon Sep 17 00:00:00 2001 From: Nazar Mokrynskyi Date: Sun, 18 Aug 2024 08:23:46 +0300 Subject: [PATCH 64/74] Replace unnecessary `&mut self` with `&self` in `BlockImport::import_block()` (#5339) There was no need for it to be `&mut self`, since block import can happen concurrently for different blocks, and in many cases it was called through `&mut Arc` anyway :man_shrugging: Similar in nature to https://github.com/paritytech/polkadot-sdk/pull/4844 --- cumulus/client/consensus/common/src/lib.rs | 2 +- .../common/src/parachain_consensus.rs | 7 +-- cumulus/client/consensus/common/src/tests.rs | 4 +- cumulus/client/network/src/tests.rs | 2 +- .../src/lib.rs | 6 +- .../src/validate_block/tests.rs | 2 +- .../service/benches/validate_block_glutton.rs | 2 +- cumulus/test/service/src/bench_utils.rs | 2 +- polkadot/node/test/client/src/lib.rs | 4 +- .../xcm-executor/integration-tests/src/lib.rs | 10 ++-- prdoc/pr_5339.prdoc | 45 ++++++++++++++ .../bin/node/cli/benches/block_production.rs | 2 +- substrate/bin/node/cli/tests/basic.rs | 2 +- .../basic-authorship/src/basic_authorship.rs | 2 +- substrate/client/consensus/babe/src/lib.rs | 4 +- substrate/client/consensus/babe/src/tests.rs | 2 +- .../client/consensus/beefy/src/import.rs | 2 +- substrate/client/consensus/beefy/src/tests.rs | 2 +- .../consensus/common/src/block_import.rs | 15 +---- .../common/src/import_queue/basic_queue.rs | 2 +- .../consensus/grandpa/src/finality_proof.rs | 2 +- .../client/consensus/grandpa/src/import.rs | 18 +++--- .../client/consensus/grandpa/src/tests.rs | 8 +-- .../consensus/grandpa/src/voting_rule.rs | 4 +- .../consensus/grandpa/src/warp_proof.rs | 2 +- substrate/client/consensus/pow/src/lib.rs | 2 +- substrate/client/consensus/pow/src/worker.rs | 2 +- .../merkle-mountain-range/src/test_utils.rs | 2 +- substrate/client/network/src/bitswap/mod.rs | 2 +- substrate/client/network/sync/src/strategy.rs | 2 +- .../sync/src/strategy/chain_sync/test.rs | 58 +++++++++---------- .../client/network/test/src/block_import.rs | 2 +- substrate/client/network/test/src/lib.rs | 4 +- substrate/client/offchain/src/lib.rs | 2 +- .../client/rpc-spec-v2/src/archive/tests.rs | 16 ++--- .../src/chain_head/subscription/inner.rs | 2 +- .../rpc-spec-v2/src/chain_head/tests.rs | 52 ++++++++--------- substrate/client/rpc/src/chain/tests.rs | 8 +-- substrate/client/rpc/src/dev/tests.rs | 4 +- substrate/client/rpc/src/state/tests.rs | 8 +-- substrate/client/service/src/client/client.rs | 16 ++--- .../client/service/test/src/client/mod.rs | 40 ++++++------- .../client/transaction-pool/tests/pool.rs | 2 +-
substrate/test-utils/client/src/client_ext.rs | 31 +++++----- .../runtime/client/src/trait_tests.rs | 6 +- substrate/test-utils/runtime/src/lib.rs | 2 +- 46 files changed, 223 insertions(+), 193 deletions(-) create mode 100644 prdoc/pr_5339.prdoc diff --git a/cumulus/client/consensus/common/src/lib.rs b/cumulus/client/consensus/common/src/lib.rs index e12750dcc553..6766c2409c38 100644 --- a/cumulus/client/consensus/common/src/lib.rs +++ b/cumulus/client/consensus/common/src/lib.rs @@ -185,7 +185,7 @@ where } async fn import_block( - &mut self, + &self, mut params: sc_consensus::BlockImportParams, ) -> Result { // Blocks are stored within the backend by using POST hash. diff --git a/cumulus/client/consensus/common/src/parachain_consensus.rs b/cumulus/client/consensus/common/src/parachain_consensus.rs index 944917673b11..861354ed63c3 100644 --- a/cumulus/client/consensus/common/src/parachain_consensus.rs +++ b/cumulus/client/consensus/common/src/parachain_consensus.rs @@ -433,11 +433,8 @@ async fn handle_new_best_parachain_head( } } -async fn import_block_as_new_best( - hash: Block::Hash, - header: Block::Header, - mut parachain: &P, -) where +async fn import_block_as_new_best(hash: Block::Hash, header: Block::Header, parachain: &P) +where Block: BlockT, P: UsageProvider + Send + Sync + BlockBackend, for<'a> &'a P: BlockImport, diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index 284fa39ed1e7..06f90330d474 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -321,7 +321,7 @@ fn build_block( } async fn import_block>( - importer: &mut I, + importer: &I, block: Block, origin: BlockOrigin, import_as_best: bool, @@ -568,7 +568,7 @@ fn follow_finalized_does_not_stop_on_unknown_block() { fn follow_new_best_sets_best_after_it_is_imported() { sp_tracing::try_init_simple(); - let mut client = Arc::new(TestClientBuilder::default().build()); + let client = Arc::new(TestClientBuilder::default().build()); let block = build_and_import_block(client.clone(), false); diff --git a/cumulus/client/network/src/tests.rs b/cumulus/client/network/src/tests.rs index 18d121c41d16..cde73c4c5180 100644 --- a/cumulus/client/network/src/tests.rs +++ b/cumulus/client/network/src/tests.rs @@ -612,7 +612,7 @@ fn relay_parent_not_imported_when_block_announce_is_processed() { block_on(async move { let (mut validator, api) = make_validator_and_api(); - let mut client = api.relay_client.clone(); + let client = api.relay_client.clone(); let block = client.init_polkadot_block_builder().build().expect("Build new block").block; let (signal, header) = make_gossip_message_and_header(api, block.hash(), 0).await; diff --git a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs index 38ba84748c1e..c796dc5f7c38 100644 --- a/cumulus/client/relay-chain-inprocess-interface/src/lib.rs +++ b/cumulus/client/relay-chain-inprocess-interface/src/lib.rs @@ -423,7 +423,7 @@ mod tests { #[test] fn returns_directly_for_available_block() { - let (mut client, block, relay_chain_interface) = build_client_backend_and_block(); + let (client, block, relay_chain_interface) = build_client_backend_and_block(); let hash = block.hash(); block_on(client.import(BlockOrigin::Own, block)).expect("Imports the block"); @@ -439,7 +439,7 @@ mod tests { #[test] fn resolve_after_block_import_notification_was_received() { - let (mut client, block, relay_chain_interface) = 
build_client_backend_and_block(); + let (client, block, relay_chain_interface) = build_client_backend_and_block(); let hash = block.hash(); block_on(async move { @@ -468,7 +468,7 @@ mod tests { #[test] fn do_not_resolve_after_different_block_import_notification_was_received() { - let (mut client, block, relay_chain_interface) = build_client_backend_and_block(); + let (client, block, relay_chain_interface) = build_client_backend_and_block(); let hash = block.hash(); let ext = construct_transfer_extrinsic( diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 1527492f5784..a44d17507810 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -312,7 +312,7 @@ fn validation_params_and_memory_optimized_validation_params_encode_and_decode() fn validate_block_works_with_child_tries() { sp_tracing::try_init_simple(); - let (mut client, parent_head) = create_test_client(); + let (client, parent_head) = create_test_client(); let TestBlockData { block, .. } = build_block_with_witness( &client, vec![generate_extrinsic(&client, Charlie, TestPalletCall::read_and_write_child_tries {})], diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index 6ec338c7f142..6fe26519a3eb 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -43,7 +43,7 @@ use sp_runtime::traits::Header as HeaderT; use cumulus_test_service::bench_utils as utils; async fn import_block( - mut client: &cumulus_test_client::Client, + client: &cumulus_test_client::Client, built: cumulus_test_runtime::Block, import_existing: bool, ) { diff --git a/cumulus/test/service/src/bench_utils.rs b/cumulus/test/service/src/bench_utils.rs index 4ace894b392a..67ffbdd1d212 100644 --- a/cumulus/test/service/src/bench_utils.rs +++ b/cumulus/test/service/src/bench_utils.rs @@ -111,7 +111,7 @@ pub fn extrinsic_set_validation_data( } /// Import block into the given client and make sure the import was successful -pub async fn import_block(mut client: &TestClient, block: &NodeBlock, import_existing: bool) { +pub async fn import_block(client: &TestClient, block: &NodeBlock, import_existing: bool) { let mut params = BlockImportParams::new(BlockOrigin::File, block.header.clone()); params.body = Some(block.extrinsics.clone()); params.state_action = StateAction::Execute; diff --git a/polkadot/node/test/client/src/lib.rs b/polkadot/node/test/client/src/lib.rs index 6b205c09f2f3..498994d9a0a8 100644 --- a/polkadot/node/test/client/src/lib.rs +++ b/polkadot/node/test/client/src/lib.rs @@ -102,7 +102,7 @@ mod tests { #[test] fn ensure_test_client_can_build_and_import_block() { - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let block_builder = client.init_polkadot_block_builder(); let block = block_builder.build().expect("Finalizes the block").block; @@ -113,7 +113,7 @@ mod tests { #[test] fn ensure_test_client_can_push_extrinsic() { - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let transfer = construct_transfer_extrinsic( &client, diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index 279d7118f8cf..7683c6025392 100644 --- 
a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -34,7 +34,7 @@ use xcm_executor::traits::WeightBounds; #[test] fn basic_buy_fees_message_executes() { sp_tracing::try_init_simple(); - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let msg = Xcm(vec![ WithdrawAsset((Parent, 100).into()), @@ -75,7 +75,7 @@ fn basic_buy_fees_message_executes() { #[test] fn transact_recursion_limit_works() { sp_tracing::try_init_simple(); - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let base_xcm = |call: polkadot_test_runtime::RuntimeCall| { Xcm(vec![ @@ -174,7 +174,7 @@ fn query_response_fires() { use polkadot_test_runtime::RuntimeEvent::TestNotifier; sp_tracing::try_init_simple(); - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let mut block_builder = client.init_polkadot_block_builder(); @@ -256,7 +256,7 @@ fn query_response_elicits_handler() { use polkadot_test_runtime::RuntimeEvent::TestNotifier; sp_tracing::try_init_simple(); - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let mut block_builder = client.init_polkadot_block_builder(); @@ -332,7 +332,7 @@ fn query_response_elicits_handler() { #[test] fn deposit_reserve_asset_works_for_any_xcm_sender() { sp_tracing::try_init_simple(); - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); // Init values for the simulated origin Parachain let amount_to_send: u128 = 1_000_000_000_000; diff --git a/prdoc/pr_5339.prdoc b/prdoc/pr_5339.prdoc new file mode 100644 index 000000000000..850ba903e126 --- /dev/null +++ b/prdoc/pr_5339.prdoc @@ -0,0 +1,45 @@ +title: Replace unnecessary `&mut self` with `&self` in `BlockImport::import_block()` + +doc: + - audience: Node Dev + description: | + Simplifies the block import API to match the intended design, where independent blocks can technically be imported + concurrently; in practice the import was called through `Arc` anyway + +crates: + - name: cumulus-client-consensus-common + bump: major + - name: cumulus-client-network + bump: none + - name: cumulus-relay-chain-inprocess-interface + bump: none + - name: cumulus-pallet-parachain-system + bump: none + - name: sc-basic-authorship + bump: patch + - name: sc-consensus-babe + bump: major + - name: sc-consensus-beefy + bump: major + - name: sc-consensus + bump: major + - name: sc-consensus-grandpa + bump: major + - name: sc-consensus-pow + bump: major + - name: mmr-gadget + bump: none + - name: sc-network + bump: none + - name: sc-network-sync + bump: none + - name: sc-offchain + bump: none + - name: sc-rpc-spec-v2 + bump: none + - name: sc-rpc + bump: none + - name: sc-service + bump: major + - name: sc-transaction-pool + bump: none diff --git a/substrate/bin/node/cli/benches/block_production.rs b/substrate/bin/node/cli/benches/block_production.rs index 8239637b3a9f..9ac9d8b4f67d 100644 --- a/substrate/bin/node/cli/benches/block_production.rs +++ b/substrate/bin/node/cli/benches/block_production.rs @@ -124,7 +124,7 @@ fn extrinsic_set_time(now: u64) -> OpaqueExtrinsic { .into() } -fn import_block(mut client: &FullClient, built: BuiltBlock) { +fn import_block(client: &FullClient, built: BuiltBlock) { let mut params = BlockImportParams::new(BlockOrigin::File, built.block.header); params.state_action =
StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(built.storage_changes)); diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index 0a2e3fd25047..037ddbb1e47b 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -850,7 +850,7 @@ fn should_import_block_with_test_client() { sp_consensus::BlockOrigin, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let block1 = changes_trie_block(); let block_data = block1.0; let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index 74805488792a..527a3d12d9e7 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -852,7 +852,7 @@ mod tests { block }; - let import_and_maintain = |mut client: Arc, block: TestBlock| { + let import_and_maintain = |client: Arc, block: TestBlock| { let hash = block.hash(); block_on(client.import(BlockOrigin::Own, block)).unwrap(); block_on(txpool.maintain(chain_event( diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 0c1eb8875864..9770b16871e1 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -1342,7 +1342,7 @@ where // This function makes multiple transactions to the DB. If one of them fails we may // end up in an inconsistent state and have to resync. async fn import_state( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); @@ -1405,7 +1405,7 @@ where type Error = ConsensusError; async fn import_block( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 6f805188b9a4..5c2e0eae959c 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -150,7 +150,7 @@ where type Error = BI::Error; async fn import_block( - &mut self, + &self, block: BlockImportParams, ) -> Result { Ok(self.0.import_block(block).await.expect("importing block failed")) diff --git a/substrate/client/consensus/beefy/src/import.rs b/substrate/client/consensus/beefy/src/import.rs index 848026852933..17a4a5866636 100644 --- a/substrate/client/consensus/beefy/src/import.rs +++ b/substrate/client/consensus/beefy/src/import.rs @@ -132,7 +132,7 @@ where type Error = ConsensusError; async fn import_block( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); diff --git a/substrate/client/consensus/beefy/src/tests.rs b/substrate/client/consensus/beefy/src/tests.rs index 4b1dd447320b..afa6191d8bfe 100644 --- a/substrate/client/consensus/beefy/src/tests.rs +++ b/substrate/client/consensus/beefy/src/tests.rs @@ -790,7 +790,7 @@ async fn beefy_importing_justifications() { let client = net.peer(0).client().clone(); let full_client = client.as_client(); - let (mut block_import, _, peer_data) = net.make_block_import(client.clone()); + let (block_import, _, peer_data) = net.make_block_import(client.clone()); let PeerData { beefy_voter_links, .. 
} = peer_data; let justif_stream = beefy_voter_links.lock().take().unwrap().from_block_import_justif_stream; let mut justif_recv = justif_stream.subscribe(100_000); diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index c5adbb5a5fca..4d7b89f37d86 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -310,10 +310,7 @@ pub trait BlockImport { async fn check_block(&self, block: BlockCheckParams) -> Result; /// Import a block. - async fn import_block( - &mut self, - block: BlockImportParams, - ) -> Result; + async fn import_block(&self, block: BlockImportParams) -> Result; } #[async_trait::async_trait] @@ -326,10 +323,7 @@ impl BlockImport for crate::import_queue::BoxBlockImport { } /// Import a block. - async fn import_block( - &mut self, - block: BlockImportParams, - ) -> Result { + async fn import_block(&self, block: BlockImportParams) -> Result { (**self).import_block(block).await } } @@ -346,10 +340,7 @@ where (&**self).check_block(block).await } - async fn import_block( - &mut self, - block: BlockImportParams, - ) -> Result { + async fn import_block(&self, block: BlockImportParams) -> Result { (&**self).import_block(block).await } } diff --git a/substrate/client/consensus/common/src/import_queue/basic_queue.rs b/substrate/client/consensus/common/src/import_queue/basic_queue.rs index 6a307acb2517..7b371145e2e7 100644 --- a/substrate/client/consensus/common/src/import_queue/basic_queue.rs +++ b/substrate/client/consensus/common/src/import_queue/basic_queue.rs @@ -525,7 +525,7 @@ mod tests { } async fn import_block( - &mut self, + &self, _block: BlockImportParams, ) -> Result { Ok(ImportResult::imported(true)) diff --git a/substrate/client/consensus/grandpa/src/finality_proof.rs b/substrate/client/consensus/grandpa/src/finality_proof.rs index af965f2e4ae6..8a47d121e869 100644 --- a/substrate/client/consensus/grandpa/src/finality_proof.rs +++ b/substrate/client/consensus/grandpa/src/finality_proof.rs @@ -319,7 +319,7 @@ mod tests { ) -> (Arc, Arc, Vec) { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let mut blocks = Vec::new(); for _ in 0..number_of_blocks { diff --git a/substrate/client/consensus/grandpa/src/import.rs b/substrate/client/consensus/grandpa/src/import.rs index 8b7b02f180ec..5cec5204c739 100644 --- a/substrate/client/consensus/grandpa/src/import.rs +++ b/substrate/client/consensus/grandpa/src/import.rs @@ -20,6 +20,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use codec::Decode; use log::debug; +use parking_lot::Mutex; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_consensus::{ @@ -62,7 +63,8 @@ pub struct GrandpaBlockImport { select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: TracingUnboundedSender>>, - authority_set_hard_forks: HashMap>>, + authority_set_hard_forks: + Mutex>>>, justification_sender: GrandpaJustificationSender, telemetry: Option, _phantom: PhantomData, @@ -78,7 +80,7 @@ impl Clone select_chain: self.select_chain.clone(), authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), - authority_set_hard_forks: self.authority_set_hard_forks.clone(), + authority_set_hard_forks: Mutex::new(self.authority_set_hard_forks.lock().clone()), justification_sender: self.justification_sender.clone(), 
telemetry: self.telemetry.clone(), _phantom: PhantomData, @@ -242,7 +244,7 @@ where hash: Block::Hash, ) -> Option>> { // check for forced authority set hard forks - if let Some(change) = self.authority_set_hard_forks.get(&hash) { + if let Some(change) = self.authority_set_hard_forks.lock().get(&hash) { return Some(change.clone()) } @@ -461,7 +463,7 @@ where /// Import whole new state and reset authority set. async fn import_state( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); @@ -474,7 +476,7 @@ where // We've just imported a new state. We trust the sync module has verified // finality proofs and that the state is correct and final. // So we can read the authority list and set id from the state. - self.authority_set_hard_forks.clear(); + self.authority_set_hard_forks.lock().clear(); let authorities = self .inner .runtime_api() @@ -523,7 +525,7 @@ where type Error = ConsensusError; async fn import_block( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let hash = block.post_hash(); @@ -750,7 +752,7 @@ impl GrandpaBlockImport, justification: Justification, diff --git a/substrate/client/consensus/grandpa/src/tests.rs b/substrate/client/consensus/grandpa/src/tests.rs index 2aa1b5f6ee1b..9cca28a39599 100644 --- a/substrate/client/consensus/grandpa/src/tests.rs +++ b/substrate/client/consensus/grandpa/src/tests.rs @@ -906,7 +906,7 @@ async fn allows_reimporting_change_blocks() { let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import(client.clone()); + let (block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_client(); let mut builder = BlockBuilderBuilder::new(&*full_client) @@ -954,7 +954,7 @@ async fn test_bad_justification() { let mut net = GrandpaTestNet::new(api.clone(), 3, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import(client.clone()); + let (block_import, ..) = net.make_block_import(client.clone()); let full_client = client.as_client(); let mut builder = BlockBuilderBuilder::new(&*full_client) @@ -2083,7 +2083,7 @@ async fn imports_justification_for_regular_blocks_on_import() { let mut net = GrandpaTestNet::new(api.clone(), 1, 0); let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import(client.clone()); + let (block_import, ..) 
= net.make_block_import(client.clone()); let full_client = client.as_client(); // create a new block (without importing it) @@ -2122,7 +2122,7 @@ async fn imports_justification_for_regular_blocks_on_import() { GrandpaJustification::from_commit(&full_client, round, commit).unwrap() }; - let mut generate_and_import_block_with_justification = |parent| { + let generate_and_import_block_with_justification = |parent| { // we import the block with justification attached let block = generate_block(parent); let block_hash = block.hash(); diff --git a/substrate/client/consensus/grandpa/src/voting_rule.rs b/substrate/client/consensus/grandpa/src/voting_rule.rs index c37596d20f68..c1d3cd2fbd6a 100644 --- a/substrate/client/consensus/grandpa/src/voting_rule.rs +++ b/substrate/client/consensus/grandpa/src/voting_rule.rs @@ -367,7 +367,7 @@ mod tests { // where each subtracts 50 blocks from the current target let rule = VotingRulesBuilder::new().add(Subtract(50)).add(Subtract(50)).build(); - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let mut hashes = Vec::with_capacity(200); for _ in 0..200 { @@ -416,7 +416,7 @@ mod tests { fn before_best_by_has_cutoff_at_base() { let rule = BeforeBestBlockBy(2); - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let n = 5; let mut hashes = Vec::with_capacity(n); diff --git a/substrate/client/consensus/grandpa/src/warp_proof.rs b/substrate/client/consensus/grandpa/src/warp_proof.rs index c836ab09fd5d..a79581b1e9f1 100644 --- a/substrate/client/consensus/grandpa/src/warp_proof.rs +++ b/substrate/client/consensus/grandpa/src/warp_proof.rs @@ -338,7 +338,7 @@ mod tests { let mut rng = rand::rngs::StdRng::from_seed([0; 32]); let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let available_authorities = Ed25519Keyring::iter().collect::>(); let genesis_authorities = vec![(Ed25519Keyring::Alice.public().into(), 1)]; diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index 50e9533abb36..cd7da128549f 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -317,7 +317,7 @@ where } async fn import_block( - &mut self, + &self, mut block: BlockImportParams, ) -> Result { let best_header = self diff --git a/substrate/client/consensus/pow/src/worker.rs b/substrate/client/consensus/pow/src/worker.rs index 9e9c4fc137d8..73400136483a 100644 --- a/substrate/client/consensus/pow/src/worker.rs +++ b/substrate/client/consensus/pow/src/worker.rs @@ -192,7 +192,7 @@ where import_block.insert_intermediate(INTERMEDIATE_KEY, intermediate); let header = import_block.post_header(); - let mut block_import = self.block_import.lock(); + let block_import = self.block_import.lock(); match block_import.import_block(import_block).await { Ok(res) => { diff --git a/substrate/client/merkle-mountain-range/src/test_utils.rs b/substrate/client/merkle-mountain-range/src/test_utils.rs index fcf9fa25b593..3b0506ef55d3 100644 --- a/substrate/client/merkle-mountain-range/src/test_utils.rs +++ b/substrate/client/merkle-mountain-range/src/test_utils.rs @@ -122,7 +122,7 @@ impl MockClient { name: &[u8], maybe_leaf_idx: Option, ) -> MmrBlock { - let mut client = self.client.lock(); + let client = self.client.lock(); let hash = 
client.expect_block_hash_from_id(&at).unwrap(); let mut block_builder = BlockBuilderBuilder::new(&*client) diff --git a/substrate/client/network/src/bitswap/mod.rs b/substrate/client/network/src/bitswap/mod.rs index 22f1973adcb2..1e20572eeeb1 100644 --- a/substrate/client/network/src/bitswap/mod.rs +++ b/substrate/client/network/src/bitswap/mod.rs @@ -468,7 +468,7 @@ mod tests { #[tokio::test] async fn transaction_found() { - let mut client = TestClientBuilder::with_tx_storage(u32::MAX).build(); + let client = TestClientBuilder::with_tx_storage(u32::MAX).build(); let mut block_builder = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) diff --git a/substrate/client/network/sync/src/strategy.rs b/substrate/client/network/sync/src/strategy.rs index 72f84d1c286e..58befe94e84a 100644 --- a/substrate/client/network/sync/src/strategy.rs +++ b/substrate/client/network/sync/src/strategy.rs @@ -617,7 +617,7 @@ mod test { #[test] fn set_target_block_finished_warp_sync() { // Populate database with finalized state. - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/substrate/client/network/sync/src/strategy/chain_sync/test.rs b/substrate/client/network/sync/src/strategy/chain_sync/test.rs index cd955113542b..39d0c8f8d4d6 100644 --- a/substrate/client/network/sync/src/strategy/chain_sync/test.rs +++ b/substrate/client/network/sync/src/strategy/chain_sync/test.rs @@ -91,7 +91,7 @@ fn processes_empty_response_on_justification_request_for_unknown_block() { #[test] fn restart_doesnt_affect_peers_downloading_finality_data() { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); // we request max 8 blocks to always initiate block requests to both peers for the test to be // deterministic @@ -103,7 +103,7 @@ fn restart_doesnt_affect_peers_downloading_finality_data() { let peer_id2 = PeerId::random(); let peer_id3 = PeerId::random(); - let mut new_blocks = |n| { + let new_blocks = |n| { for _ in 0..n { let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().best_hash) @@ -242,12 +242,12 @@ fn get_block_request( } /// Build and import a new best block. 
-fn build_block(client: &mut Arc, at: Option, fork: bool) -> Block { +fn build_block(client: &TestClient, at: Option, fork: bool) -> Block { let at = at.unwrap_or_else(|| client.info().best_hash); - let mut block_builder = BlockBuilderBuilder::new(&**client) + let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(at) - .fetch_parent_block_number(&**client) + .fetch_parent_block_number(client) .unwrap() .build() .unwrap(); @@ -282,13 +282,13 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { sp_tracing::try_init_simple(); let blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = TestClientBuilder::new().build(); (0..MAX_DOWNLOAD_AHEAD * 2) - .map(|_| build_block(&mut client, None, false)) + .map(|_| build_block(&client, None, false)) .collect::>() }; - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let info = client.info(); let mut sync = @@ -415,13 +415,13 @@ fn do_ancestor_search_when_common_block_to_best_queued_gap_is_to_big() { fn can_sync_huge_fork() { sp_tracing::try_init_simple(); - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) - .map(|_| build_block(&mut client, None, false)) + .map(|_| build_block(&client, None, false)) .collect::>(); let fork_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = TestClientBuilder::new().build(); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] .into_iter() .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) @@ -431,8 +431,7 @@ fn can_sync_huge_fork() { fork_blocks .into_iter() .chain( - (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)), + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1).map(|_| build_block(&client, None, true)), ) .collect::>() }; @@ -550,13 +549,13 @@ fn can_sync_huge_fork() { fn syncs_fork_without_duplicate_requests() { sp_tracing::try_init_simple(); - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let blocks = (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 4) - .map(|_| build_block(&mut client, None, false)) + .map(|_| build_block(&client, None, false)) .collect::>(); let fork_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = TestClientBuilder::new().build(); let fork_blocks = blocks[..MAX_BLOCKS_TO_LOOK_BACKWARDS as usize * 2] .into_iter() .inspect(|b| block_on(client.import(BlockOrigin::Own, (*b).clone())).unwrap()) @@ -566,8 +565,7 @@ fn syncs_fork_without_duplicate_requests() { fork_blocks .into_iter() .chain( - (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1) - .map(|_| build_block(&mut client, None, true)), + (0..MAX_BLOCKS_TO_LOOK_BACKWARDS * 2 + 1).map(|_| build_block(&client, None, true)), ) .collect::>() }; @@ -708,8 +706,8 @@ fn syncs_fork_without_duplicate_requests() { #[test] fn removes_target_fork_on_disconnect() { sp_tracing::try_init_simple(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..3).map(|_| build_block(&mut client, None, false)).collect::>(); + let client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..3).map(|_| build_block(&client, None, false)).collect::>(); let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, std::iter::empty()) @@ -733,8 
+731,8 @@ fn removes_target_fork_on_disconnect() { #[test] fn can_import_response_with_missing_blocks() { sp_tracing::try_init_simple(); - let mut client2 = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..4).map(|_| build_block(&mut client2, None, false)).collect::>(); + let client2 = TestClientBuilder::new().build(); + let blocks = (0..4).map(|_| build_block(&client2, None, false)).collect::>(); let empty_client = Arc::new(TestClientBuilder::new().build()); @@ -770,14 +768,14 @@ fn ancestor_search_repeat() { #[test] fn sync_restart_removes_block_but_not_justification_requests() { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = Arc::new(TestClientBuilder::new().build()); let mut sync = ChainSync::new(ChainSyncMode::Full, client.clone(), 1, 64, None, std::iter::empty()) .unwrap(); let peers = vec![PeerId::random(), PeerId::random()]; - let mut new_blocks = |n| { + let new_blocks = |n| { for _ in 0..n { let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().best_hash) @@ -880,11 +878,11 @@ fn sync_restart_removes_block_but_not_justification_requests() { fn request_across_forks() { sp_tracing::try_init_simple(); - let mut client = Arc::new(TestClientBuilder::new().build()); - let blocks = (0..100).map(|_| build_block(&mut client, None, false)).collect::>(); + let client = Arc::new(TestClientBuilder::new().build()); + let blocks = (0..100).map(|_| build_block(&client, None, false)).collect::>(); let fork_a_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = TestClientBuilder::new().build(); let mut fork_blocks = blocks[..] .into_iter() .inspect(|b| { @@ -894,13 +892,13 @@ fn request_across_forks() { .cloned() .collect::>(); for _ in 0..10 { - fork_blocks.push(build_block(&mut client, None, false)); + fork_blocks.push(build_block(&client, None, false)); } fork_blocks }; let fork_b_blocks = { - let mut client = Arc::new(TestClientBuilder::new().build()); + let client = TestClientBuilder::new().build(); let mut fork_blocks = blocks[..] 
.into_iter() .inspect(|b| { @@ -910,7 +908,7 @@ fn request_across_forks() { .cloned() .collect::>(); for _ in 0..10 { - fork_blocks.push(build_block(&mut client, None, true)); + fork_blocks.push(build_block(&client, None, true)); } fork_blocks }; diff --git a/substrate/client/network/test/src/block_import.rs b/substrate/client/network/test/src/block_import.rs index dad5a27e9535..8c120d9de662 100644 --- a/substrate/client/network/test/src/block_import.rs +++ b/substrate/client/network/test/src/block_import.rs @@ -32,7 +32,7 @@ use substrate_test_runtime_client::{ }; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let block = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/substrate/client/network/test/src/lib.rs b/substrate/client/network/test/src/lib.rs index 6ac20842a33f..9285306948a6 100644 --- a/substrate/client/network/test/src/lib.rs +++ b/substrate/client/network/test/src/lib.rs @@ -217,7 +217,7 @@ impl BlockImport for PeersClient { } async fn import_block( - &mut self, + &self, block: BlockImportParams, ) -> Result { self.client.import_block(block).await @@ -607,7 +607,7 @@ where } async fn import_block( - &mut self, + &self, block: BlockImportParams, ) -> Result { self.inner.import_block(block).await diff --git a/substrate/client/offchain/src/lib.rs b/substrate/client/offchain/src/lib.rs index 48d3b8f1393a..7cee64e6ce7e 100644 --- a/substrate/client/offchain/src/lib.rs +++ b/substrate/client/offchain/src/lib.rs @@ -481,7 +481,7 @@ mod tests { let (client, backend) = substrate_test_runtime_client::TestClientBuilder::new() .enable_offchain_indexing_api() .build_with_backend(); - let mut client = Arc::new(client); + let client = Arc::new(client); let offchain_db = backend.offchain_storage().unwrap(); let key = &b"hello"[..]; diff --git a/substrate/client/rpc-spec-v2/src/archive/tests.rs b/substrate/client/rpc-spec-v2/src/archive/tests.rs index de71ed82a128..078016f5b3e2 100644 --- a/substrate/client/rpc-spec-v2/src/archive/tests.rs +++ b/substrate/client/rpc-spec-v2/src/archive/tests.rs @@ -96,7 +96,7 @@ async fn archive_genesis() { #[tokio::test] async fn archive_body() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -130,7 +130,7 @@ async fn archive_body() { #[tokio::test] async fn archive_header() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Invalid block hash. let invalid_hash = hex_string(&INVALID_HASH); @@ -176,7 +176,7 @@ async fn archive_finalized_height() { #[tokio::test] async fn archive_hash_by_height() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); // Genesis height. 
let hashes: Vec = api.call("archive_unstable_hashByHeight", [0]).await.unwrap(); @@ -282,7 +282,7 @@ async fn archive_hash_by_height() { #[tokio::test] async fn archive_call() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let invalid_hash = hex_string(&INVALID_HASH); // Invalid parameter (non-hex). @@ -341,7 +341,7 @@ async fn archive_call() { #[tokio::test] async fn archive_storage_hashes_values() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); let block = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().genesis_hash) @@ -431,7 +431,7 @@ async fn archive_storage_hashes_values() { #[tokio::test] async fn archive_storage_closest_merkle_value() { - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, MAX_QUERIED_LIMIT); /// The core of this test. /// @@ -592,7 +592,7 @@ async fn archive_storage_closest_merkle_value() { #[tokio::test] async fn archive_storage_paginate_iterations() { // 1 iteration allowed before pagination kicks in. - let (mut client, api) = setup_api(1, MAX_QUERIED_LIMIT); + let (client, api) = setup_api(1, MAX_QUERIED_LIMIT); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) @@ -787,7 +787,7 @@ async fn archive_storage_paginate_iterations() { #[tokio::test] async fn archive_storage_discarded_items() { // One query at a time - let (mut client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); + let (client, api) = setup_api(MAX_PAGINATION_LIMIT, 1); // Import a new block with storage changes. let mut builder = BlockBuilderBuilder::new(&*client) diff --git a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs index d4d616f54dc8..14325b4fbb98 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/subscription/inner.rs @@ -890,7 +890,7 @@ mod tests { } fn produce_blocks( - mut client: Arc>>, + client: Arc>>, num_blocks: usize, ) -> Vec<::Hash> { let mut blocks = Vec::with_capacity(num_blocks); diff --git a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs index b195e05b6649..38f091471f87 100644 --- a/substrate/client/rpc-spec-v2/src/chain_head/tests.rs +++ b/substrate/client/rpc-spec-v2/src/chain_head/tests.rs @@ -137,7 +137,7 @@ async fn setup_api() -> ( CHILD_VALUE.to_vec(), ); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -186,7 +186,7 @@ async fn setup_api() -> ( } async fn import_block( - mut client: Arc>, + client: Arc>, parent_hash: ::Hash, parent_number: u64, ) -> Block { @@ -203,7 +203,7 @@ async fn import_block( } async fn import_best_block_with_tx( - mut client: Arc>, + client: Arc>, parent_hash: ::Hash, parent_number: u64, tx: Transfer, @@ -245,7 +245,7 @@ macro_rules! 
check_new_and_best_block_events { async fn follow_subscription_produces_blocks() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -316,7 +316,7 @@ async fn follow_subscription_produces_blocks() { async fn follow_with_runtime() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -469,7 +469,7 @@ async fn get_header() { #[tokio::test] async fn get_body() { - let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; + let (client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); let invalid_hash = hex_string(&INVALID_HASH); @@ -626,7 +626,7 @@ async fn call_runtime() { async fn call_runtime_without_flag() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -691,7 +691,7 @@ async fn call_runtime_without_flag() { #[tokio::test] async fn get_storage_hash() { - let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; + let (client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.header.hash()); let invalid_hash = hex_string(&INVALID_HASH); let key = hex_string(&KEY); @@ -835,7 +835,7 @@ async fn get_storage_hash() { #[tokio::test] async fn get_storage_multi_query_iter() { - let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; + let (client, api, mut block_sub, sub_id, block) = setup_api().await; let key = hex_string(&KEY); // Import a new block with storage changes. 
@@ -959,7 +959,7 @@ async fn get_storage_multi_query_iter() { #[tokio::test] async fn get_storage_value() { - let (mut client, api, mut block_sub, sub_id, block) = setup_api().await; + let (client, api, mut block_sub, sub_id, block) = setup_api().await; let block_hash = format!("{:?}", block.hash()); let invalid_hash = hex_string(&INVALID_HASH); let key = hex_string(&KEY); @@ -1287,7 +1287,7 @@ async fn unique_operation_ids() { async fn separate_operation_ids_for_subscriptions() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1375,7 +1375,7 @@ async fn separate_operation_ids_for_subscriptions() { async fn follow_generates_initial_blocks() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1533,7 +1533,7 @@ async fn follow_generates_initial_blocks() { async fn follow_exceeding_pinned_blocks() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1612,7 +1612,7 @@ async fn follow_exceeding_pinned_blocks() { async fn follow_with_unpin() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1720,7 +1720,7 @@ async fn follow_with_unpin() { async fn unpin_duplicate_hashes() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1825,7 +1825,7 @@ async fn unpin_duplicate_hashes() { async fn follow_with_multiple_unpin_hashes() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -1972,7 +1972,7 @@ async fn follow_with_multiple_unpin_hashes() { async fn follow_prune_best_block() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -2160,7 +2160,7 @@ async fn follow_prune_best_block() { async fn follow_forks_pruned_block() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -2322,7 +2322,7 @@ async fn follow_forks_pruned_block() { async fn follow_report_multiple_pruned_block() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); let api = ChainHead::new( client.clone(), @@ -2559,7 +2559,7 @@ async fn pin_block_references() { ) .unwrap(); - let mut client = Arc::new( + let client = Arc::new( new_in_mem::<_, Block, _, RuntimeApi>( backend.clone(), executor, @@ -2705,7 +2705,7 @@ async fn pin_block_references() { async fn follow_finalized_before_new_block() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = 
Arc::new(builder.build()); + let client = Arc::new(builder.build()); let client_mock = Arc::new(ChainHeadMockClient::new(client.clone())); @@ -2823,7 +2823,7 @@ async fn ensure_operation_limits_works() { CHILD_VALUE.to_vec(), ); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); // Configure the chainHead with maximum 1 ongoing operations. let api = ChainHead::new( @@ -2930,7 +2930,7 @@ async fn check_continue_operation() { CHILD_VALUE.to_vec(), ); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); // Configure the chainHead with maximum 1 item before asking for pagination. let api = ChainHead::new( @@ -3115,7 +3115,7 @@ async fn stop_storage_operation() { CHILD_VALUE.to_vec(), ); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); // Configure the chainHead with maximum 1 item before asking for pagination. let api = ChainHead::new( @@ -3221,7 +3221,7 @@ async fn stop_storage_operation() { #[tokio::test] async fn storage_closest_merkle_value() { - let (mut client, api, mut sub, sub_id, block) = setup_api().await; + let (client, api, mut sub, sub_id, block) = setup_api().await; /// The core of this test. /// @@ -3414,7 +3414,7 @@ async fn storage_closest_merkle_value() { async fn chain_head_stop_all_subscriptions() { let builder = TestClientBuilder::new(); let backend = builder.backend(); - let mut client = Arc::new(builder.build()); + let client = Arc::new(builder.build()); // Configure the chainHead to stop all subscriptions on lagging distance of 5 blocks. let api = ChainHead::new( diff --git a/substrate/client/rpc/src/chain/tests.rs b/substrate/client/rpc/src/chain/tests.rs index afb81f709f7d..9d78aa88091d 100644 --- a/substrate/client/rpc/src/chain/tests.rs +++ b/substrate/client/rpc/src/chain/tests.rs @@ -72,7 +72,7 @@ async fn should_return_header() { #[tokio::test] async fn should_return_a_block() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), test_executor()).into_rpc(); let block = BlockBuilderBuilder::new(&*client) @@ -137,7 +137,7 @@ async fn should_return_a_block() { #[tokio::test] async fn should_return_block_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), test_executor()).into_rpc(); let res: ListOrValue> = @@ -204,7 +204,7 @@ async fn should_return_block_hash() { #[tokio::test] async fn should_return_finalized_hash() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), test_executor()).into_rpc(); let res: H256 = api.call("chain_getFinalizedHead", EmptyParams::new()).await.unwrap(); @@ -248,7 +248,7 @@ async fn should_notify_about_finalized_block() { } async fn test_head_subscription(method: &str) { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let mut sub = { let api = new_full(client.clone(), test_executor()).into_rpc(); diff --git a/substrate/client/rpc/src/dev/tests.rs b/substrate/client/rpc/src/dev/tests.rs index e8f9ba4990d2..8f09d48e8a5d 100644 --- a/substrate/client/rpc/src/dev/tests.rs +++ 
b/substrate/client/rpc/src/dev/tests.rs @@ -24,7 +24,7 @@ use substrate_test_runtime_client::{prelude::*, runtime::Block}; #[tokio::test] async fn block_stats_work() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let api = >::new(client.clone(), DenyUnsafe::No).into_rpc(); let block = BlockBuilderBuilder::new(&*client) @@ -76,7 +76,7 @@ async fn block_stats_work() { #[tokio::test] async fn deny_unsafe_works() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let api = >::new(client.clone(), DenyUnsafe::Yes).into_rpc(); let block = BlockBuilderBuilder::new(&*client) diff --git a/substrate/client/rpc/src/state/tests.rs b/substrate/client/rpc/src/state/tests.rs index dd866e671c50..86df46f63a0f 100644 --- a/substrate/client/rpc/src/state/tests.rs +++ b/substrate/client/rpc/src/state/tests.rs @@ -216,7 +216,7 @@ async fn should_notify_about_storage_changes() { init_logger(); let mut sub = { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let api_rpc = api.into_rpc(); @@ -256,7 +256,7 @@ async fn should_send_initial_storage_changes_and_notifications() { init_logger(); let mut sub = { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); let alice_balance_key = [ @@ -304,10 +304,10 @@ async fn should_send_initial_storage_changes_and_notifications() { #[tokio::test] async fn should_query_storage() { - async fn run_tests(mut client: Arc) { + async fn run_tests(client: Arc) { let (api, _child) = new_full(client.clone(), test_executor(), DenyUnsafe::No); - let mut add_block = |index| { + let add_block = |index| { let mut builder = BlockBuilderBuilder::new(&*client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) diff --git a/substrate/client/service/src/client/client.rs b/substrate/client/service/src/client/client.rs index a2c9212f7b9c..22defd7c5514 100644 --- a/substrate/client/service/src/client/client.rs +++ b/substrate/client/service/src/client/client.rs @@ -1754,7 +1754,7 @@ where /// If you are not sure that there are no BlockImport objects provided by the consensus /// algorithm, don't use this function. 
async fn import_block( - &mut self, + &self, mut import_block: BlockImportParams, ) -> Result { let span = tracing::span!(tracing::Level::DEBUG, "import_block"); @@ -1854,19 +1854,19 @@ where { type Error = ConsensusError; - async fn import_block( - &mut self, - import_block: BlockImportParams, - ) -> Result { - (&*self).import_block(import_block).await - } - async fn check_block( &self, block: BlockCheckParams, ) -> Result { (&self).check_block(block).await } + + async fn import_block( + &self, + import_block: BlockImportParams, + ) -> Result { + (&self).import_block(import_block).await + } } impl Finalizer for Client diff --git a/substrate/client/service/test/src/client/mod.rs b/substrate/client/service/test/src/client/mod.rs index bd48fae63444..13e63962fe8f 100644 --- a/substrate/client/service/test/src/client/mod.rs +++ b/substrate/client/service/test/src/client/mod.rs @@ -238,7 +238,7 @@ fn client_initializes_from_genesis_ok() { #[test] fn block_builder_works_with_no_transactions() { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let block = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().genesis_hash) @@ -256,7 +256,7 @@ fn block_builder_works_with_no_transactions() { #[test] fn block_builder_works_with_transactions() { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let mut builder = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().genesis_hash) @@ -316,7 +316,7 @@ fn block_builder_works_with_transactions() { #[test] fn block_builder_does_not_include_invalid() { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let mut builder = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().genesis_hash) .with_parent_block_number(0) @@ -390,7 +390,7 @@ fn best_containing_with_genesis_block() { fn uncles_with_only_ancestors() { // block tree: // G -> A1 -> A2 - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); // G -> A1 let a1 = BlockBuilderBuilder::new(&client) @@ -424,7 +424,7 @@ fn uncles_with_multiple_forks() { // A1 -> B2 -> B3 -> B4 // B2 -> C3 // A1 -> D2 - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); // G -> A1 let a1 = BlockBuilderBuilder::new(&client) @@ -584,7 +584,7 @@ fn finality_target_on_longest_chain_with_single_chain_3_blocks() { // block tree: // G -> A1 -> A2 - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 let a1 = BlockBuilderBuilder::new(&client) @@ -625,7 +625,7 @@ fn finality_target_on_longest_chain_with_multiple_forks() { // A1 -> B2 -> B3 -> B4 // B2 -> C3 // A1 -> D2 - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 let a1 = BlockBuilderBuilder::new(&client) @@ -877,7 +877,7 @@ fn finality_target_on_longest_chain_with_max_depth_higher_than_best() { // block tree: // G -> A1 -> A2 - let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let (client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); let chain = client.chain_info(); // G -> A1 @@ -914,7 +914,7 @@ 
fn finality_target_with_best_not_on_longest_chain() { // -> B2 -> (B3) -> B4 // ^best - let (mut client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); + let (client, chain_select) = TestClientBuilder::new().build_with_longest_chain(); let chain = client.chain_info(); // G -> A1 @@ -1045,7 +1045,7 @@ fn finality_target_with_best_not_on_longest_chain() { fn import_with_justification() { // block tree: // G -> A1 -> A2 -> A3 - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let mut finality_notifications = client.finality_notification_stream(); @@ -1099,7 +1099,7 @@ fn import_with_justification() { #[test] fn importing_diverged_finalized_block_should_trigger_reorg() { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); // G -> A1 -> A2 // \ @@ -1160,7 +1160,7 @@ fn importing_diverged_finalized_block_should_trigger_reorg() { #[test] fn finalizing_diverged_block_should_trigger_reorg() { - let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); + let (client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); // G -> A1 -> A2 // \ @@ -1257,7 +1257,7 @@ fn finalizing_diverged_block_should_trigger_reorg() { #[test] fn finality_notifications_content() { sp_tracing::try_init_simple(); - let (mut client, _select_chain) = TestClientBuilder::new().build_with_longest_chain(); + let (client, _select_chain) = TestClientBuilder::new().build_with_longest_chain(); // -> D3 -> D4 // G -> A1 -> A2 -> A3 @@ -1410,7 +1410,7 @@ fn get_hash_by_block_number_doesnt_panic() { #[test] fn state_reverted_on_reorg() { sp_tracing::try_init_simple(); - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); let current_balance = |client: &substrate_test_runtime_client::TestClient| { client @@ -1492,7 +1492,7 @@ fn doesnt_import_blocks_that_revert_finality() { .unwrap(), ); - let mut client = TestClientBuilder::with_backend(backend).build(); + let client = TestClientBuilder::with_backend(backend).build(); let mut finality_notifications = client.finality_notification_stream(); @@ -1619,7 +1619,7 @@ fn respects_block_rules() { known_bad: &mut HashSet, fork_rules: &mut Vec<(u64, H256)>, ) { - let mut client = if record_only { + let client = if record_only { TestClientBuilder::new().build() } else { TestClientBuilder::new() @@ -1771,7 +1771,7 @@ fn returns_status_for_pruned_blocks() { .unwrap(), ); - let mut client = TestClientBuilder::with_backend(backend).build(); + let client = TestClientBuilder::with_backend(backend).build(); let a1 = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().genesis_hash) @@ -2160,7 +2160,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { /// Test that ensures that we always send an import notification for re-orgs. 
#[test] fn reorg_triggers_a_notification_even_for_sources_that_should_not_trigger_notifications() { - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); let mut notification_stream = futures::executor::block_on_stream(client.import_notification_stream()); @@ -2232,7 +2232,7 @@ fn use_dalek_ext_works() { sp_core::ed25519::Signature::default() } - let mut client = TestClientBuilder::new().build(); + let client = TestClientBuilder::new().build(); client.execution_extensions().set_extensions_factory( sc_client_api::execution_extensions::ExtensionBeforeBlock::::new( @@ -2263,7 +2263,7 @@ fn use_dalek_ext_works() { #[test] fn finalize_after_best_block_updates_best() { - let mut client = substrate_test_runtime_client::new(); + let client = substrate_test_runtime_client::new(); // G -> A1 let a1 = BlockBuilderBuilder::new(&client) diff --git a/substrate/client/transaction-pool/tests/pool.rs b/substrate/client/transaction-pool/tests/pool.rs index 49bd2203c12b..6d70b6ce67ec 100644 --- a/substrate/client/transaction-pool/tests/pool.rs +++ b/substrate/client/transaction-pool/tests/pool.rs @@ -980,7 +980,7 @@ fn ready_set_should_eventually_resolve_when_block_update_arrives() { #[test] fn import_notification_to_pool_maintain_works() { - let mut client = Arc::new(substrate_test_runtime_client::new()); + let client = Arc::new(substrate_test_runtime_client::new()); let best_hash = client.info().best_hash; let finalized_hash = client.info().finalized_hash; diff --git a/substrate/test-utils/client/src/client_ext.rs b/substrate/test-utils/client/src/client_ext.rs index 9dc4739eb795..a4f91f2ec836 100644 --- a/substrate/test-utils/client/src/client_ext.rs +++ b/substrate/test-utils/client/src/client_ext.rs @@ -42,25 +42,22 @@ pub trait ClientExt: Sized { #[async_trait::async_trait] pub trait ClientBlockImportExt: Sized { /// Import block to the chain. No finality. - async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + async fn import(&self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; /// Import a block and make it our best block if possible. - async fn import_as_best( - &mut self, - origin: BlockOrigin, - block: Block, - ) -> Result<(), ConsensusError>; + async fn import_as_best(&self, origin: BlockOrigin, block: Block) + -> Result<(), ConsensusError>; /// Import a block and finalize it. async fn import_as_final( - &mut self, + &self, origin: BlockOrigin, block: Block, ) -> Result<(), ConsensusError>; /// Import block with justification(s), finalizes block. 
async fn import_justified( - &mut self, + &self, origin: BlockOrigin, block: Block, justifications: Justifications, @@ -94,7 +91,7 @@ where for<'r> &'r T: BlockImport, T: Send + Sync, { - async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import(&self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); @@ -104,7 +101,7 @@ where } async fn import_as_best( - &mut self, + &self, origin: BlockOrigin, block: Block, ) -> Result<(), ConsensusError> { @@ -117,7 +114,7 @@ where } async fn import_as_final( - &mut self, + &self, origin: BlockOrigin, block: Block, ) -> Result<(), ConsensusError> { @@ -131,7 +128,7 @@ where } async fn import_justified( - &mut self, + &self, origin: BlockOrigin, block: Block, justifications: Justifications, @@ -151,11 +148,11 @@ where impl ClientBlockImportExt for Client where Self: BlockImport, - RA: Send, + RA: Send + Sync, B: Send + Sync, E: Send + Sync, { - async fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + async fn import(&self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { let (header, extrinsics) = block.deconstruct(); let mut import = BlockImportParams::new(origin, header); import.body = Some(extrinsics); @@ -165,7 +162,7 @@ where } async fn import_as_best( - &mut self, + &self, origin: BlockOrigin, block: Block, ) -> Result<(), ConsensusError> { @@ -178,7 +175,7 @@ where } async fn import_as_final( - &mut self, + &self, origin: BlockOrigin, block: Block, ) -> Result<(), ConsensusError> { @@ -192,7 +189,7 @@ where } async fn import_justified( - &mut self, + &self, origin: BlockOrigin, block: Block, justifications: Justifications, diff --git a/substrate/test-utils/runtime/client/src/trait_tests.rs b/substrate/test-utils/runtime/client/src/trait_tests.rs index 6f6bb5c36ee4..c3a5f173d14e 100644 --- a/substrate/test-utils/runtime/client/src/trait_tests.rs +++ b/substrate/test-utils/runtime/client/src/trait_tests.rs @@ -46,7 +46,7 @@ where // B2 -> C3 // A1 -> D2 - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); let genesis_hash = client.chain_info().genesis_hash; @@ -221,7 +221,7 @@ where // B2 -> C3 // A1 -> D2 - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); let genesis_hash = client.chain_info().genesis_hash; @@ -390,7 +390,7 @@ where // A1 -> B2 -> B3 -> B4 // B2 -> C3 // A1 -> D2 - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let client = TestClientBuilder::with_backend(backend.clone()).build(); let blockchain = backend.blockchain(); let genesis_hash = client.chain_info().genesis_hash; diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs index d1a3eaa2daa9..514f3bcba204 100644 --- a/substrate/test-utils/runtime/src/lib.rs +++ b/substrate/test-utils/runtime/src/lib.rs @@ -1062,7 +1062,7 @@ mod tests { // This tests that the on-chain `HEAP_PAGES` parameter is respected. // Create a client devoting only 8 pages of wasm memory. This gives us ~512k of heap memory. 
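The common thread in the test churn above: `BlockImport::import_block` now takes `&self` instead of `&mut self`, so test clients no longer need to be declared `mut` and can be shared behind an `Arc` without a lock at the call site. A minimal sketch of the ownership change, using simplified synchronous stand-ins rather than the real `sc-consensus` types (`BlockImportParams`, `ImportResult`, and the client below are all hypothetical):

```rust
use std::sync::{Arc, Mutex};

// Hypothetical stand-ins for the real (async) block import types.
struct BlockImportParams {
    number: u64,
}

#[derive(Debug, PartialEq)]
enum ImportResult {
    Imported,
}

trait BlockImport {
    // `&self` instead of `&mut self`: callers only need a shared reference,
    // and the implementation serializes its own writes internally.
    fn import_block(&self, params: BlockImportParams) -> ImportResult;
}

struct Client {
    best_number: Mutex<u64>,
}

impl BlockImport for Client {
    fn import_block(&self, params: BlockImportParams) -> ImportResult {
        *self.best_number.lock().unwrap() = params.number;
        ImportResult::Imported
    }
}

fn main() {
    // No `let mut client` needed, and the client can live behind an `Arc`.
    let client = Arc::new(Client { best_number: Mutex::new(0) });
    assert_eq!(client.import_block(BlockImportParams { number: 1 }), ImportResult::Imported);
    assert_eq!(*client.best_number.lock().unwrap(), 1);
}
```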
- let mut client = TestClientBuilder::new().set_heap_pages(8).build(); + let client = TestClientBuilder::new().set_heap_pages(8).build(); let best_hash = client.chain_info().best_hash; // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger From 3fe22d17c4904c5ae016034b6ec2c335464b4165 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Sun, 18 Aug 2024 21:41:47 +0200 Subject: [PATCH 65/74] binary-merkle-tree: Do not spam test output (#5376) The CI isn't happy with the amount of output: https://gitlab.parity.io/parity/mirrors/polkadot-sdk/-/jobs/7035621/raw --------- Co-authored-by: Shawn Tabrizi --- prdoc/pr_5376.prdoc | 3 +++ substrate/utils/binary-merkle-tree/src/lib.rs | 9 --------- 2 files changed, 3 insertions(+), 9 deletions(-) create mode 100644 prdoc/pr_5376.prdoc diff --git a/prdoc/pr_5376.prdoc b/prdoc/pr_5376.prdoc new file mode 100644 index 000000000000..c9874ef7d865 --- /dev/null +++ b/prdoc/pr_5376.prdoc @@ -0,0 +1,3 @@ +crates: + - name: binary-merkle-tree + bump: none diff --git a/substrate/utils/binary-merkle-tree/src/lib.rs b/substrate/utils/binary-merkle-tree/src/lib.rs index 030e9f69b9b6..f2d338cf028e 100644 --- a/substrate/utils/binary-merkle-tree/src/lib.rs +++ b/substrate/utils/binary-merkle-tree/src/lib.rs @@ -356,7 +356,6 @@ mod tests { #[test] fn should_generate_empty_root() { // given - sp_tracing::init_for_tests(); let data: Vec<[u8; 1]> = Default::default(); // when @@ -372,7 +371,6 @@ mod tests { #[test] fn should_generate_single_root() { // given - sp_tracing::init_for_tests(); let data = vec![array_bytes::hex2array_unchecked::<_, 20>( "E04CC55ebEE1cBCE552f250e85c57B70B2E2625b", )]; @@ -390,7 +388,6 @@ mod tests { #[test] fn should_generate_root_pow_2() { // given - sp_tracing::init_for_tests(); let data = vec![ array_bytes::hex2array_unchecked::<_, 20>("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), array_bytes::hex2array_unchecked::<_, 20>("25451A4de12dcCc2D166922fA938E900fCc4ED24"), ]; @@ -408,7 +405,6 @@ mod tests { #[test] fn should_generate_root_complex() { - sp_tracing::init_for_tests(); let test = |root, data| { assert_eq!(array_bytes::bytes2hex("", &merkle_root::<Keccak256, _>(data)), root); }; @@ -437,7 +433,6 @@ mod tests { #[test] fn should_generate_and_verify_proof_simple() { // given - sp_tracing::init_for_tests(); let data = vec!["a", "b", "c"]; // when @@ -501,7 +496,6 @@ mod tests { #[test] fn should_generate_and_verify_proof_complex() { // given - sp_tracing::init_for_tests(); let data = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]; for l in 0..data.len() { @@ -521,7 +515,6 @@ mod tests { #[test] fn should_generate_and_verify_proof_large() { // given - sp_tracing::init_for_tests(); let mut data = vec![]; for i in 1..16 { for c in 'a'..'z' { @@ -548,7 +541,6 @@ mod tests { #[test] fn should_generate_and_verify_proof_large_tree() { // given - sp_tracing::init_for_tests(); let mut data = vec![]; for i in 0..6000 { data.push(format!("{}", i)); @@ -571,7 +563,6 @@ mod tests { #[test] #[should_panic] fn should_panic_on_invalid_leaf_index() { - sp_tracing::init_for_tests(); merkle_proof::<Keccak256, _, _>(vec!["a"], 5); } From a4b51593dbe9b9390fa64f5040a9230b5d33b898 Mon Sep 17 00:00:00 2001 From: Przemek Rzad Date: Mon, 19 Aug 2024 15:05:54 +0200 Subject: [PATCH 66/74] Repair the docs TOC (#5388) ## Before (screenshot) ## After (screenshot) --- docs/sdk/assets/header.html | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/sdk/assets/header.html index f55c31b53216..c24c10940759 100644 ---
a/docs/sdk/assets/header.html +++ b/docs/sdk/assets/header.html @@ -14,12 +14,13 @@ headers.forEach(header => { let link = document.createElement("a"); link.href = "#" + header.id; - link.textContent = header.textContent; + const headerTextContent = header.textContent.replace("§", "") + link.textContent = headerTextContent; link.className = header.tagName.toLowerCase(); toc.appendChild(link); - if (header.id == "modules" && header.textContent == "Modules") { + if (header.id == "modules" && headerTextContent == "Modules") { modules.forEach(module => { let link = document.createElement("a"); link.href = module.href; From 8f72895144b1214975bae7cfbe5f76f0436c4c30 Mon Sep 17 00:00:00 2001 From: georgepisaltu Date: Mon, 19 Aug 2024 21:18:35 +0300 Subject: [PATCH 67/74] Refactor extension weights Signed-off-by: georgepisaltu --- .../extensions/check_obsolete_extension.rs | 4 +- .../extensions/refund_relayer_extension.rs | 6 +- .../storage-weight-reclaim/src/lib.rs | 8 +- substrate/bin/node/cli/tests/basic.rs | 8 +- substrate/bin/node/cli/tests/fees.rs | 12 ++- substrate/frame/sudo/src/extension.rs | 13 +-- substrate/frame/support/src/dispatch.rs | 81 ++++++++++--------- .../system/src/extensions/check_genesis.rs | 6 +- .../system/src/extensions/check_mortality.rs | 7 +- .../src/extensions/check_non_zero_sender.rs | 8 +- .../system/src/extensions/check_nonce.rs | 7 +- .../src/extensions/check_spec_version.rs | 6 +- .../system/src/extensions/check_tx_version.rs | 6 +- .../system/src/extensions/check_weight.rs | 12 +-- .../asset-conversion-tx-payment/src/lib.rs | 31 ++++--- .../asset-conversion-tx-payment/src/tests.rs | 45 +++++------ .../asset-tx-payment/src/lib.rs | 33 ++++---- .../asset-tx-payment/src/tests.rs | 38 +++++---- .../skip-feeless-payment/src/lib.rs | 12 +-- .../frame/transaction-payment/src/lib.rs | 12 +-- .../frame/transaction-payment/src/tests.rs | 45 ++++++----- .../runtime/src/generic/checked_extrinsic.rs | 8 +- .../src/generic/unchecked_extrinsic.rs | 5 +- .../primitives/runtime/src/traits/mod.rs | 4 +- .../as_transaction_extension.rs | 4 +- .../dispatch_transaction.rs | 4 +- .../src/traits/transaction_extension/mod.rs | 68 ++++++++-------- substrate/test-utils/runtime/src/lib.rs | 7 +- 28 files changed, 259 insertions(+), 241 deletions(-) diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs index 97744da5ad8c..d5e09df58dcd 100644 --- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs @@ -344,7 +344,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages { post_info: &sp_runtime::traits::PostDispatchInfoOf<$call>, len: usize, result: &sp_runtime::DispatchResult, - ) -> Result, sp_runtime::transaction_validity::TransactionValidityError> { + ) -> Result { use $crate::extensions::check_obsolete_extension::__private::tuplex::PopFront; let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(None) }; let has_failed = result.is_err(); @@ -357,7 +357,7 @@ macro_rules! 
generate_bridge_reject_obsolete_headers_and_messages { $call, >>::post_dispatch(&relayer, has_failed, item); )* - Ok(None) + Ok(frame_support::pallet_prelude::Weight::zero()) } } }; diff --git a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs index a5b453b2f5c9..7bab1d86d268 100644 --- a/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs +++ b/bridges/bin/runtime-common/src/extensions/refund_relayer_extension.rs @@ -533,7 +533,7 @@ where post_info: &PostDispatchInfoOf>, len: usize, result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { let call_result = T::analyze_call_result(Some(pre), info, post_info, len, result); match call_result { @@ -561,7 +561,7 @@ where ), } - Ok(None) + Ok(Weight::zero()) } } @@ -1676,7 +1676,7 @@ pub(crate) mod tests { 1024, &dispatch_result, ); - assert_eq!(post_dispatch_result, Ok(None)); + assert_eq!(post_dispatch_result, Ok(Weight::zero())); } fn expected_delivery_reward() -> ThisChainBalance { diff --git a/cumulus/primitives/storage-weight-reclaim/src/lib.rs b/cumulus/primitives/storage-weight-reclaim/src/lib.rs index e7cfe4de7245..499d41a45776 100644 --- a/cumulus/primitives/storage-weight-reclaim/src/lib.rs +++ b/cumulus/primitives/storage-weight-reclaim/src/lib.rs @@ -155,9 +155,9 @@ where post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { let Some(pre_dispatch_proof_size) = pre else { - return Ok(None); + return Ok(Weight::zero()); }; let Some(post_dispatch_proof_size) = get_proof_size() else { @@ -165,7 +165,7 @@ where target: LOG_TARGET, "Proof recording enabled during pre-dispatch, now disabled. This should not happen." ); - return Ok(None) + return Ok(Weight::zero()) }; // Unspent weight according to the `actual_weight` from `PostDispatchInfo` // This unspent weight will be refunded by the `CheckWeight` extension, so we need to @@ -209,7 +209,7 @@ where current.accrue(Weight::from_parts(0, missing_from_node), info.class); } }); - Ok(None) + Ok(Weight::zero()) } impl_tx_ext_default!(T::RuntimeCall; validate); diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs index f678ba11a06b..61f4b7326a3a 100644 --- a/substrate/bin/node/cli/tests/basic.rs +++ b/substrate/bin/node/cli/tests/basic.rs @@ -61,16 +61,16 @@ pub fn bloaty_code_unwrap() -> &'static [u8] { /// Note that reads the multiplier from storage directly, hence to get the fee of `extrinsic` /// at block `n`, it must be called prior to executing block `n` to do the calculation with the /// correct multiplier. -fn transfer_fee(extrinsic: &E) -> Balance { +fn transfer_fee(extrinsic: &UncheckedExtrinsic) -> Balance { let mut info = default_transfer_call().get_dispatch_info(); - info.extension_weight = TxExtension::weight(); + info.extension_weight = extrinsic.extension_weight(); TransactionPayment::compute_fee(extrinsic.encode().len() as u32, &info, 0) } /// Default transfer fee, same as `transfer_fee`, but with a weight refund factored in. 
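Both hunks above encode the same convention change: `post_dispatch_details` now returns the *unspent* weight to be refunded rather than an `Option` of the weight actually consumed. A toy model of the new arithmetic, with plain `u64` values standing in for `Weight`:

```rust
// Worst-case weight charged up front for a hypothetical extension.
const WORST_CASE_EXT_WEIGHT: u64 = 300;

// What `post_dispatch_details` would report after dispatch: the leftover
// budget, not the amount consumed.
fn unspent(actually_used: u64) -> u64 {
    WORST_CASE_EXT_WEIGHT.saturating_sub(actually_used)
}

fn main() {
    assert_eq!(unspent(100), 200); // cheap path taken: refund 200
    assert_eq!(unspent(300), 0); // full budget used: nothing to refund
    // Under the old convention these cases returned Some(100) and None
    // respectively, and the caller performed the subtraction itself.
}
```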
-fn transfer_fee_with_refund(extrinsic: &E, weight_refund: Weight) -> Balance { +fn transfer_fee_with_refund(extrinsic: &UncheckedExtrinsic, weight_refund: Weight) -> Balance { let mut info = default_transfer_call().get_dispatch_info(); - info.extension_weight = TxExtension::weight(); + info.extension_weight = extrinsic.extension_weight(); let post_info = (Some(info.total_weight().saturating_sub(weight_refund)), info.pays_fee).into(); TransactionPayment::compute_actual_fee(extrinsic.encode().len() as u32, &info, &post_info, 0) } diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs index 906f35b94afb..a39567da2f87 100644 --- a/substrate/bin/node/cli/tests/fees.rs +++ b/substrate/bin/node/cli/tests/fees.rs @@ -37,6 +37,14 @@ use sp_runtime::{ pub mod common; use self::common::{sign, *}; +fn charge_asset_tx_unspent_weight() -> Weight { + use pallet_asset_conversion_tx_payment::{Config as ACConfig, WeightInfo as ACWeights}; + <::WeightInfo as ACWeights>::charge_asset_tx_payment_asset() + .saturating_sub( + <::WeightInfo as ACWeights>::charge_asset_tx_payment_native(), + ) +} + #[test] fn fee_multiplier_increases_and_decreases_on_big_weight() { let mut t = new_test_ext(compact_code_unwrap()); @@ -178,9 +186,9 @@ fn transaction_fee_is_correct() { balance_alice -= length_fee; let mut info = default_transfer_call().get_dispatch_info(); - info.extension_weight = TxExtension::weight(); + info.extension_weight = xt.extension_weight(); let mut weight = info.total_weight(); - let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::::weight().saturating_sub(<::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native()); + let weight_refund = charge_asset_tx_unspent_weight(); weight.saturating_reduce(weight_refund); let weight_fee = IdentityFee::::weight_to_fee(&weight); diff --git a/substrate/frame/sudo/src/extension.rs b/substrate/frame/sudo/src/extension.rs index ed4f8e180aaa..11e3e47faaff 100644 --- a/substrate/frame/sudo/src/extension.rs +++ b/substrate/frame/sudo/src/extension.rs @@ -72,11 +72,6 @@ impl CheckOnlySudoAccount { impl TransactionExtensionBase for CheckOnlySudoAccount { const IDENTIFIER: &'static str = "CheckOnlySudoAccount"; type Implicit = (); - - fn weight() -> frame_support::weights::Weight { - use crate::weights::WeightInfo; - T::WeightInfo::check_only_sudo_account() - } } impl TransactionExtension<::RuntimeCall> for CheckOnlySudoAccount @@ -88,6 +83,14 @@ where type Pre = (); type Val = (); + fn weight( + &self, + _: &::RuntimeCall, + ) -> frame_support::weights::Weight { + use crate::weights::WeightInfo; + T::WeightInfo::check_only_sudo_account() + } + fn validate( &self, origin: <::RuntimeCall as Dispatchable>::RuntimeOrigin, diff --git a/substrate/frame/support/src/dispatch.rs b/substrate/frame/support/src/dispatch.rs index 675b79c4ce9d..2568ccb90cbd 100644 --- a/substrate/frame/support/src/dispatch.rs +++ b/substrate/frame/support/src/dispatch.rs @@ -27,7 +27,8 @@ use serde::{Deserialize, Serialize}; use sp_runtime::{ generic::{CheckedExtrinsic, UncheckedExtrinsic}, traits::{ - Dispatchable, ExtensionPostDispatchWeightHandler, RefundWeight, TransactionExtensionBase, + Dispatchable, ExtensionPostDispatchWeightHandler, RefundWeight, TransactionExtension, + TransactionExtensionBase, }, DispatchError, RuntimeDebug, }; @@ -380,7 +381,7 @@ where } /// Implementation for unchecked extrinsic. 
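These helpers now ask the extrinsic (and, below, the extension value) for its weight instead of calling a static `TxExtension::weight()`. The point of moving `weight` onto the instance is that the estimate can depend on the transaction's own fields, as `ChargeAssetTxPayment` does later in this patch. A simplified sketch of that pattern; the types and numbers here are stand-ins, not the FRAME originals:

```rust
#[derive(Debug, PartialEq)]
struct Weight(u64);

// Simplified stand-in for the payment extension: the declared weight
// depends on whether an asset is used to pay the fee.
struct ChargeAssetTxPayment {
    asset_id: Option<u32>,
}

impl ChargeAssetTxPayment {
    // A static `fn weight()` had to quote the worst case for everyone;
    // `&self` lets the native and asset paths quote different costs.
    fn weight(&self) -> Weight {
        if self.asset_id.is_some() {
            Weight(30) // asset conversion path is heavier
        } else {
            Weight(10) // plain native payment
        }
    }
}

fn main() {
    assert_eq!(ChargeAssetTxPayment { asset_id: None }.weight(), Weight(10));
    assert_eq!(ChargeAssetTxPayment { asset_id: Some(1) }.weight(), Weight(30));
}
```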
-impl GetDispatchInfo +impl> GetDispatchInfo for UncheckedExtrinsic where Call: GetDispatchInfo + Dispatchable, @@ -394,7 +395,8 @@ where } /// Implementation for checked extrinsic. -impl GetDispatchInfo for CheckedExtrinsic +impl> GetDispatchInfo + for CheckedExtrinsic where Call: GetDispatchInfo, Extension: TransactionExtensionBase, @@ -590,18 +592,17 @@ impl RefundWeight for PostDispatchInfo { } impl ExtensionPostDispatchWeightHandler for PostDispatchInfo { - fn set_extension_weight(&mut self, info: &DispatchInfo, weight: Weight) { - let actual_weight = self.actual_weight.unwrap_or(info.call_weight).saturating_add(weight); + fn set_extension_weight(&mut self, info: &DispatchInfo) { + let actual_weight = self + .actual_weight + .unwrap_or(info.call_weight) + .saturating_add(info.extension_weight); self.actual_weight = Some(actual_weight); } } impl ExtensionPostDispatchWeightHandler<()> for PostDispatchInfo { - fn set_extension_weight(&mut self, _info: &(), weight: sp_weights::Weight) { - if let Some(actual) = &mut self.actual_weight { - actual.saturating_add(weight); - } - } + fn set_extension_weight(&mut self, _: &()) {} } // TODO: Eventually remove these @@ -1219,14 +1220,15 @@ mod test_extensions { impl TransactionExtensionBase for HalfCostIf { const IDENTIFIER: &'static str = "HalfCostIf"; type Implicit = (); - fn weight() -> sp_weights::Weight { - Weight::from_parts(100, 0) - } } impl TransactionExtension for HalfCostIf { type Val = (); type Pre = bool; + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(100, 0) + } + fn prepare( self, _val: Self::Val, @@ -1244,11 +1246,11 @@ mod test_extensions { _post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { if pre { - Ok(Some(Self::weight().saturating_div(2))) + Ok(Weight::from_parts(50, 0)) } else { - Ok(Some(Self::weight())) + Ok(Weight::zero()) } } impl_tx_ext_default!(RuntimeCall; validate); @@ -1262,9 +1264,6 @@ mod test_extensions { impl TransactionExtensionBase for FreeIfUnder { const IDENTIFIER: &'static str = "FreeIfUnder"; type Implicit = (); - fn weight() -> sp_weights::Weight { - Weight::from_parts(200, 0) - } } impl TransactionExtension for FreeIfUnder where @@ -1273,6 +1272,10 @@ mod test_extensions { type Val = (); type Pre = u64; + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(200, 0) + } + fn prepare( self, _val: Self::Val, @@ -1290,13 +1293,13 @@ mod test_extensions { post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { if let Some(actual) = post_info.actual_weight { if pre > actual.ref_time() { - return Ok(Some(Weight::zero())); + return Ok(Weight::from_parts(200, 0)); } } - Ok(None) + Ok(Weight::zero()) } impl_tx_ext_default!(RuntimeCall; validate); } @@ -1309,14 +1312,15 @@ mod test_extensions { impl TransactionExtensionBase for ActualWeightIs { const IDENTIFIER: &'static str = "ActualWeightIs"; type Implicit = (); - fn weight() -> sp_weights::Weight { - Weight::from_parts(300, 0) - } } impl TransactionExtension for ActualWeightIs { type Val = (); type Pre = u64; + fn weight(&self, _: &RuntimeCall) -> sp_weights::Weight { + Weight::from_parts(300, 0) + } + fn prepare( self, _val: Self::Val, @@ -1334,8 +1338,8 @@ mod test_extensions { _post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { - Ok(Some(Weight::from_parts(core::cmp::min(pre, 300), 0))) + 
) -> Result { + Ok(Weight::from_parts(300u64.saturating_sub(pre), 0)) } impl_tx_ext_default!(RuntimeCall; validate); } @@ -1351,10 +1355,7 @@ mod extension_weight_tests { use sp_core::parameter_types; use sp_runtime::{ generic::{self, ExtrinsicFormat}, - traits::{ - Applyable, BlakeTwo256, DispatchTransaction, TransactionExtension, - TransactionExtensionBase, - }, + traits::{Applyable, BlakeTwo256, DispatchTransaction, TransactionExtension}, BuildStorage, }; use sp_weights::RuntimeDbWeight; @@ -1429,7 +1430,7 @@ mod extension_weight_tests { let mut info = call.get_dispatch_info(); assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); - info.extension_weight = TxExtension::weight(); + info.extension_weight = ext.weight(&call); let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.dispatch(Some(0).into()); let mut post_info = res.unwrap(); @@ -1455,7 +1456,7 @@ mod extension_weight_tests { let mut info = call.get_dispatch_info(); assert_eq!(info.total_weight(), Weight::from_parts(1000, 0)); - info.extension_weight = TxExtension::weight(); + info.extension_weight = ext.weight(&call); let post_info = ext.dispatch_transaction(Some(0).into(), call, &info, 0).unwrap().unwrap(); // 1000 call weight + 50 + 200 + 0 @@ -1474,7 +1475,7 @@ mod extension_weight_tests { let mut info = call.get_dispatch_info(); assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); - info.extension_weight = TxExtension::weight(); + info.extension_weight = ext.weight(&call); assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); let (pre, _) = ext.validate_and_prepare(Some(0).into(), &call, &info, 0).unwrap(); let res = call.clone().dispatch(Some(0).into()); @@ -1482,7 +1483,7 @@ mod extension_weight_tests { // 500 actual call weight assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); // add the 600 worst case extension weight - post_info.set_extension_weight(&info, TxExtension::weight()); + post_info.set_extension_weight(&info); // extension weight should be refunded assert_ok!(>::post_dispatch( pre, @@ -1502,7 +1503,7 @@ mod extension_weight_tests { // 500 actual call weight assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); // add the 600 worst case extension weight - post_info.set_extension_weight(&info, TxExtension::weight()); + post_info.set_extension_weight(&info); // extension weight should be refunded assert_ok!(>::post_dispatch( pre, @@ -1522,7 +1523,7 @@ mod extension_weight_tests { // 500 actual call weight assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); // add the 600 worst case extension weight - post_info.set_extension_weight(&info, TxExtension::weight()); + post_info.set_extension_weight(&info); // extension weight should be refunded assert_ok!(>::post_dispatch( pre, @@ -1542,7 +1543,7 @@ mod extension_weight_tests { // 500 actual call weight assert_eq!(post_info.actual_weight, Some(Weight::from_parts(500, 0))); // add the 600 worst case extension weight - post_info.set_extension_weight(&info, TxExtension::weight()); + post_info.set_extension_weight(&info); // extension weight should be refunded assert_ok!(>::post_dispatch( pre, @@ -1563,13 +1564,13 @@ mod extension_weight_tests { // First testcase let ext: TxExtension = (HalfCostIf(false), FreeIfUnder(2000), ActualWeightIs(0)); let xt = CheckedExtrinsic { - format: ExtrinsicFormat::Signed(0, ext), + format: ExtrinsicFormat::Signed(0, ext.clone()), function: call.clone(), }; assert_eq!(xt.extension_weight(), Weight::from_parts(600, 
0)); let mut info = call.get_dispatch_info(); assert_eq!(info.call_weight, Weight::from_parts(1000, 0)); - info.extension_weight = TxExtension::weight(); + info.extension_weight = ext.weight(&call); assert_eq!(info.total_weight(), Weight::from_parts(1600, 0)); let post_info = xt.apply::(&info, 0).unwrap().unwrap(); // 500 actual call weight + 100 + 0 + 0 diff --git a/substrate/frame/system/src/extensions/check_genesis.rs b/substrate/frame/system/src/extensions/check_genesis.rs index d5baac157315..b5df883bb771 100644 --- a/substrate/frame/system/src/extensions/check_genesis.rs +++ b/substrate/frame/system/src/extensions/check_genesis.rs @@ -59,12 +59,12 @@ impl TransactionExtensionBase for CheckGenesis { fn implicit(&self) -> Result { Ok(>::block_hash(BlockNumberFor::::zero())) } - fn weight() -> sp_weights::Weight { - ::check_genesis() - } } impl TransactionExtension for CheckGenesis { type Val = (); type Pre = (); + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_genesis() + } impl_tx_ext_default!(T::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_mortality.rs b/substrate/frame/system/src/extensions/check_mortality.rs index 88d4fe1e6b2a..7d6d4bf9e236 100644 --- a/substrate/frame/system/src/extensions/check_mortality.rs +++ b/substrate/frame/system/src/extensions/check_mortality.rs @@ -72,14 +72,15 @@ impl TransactionExtensionBase for CheckMortality { Ok(>::block_hash(n)) } } - fn weight() -> sp_weights::Weight { - ::check_mortality() - } } impl TransactionExtension for CheckMortality { type Pre = (); type Val = (); + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_mortality() + } + fn validate( &self, origin: ::RuntimeOrigin, diff --git a/substrate/frame/system/src/extensions/check_non_zero_sender.rs b/substrate/frame/system/src/extensions/check_non_zero_sender.rs index ceecdc532dae..0062c4ab3ca1 100644 --- a/substrate/frame/system/src/extensions/check_non_zero_sender.rs +++ b/substrate/frame/system/src/extensions/check_non_zero_sender.rs @@ -55,13 +55,15 @@ impl CheckNonZeroSender { impl TransactionExtensionBase for CheckNonZeroSender { const IDENTIFIER: &'static str = "CheckNonZeroSender"; type Implicit = (); - fn weight() -> sp_weights::Weight { - ::check_non_zero_sender() - } } impl TransactionExtension for CheckNonZeroSender { type Val = (); type Pre = (); + + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_non_zero_sender() + } + fn validate( &self, origin: ::RuntimeOrigin, diff --git a/substrate/frame/system/src/extensions/check_nonce.rs b/substrate/frame/system/src/extensions/check_nonce.rs index c6c96665a7ab..a875c1a1ab25 100644 --- a/substrate/frame/system/src/extensions/check_nonce.rs +++ b/substrate/frame/system/src/extensions/check_nonce.rs @@ -69,9 +69,6 @@ impl core::fmt::Debug for CheckNonce { impl TransactionExtensionBase for CheckNonce { const IDENTIFIER: &'static str = "CheckNonce"; type Implicit = (); - fn weight() -> sp_weights::Weight { - ::check_nonce() - } } impl TransactionExtension for CheckNonce where @@ -81,6 +78,10 @@ where type Val = Option<(T::AccountId, T::Nonce)>; type Pre = (); + fn weight(&self, _: &T::RuntimeCall) -> sp_weights::Weight { + ::check_nonce() + } + fn validate( &self, origin: ::RuntimeOrigin, diff --git a/substrate/frame/system/src/extensions/check_spec_version.rs b/substrate/frame/system/src/extensions/check_spec_version.rs index a32c48dd466f..9161aad0ef05 100644 --- a/substrate/frame/system/src/extensions/check_spec_version.rs 
+++ b/substrate/frame/system/src/extensions/check_spec_version.rs @@ -59,14 +59,14 @@ impl TransactionExtensionBase for CheckSpecVersion { fn implicit(&self) -> Result { Ok(>::runtime_version().spec_version) } - fn weight() -> sp_weights::Weight { - ::check_spec_version() - } } impl TransactionExtension<::RuntimeCall> for CheckSpecVersion { type Val = (); type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_spec_version() + } impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_tx_version.rs b/substrate/frame/system/src/extensions/check_tx_version.rs index 908a9446afdf..5eb8ba2cb761 100644 --- a/substrate/frame/system/src/extensions/check_tx_version.rs +++ b/substrate/frame/system/src/extensions/check_tx_version.rs @@ -59,14 +59,14 @@ impl TransactionExtensionBase for CheckTxVersion { fn implicit(&self) -> Result { Ok(>::runtime_version().transaction_version) } - fn weight() -> sp_weights::Weight { - ::check_tx_version() - } } impl TransactionExtension<::RuntimeCall> for CheckTxVersion { type Val = (); type Pre = (); + fn weight(&self, _: &::RuntimeCall) -> sp_weights::Weight { + ::check_tx_version() + } impl_tx_ext_default!(::RuntimeCall; validate prepare); } diff --git a/substrate/frame/system/src/extensions/check_weight.rs b/substrate/frame/system/src/extensions/check_weight.rs index 0d64268d32ab..ea03b7438cd4 100644 --- a/substrate/frame/system/src/extensions/check_weight.rs +++ b/substrate/frame/system/src/extensions/check_weight.rs @@ -211,10 +211,6 @@ where impl TransactionExtensionBase for CheckWeight { const IDENTIFIER: &'static str = "CheckWeight"; type Implicit = (); - - fn weight() -> Weight { - ::check_weight() - } } impl TransactionExtension for CheckWeight where @@ -223,6 +219,10 @@ where type Pre = (); type Val = u32; /* next block length */ + fn weight(&self, _: &T::RuntimeCall) -> Weight { + ::check_weight() + } + fn validate( &self, origin: T::RuntimeOrigin, @@ -253,7 +253,7 @@ where post_info: &PostDispatchInfoOf, _len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { let unspent = post_info.calc_unspent(info); if unspent.any_gt(Weight::zero()) { crate::BlockWeight::::mutate(|current_weight| { @@ -273,7 +273,7 @@ where Pallet::::all_extrinsics_len(), ); - Ok(None) + Ok(Weight::zero()) } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs index b8b38d06befc..1926fe8b418d 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/lib.rs @@ -258,10 +258,6 @@ where { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type Implicit = (); - - fn weight() -> Weight { - ::WeightInfo::charge_asset_tx_payment_asset() - } } impl TransactionExtension for ChargeAssetTxPayment @@ -286,8 +282,18 @@ where T::AccountId, // imbalance resulting from withdrawing the fee InitialPayment, + // weight used by the extension + Weight, ); + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } + fn validate( &self, origin: ::RuntimeOrigin, @@ -318,7 +324,7 @@ where let (_tip, who, fee) = val; // Mutating call of `withdraw_fee` to actually charge for the transaction. 
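`CheckWeight` is the one extension above whose refund does not flow through the returned unspent weight: it returns `Weight::zero()` and instead credits the difference between the estimate and the actual post-dispatch weight back to the block's weight tracking. Roughly, with plain integers standing in for `Weight` and a hypothetical `calc_unspent` helper:

```rust
// Sketch of the `post_info.calc_unspent(info)` computation: worst-case
// estimate minus what dispatch actually reported.
fn calc_unspent(worst_case: u64, actual: Option<u64>) -> u64 {
    // `None` means the call consumed its full estimate.
    worst_case.saturating_sub(actual.unwrap_or(worst_case))
}

fn main() {
    assert_eq!(calc_unspent(1_000, Some(400)), 600); // block gets 600 back
    assert_eq!(calc_unspent(1_000, None), 0); // nothing to credit back
}
```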
let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; - Ok((self.tip, who.clone(), initial_payment)) + Ok((self.tip, who.clone(), initial_payment, self.weight(call))) } fn post_dispatch_details( @@ -327,14 +333,14 @@ where post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { - let (tip, who, initial_payment) = pre; + ) -> Result { + let (tip, who, initial_payment, extension_weight) = pre; match initial_payment { InitialPayment::Native(already_withdrawn) => { // Take into account the weight used by this extension before calculating the // refund. let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); - let unspent_weight = Self::weight().saturating_sub(actual_ext_weight); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); let mut actual_post_info = *post_info; actual_post_info.refund(unspent_weight); let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( @@ -354,13 +360,13 @@ where pallet_transaction_payment::Pallet::::deposit_fee_paid_event( who, actual_fee, tip, ); - Ok(Some(::WeightInfo::charge_asset_tx_payment_native())) + Ok(unspent_weight) }, InitialPayment::Asset((asset_id, already_withdrawn)) => { // Take into account the weight used by this extension before calculating the // refund. let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); - let unspent_weight = Self::weight().saturating_sub(actual_ext_weight); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); let mut actual_post_info = *post_info; actual_post_info.refund(unspent_weight); let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( @@ -386,7 +392,7 @@ where asset_id, }); - Ok(Some(::WeightInfo::charge_asset_tx_payment_asset())) + Ok(unspent_weight) }, InitialPayment::Nothing => { // `actual_fee` should be zero here for any signed extrinsic. It would be @@ -394,7 +400,8 @@ where // `compute_actual_fee` is not aware of them. In both cases it's fine to just // move ahead without adjusting the fee, though, so we do nothing. 
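The new `Weight` element in `Pre` exists because the budget chosen during `validate`/`prepare` has to survive until `post_dispatch_details`, where it is compared against the benchmark of the path actually taken. Schematically, with hypothetical minimal types:

```rust
// What `prepare` hands over to `post_dispatch_details` (simplified).
struct Pre {
    _tip: u64,             // kept for fee events in the real extension
    extension_weight: u64, // budgeted via `self.weight(call)` at prepare time
}

// Unspent weight: the budget captured in `Pre` minus the cost of the
// branch that actually ran.
fn post_dispatch_details(pre: Pre, actual_path_weight: u64) -> u64 {
    pre.extension_weight.saturating_sub(actual_path_weight)
}

fn main() {
    // Budgeted the asset path (30) but the native path (10) ran: refund 20.
    let pre = Pre { _tip: 5, extension_weight: 30 };
    assert_eq!(post_dispatch_details(pre, 10), 20);
}
```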
debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - Ok(Some(::WeightInfo::charge_asset_tx_payment_zero())) + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) }, } } diff --git a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs index 0d5e7e77b91c..4895c12f0c10 100644 --- a/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-conversion-tx-payment/src/tests.rs @@ -165,12 +165,11 @@ fn transaction_payment_in_native_possible() { .execute_with(|| { let len = 10; let mut info = info_from_weight(WEIGHT_5); - info.extension_weight = ChargeAssetTxPayment::::weight(); - let (pre, _) = ChargeAssetTxPayment::::from(0, None) - .validate_and_prepare(Some(1).into(), CALL, &info, len) - .unwrap(); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 20 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( pre, @@ -179,17 +178,16 @@ fn transaction_payment_in_native_possible() { len, &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 20 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); let mut info = info_from_weight(WEIGHT_100); - let extension_weight = ChargeAssetTxPayment::::weight(); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + let extension_weight = ext.weight(CALL); info.extension_weight = extension_weight; - let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .validate_and_prepare(Some(2).into(), CALL, &info, len) - .unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 20 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); let call_actual_weight = WEIGHT_50; let post_info = post_info_from_weight( info.call_weight @@ -429,8 +427,9 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_eq!(Assets::balance(asset_id, caller), balance); let weight = 100; - let ext_weight = ChargeAssetTxPayment::::weight(); let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let ext_weight = ext.weight(CALL); let len = 10; let fee_in_native = base_weight + weight + ext_weight.ref_time() + len as u64 + tip; let input_quote = AssetConversion::quote_price_tokens_for_exact_tokens( @@ -443,10 +442,9 @@ fn asset_transaction_payment_with_tip_and_refund() { let fee_in_asset = input_quote.unwrap(); let mut info = info_from_weight(WEIGHT_100); - info.extension_weight = ext_weight; - let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .validate_and_prepare(Some(caller).into(), CALL, &info, len) - .unwrap(); + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_in_asset); let final_weight = 50; @@ -683,7 
+681,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment) = ⪯ + let (_tip, _who, initial_payment, _extension_weight) = ⪯ let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, @@ -721,16 +719,15 @@ fn fee_with_native_asset_passed_with_id() { let tip = 10; let call_weight = 100; - let extension_weight = ChargeAssetTxPayment::::weight(); + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 5; let initial_fee = base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; let mut info = info_from_weight(WEIGHT_100); info.extension_weight = extension_weight; - let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())) - .validate_and_prepare(Some(caller).into(), CALL, &info, len) - .unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Balances::free_balance(caller), caller_balance - initial_fee); let final_weight = 50; @@ -746,9 +743,8 @@ fn fee_with_native_asset_passed_with_id() { len, &Ok(()), ) - .unwrap() .unwrap(), - extension_weight + Weight::zero() ); assert_eq!(Balances::free_balance(caller), caller_balance - expected_fee); @@ -792,8 +788,9 @@ fn transfer_add_and_remove_account() { assert_eq!(Assets::balance(asset_id, caller), balance); let call_weight = 100; - let extension_weight = ChargeAssetTxPayment::::weight(); let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id.into())); + let extension_weight = ext.weight(CALL); let len = 10; let fee_in_native = base_weight + call_weight + extension_weight.ref_time() + len as u64 + tip; diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs index 8364f0d5c719..c5fc78504a2a 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/lib.rs @@ -271,10 +271,6 @@ where { const IDENTIFIER: &'static str = "ChargeAssetTxPayment"; type Implicit = (); - - fn weight() -> Weight { - ::WeightInfo::charge_asset_tx_payment_asset() - } } impl TransactionExtension for ChargeAssetTxPayment @@ -303,8 +299,18 @@ where InitialPayment, // asset_id for the transaction payment Option>, + // weight used by the extension + Weight, ); + fn weight(&self, _: &T::RuntimeCall) -> Weight { + if self.asset_id.is_some() { + ::WeightInfo::charge_asset_tx_payment_asset() + } else { + ::WeightInfo::charge_asset_tx_payment_native() + } + } + fn validate( &self, origin: ::RuntimeOrigin, @@ -339,7 +345,7 @@ where let (tip, who, fee) = val; // Mutating call of `withdraw_fee` to actually charge for the transaction. let (_fee, initial_payment) = self.withdraw_fee(&who, call, info, fee)?; - Ok((tip, who, initial_payment, self.asset_id)) + Ok((tip, who, initial_payment, self.asset_id, self.weight(call))) } fn post_dispatch_details( @@ -348,14 +354,14 @@ where post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result, TransactionValidityError> { - let (tip, who, initial_payment, asset_id) = pre; + ) -> Result { + let (tip, who, initial_payment, asset_id, extension_weight) = pre; match initial_payment { InitialPayment::Native(already_withdrawn) => { // Take into account the weight used by this extension before calculating the // refund. 
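The balance assertions in the tests that follow are easier to audit with the fee formula written out. Assuming the mocks' identity weight-to-fee coefficients, the total is a plain sum; the `15` replacing the old `20` is the native-path extension weight now that the asset-path worst case is no longer charged:

```rust
// fee = base_weight + call_weight + extension_weight + length + tip,
// under the test mocks' one-to-one weight/length-to-fee mapping.
fn fee(base: u64, call: u64, ext: u64, len: u64, tip: u64) -> u64 {
    base + call + ext + len + tip
}

fn main() {
    // Matches `initial_balance - 5 - 5 - 15 - 10` (untipped transfer).
    assert_eq!(fee(5, 5, 15, 10, 0), 35);
    // Matches `initial_balance_for_2 - 5 - 10 - 100 - 15 - 5` (tipped).
    assert_eq!(fee(5, 100, 15, 10, 5), 135);
}
```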
let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_native(); - let unspent_weight = Self::weight().saturating_sub(actual_ext_weight); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); let mut actual_post_info = *post_info; actual_post_info.refund(unspent_weight); pallet_transaction_payment::ChargeTransactionPayment::::post_dispatch_details( @@ -365,13 +371,11 @@ where len, result, )?; - Ok(Some(::WeightInfo::charge_asset_tx_payment_native())) + Ok(unspent_weight) }, InitialPayment::Asset(already_withdrawn) => { - // Take into account the weight used by this extension before calculating the - // refund. let actual_ext_weight = ::WeightInfo::charge_asset_tx_payment_asset(); - let unspent_weight = Self::weight().saturating_sub(actual_ext_weight); + let unspent_weight = extension_weight.saturating_sub(actual_ext_weight); let mut actual_post_info = *post_info; actual_post_info.refund(unspent_weight); let actual_fee = pallet_transaction_payment::Pallet::::compute_actual_fee( @@ -396,7 +400,7 @@ where tip: converted_tip, asset_id, }); - Ok(Some(::WeightInfo::charge_asset_tx_payment_asset())) + Ok(unspent_weight) }, InitialPayment::Nothing => { // `actual_fee` should be zero here for any signed extrinsic. It would be @@ -404,7 +408,8 @@ where // `compute_actual_fee` is not aware of them. In both cases it's fine to just // move ahead without adjusting the fee, though, so we do nothing. debug_assert!(tip.is_zero(), "tip should be zero if initial fee was zero."); - Ok(Some(::WeightInfo::charge_asset_tx_payment_zero())) + Ok(extension_weight + .saturating_sub(::WeightInfo::charge_asset_tx_payment_zero())) }, } } diff --git a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs index 040427d13c7e..76c49da42748 100644 --- a/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/asset-tx-payment/src/tests.rs @@ -120,12 +120,11 @@ fn transaction_payment_in_native_possible() { .execute_with(|| { let len = 10; let mut info = info_from_weight(Weight::from_parts(5, 0)); - info.extension_weight = ChargeAssetTxPayment::::weight(); - let (pre, _) = ChargeAssetTxPayment::::from(0, None) - .validate_and_prepare(Some(1).into(), CALL, &info, len) - .unwrap(); + let ext = ChargeAssetTxPayment::::from(0, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(1).into(), CALL, &info, len).unwrap(); let initial_balance = 10 * balance_factor; - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 20 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( pre, @@ -134,21 +133,20 @@ fn transaction_payment_in_native_possible() { len, &Ok(()), )); - assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 20 - 10); + assert_eq!(Balances::free_balance(1), initial_balance - 5 - 5 - 15 - 10); let mut info = info_from_weight(Weight::from_parts(100, 0)); - info.extension_weight = ChargeAssetTxPayment::::weight(); - let (pre, _) = ChargeAssetTxPayment::::from(5 /* tipped */, None) - .validate_and_prepare(Some(2).into(), CALL, &info, len) - .unwrap(); + let ext = ChargeAssetTxPayment::::from(5 /* tipped */, None); + info.extension_weight = ext.weight(CALL); + let (pre, _) = ext.validate_and_prepare(Some(2).into(), CALL, &info, len).unwrap(); let initial_balance_for_2 = 20 * balance_factor; - 
assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 20 - 5); + assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 100 - 15 - 5); let call_actual_weight = Weight::from_parts(50, 0); // The extension weight refund should be taken into account in `post_dispatch`. - let post_info = post_info_from_weight( - call_actual_weight.saturating_add(ChargeAssetTxPayment::::weight()), - ); + let post_info = post_info_from_weight(call_actual_weight.saturating_add( + ChargeAssetTxPayment::::from(5 /* tipped */, None).weight(CALL), + )); assert_ok!(ChargeAssetTxPayment::::post_dispatch_details( pre, &info, @@ -159,7 +157,8 @@ fn transaction_payment_in_native_possible() { assert_eq!( post_info.actual_weight, Some( - call_actual_weight.saturating_add(MockWeights::charge_asset_tx_payment_asset()) + call_actual_weight + .saturating_add(MockWeights::charge_asset_tx_payment_native()) ) ); assert_eq!(Balances::free_balance(2), initial_balance_for_2 - 5 - 10 - 50 - 15 - 5); @@ -325,8 +324,9 @@ fn asset_transaction_payment_with_tip_and_refund() { assert_ok!(Assets::mint_into(asset_id.into(), &beneficiary, balance)); assert_eq!(Assets::balance(asset_id, caller), balance); let weight = 100; - let ext_weight = ChargeAssetTxPayment::::weight(); let tip = 5; + let ext = ChargeAssetTxPayment::::from(tip, Some(asset_id)); + let ext_weight = ext.weight(CALL); let len = 10; // we convert the from weight to fee based on the ratio between asset min balance and // existential deposit @@ -334,9 +334,7 @@ fn asset_transaction_payment_with_tip_and_refund() { min_balance / ExistentialDeposit::get(); let mut info = info_from_weight(Weight::from_parts(weight, 0)); info.extension_weight = ext_weight; - let (pre, _) = ChargeAssetTxPayment::::from(tip, Some(asset_id)) - .validate_and_prepare(Some(caller).into(), CALL, &info, len) - .unwrap(); + let (pre, _) = ext.validate_and_prepare(Some(caller).into(), CALL, &info, len).unwrap(); assert_eq!(Assets::balance(asset_id, caller), balance - fee_with_tip); System::assert_has_event(RuntimeEvent::Assets(pallet_assets::Event::Withdrawn { @@ -579,7 +577,7 @@ fn post_dispatch_fee_is_zero_if_pre_dispatch_fee_is_zero() { .unwrap(); // `Pays::No` implies no pre-dispatch fees assert_eq!(Assets::balance(asset_id, caller), balance); - let (_tip, _who, initial_payment, _asset_id) = ⪯ + let (_tip, _who, initial_payment, _asset_id, _extension_weight) = ⪯ let not_paying = match initial_payment { &InitialPayment::Nothing => true, _ => false, diff --git a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs index 410a353d3264..099d8a0c12b0 100644 --- a/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/skip-feeless-payment/src/lib.rs @@ -129,10 +129,6 @@ impl TransactionExtensionB fn implicit(&self) -> Result { self.0.implicit() } - - fn weight() -> frame_support::weights::Weight { - S::weight() - } } impl> @@ -143,6 +139,10 @@ where type Val = Intermediate as OriginTrait>::PalletsOrigin>; type Pre = Intermediate as OriginTrait>::PalletsOrigin>; + fn weight(&self, call: &T::RuntimeCall) -> frame_support::weights::Weight { + self.0.weight(call) + } + fn validate( &self, origin: OriginOf, @@ -181,12 +181,12 @@ where post_info: &PostDispatchInfoOf, len: usize, result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { match pre { Apply(pre) => S::post_dispatch_details(pre, info, post_info, len, 
result), Skip(origin) => { Pallet::::deposit_event(Event::::FeeSkipped { origin }); - Ok(None) + Ok(Weight::zero()) }, } } diff --git a/substrate/frame/transaction-payment/src/lib.rs b/substrate/frame/transaction-payment/src/lib.rs index 194bfb92015a..ab1f8a7afa9a 100644 --- a/substrate/frame/transaction-payment/src/lib.rs +++ b/substrate/frame/transaction-payment/src/lib.rs @@ -853,10 +853,6 @@ impl core::fmt::Debug for ChargeTransactionPayment { impl TransactionExtensionBase for ChargeTransactionPayment { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type Implicit = (); - - fn weight() -> Weight { - T::WeightInfo::charge_transaction_payment() - } } impl TransactionExtension for ChargeTransactionPayment @@ -881,6 +877,10 @@ where <::OnChargeTransaction as OnChargeTransaction>::LiquidityInfo, ); + fn weight(&self, _: &T::RuntimeCall) -> Weight { + T::WeightInfo::charge_transaction_payment() + } + fn validate( &self, origin: ::RuntimeOrigin, @@ -927,13 +927,13 @@ where post_info: &PostDispatchInfoOf, len: usize, _result: &DispatchResult, - ) -> Result, TransactionValidityError> { + ) -> Result { let actual_fee = Pallet::::compute_actual_fee(len as u32, info, &post_info, tip); T::OnChargeTransaction::correct_and_deposit_fee( &who, info, &post_info, actual_fee, tip, imbalance, )?; Pallet::::deposit_event(Event::::TransactionFeePaid { who, actual_fee, tip }); - Ok(Some(::WeightInfo::charge_transaction_payment())) + Ok(Weight::zero()) } } diff --git a/substrate/frame/transaction-payment/src/tests.rs b/substrate/frame/transaction-payment/src/tests.rs index b3b96cf04601..05b3072af211 100644 --- a/substrate/frame/transaction-payment/src/tests.rs +++ b/substrate/frame/transaction-payment/src/tests.rs @@ -141,15 +141,15 @@ fn transaction_extension_transaction_payment_work() { .build() .execute_with(|| { let mut info = info_from_weight(Weight::from_parts(5, 0)); - let ext_weight = ChargeTransactionPayment::::weight(); + let ext = Ext::from(0); + let ext_weight = ext.weight(CALL); info.extension_weight = ext_weight; - Ext::from(0) - .test_run(Some(1).into(), CALL, &info, 10, |_| { - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); - Ok(default_post_info()) - }) - .unwrap() - .unwrap(); + ext.test_run(Some(1).into(), CALL, &info, 10, |_| { + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); + Ok(default_post_info()) + }) + .unwrap() + .unwrap(); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10 - 10); assert_eq!(FeeUnbalancedAmount::get(), 5 + 5 + 10 + 10); assert_eq!(TipUnbalancedAmount::get(), 0); @@ -183,17 +183,17 @@ fn transaction_extension_transaction_payment_multiplied_refund_works() { let len = 10; let origin = Some(2).into(); let mut info = info_from_weight(Weight::from_parts(100, 0)); - let ext_weight = ChargeTransactionPayment::::weight(); + let ext = Ext::from(5 /* tipped */); + let ext_weight = ext.weight(CALL); info.extension_weight = ext_weight; - Ext::from(5 /* tipped */) - .test_run(origin, CALL, &info, len, |_| { - // 5 base fee, 10 byte fee, 3/2 * (100 call weight fee + 10 ext weight fee), 5 - // tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 165 - 5); - Ok(post_info_from_weight(Weight::from_parts(50, 0))) - }) - .unwrap() - .unwrap(); + ext.test_run(origin, CALL, &info, len, |_| { + // 5 base fee, 10 byte fee, 3/2 * (100 call weight fee + 10 ext weight fee), 5 + // tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 165 - 5); + Ok(post_info_from_weight(Weight::from_parts(50, 0))) + }) + .unwrap() + .unwrap(); // 75 (3/2 
of the returned 50 units of call weight, 0 returned of ext weight) is // refunded @@ -615,23 +615,24 @@ fn refund_consistent_with_actual_weight() { .build() .execute_with(|| { let mut info = info_from_weight(Weight::from_parts(100, 0)); - let ext_weight = ChargeTransactionPayment::::weight(); + let tip = 5; + let ext = Ext::from(tip); + let ext_weight = ext.weight(CALL); info.extension_weight = ext_weight; let mut post_info = post_info_from_weight(Weight::from_parts(33, 0)); let prev_balance = Balances::free_balance(2); let len = 10; - let tip = 5; NextFeeMultiplier::::put(Multiplier::saturating_from_rational(5, 4)); - let actual_post_info = Ext::from(tip) + let actual_post_info = ext .test_run(Some(2).into(), CALL, &info, len, |_| Ok(post_info)) .unwrap() .unwrap(); post_info .actual_weight .as_mut() - .map(|w| w.saturating_accrue(ChargeTransactionPayment::::weight())); + .map(|w| w.saturating_accrue(Ext::from(tip).weight(CALL))); assert_eq!(post_info, actual_post_info); let refund_based_fee = prev_balance - Balances::free_balance(2); diff --git a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs index 434db364d805..83f79e30c2c8 100644 --- a/substrate/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/checked_extrinsic.rs @@ -24,8 +24,7 @@ use sp_weights::Weight; use crate::{ traits::{ self, transaction_extension::TransactionExtension, DispatchInfoOf, DispatchTransaction, - Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, TransactionExtensionBase, - ValidateUnsigned, + Dispatchable, MaybeDisplay, Member, PostDispatchInfoOf, ValidateUnsigned, }, transaction_validity::{TransactionSource, TransactionValidity}, }; @@ -133,7 +132,7 @@ where } } -impl +impl> CheckedExtrinsic { /// Returns the weight of the extension of this transaction, if present. If the transaction @@ -141,7 +140,8 @@ impl pub fn extension_weight(&self) -> Weight { match &self.format { ExtrinsicFormat::Bare => Weight::zero(), - ExtrinsicFormat::Signed(_, _) | ExtrinsicFormat::General(_) => Extension::weight(), + ExtrinsicFormat::Signed(_, ext) | ExtrinsicFormat::General(ext) => + ext.weight(&self.function), } } } diff --git a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs index 383283d5f749..d041bbd92a26 100644 --- a/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/substrate/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -453,7 +453,7 @@ impl +impl> UncheckedExtrinsic { /// Returns the weight of the extension of this transaction, if present. If the transaction @@ -461,7 +461,8 @@ impl pub fn extension_weight(&self) -> Weight { match &self.preamble { Preamble::Bare(_) => Weight::zero(), - Preamble::Signed(_, _, _, _, _) | Preamble::General(_, _) => Extension::weight(), + Preamble::Signed(_, _, _, ext, _) | Preamble::General(_, ext) => + ext.weight(&self.function), } } } diff --git a/substrate/primitives/runtime/src/traits/mod.rs b/substrate/primitives/runtime/src/traits/mod.rs index 94ecab1f9c6e..8789300cc1aa 100644 --- a/substrate/primitives/runtime/src/traits/mod.rs +++ b/substrate/primitives/runtime/src/traits/mod.rs @@ -1498,7 +1498,7 @@ pub trait RefundWeight { /// after dispatch. pub trait ExtensionPostDispatchWeightHandler: RefundWeight { /// Accrue some weight pertaining to the extension. 
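`set_extension_weight` is the other half of the refund story: the handler folds the worst-case extension weight from the `DispatchInfo` into `actual_weight` up front, and `post_dispatch` then shrinks it by whatever the extensions report as unspent. A compact model mirroring the new signature; the types are hypothetical reductions of the FRAME ones:

```rust
#[derive(Clone, Copy)]
struct DispatchInfo {
    call_weight: u64,
    extension_weight: u64,
}

struct PostDispatchInfo {
    actual_weight: Option<u64>,
}

impl PostDispatchInfo {
    // The weight now comes from `info` itself; callers can no longer pass
    // a stale or mismatched value alongside it.
    fn set_extension_weight(&mut self, info: &DispatchInfo) {
        let actual =
            self.actual_weight.unwrap_or(info.call_weight) + info.extension_weight;
        self.actual_weight = Some(actual);
    }
}

fn main() {
    let info = DispatchInfo { call_weight: 1_000, extension_weight: 600 };
    let mut post = PostDispatchInfo { actual_weight: Some(500) };
    post.set_extension_weight(&info);
    // 500 actual call weight + 600 worst-case extension weight; refunds
    // reported by `post_dispatch_details` are subtracted afterwards.
    assert_eq!(post.actual_weight, Some(1_100));
}
```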
-	fn set_extension_weight(&mut self, info: &DispatchInfo, weight: sp_weights::Weight);
+	fn set_extension_weight(&mut self, info: &DispatchInfo);
 }
 
 impl RefundWeight for () {
@@ -1506,7 +1506,7 @@ impl RefundWeight for () {
 }
 
 impl ExtensionPostDispatchWeightHandler<()> for () {
-	fn set_extension_weight(&mut self, _info: &(), _weight: sp_weights::Weight) {}
+	fn set_extension_weight(&mut self, _info: &()) {}
 }
 
 /// A lazy call (module function and argument values) that can be executed via its `dispatch`
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs
index 000241f13c21..67aa8c900e82 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/as_transaction_extension.rs
@@ -98,9 +98,9 @@ where
 		post_info: &PostDispatchInfoOf<Call>,
 		len: usize,
 		result: &DispatchResult,
-	) -> Result<Option<Weight>, TransactionValidityError> {
+	) -> Result<Weight, TransactionValidityError> {
 		SE::post_dispatch(Some(pre), info, post_info, len, result)?;
-		Ok(None)
+		Ok(Weight::zero())
 	}
 
 	fn validate_bare_compat(
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
index 7e6934825037..04f31e1399eb 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/dispatch_transaction.rs
@@ -116,7 +116,7 @@ impl<T: TransactionExtension<Call>, Call: Dispatchable + Encode> DispatchTransac
 			Ok(info) => info,
 			Err(err) => &mut err.post_info,
 		};
-		post_info.set_extension_weight(info, Self::weight());
+		post_info.set_extension_weight(info);
 		T::post_dispatch(pre, info, post_info, len, &pd_res)?;
 		Ok(res)
 	}
@@ -137,7 +137,7 @@ impl<T: TransactionExtension<Call>, Call: Dispatchable + Encode> DispatchTransac
 			Ok(info) => info,
 			Err(err) => &mut err.post_info,
 		};
-		post_info.set_extension_weight(info, Self::weight());
+		post_info.set_extension_weight(info);
 		T::post_dispatch(pre, info, post_info, len, &pd_res)?;
 		Ok(res)
 	}
diff --git a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
index ad6099fafee4..9f83e18d731a 100644
--- a/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
+++ b/substrate/primitives/runtime/src/traits/transaction_extension/mod.rs
@@ -70,11 +70,6 @@ pub trait TransactionExtensionBase:
 		Ok(Self::Implicit::decode(&mut &[][..]).map_err(|_| IndeterminateImplicit)?)
 	}
 
-	/// The weight consumed by executing this extension instance fully during transaction dispatch.
-	fn weight() -> Weight {
-		Weight::zero()
-	}
-
 	/// Returns the metadata for this extension.
 	///
 	/// As a [`TransactionExtension`] can be a tuple of [`TransactionExtension`]s we need to return
@@ -144,7 +139,7 @@ pub trait TransactionExtensionBase:
 /// value in a `Some`. This is useful in computing fee refunds, similar to how post dispatch
 /// information is used to refund fees for calls. Alternatively, a `None` can be returned, which
 /// means that the worst case scenario weight, namely the value returned by
-/// [weight](TransactionExtensionBase::weight), is the actual weight. This particular piece of logic
+/// [weight](TransactionExtension::weight), is the actual weight. This particular piece of logic
 /// is embedded in the default implementation of
 /// [post_dispatch](TransactionExtension::post_dispatch) so that the weight is assumed to be worst
 /// case scenario, but implementers of this trait can correct it with extra effort. Therefore, all
@@ -210,6 +205,11 @@ pub trait TransactionExtension<Call: Dispatchable>: TransactionExtensionBase {
 	/// The type that encodes information that can be passed from prepare to post-dispatch.
 	type Pre;
 
+	/// The weight consumed by executing this extension instance fully during transaction dispatch.
+	fn weight(&self, _call: &Call) -> Weight {
+		Weight::zero()
+	}
+
 	/// Validate a transaction for the transaction queue.
 	///
 	/// This function can be called frequently by the transaction queue to obtain transaction
@@ -288,8 +288,9 @@ pub trait TransactionExtension<Call: Dispatchable>: TransactionExtensionBase {
 	/// introduce a `TransactionValidityError`, causing the block to become invalid for including
 	/// it.
 	///
-	/// On success, the caller can return the actual amount of weight consumed by this extension
-	/// during dispatch.
+	/// On success, the caller must return the amount of unspent weight left over by this extension
+	/// after dispatch. By default, this function returns no unspent weight, which means the entire
+	/// weight computed for the worst case scenario is consumed.
 	///
 	/// WARNING: This function does not automatically keep track of accumulated "actual" weight.
 	/// Unless this weight is handled at the call site, use
@@ -314,19 +315,19 @@ pub trait TransactionExtension<Call: Dispatchable>: TransactionExtensionBase {
 		_post_info: &PostDispatchInfoOf<Call>,
 		_len: usize,
 		_result: &DispatchResult,
-	) -> Result<Option<Weight>, TransactionValidityError> {
-		Ok(None)
+	) -> Result<Weight, TransactionValidityError> {
+		Ok(Weight::zero())
 	}
 
 	/// A wrapper for [post_dispatch_details](TransactionExtension::post_dispatch_details) that
 	/// refunds the unspent weight consumed by this extension into the post dispatch information.
 	///
-	/// If `post_dispatch` returns a consumed weight, which should be less than the worst case
-	/// weight provided by [weight](TransactionExtensionBase::weight), that is the value refunded in
-	/// `post_info`.
+	/// If `post_dispatch_details` returns a non-zero unspent weight, which, by definition, must be
+	/// less than the worst case weight provided by [weight](TransactionExtension::weight), that
+	/// is the value refunded in `post_info`.
 	///
-	/// If no weight is returned by `post_dispatch_details`, this function assumes the worst case
-	/// weight and does not refund anything.
+	/// If no unspent weight is reported by `post_dispatch_details`, this function assumes the worst
+	/// case weight and does not refund anything.
 	///
 	/// For more information, look into
 	/// [post_dispatch_details](TransactionExtension::post_dispatch_details).
@@ -337,12 +338,9 @@ pub trait TransactionExtension<Call: Dispatchable>: TransactionExtensionBase {
 		len: usize,
 		result: &DispatchResult,
 	) -> Result<(), TransactionValidityError> {
-		if let Some(unspent_weight) =
-			Self::post_dispatch_details(pre, info, &post_info, len, result)?
-				.map(|actual_ext_weight| Self::weight().saturating_sub(actual_ext_weight))
-		{
-			post_info.refund(unspent_weight);
-		}
+		let unspent_weight = Self::post_dispatch_details(pre, info, &post_info, len, result)?;
+		post_info.refund(unspent_weight);
+
 		Ok(())
 	}
@@ -477,11 +475,6 @@ impl TransactionExtensionBase for Tuple {
 	fn implicit(&self) -> Result<Self::Implicit, TransactionValidityError> {
 		Ok(for_tuples!( ( #( Tuple.implicit()? ),* ) ))
 	}
-	fn weight() -> Weight {
-		let mut weight = Weight::zero();
-		for_tuples!( #( weight = weight.saturating_add(Tuple::weight()); )* );
-		weight
-	}
 	fn metadata() -> Vec<TransactionExtensionMetadata> {
 		let mut ids = Vec::new();
 		for_tuples!( #( ids.extend(Tuple::metadata()); )* );
@@ -495,6 +488,12 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 	for_tuples!( type Val = ( #( Tuple::Val ),* ); );
 	for_tuples!( type Pre = ( #( Tuple::Pre ),* ); );
 
+	fn weight(&self, call: &Call) -> Weight {
+		let mut weight = Weight::zero();
+		for_tuples!( #( weight = weight.saturating_add(Tuple.weight(call)); )* );
+		weight
+	}
+
 	fn validate(
 		&self,
 		origin: <Call as Dispatchable>::RuntimeOrigin,
@@ -555,14 +554,13 @@ impl<Call: Dispatchable> TransactionExtension<Call> for Tuple {
 		post_info: &PostDispatchInfoOf<Call>,
 		len: usize,
 		result: &DispatchResult,
-	) -> Result<Option<Weight>, TransactionValidityError> {
-		let mut weight = Weight::zero();
+	) -> Result<Weight, TransactionValidityError> {
+		let mut total_unspent_weight = Weight::zero();
 		for_tuples!( #({
-			let maybe_weight = Tuple::post_dispatch_details(pre.Tuple, info, post_info, len, result)?;
-			let actual_weight = maybe_weight.unwrap_or_else(|| Tuple::weight());
-			weight = weight.saturating_add(actual_weight);
+			let unspent_weight = Tuple::post_dispatch_details(pre.Tuple, info, post_info, len, result)?;
+			total_unspent_weight = total_unspent_weight.saturating_add(unspent_weight);
 		})* );
-		Ok(Some(weight))
+		Ok(total_unspent_weight)
 	}
 
 	fn post_dispatch(
@@ -583,14 +581,14 @@ impl TransactionExtensionBase for () {
 	fn implicit(&self) -> sp_std::result::Result<Self::Implicit, TransactionValidityError> {
 		Ok(())
 	}
-	fn weight() -> Weight {
-		Weight::zero()
-	}
 }
 
 impl<Call: Dispatchable> TransactionExtension<Call> for () {
 	type Val = ();
 	type Pre = ();
+	fn weight(&self, _call: &Call) -> Weight {
+		Weight::zero()
+	}
 	fn validate(
 		&self,
 		origin: <Call as Dispatchable>::RuntimeOrigin,
diff --git a/substrate/test-utils/runtime/src/lib.rs b/substrate/test-utils/runtime/src/lib.rs
index 215ca2fd885f..e47c73fd7dd1 100644
--- a/substrate/test-utils/runtime/src/lib.rs
+++ b/substrate/test-utils/runtime/src/lib.rs
@@ -260,12 +260,7 @@ impl sp_runtime::traits::RefundWeight for CheckSubstrateCall {
 impl sp_runtime::traits::ExtensionPostDispatchWeightHandler<CheckSubstrateCall>
 	for CheckSubstrateCall
 {
-	fn set_extension_weight(
-		&mut self,
-		_info: &CheckSubstrateCall,
-		_weight: frame_support::weights::Weight,
-	) {
-	}
+	fn set_extension_weight(&mut self, _info: &CheckSubstrateCall) {}
 }
 
 impl sp_runtime::traits::Dispatchable for CheckSubstrateCall {

From 89587d5f63578058026617eb6765ac229b487783 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Mon, 19 Aug 2024 23:39:56 +0300
Subject: [PATCH 68/74] fix storage weight reclaim test

Signed-off-by: georgepisaltu
---
 cumulus/primitives/storage-weight-reclaim/src/tests.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/cumulus/primitives/storage-weight-reclaim/src/tests.rs b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
index 0e5bfccbd934..c5552b0f0a33 100644
--- a/cumulus/primitives/storage-weight-reclaim/src/tests.rs
+++ b/cumulus/primitives/storage-weight-reclaim/src/tests.rs
@@ -457,7 +457,7 @@ fn test_incorporates_check_weight_unspent_weight_on_negative() {
 
 #[test]
 fn test_nothing_relcaimed() {
-	let mut test_ext = setup_test_externalities(&[100, 200]);
+	let mut test_ext = setup_test_externalities(&[0, 100]);
 
 	test_ext.execute_with(|| {
 		set_current_storage_weight(0);
@@ -481,12 +481,12 @@ fn test_nothing_relcaimed() {
 			.validate_and_prepare(Some(ALICE.clone()).into(), CALL, &info, LEN)
 			.unwrap();
 		// Should return `setup_test_externalities` proof recorder value: 100.
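		// (The recorder values are whatever slice was passed to `setup_test_externalities`
		// above; with the new `&[0, 100]` setup the pre-dispatch read is 0 rather than 100,
		// which is what the updated assertion below checks.)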
-		assert_eq!(pre, Some(100));
+		assert_eq!(pre, Some(0));
 
 		// The `CheckWeight` extension will refund `actual_weight` from `PostDispatchInfo`
 		// we always need to call `post_dispatch` to verify that they interoperate correctly.
 		// Nothing to refund, unspent is 0, total weight 250
-		assert_ok!(CheckWeight::<Test>::post_dispatch_details((), &info, &post_info, 0, &Ok(()),));
+		assert_ok!(CheckWeight::<Test>::post_dispatch_details((), &info, &post_info, LEN, &Ok(())));
 		// `setup_test_externalities` proof recorder value: 200, so this means the extrinsic
 		// actually used 100 proof size.
 		// Nothing to refund or add, weight matches proof recorder
@@ -495,7 +495,7 @@ fn test_nothing_relcaimed() {
 			&info,
 			&post_info,
 			LEN,
-			&Ok(()),
+			&Ok(())
 		));
 
 		// Check block len weight was not reclaimed:

From a0a41c86c7b4743bb73ac1b4b6b5d70806ae9b88 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 12:15:44 +0300
Subject: [PATCH 69/74] Fix compilation

Signed-off-by: georgepisaltu
---
 .../extensions/check_obsolete_extension.rs  |  2 +-
 substrate/bin/node/cli/tests/basic.rs       | 52 +++++--------------
 substrate/bin/node/cli/tests/fees.rs        |  9 ++--
 substrate/frame/executive/src/tests.rs      |  8 +--
 .../transaction-payment/src/benchmarking.rs |  2 +-
 5 files changed, 24 insertions(+), 49 deletions(-)

diff --git a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
index d5e09df58dcd..ce1037167d87 100644
--- a/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
+++ b/bridges/bin/runtime-common/src/extensions/check_obsolete_extension.rs
@@ -346,7 +346,7 @@ macro_rules! generate_bridge_reject_obsolete_headers_and_messages {
 				result: &sp_runtime::DispatchResult,
 			) -> Result<frame_support::pallet_prelude::Weight, sp_runtime::transaction_validity::TransactionValidityError> {
 				use $crate::extensions::check_obsolete_extension::__private::tuplex::PopFront;
-				let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(None) };
+				let Some((relayer, to_post_dispatch)) = to_post_dispatch else { return Ok(frame_support::pallet_prelude::Weight::zero()) };
 				let has_failed = result.is_err();
 				$(
 					let (item, to_post_dispatch) = to_post_dispatch.pop_front();
diff --git a/substrate/bin/node/cli/tests/basic.rs b/substrate/bin/node/cli/tests/basic.rs
index 61f4b7326a3a..af8c132e8d65 100644
--- a/substrate/bin/node/cli/tests/basic.rs
+++ b/substrate/bin/node/cli/tests/basic.rs
@@ -25,15 +25,13 @@ use frame_system::{self, AccountInfo, EventRecord, Phase};
 use polkadot_sdk::*;
 use sp_core::{storage::well_known_keys, traits::Externalities};
 use sp_runtime::{
-	traits::{Hash as HashT, TransactionExtensionBase},
-	transaction_validity::InvalidTransaction,
-	ApplyExtrinsicResult,
+	traits::Hash as HashT, transaction_validity::InvalidTransaction, ApplyExtrinsicResult,
 };
 
 use kitchensink_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
 	Balances, CheckedExtrinsic, Header, Runtime, RuntimeCall, RuntimeEvent, System,
-	TransactionPayment, Treasury, TxExtension, UncheckedExtrinsic,
+	TransactionPayment, Treasury, UncheckedExtrinsic,
 };
 use node_primitives::{Balance, Hash};
 use node_testing::keyring::*;
@@ -265,7 +263,7 @@ fn successful_execution_with_native_equivalent_code_gives_ok() {
 	let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
 	assert!(r.is_ok());
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0;
@@ -306,7 +304,7 @@ fn successful_execution_with_foreign_code_gives_ok() {
 	let r = executor_call(&mut t, "Core_initialize_block", &vec![].and(&from_block_number(1u32))).0;
 	assert!(r.is_ok());
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt())).0;
@@ -326,7 +324,8 @@ fn full_native_block_import_works() {
 	let mut alice_last_known_balance: Balance = Default::default();
 	let mut fees = t.execute_with(|| transfer_fee(&xt()));
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let extension_weight = xt().extension_weight();
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	let transfer_weight = default_transfer_call().get_dispatch_info().call_weight.saturating_add(
@@ -376,14 +375,6 @@ fn full_native_block_import_works() {
 				}),
 				topics: vec![],
 			},
-			EventRecord {
-				phase: Phase::ApplyExtrinsic(1),
-				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
-					who: alice().into(),
-					amount: fees - fees_after_refund,
-				}),
-				topics: vec![],
-			},
 			EventRecord {
 				phase: Phase::ApplyExtrinsic(1),
 				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
@@ -421,7 +412,7 @@ fn full_native_block_import_works() {
 				phase: Phase::ApplyExtrinsic(1),
 				event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
 					weight: transfer_weight
-						.saturating_add(TxExtension::weight().saturating_sub(weight_refund)),
+						.saturating_add(extension_weight.saturating_sub(weight_refund)),
 					class: Default::default(),
 					pays: Default::default(),
 				}),
 				topics: vec![],
 			},
 		];
@@ -433,7 +424,8 @@ fn full_native_block_import_works() {
 	fees = t.execute_with(|| transfer_fee(&xt()));
 	let pot = t.execute_with(|| Treasury::pot());
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let extension_weight = xt().extension_weight();
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap();
@@ -479,14 +471,6 @@ fn full_native_block_import_works() {
 				}),
 				topics: vec![],
 			},
-			EventRecord {
-				phase: Phase::ApplyExtrinsic(1),
-				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
-					who: bob().into(),
-					amount: fees - fees_after_refund,
-				}),
-				topics: vec![],
-			},
 			EventRecord {
 				phase: Phase::ApplyExtrinsic(1),
 				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
@@ -524,7 +508,7 @@ fn full_native_block_import_works() {
 				phase: Phase::ApplyExtrinsic(1),
 				event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
 					weight: transfer_weight
-						.saturating_add(TxExtension::weight().saturating_sub(weight_refund)),
+						.saturating_add(extension_weight.saturating_sub(weight_refund)),
 					class: Default::default(),
 					pays: Default::default(),
 				}),
 				topics: vec![],
 			},
@@ -547,14 +531,6 @@ fn full_native_block_import_works() {
 				}),
 				topics: vec![],
 			},
-			EventRecord {
-				phase: Phase::ApplyExtrinsic(2),
-				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
-					who: alice().into(),
-					amount: fees - fees_after_refund,
-				}),
-				topics: vec![],
-			},
 			EventRecord {
 				phase: Phase::ApplyExtrinsic(2),
 				event: RuntimeEvent::Balances(pallet_balances::Event::Deposit {
@@ -592,7 +568,7 @@ fn full_native_block_import_works() {
 				phase: Phase::ApplyExtrinsic(2),
 				event: RuntimeEvent::System(frame_system::Event::ExtrinsicSuccess {
 					weight: transfer_weight
-						.saturating_add(TxExtension::weight().saturating_sub(weight_refund)),
+						.saturating_add(extension_weight.saturating_sub(weight_refund)),
 					class: Default::default(),
 					pays: Default::default(),
 				}),
 				topics: vec![],
 			},
@@ -610,7 +586,7 @@ fn full_wasm_block_import_works() {
 	let (block1, block2) = blocks();
 	let mut alice_last_known_balance: Balance = Default::default();
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	executor_call(&mut t, "Core_execute_block", &block1.0).0.unwrap();
@@ -621,7 +597,7 @@ fn full_wasm_block_import_works() {
 		alice_last_known_balance = Balances::total_balance(&alice());
 	});
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	executor_call(&mut t, "Core_execute_block", &block2.0).0.unwrap();
@@ -873,7 +849,7 @@ fn successful_execution_gives_ok() {
 		assert_eq!(Balances::total_balance(&alice()), 111 * DOLLARS);
 	});
 
-	let weight_refund = pallet_asset_conversion_tx_payment::ChargeAssetTxPayment::<Runtime>::weight().saturating_sub(<<Runtime as pallet_asset_conversion_tx_payment::Config>::WeightInfo as pallet_asset_conversion_tx_payment::WeightInfo>::charge_asset_tx_payment_native());
+	let weight_refund = Weight::zero();
 	let fees_after_refund = t.execute_with(|| transfer_fee_with_refund(&xt(), weight_refund));
 
 	let r = executor_call(&mut t, "BlockBuilder_apply_extrinsic", &vec![].and(&xt()))
diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs
index a39567da2f87..c1f291d6dfae 100644
--- a/substrate/bin/node/cli/tests/fees.rs
+++ b/substrate/bin/node/cli/tests/fees.rs
@@ -19,20 +19,17 @@
 use codec::{Encode, Joiner};
 use frame_support::{
 	dispatch::GetDispatchInfo,
 	traits::Currency,
-	weights::{constants::ExtrinsicBaseWeight, IdentityFee, WeightToFee},
+	weights::{constants::ExtrinsicBaseWeight, IdentityFee, Weight, WeightToFee},
 };
 use kitchensink_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
 	Balances, CheckedExtrinsic, Multiplier, Runtime, RuntimeCall, TransactionByteFee,
-	TransactionPayment, TxExtension,
+	TransactionPayment,
 };
 use node_primitives::Balance;
 use node_testing::keyring::*;
 use polkadot_sdk::*;
-use sp_runtime::{
-	traits::{One, TransactionExtensionBase},
-	Perbill,
-};
+use sp_runtime::{traits::One, Perbill};
 
 pub mod common;
 use self::common::{sign, *};
diff --git a/substrate/frame/executive/src/tests.rs b/substrate/frame/executive/src/tests.rs
index 48d0309e052b..6a1363a241fd 100644
--- a/substrate/frame/executive/src/tests.rs
+++ b/substrate/frame/executive/src/tests.rs
@@ -24,7 +24,7 @@ use sp_core::H256;
 use sp_runtime::{
 	generic::{DigestItem, Era},
 	testing::{Block, Digest, Header},
-	traits::{Block as BlockT, Header as HeaderT, TransactionExtensionBase},
+	traits::{Block as BlockT, Header as HeaderT, TransactionExtension},
 	transaction_validity::{
 		InvalidTransaction, TransactionValidityError, UnknownTransaction, ValidTransaction,
 	},
@@ -720,7 +720,8 @@ fn block_weight_limit_enforced() {
 	let mut t = new_test_ext(10000);
 	let transfer_weight =
 		<<Runtime as pallet_balances::Config>::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death();
-	let extension_weight = TxExtension::weight();
+	let extension_weight = tx_ext(0u32.into(), 0)
+		.weight(&RuntimeCall::Balances(BalancesCall::transfer_allow_death { dest: 33, value: 0 }));
 	// on_initialize weight + base block execution weight
 	let block_weights = <Runtime as frame_system::Config>::BlockWeights::get();
 	let base_block_weight = Weight::from_parts(175, 0) + block_weights.base_block;
@@ -791,6 +792,7 @@ fn block_weight_and_size_is_stored_per_tx() {
 		tx_ext(2, 0),
 	);
 	let len = xt.clone().encode().len() as u32;
+	let extension_weight = xt.extension_weight();
 	let transfer_weight =
 		<<Runtime as pallet_balances::Config>::WeightInfo as pallet_balances::WeightInfo>::transfer_allow_death();
 	let mut t = new_test_ext(2);
 	t.execute_with(|| {
@@ -808,7 +810,7 @@ fn block_weight_and_size_is_stored_per_tx() {
 		assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok());
 
 		let extrinsic_weight = transfer_weight +
-			TxExtension::weight() +
+			extension_weight +
 			<Runtime as frame_system::Config>::BlockWeights::get()
 				.get(DispatchClass::Normal)
 				.base_extrinsic;
diff --git a/substrate/frame/transaction-payment/src/benchmarking.rs b/substrate/frame/transaction-payment/src/benchmarking.rs
index 53e181880077..3ef6dbca3892 100644
--- a/substrate/frame/transaction-payment/src/benchmarking.rs
+++ b/substrate/frame/transaction-payment/src/benchmarking.rs
@@ -51,9 +51,9 @@ mod benchmarks {
 		);
 		let tip = <T::OnChargeTransaction as OnChargeTransaction<T>>::minimum_balance();
 		let ext: ChargeTransactionPayment<T> = ChargeTransactionPayment::from(tip);
-		let extension_weight = ChargeTransactionPayment::<T>::weight();
 		let inner = frame_system::Call::remark { remark: alloc::vec![] };
 		let call = T::RuntimeCall::from(inner);
+		let extension_weight = ext.weight(&call);
 		let info = DispatchInfo {
 			call_weight: Weight::from_parts(100, 0),
 			extension_weight,

From 53ad8c04f7e2e766fa530a365ef6ec266aa3c4bf Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 13:12:42 +0300
Subject: [PATCH 70/74] Fix bench features

Signed-off-by: georgepisaltu
---
 Cargo.lock                                  |  1 +
 .../assets/asset-hub-rococo/src/lib.rs      | 32 +++++++++-------
 .../assets/asset-hub-westend/src/lib.rs     | 38 +++++++++++--------
 substrate/frame/examples/Cargo.toml         |  1 +
 .../authorization-tx-extension/Cargo.toml   | 12 ++++++
 5 files changed, 55 insertions(+), 29 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index edb1f17872ec..0cc72bfc16e7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10617,6 +10617,7 @@ name = "pallet-example-authorization-tx-extension"
 version = "4.0.0-dev"
 dependencies = [
 "docify",
+ "frame-benchmarking",
 "frame-support",
 "frame-system",
 "log",
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
index 9366a34bcfce..9b4de768d723 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs
@@ -1111,28 +1111,28 @@ pub struct AssetConversionTxHelper;
 
 impl
 	pallet_asset_conversion_tx_payment::BenchmarkHelperTrait<
 		AccountId,
-		xcm::v3::MultiLocation,
-		xcm::v3::MultiLocation,
+		cumulus_primitives_core::Location,
+		cumulus_primitives_core::Location,
 	> for AssetConversionTxHelper
 {
-	fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) {
+	fn create_asset_id_parameter(seed: u32) -> (Location, Location) {
 		// Use a different parachain' foreign assets pallet so that the asset is indeed foreign.
-		let asset_id = xcm::v3::MultiLocation::new(
+		let asset_id = Location::new(
 			1,
-			xcm::v3::Junctions::X3(
-				xcm::v3::Junction::Parachain(3000),
-				xcm::v3::Junction::PalletInstance(53),
-				xcm::v3::Junction::GeneralIndex(seed.into()),
-			),
+			[
+				cumulus_primitives_core::Junction::Parachain(3000),
+				cumulus_primitives_core::Junction::PalletInstance(53),
+				cumulus_primitives_core::Junction::GeneralIndex(seed.into()),
+			],
 		);
-		(asset_id, asset_id)
+		(asset_id.clone(), asset_id)
 	}
 
-	fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) {
+	fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) {
 		use frame_support::{assert_ok, traits::fungibles::Mutate};
 		assert_ok!(ForeignAssets::force_create(
 			RuntimeOrigin::root(),
-			asset_id.into(),
+			asset_id.clone().into(),
 			account.clone().into(), /* owner */
 			true,                   /* is_sufficient */
 			1,
@@ -1141,9 +1141,13 @@ impl
 		let lp_provider = account.clone();
 		use frame_support::traits::Currency;
 		let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into());
-		assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into()));
+		assert_ok!(ForeignAssets::mint_into(
+			asset_id.clone().into(),
+			&lp_provider,
+			u64::MAX.into()
+		));
 
-		let token_native = alloc::boxed::Box::new(TokenLocationV3::get());
+		let token_native = alloc::boxed::Box::new(TokenLocation::get());
 		let token_second = alloc::boxed::Box::new(asset_id);
 
 		assert_ok!(AssetConversion::create_pool(
diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
index 2b76cf715898..62d42a9786a5 100644
--- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
+++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs
@@ -1161,28 +1161,30 @@ pub struct AssetConversionTxHelper;
 
 impl
 	pallet_asset_conversion_tx_payment::BenchmarkHelperTrait<
 		AccountId,
-		xcm::v3::MultiLocation,
-		xcm::v3::MultiLocation,
+		cumulus_primitives_core::Location,
+		cumulus_primitives_core::Location,
 	> for AssetConversionTxHelper
 {
-	fn create_asset_id_parameter(seed: u32) -> (xcm::v3::MultiLocation, xcm::v3::MultiLocation) {
+	fn create_asset_id_parameter(
+		seed: u32,
+	) -> (cumulus_primitives_core::Location, cumulus_primitives_core::Location) {
 		// Use a different parachain' foreign assets pallet so that the asset is indeed foreign.
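		// (Same rework as in the asset-hub-rococo helper above: the benchmark helper moves
		// from the versioned `xcm::v3::MultiLocation` to the current `Location` type. Unlike
		// `MultiLocation`, `Location` is not `Copy`, which is presumably why the hunks below
		// clone `asset_id` before converting it.)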
-		let asset_id = xcm::v3::MultiLocation::new(
+		let asset_id = cumulus_primitives_core::Location::new(
 			1,
-			xcm::v3::Junctions::X3(
-				xcm::v3::Junction::Parachain(3000),
-				xcm::v3::Junction::PalletInstance(53),
-				xcm::v3::Junction::GeneralIndex(seed.into()),
-			),
+			[
+				cumulus_primitives_core::Junction::Parachain(3000),
+				cumulus_primitives_core::Junction::PalletInstance(53),
+				cumulus_primitives_core::Junction::GeneralIndex(seed.into()),
+			],
 		);
-		(asset_id, asset_id)
+		(asset_id.clone(), asset_id)
 	}
 
-	fn setup_balances_and_pool(asset_id: xcm::v3::MultiLocation, account: AccountId) {
+	fn setup_balances_and_pool(asset_id: cumulus_primitives_core::Location, account: AccountId) {
 		use frame_support::{assert_ok, traits::fungibles::Mutate};
 		assert_ok!(ForeignAssets::force_create(
 			RuntimeOrigin::root(),
-			asset_id.into(),
+			asset_id.clone().into(),
 			account.clone().into(), /* owner */
 			true,                   /* is_sufficient */
 			1,
@@ -1191,10 +1193,16 @@ impl
 		let lp_provider = account.clone();
 		use frame_support::traits::Currency;
 		let _ = Balances::deposit_creating(&lp_provider, u64::MAX.into());
-		assert_ok!(ForeignAssets::mint_into(asset_id.into(), &lp_provider, u64::MAX.into()));
+		assert_ok!(ForeignAssets::mint_into(
+			asset_id.clone().into(),
+			&lp_provider,
+			u64::MAX.into()
+		));
 
-		let token_native =
-			alloc::boxed::Box::new(xcm::v3::MultiLocation::new(1, xcm::v3::Junctions::Here));
+		let token_native = alloc::boxed::Box::new(cumulus_primitives_core::Location::new(
+			1,
+			cumulus_primitives_core::Junctions::Here,
+		));
 		let token_second = alloc::boxed::Box::new(asset_id);
 
 		assert_ok!(AssetConversion::create_pool(
diff --git a/substrate/frame/examples/Cargo.toml b/substrate/frame/examples/Cargo.toml
index 2f4af1cc0220..0c6ad5ef0978 100644
--- a/substrate/frame/examples/Cargo.toml
+++ b/substrate/frame/examples/Cargo.toml
@@ -44,6 +44,7 @@ std = [
 try-runtime = [
 	"pallet-default-config-example/try-runtime",
 	"pallet-dev-mode/try-runtime",
+	"pallet-example-authorization-tx-extension/try-runtime",
 	"pallet-example-basic/try-runtime",
 	"pallet-example-kitchensink/try-runtime",
 	"pallet-example-offchain-worker/try-runtime",
diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
index 4cd637772851..07e2f56f3bf4 100644
--- a/substrate/frame/examples/authorization-tx-extension/Cargo.toml
+++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
@@ -21,6 +21,7 @@ docify = { workspace = true }
 log = { workspace = true }
 scale-info = { features = ["derive"], workspace = true }
 
+frame-benchmarking = { optional = true, workspace = true }
 frame-support = { features = ["experimental"], workspace = true }
 frame-system = { workspace = true }
 
@@ -43,3 +44,14 @@ std = [
 	"sp-io/std",
 	"sp-runtime/std",
 ]
+runtime-benchmarks = [
+	"frame-benchmarking/runtime-benchmarks",
+	"frame-support/runtime-benchmarks",
+	"frame-system/runtime-benchmarks",
+	"sp-runtime/runtime-benchmarks",
+]
+try-runtime = [
+	"frame-support/try-runtime",
+	"frame-system/try-runtime",
+	"sp-runtime/try-runtime",
+]

From 67d655ec5760879f21b1bcc6afd11143901b5e97 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 14:20:04 +0300
Subject: [PATCH 71/74] Fix fee test

Signed-off-by: georgepisaltu
---
 substrate/bin/node/cli/tests/fees.rs | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/substrate/bin/node/cli/tests/fees.rs b/substrate/bin/node/cli/tests/fees.rs
index c1f291d6dfae..3cb0578fd0b3 100644
--- a/substrate/bin/node/cli/tests/fees.rs
+++ b/substrate/bin/node/cli/tests/fees.rs
@@ -19,7 +19,7 @@
 use codec::{Encode, Joiner};
 use frame_support::{
 	dispatch::GetDispatchInfo,
 	traits::Currency,
-	weights::{constants::ExtrinsicBaseWeight, IdentityFee, Weight, WeightToFee},
+	weights::{constants::ExtrinsicBaseWeight, IdentityFee, WeightToFee},
 };
 use kitchensink_runtime::{
 	constants::{currency::*, time::SLOT_DURATION},
@@ -34,14 +34,6 @@ use sp_runtime::{traits::One, Perbill};
 pub mod common;
 use self::common::{sign, *};
 
-fn charge_asset_tx_unspent_weight() -> Weight {
-	use pallet_asset_conversion_tx_payment::{Config as ACConfig, WeightInfo as ACWeights};
-	<<Runtime as ACConfig>::WeightInfo as ACWeights>::charge_asset_tx_payment_asset()
-		.saturating_sub(
-			<<Runtime as ACConfig>::WeightInfo as ACWeights>::charge_asset_tx_payment_native(),
-		)
-}
-
 #[test]
 fn fee_multiplier_increases_and_decreases_on_big_weight() {
 	let mut t = new_test_ext(compact_code_unwrap());
@@ -184,9 +176,7 @@ fn transaction_fee_is_correct() {
 		let mut info = default_transfer_call().get_dispatch_info();
 		info.extension_weight = xt.extension_weight();
-		let mut weight = info.total_weight();
-		let weight_refund = charge_asset_tx_unspent_weight();
-		weight.saturating_reduce(weight_refund);
+		let weight = info.total_weight();
 		let weight_fee = IdentityFee::<Balance>::weight_to_fee(&weight);
 
 		// we know that weight to fee multiplier is effect-less in block 1.

From 999fd23aa416f92b6b87bc36a5257212a6d722ab Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 14:26:27 +0300
Subject: [PATCH 72/74] Fix feature propagation

Signed-off-by: georgepisaltu
---
 substrate/frame/examples/authorization-tx-extension/Cargo.toml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
index 07e2f56f3bf4..34ea2ab156ea 100644
--- a/substrate/frame/examples/authorization-tx-extension/Cargo.toml
+++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
@@ -43,6 +43,7 @@ std = [
 	"sp-core/std",
 	"sp-io/std",
 	"sp-runtime/std",
+	"frame-benchmarking?/std"
 ]
 runtime-benchmarks = [
 	"frame-benchmarking/runtime-benchmarks",

From 0b5c6e1b356964ef575e3750aaf91fbac87c83a9 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 14:28:29 +0300
Subject: [PATCH 73/74] Fix toml format

Signed-off-by: georgepisaltu
---
 substrate/frame/examples/authorization-tx-extension/Cargo.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/substrate/frame/examples/authorization-tx-extension/Cargo.toml b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
index 34ea2ab156ea..65f847672db2 100644
--- a/substrate/frame/examples/authorization-tx-extension/Cargo.toml
+++ b/substrate/frame/examples/authorization-tx-extension/Cargo.toml
@@ -36,6 +36,7 @@ sp-keyring = { workspace = true, default-features = true }
 default = ["std"]
 std = [
 	"codec/std",
+	"frame-benchmarking?/std",
 	"frame-support/std",
 	"frame-system/std",
 	"log/std",
@@ -43,7 +44,6 @@ std = [
 	"sp-core/std",
 	"sp-io/std",
 	"sp-runtime/std",
-	"frame-benchmarking?/std"
 ]
 runtime-benchmarks = [

From 40f4791a0ae820ea769b9d80130ec75169f5f1f6 Mon Sep 17 00:00:00 2001
From: georgepisaltu
Date: Tue, 20 Aug 2024 15:19:25 +0300
Subject: [PATCH 74/74] Fix bench test

Signed-off-by: georgepisaltu
---
 substrate/bin/node/bench/src/import.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/substrate/bin/node/bench/src/import.rs b/substrate/bin/node/bench/src/import.rs
index 8334d230a8d2..0b972650ef5a 100644
--- a/substrate/bin/node/bench/src/import.rs
+++ b/substrate/bin/node/bench/src/import.rs
@@ -121,10 +121,10 @@ impl core::Benchmark for ImportBenchmark {
 			.inspect_state(|| {
 				match self.block_type {
 					BlockType::RandomTransfersKeepAlive => {
-						// should be 10 per signed extrinsic + 1 per unsigned
+						// should be 9 per signed extrinsic + 1 per unsigned
 						// we have 2 unsigned (timestamp and glutton bloat) while the rest are
 						// signed in the block.
-						// those 10 events per signed are:
+						// those 9 events per signed are:
 						// - transaction paid for the transaction payment
 						// - withdraw (Balances::Withdraw) for charging the transaction fee
 						// - new account (System::NewAccount) as we always transfer fund to
 						// - endowed (Balances::Endowed) for this new account
 						// - issued (Balances::Issued) as total issuance is increased
 						// - successful transfer (Event::Transfer) for this transfer operation
-						// - deposit (Balances::Deposit) for depositing the tx fee refund
 						// - 2x deposit (Balances::Deposit and Treasury::Deposit) for depositing
 						//   the transaction fee into the treasury
 						// - extrinsic success
 						assert_eq!(
 							kitchensink_runtime::System::events().len(),
-							(self.block.extrinsics.len() - 2) * 10 + 2,
+							(self.block.extrinsics.len() - 2) * 9 + 2,
 						);
 					},
 					BlockType::Noop => {