diff --git a/Cargo.lock b/Cargo.lock
index e3a72ca23d883..a81e7c409dcf8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -15073,6 +15073,17 @@ dependencies = [
  "unsigned-varint",
 ]
 
+[[package]]
+name = "quick_cache"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5253a3a0d56548d5b0be25414171dc780cc6870727746d05bd2bde352eee96c5"
+dependencies = [
+ "ahash 0.8.8",
+ "hashbrown 0.13.2",
+ "parking_lot 0.12.1",
+]
+
 [[package]]
 name = "quickcheck"
 version = "1.0.3"
@@ -15508,6 +15519,7 @@ dependencies = [
  "pallet-transaction-payment-rpc-runtime-api",
  "pallet-utility",
  "parity-scale-codec",
+ "quick_cache",
  "rand 0.8.5",
  "relay-utils",
  "sc-chain-spec",
diff --git a/bridges/modules/relayers/src/lib.rs b/bridges/modules/relayers/src/lib.rs
index 7a3a0f9ea94cb..2c86ec01f5b91 100644
--- a/bridges/modules/relayers/src/lib.rs
+++ b/bridges/modules/relayers/src/lib.rs
@@ -63,7 +63,7 @@ pub mod pallet {
 		/// The overarching event type.
 		type RuntimeEvent: From<Event<Self>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
 		/// Type of relayer reward.
-		type Reward: AtLeast32BitUnsigned + Copy + Parameter + MaxEncodedLen;
+		type Reward: AtLeast32BitUnsigned + Copy + Member + Parameter + MaxEncodedLen;
 		/// Pay rewards scheme.
 		type PaymentProcedure: PaymentProcedure<Self::AccountId, Self::Reward>;
 		/// Stake and slash scheme.
diff --git a/bridges/primitives/relayers/src/lib.rs b/bridges/primitives/relayers/src/lib.rs
index 2a9ef6a8e1e9a..436f33db40080 100644
--- a/bridges/primitives/relayers/src/lib.rs
+++ b/bridges/primitives/relayers/src/lib.rs
@@ -140,8 +140,8 @@ pub struct RelayerRewardsKeyProvider<AccountId, Reward>(PhantomData<(AccountId, Reward)>);
 
 impl<AccountId, Reward> StorageDoubleMapKeyProvider for RelayerRewardsKeyProvider<AccountId, Reward>
 where
-	AccountId: Codec + EncodeLike,
-	Reward: Codec + EncodeLike,
+	AccountId: 'static + Codec + EncodeLike + Send + Sync,
+	Reward: 'static + Codec + EncodeLike + Send + Sync,
 {
 	const MAP_NAME: &'static str = "RelayerRewards";
 
diff --git a/bridges/primitives/runtime/src/lib.rs b/bridges/primitives/runtime/src/lib.rs
index 5daba0351ad48..d13c9b40efa0b 100644
--- a/bridges/primitives/runtime/src/lib.rs
+++ b/bridges/primitives/runtime/src/lib.rs
@@ -255,9 +255,9 @@ pub trait StorageMapKeyProvider {
 	/// The same as `StorageMap::Hasher1`.
 	type Hasher: StorageHasher;
 	/// The same as `StorageMap::Key1`.
-	type Key: FullCodec;
+	type Key: FullCodec + Send + Sync;
 	/// The same as `StorageMap::Value`.
-	type Value: FullCodec;
+	type Value: 'static + FullCodec;
 
 	/// This is a copy of the
 	/// `frame_support::storage::generator::StorageMap::storage_map_final_key`.
@@ -277,13 +277,13 @@ pub trait StorageDoubleMapKeyProvider {
 	/// The same as `StorageDoubleMap::Hasher1`.
 	type Hasher1: StorageHasher;
 	/// The same as `StorageDoubleMap::Key1`.
-	type Key1: FullCodec;
+	type Key1: FullCodec + Send + Sync;
 	/// The same as `StorageDoubleMap::Hasher2`.
 	type Hasher2: StorageHasher;
 	/// The same as `StorageDoubleMap::Key2`.
-	type Key2: FullCodec;
+	type Key2: FullCodec + Send + Sync;
 	/// The same as `StorageDoubleMap::Value`.
-	type Value: FullCodec;
+	type Value: 'static + FullCodec;
 
 	/// This is a copy of the
 	/// `frame_support::storage::generator::StorageDoubleMap::storage_double_map_final_key`.
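
Reviewer note (not part of the diff): the `Send + Sync + 'static` bounds added to the storage key/value types above are there for the caching client introduced later in this patch, which keeps decoded keys and values in caches shared between async tasks. A minimal sketch of the constraint, with illustrative names only (none of these items exist in the PR):

use async_std::sync::{Arc, RwLock};
use std::collections::HashMap;

// Illustrative only: a value shared through `Arc` and touched from a spawned
// async task must be `Send + Sync + 'static`, which is exactly what the new
// bounds on `StorageMapKeyProvider`/`StorageDoubleMapKeyProvider` guarantee.
fn spawn_cache_writer<K, V>(cache: Arc<RwLock<HashMap<K, V>>>, key: K, value: V)
where
	K: std::hash::Hash + Eq + Send + Sync + 'static,
	V: Send + Sync + 'static,
{
	// `async_std::task::spawn` only accepts `Send + 'static` futures; the
	// `Sync` requirement comes from sharing the map behind `Arc<RwLock<_>>`.
	async_std::task::spawn(async move {
		cache.write().await.insert(key, value);
	});
}
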
diff --git a/bridges/relays/client-substrate/Cargo.toml b/bridges/relays/client-substrate/Cargo.toml
index cb7eae4f340c7..ea267ea5e302a 100644
--- a/bridges/relays/client-substrate/Cargo.toml
+++ b/bridges/relays/client-substrate/Cargo.toml
@@ -22,6 +22,7 @@ rand = "0.8.5"
 scale-info = { version = "2.11.1", features = ["derive"] }
 tokio = { version = "1.37", features = ["rt-multi-thread"] }
 thiserror = { workspace = true }
+quick_cache = "0.3"
 
 # Bridge dependencies
 
diff --git a/bridges/relays/client-substrate/src/chain.rs b/bridges/relays/client-substrate/src/chain.rs
index 40269fe64c879..227e9c31c5bfc 100644
--- a/bridges/relays/client-substrate/src/chain.rs
+++ b/bridges/relays/client-substrate/src/chain.rs
@@ -36,6 +36,9 @@ use sp_runtime::{
 };
 use std::{fmt::Debug, time::Duration};
 
+/// Signed block type of given chain.
+pub type SignedBlockOf<C> = <C as Chain>::SignedBlock;
+
 /// Substrate-based chain from minimal relay-client point of view.
 pub trait Chain: ChainBase + Clone {
 	/// Chain name.
diff --git a/bridges/relays/client-substrate/src/client.rs b/bridges/relays/client-substrate/src/client.rs
deleted file mode 100644
index 2e7cb7455f76c..0000000000000
--- a/bridges/relays/client-substrate/src/client.rs
+++ /dev/null
@@ -1,1032 +0,0 @@
-// Copyright 2019-2021 Parity Technologies (UK) Ltd.
-// This file is part of Parity Bridges Common.
-
-// Parity Bridges Common is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Parity Bridges Common is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Parity Bridges Common. If not, see <http://www.gnu.org/licenses/>.
-
-//! Substrate node client.
- -use crate::{ - chain::{Chain, ChainWithTransactions}, - guard::Environment, - rpc::{ - SubstrateAuthorClient, SubstrateChainClient, SubstrateFinalityClient, - SubstrateFrameSystemClient, SubstrateStateClient, SubstrateSystemClient, - }, - transaction_stall_timeout, AccountKeyPairOf, ChainWithGrandpa, ConnectionParams, Error, HashOf, - HeaderIdOf, Result, SignParam, TransactionTracker, UnsignedTransaction, -}; - -use async_std::sync::{Arc, Mutex, RwLock}; -use async_trait::async_trait; -use bp_runtime::{HeaderIdProvider, StorageDoubleMapKeyProvider, StorageMapKeyProvider}; -use codec::{Decode, Encode}; -use frame_support::weights::Weight; -use futures::{SinkExt, StreamExt}; -use jsonrpsee::{ - core::DeserializeOwned, - ws_client::{WsClient as RpcClient, WsClientBuilder as RpcClientBuilder}, -}; -use num_traits::{Saturating, Zero}; -use pallet_transaction_payment::RuntimeDispatchInfo; -use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; -use sp_core::{ - storage::{StorageData, StorageKey}, - Bytes, Hasher, Pair, -}; -use sp_runtime::{ - traits::Header as HeaderT, - transaction_validity::{TransactionSource, TransactionValidity}, -}; -use sp_trie::StorageProof; -use sp_version::RuntimeVersion; -use std::{cmp::Ordering, future::Future}; - -const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; -const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = - "GrandpaApi_generate_key_ownership_proof"; -const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; -const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; -const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; - -/// The difference between best block number and number of its ancestor, that is enough -/// for us to consider that ancestor an "ancient" block with dropped state. -/// -/// The relay does not assume that it is connected to the archive node, so it always tries -/// to use the best available chain state. But sometimes it still may use state of some -/// old block. If the state of that block is already dropped, relay will see errors when -/// e.g. it tries to prove something. -/// -/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use -/// half of this value. -pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; - -/// Returns `true` if we think that the state is already discarded for given block. -pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { - best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) -} - -/// Opaque justifications subscription type. -pub struct Subscription( - pub(crate) Mutex>>, - // The following field is not explicitly used by the code. But when it is dropped, - // the bakground task receives a shutdown signal. - #[allow(dead_code)] pub(crate) futures::channel::oneshot::Sender<()>, -); - -/// Opaque GRANDPA authorities set. -pub type OpaqueGrandpaAuthoritiesSet = Vec; - -/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. -#[derive(Copy, Clone, Debug)] -pub struct SimpleRuntimeVersion { - /// Version of the runtime specification. - pub spec_version: u32, - /// All existing dispatches are fully compatible when this number doesn't change. - pub transaction_version: u32, -} - -impl SimpleRuntimeVersion { - /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. 
- pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { - Self { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - } - } -} - -/// Chain runtime version in client -#[derive(Copy, Clone, Debug)] -pub enum ChainRuntimeVersion { - /// Auto query from chain. - Auto, - /// Custom runtime version, defined by user. - Custom(SimpleRuntimeVersion), -} - -/// Substrate client type. -/// -/// Cloning `Client` is a cheap operation that only clones internal references. Different -/// clones of the same client are guaranteed to use the same references. -pub struct Client { - // Lock order: `submit_signed_extrinsic_lock`, `data` - /// Client connection params. - params: Arc, - /// Saved chain runtime version. - chain_runtime_version: ChainRuntimeVersion, - /// If several tasks are submitting their transactions simultaneously using - /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of - /// transactions will be rejected from the pool. This lock is here to prevent situations like - /// that. - submit_signed_extrinsic_lock: Arc>, - /// Genesis block hash. - genesis_hash: HashOf, - /// Shared dynamic data. - data: Arc>, -} - -/// Client data, shared by all `Client` clones. -struct ClientData { - /// Tokio runtime handle. - tokio: Arc, - /// Substrate RPC client. - client: Arc, -} - -/// Already encoded value. -struct PreEncoded(Vec); - -impl Encode for PreEncoded { - fn encode(&self) -> Vec { - self.0.clone() - } -} - -#[async_trait] -impl relay_utils::relay_loop::Client for Client { - type Error = Error; - - async fn reconnect(&mut self) -> Result<()> { - let mut data = self.data.write().await; - let (tokio, client) = Self::build_client(&self.params).await?; - data.tokio = tokio; - data.client = client; - Ok(()) - } -} - -impl Clone for Client { - fn clone(&self) -> Self { - Client { - params: self.params.clone(), - chain_runtime_version: self.chain_runtime_version, - submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), - genesis_hash: self.genesis_hash, - data: self.data.clone(), - } - } -} - -impl std::fmt::Debug for Client { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("Client").field("genesis_hash", &self.genesis_hash).finish() - } -} - -impl Client { - /// Returns client that is able to call RPCs on Substrate node over websocket connection. - /// - /// This function will keep connecting to given Substrate node until connection is established - /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. - pub async fn new(params: ConnectionParams) -> Self { - let params = Arc::new(params); - loop { - match Self::try_connect(params.clone()).await { - Ok(client) => return client, - Err(error) => log::error!( - target: "bridge", - "Failed to connect to {} node: {:?}. Going to retry in {}s", - C::NAME, - error, - RECONNECT_DELAY.as_secs(), - ), - } - - async_std::task::sleep(RECONNECT_DELAY).await; - } - } - - /// Try to connect to Substrate node over websocket. Returns Substrate RPC client if connection - /// has been established or error otherwise. 
- pub async fn try_connect(params: Arc) -> Result { - let (tokio, client) = Self::build_client(¶ms).await?; - - let number: C::BlockNumber = Zero::zero(); - let genesis_hash_client = client.clone(); - let genesis_hash = tokio - .spawn(async move { - SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(number)).await - }) - .await??; - - let chain_runtime_version = params.chain_runtime_version; - let mut client = Self { - params, - chain_runtime_version, - submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), - genesis_hash, - data: Arc::new(RwLock::new(ClientData { tokio, client })), - }; - Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; - Ok(client) - } - - // Check runtime version to understand if we need are connected to expected version, or we - // need to wait for upgrade, we need to abort immediately. - async fn ensure_correct_runtime_version>( - env: &mut E, - expected: ChainRuntimeVersion, - ) -> Result<()> { - // we are only interested if version mode is bundled or passed using CLI - let expected = match expected { - ChainRuntimeVersion::Auto => return Ok(()), - ChainRuntimeVersion::Custom(expected) => expected, - }; - - // we need to wait if actual version is < than expected, we are OK of versions are the - // same and we need to abort if actual version is > than expected - let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); - match actual.spec_version.cmp(&expected.spec_version) { - Ordering::Less => - Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), - Ordering::Equal => Ok(()), - Ordering::Greater => { - log::error!( - target: "bridge", - "The {} client is configured to use runtime version {expected:?} and actual \ - version is {actual:?}. Aborting", - C::NAME, - ); - env.abort().await; - Err(Error::Custom("Aborted".into())) - }, - } - } - - /// Build client to use in connection. - async fn build_client( - params: &ConnectionParams, - ) -> Result<(Arc, Arc)> { - let tokio = tokio::runtime::Runtime::new()?; - - let uri = match params.uri { - Some(ref uri) => uri.clone(), - None => { - format!( - "{}://{}:{}{}", - if params.secure { "wss" } else { "ws" }, - params.host, - params.port, - match params.path { - Some(ref path) => format!("/{}", path), - None => String::new(), - }, - ) - }, - }; - log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); - - let client = tokio - .spawn(async move { - RpcClientBuilder::default() - .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) - .build(&uri) - .await - }) - .await??; - - Ok((Arc::new(tokio), Arc::new(client))) - } -} - -impl Client { - /// Return simple runtime version, only include `spec_version` and `transaction_version`. - pub async fn simple_runtime_version(&self) -> Result { - Ok(match &self.chain_runtime_version { - ChainRuntimeVersion::Auto => { - let runtime_version = self.runtime_version().await?; - SimpleRuntimeVersion::from_runtime_version(&runtime_version) - }, - ChainRuntimeVersion::Custom(version) => *version, - }) - } - - /// Returns true if client is connected to at least one peer and is in synced state. 
- pub async fn ensure_synced(&self) -> Result<()> { - self.jsonrpsee_execute(|client| async move { - let health = SubstrateSystemClient::::health(&*client).await?; - let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); - if is_synced { - Ok(()) - } else { - Err(Error::ClientNotSynced(health)) - } - }) - .await - } - - /// Return hash of the genesis block. - pub fn genesis_hash(&self) -> &C::Hash { - &self.genesis_hash - } - - /// Return hash of the best finalized block. - pub async fn best_finalized_header_hash(&self) -> Result { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::finalized_head(&*client).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestFinalizedHeaderHash { - chain: C::NAME.into(), - error: e.boxed(), - }) - } - - /// Return number of the best finalized block. - pub async fn best_finalized_header_number(&self) -> Result { - Ok(*self.best_finalized_header().await?.number()) - } - - /// Return header of the best finalized block. - pub async fn best_finalized_header(&self) -> Result { - self.header_by_hash(self.best_finalized_header_hash().await?).await - } - - /// Returns the best Substrate header. - pub async fn best_header(&self) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(|client| async move { - Ok(SubstrateChainClient::::header(&*client, None).await?) - }) - .await - .map_err(|e| Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() }) - } - - /// Get a Substrate block from its hash. - pub async fn get_block(&self, block_hash: Option) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block(&*client, block_hash).await?) - }) - .await - } - - /// Get a Substrate header by its hash. - pub async fn header_by_hash(&self, block_hash: C::Hash) -> Result - where - C::Header: DeserializeOwned, - { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::header(&*client, Some(block_hash)).await?) - }) - .await - .map_err(|e| Error::FailedToReadHeaderByHash { - chain: C::NAME.into(), - hash: format!("{block_hash}"), - error: e.boxed(), - }) - } - - /// Get a Substrate block hash by its number. - pub async fn block_hash_by_number(&self, number: C::BlockNumber) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) - }) - .await - } - - /// Get a Substrate header by its number. - pub async fn header_by_number(&self, block_number: C::BlockNumber) -> Result - where - C::Header: DeserializeOwned, - { - let block_hash = Self::block_hash_by_number(self, block_number).await?; - let header_by_hash = Self::header_by_hash(self, block_hash).await?; - Ok(header_by_hash) - } - - /// Return runtime version. - pub async fn runtime_version(&self) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::runtime_version(&*client).await?) - }) - .await - } - - /// Read value from runtime storage. - pub async fn storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `MapStorage` value from runtime storage. 
- pub async fn storage_map_value( - &self, - pallet_prefix: &str, - key: &T::Key, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read `DoubleMapStorage` value from runtime storage. - pub async fn storage_double_map_value( - &self, - pallet_prefix: &str, - key1: &T::Key1, - key2: &T::Key2, - block_hash: Option, - ) -> Result> { - let storage_key = T::final_key(pallet_prefix, key1, key2); - - self.raw_storage_value(storage_key, block_hash) - .await? - .map(|encoded_value| { - T::Value::decode(&mut &encoded_value.0[..]).map_err(Error::ResponseParseFailed) - }) - .transpose() - } - - /// Read raw value from runtime storage. - pub async fn raw_storage_value( - &self, - storage_key: StorageKey, - block_hash: Option, - ) -> Result> { - let cloned_storage_key = storage_key.clone(); - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateStateClient::::storage(&*client, storage_key.clone(), block_hash) - .await?) - }) - .await - .map_err(|e| Error::FailedToReadRuntimeStorageValue { - chain: C::NAME.into(), - key: cloned_storage_key, - error: e.boxed(), - }) - } - - /// Get the nonce of the given Substrate account. - /// - /// Note: It's the caller's responsibility to make sure `account` is a valid SS58 address. - pub async fn next_account_index(&self, account: C::AccountId) -> Result { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) - }) - .await - } - - /// Submit unsigned extrinsic for inclusion in a block. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. - pub async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result { - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - let best_header_hash = self.best_header().await?.hash(); - self.validate_transaction(best_header_hash, PreEncoded(transaction.0.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> - where - C: ChainWithTransactions, - { - let runtime_version = self.simple_runtime_version().await?; - Ok(SignParam:: { - spec_version: runtime_version.spec_version, - transaction_version: runtime_version.transaction_version, - genesis_hash: self.genesis_hash, - signer, - }) - } - - /// Submit an extrinsic signed by given account. - /// - /// All calls of this method are synchronized, so there can't be more than one active - /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen - /// if all client instances are clones of the same initial `Client`. - /// - /// Note: The given transaction needs to be SCALE encoded beforehand. 
- pub async fn submit_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let signing_data = self.build_sign_params(signer.clone()).await?; - - // By using parent of best block here, we are protecing again best-block reorganizations. - // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it - // has been changed to `B[num=100]`. Hash of `A` has been included into transaction - // signature payload. So when signature will be checked, the check will fail and transaction - // will be dropped from the pool. - let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. - self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - self.jsonrpsee_execute(move |client| async move { - let tx_hash = - SubstrateAuthorClient::::submit_extrinsic(&*client, Bytes(signed_extrinsic)) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - Ok(tx_hash) - }) - .await - } - - /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status - /// after submission. - pub async fn submit_and_watch_signed_extrinsic( - &self, - signer: &AccountKeyPairOf, - prepare_extrinsic: impl FnOnce(HeaderIdOf, C::Nonce) -> Result> - + Send - + 'static, - ) -> Result> - where - C: ChainWithTransactions, - C::AccountId: From<::Public>, - { - let self_clone = self.clone(); - let signing_data = self.build_sign_params(signer.clone()).await?; - let _guard = self.submit_signed_extrinsic_lock.lock().await; - let transaction_nonce = self.next_account_index(signer.public().into()).await?; - let best_header = self.best_header().await?; - let best_header_id = best_header.id(); - - let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; - let stall_timeout = transaction_stall_timeout( - extrinsic.era.mortality_period(), - C::AVERAGE_BLOCK_INTERVAL, - STALL_TIMEOUT, - ); - let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); - - // one last check that the transaction is valid. Most of checks happen in the relay loop and - // it is the "final" check before submission. 
- self.validate_transaction(best_header_id.1, PreEncoded(signed_extrinsic.clone())) - .await - .map_err(|e| { - log::error!(target: "bridge", "Pre-submit {} transaction validation failed: {:?}", C::NAME, e); - e - })??; - - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - let (tracker, subscription) = self - .jsonrpsee_execute(move |client| async move { - let tx_hash = C::Hasher::hash(&signed_extrinsic); - let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( - &*client, - Bytes(signed_extrinsic), - ) - .await - .map_err(|e| { - log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); - e - })?; - log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); - let tracker = TransactionTracker::new( - self_clone, - stall_timeout, - tx_hash, - Subscription(Mutex::new(receiver), cancel_sender), - ); - Ok((tracker, subscription)) - }) - .await?; - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "extrinsic".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(tracker) - } - - /// Returns pending extrinsics from transaction pool. - pub async fn pending_extrinsics(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) - }) - .await - } - - /// Validate transaction at given block state. - pub async fn validate_transaction( - &self, - at_block: C::Hash, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_TXPOOL_VALIDATE_TRANSACTION.to_string(); - let data = Bytes((TransactionSource::External, transaction, at_block).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(at_block)).await?; - let validity = TransactionValidity::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(validity) - }) - .await - } - - /// Returns weight of the given transaction. - pub async fn extimate_extrinsic_weight( - &self, - transaction: SignedTransaction, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let transaction_len = transaction.encoded_size() as u32; - - let call = SUB_API_TX_PAYMENT_QUERY_INFO.to_string(); - let data = Bytes((transaction, transaction_len).encode()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, None).await?; - let dispatch_info = - RuntimeDispatchInfo::::decode(&mut &encoded_response.0[..]) - .map_err(Error::ResponseParseFailed)?; - - Ok(dispatch_info.weight) - }) - .await - } - - /// Get the GRANDPA authority set at given block. - pub async fn grandpa_authorities_set( - &self, - block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - let call = SUB_API_GRANDPA_AUTHORITIES.to_string(); - let data = Bytes(Vec::new()); - - let encoded_response = - SubstrateStateClient::::call(&*client, call, data, Some(block)).await?; - let authority_list = encoded_response.0; - - Ok(authority_list) - }) - .await - } - - /// Execute runtime call at given block, provided the input and output types. - /// It also performs the input encode and output decode. 
- pub async fn typed_state_call( - &self, - method_name: String, - input: Input, - at_block: Option, - ) -> Result { - let encoded_output = self - .state_call(method_name.clone(), Bytes(input.encode()), at_block) - .await - .map_err(|e| Error::ErrorExecutingRuntimeCall { - chain: C::NAME.into(), - method: method_name, - error: e.boxed(), - })?; - Output::decode(&mut &encoded_output.0[..]).map_err(Error::ResponseParseFailed) - } - - /// Execute runtime call at given block. - pub async fn state_call( - &self, - method: String, - data: Bytes, - at_block: Option, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::call(&*client, method, data, at_block) - .await - .map_err(Into::into) - }) - .await - } - - /// Returns storage proof of given storage keys. - pub async fn prove_storage( - &self, - keys: Vec, - at_block: C::Hash, - ) -> Result { - self.jsonrpsee_execute(move |client| async move { - SubstrateStateClient::::prove_storage(&*client, keys, Some(at_block)) - .await - .map(|proof| { - StorageProof::new(proof.proof.into_iter().map(|b| b.0).collect::>()) - }) - .map_err(Into::into) - }) - .await - } - - /// Return `tokenDecimals` property from the set of chain properties. - pub async fn token_decimals(&self) -> Result> { - self.jsonrpsee_execute(move |client| async move { - let system_properties = SubstrateSystemClient::::properties(&*client).await?; - Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) - }) - .await - } - - /// Return new finality justifications stream. - pub async fn subscribe_finality_justifications>( - &self, - ) -> Result> { - let subscription = self - .jsonrpsee_execute(move |client| async move { - Ok(FC::subscribe_justifications(&client).await?) - }) - .await?; - let (cancel_sender, cancel_receiver) = futures::channel::oneshot::channel(); - let (sender, receiver) = futures::channel::mpsc::channel(MAX_SUBSCRIPTION_CAPACITY); - self.data.read().await.tokio.spawn(Subscription::background_worker( - C::NAME.into(), - "justification".into(), - subscription, - sender, - cancel_receiver, - )); - Ok(Subscription(Mutex::new(receiver), cancel_sender)) - } - - /// Generates a proof of key ownership for the given authority in the given set. - pub async fn generate_grandpa_key_ownership_proof( - &self, - at: HashOf, - set_id: sp_consensus_grandpa::SetId, - authority_id: sp_consensus_grandpa::AuthorityId, - ) -> Result> - where - C: ChainWithGrandpa, - { - self.typed_state_call( - SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), - (set_id, authority_id), - Some(at), - ) - .await - } - - /// Execute jsonrpsee future in tokio context. - async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result - where - MF: FnOnce(Arc) -> F + Send + 'static, - F: Future> + Send + 'static, - T: Send + 'static, - { - let data = self.data.read().await; - let client = data.client.clone(); - data.tokio.spawn(make_jsonrpsee_future(client)).await? - } - - /// Returns `true` if version guard can be started. - /// - /// There's no reason to run version guard when version mode is set to `Auto`. It can - /// lead to relay shutdown when chain is upgraded, even though we have explicitly - /// said that we don't want to shutdown. - pub fn can_start_version_guard(&self) -> bool { - !matches!(self.chain_runtime_version, ChainRuntimeVersion::Auto) - } -} - -impl Subscription { - /// Consumes subscription and returns future statuses stream. 
- pub fn into_stream(self) -> impl futures::Stream { - futures::stream::unfold(Some(self), |mut this| async move { - let Some(this) = this.take() else { return None }; - let item = this.0.lock().await.next().await.unwrap_or(None); - match item { - Some(item) => Some((item, Some(this))), - None => { - // let's make it explicit here - let _ = this.1.send(()); - None - }, - } - }) - } - - /// Return next item from the subscription. - pub async fn next(&self) -> Result> { - let mut receiver = self.0.lock().await; - let item = receiver.next().await; - Ok(item.unwrap_or(None)) - } - - /// Background worker that is executed in tokio context as `jsonrpsee` requires. - async fn background_worker( - chain_name: String, - item_type: String, - subscription: jsonrpsee::core::client::Subscription, - mut sender: futures::channel::mpsc::Sender>, - cancel_receiver: futures::channel::oneshot::Receiver<()>, - ) { - log::trace!( - target: "bridge", - "Starting background worker for {} {} subscription stream.", - chain_name, - item_type, - ); - - futures::pin_mut!(subscription, cancel_receiver); - loop { - match futures::future::select(subscription.next(), &mut cancel_receiver).await { - futures::future::Either::Left((Some(Ok(item)), _)) => - if sender.send(Some(item)).await.is_err() { - log::trace!( - target: "bridge", - "{} {} subscription stream: no listener. Stopping background worker.", - chain_name, - item_type, - ); - - break - }, - futures::future::Either::Left((Some(Err(e)), _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned '{:?}'. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - e, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Left((None, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream has returned None. Stream needs to be restarted. Stopping background worker.", - chain_name, - item_type, - ); - let _ = sender.send(None).await; - break - }, - futures::future::Either::Right((_, _)) => { - log::trace!( - target: "bridge", - "{} {} subscription stream: listener has been dropped. 
Stopping background worker.", - chain_name, - item_type, - ); - break; - }, - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; - use futures::{channel::mpsc::unbounded, FutureExt}; - - async fn run_ensure_correct_runtime_version( - expected: ChainRuntimeVersion, - actual: RuntimeVersion, - ) -> Result<()> { - let ( - (mut runtime_version_tx, runtime_version_rx), - (slept_tx, _slept_rx), - (aborted_tx, mut aborted_rx), - ) = (unbounded(), unbounded(), unbounded()); - runtime_version_tx.send(actual).await.unwrap(); - let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; - - let ensure_correct_runtime_version = - Client::::ensure_correct_runtime_version(&mut env, expected).boxed(); - let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); - futures::pin_mut!(ensure_correct_runtime_version, aborted); - futures::future::select(ensure_correct_runtime_version, aborted) - .await - .into_inner() - .0 - } - - #[async_std::test] - async fn ensure_correct_runtime_version_works() { - // when we are configured to use auto version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Auto, - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual == expected - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 100, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Ok(()), - )); - // when actual spec version < expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, - ) - .await, - Err(Error::WaitingForRuntimeUpgrade { - expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, - actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, - .. - }), - )); - // when actual spec version > expected spec version - assert!(matches!( - run_ensure_correct_runtime_version( - ChainRuntimeVersion::Custom(SimpleRuntimeVersion { - spec_version: 100, - transaction_version: 100 - }), - RuntimeVersion { - spec_version: 101, - transaction_version: 100, - ..Default::default() - }, - ) - .await, - Err(Error::Custom(_)), - )); - } -} diff --git a/bridges/relays/client-substrate/src/client/caching.rs b/bridges/relays/client-substrate/src/client/caching.rs new file mode 100644 index 0000000000000..abea864db7381 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/caching.rs @@ -0,0 +1,329 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that is caching (whenever possible) results of its backend +//! method calls. + +use crate::{ + client::{Client, SharedSubscriptionFactory}, + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, ANCIENT_BLOCK_THRESHOLD, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; +use async_trait::async_trait; +use codec::Encode; +use frame_support::weights::Weight; +use quick_cache::unsync::Cache; +use sp_consensus_grandpa::{AuthorityId, OpaqueKeyOwnershipProof, SetId}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::transaction_validity::TransactionValidity; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; + +/// `quick_cache::unsync::Cache` wrapped in async-aware synchronization primitives. +type SyncCache = Arc>>; + +/// Client implementation that is caching (whenever possible) results of its backend +/// method calls. Apart from caching call results, it also supports some (at the +/// moment: justifications) subscription sharing, meaning that the single server +/// subscription may be shared by multiple subscribers at the client side. +#[derive(Clone)] +pub struct CachingClient> { + backend: B, + data: Arc>, +} + +/// Client data, shared by all `CachingClient` clones. +struct ClientData { + grandpa_justifications: Arc>>>, + beefy_justifications: Arc>>>, + // `quick_cache::sync::Cache` has the `get_or_insert_async` method, which fits our needs, + // but it uses synchronization primitives that are not aware of async execution. They + // can block the executor threads and cause deadlocks => let's use primitives from + // `async_std` crate around `quick_cache::unsync::Cache` + header_hash_by_number_cache: SyncCache, HashOf>, + header_by_hash_cache: SyncCache, HeaderOf>, + block_by_hash_cache: SyncCache, SignedBlockOf>, + raw_storage_value_cache: SyncCache<(HashOf, StorageKey), Option>, + state_call_cache: SyncCache<(HashOf, String, Bytes), Bytes>, +} + +impl> CachingClient { + /// Creates new `CachingClient` on top of given `backend`. + pub fn new(backend: B) -> Self { + // most of relayer operations will never touch more than `ANCIENT_BLOCK_THRESHOLD` + // headers, so we'll use this as a cache capacity for all chain-related caches + let chain_state_capacity = ANCIENT_BLOCK_THRESHOLD as usize; + CachingClient { + backend, + data: Arc::new(ClientData { + grandpa_justifications: Arc::new(Mutex::new(None)), + beefy_justifications: Arc::new(Mutex::new(None)), + header_hash_by_number_cache: Arc::new(RwLock::new(Cache::new( + chain_state_capacity, + ))), + header_by_hash_cache: Arc::new(RwLock::new(Cache::new(chain_state_capacity))), + block_by_hash_cache: Arc::new(RwLock::new(Cache::new(chain_state_capacity))), + raw_storage_value_cache: Arc::new(RwLock::new(Cache::new(1_024))), + state_call_cache: Arc::new(RwLock::new(Cache::new(1_024))), + }), + } + } + + /// Try to get value from the cache, or compute and insert it using given future. 
+ async fn get_or_insert_async( + &self, + cache: &Arc>>, + key: &K, + with: impl std::future::Future>, + ) -> Result { + // try to get cached value first using read lock + { + let cache = cache.read().await; + if let Some(value) = cache.get(key) { + return Ok(value.clone()) + } + } + + // let's compute the value without holding any locks - it may cause additional misses and + // double insertions, but that's better than holding a lock for a while + let value = with.await?; + + // insert/update the value in the cache + cache.write().await.insert(key.clone(), value.clone()); + Ok(value) + } +} + +impl> std::fmt::Debug for CachingClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("CachingClient<{:?}>", self.backend)) + } +} + +// TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): this must be implemented for T: Client +#[async_trait] +impl> relay_utils::relay_loop::Client for CachingClient { + type Error = Error; + + async fn reconnect(&mut self) -> Result<()> { + >::reconnect(self).await + } +} + +#[async_trait] +impl> Client for CachingClient { + async fn ensure_synced(&self) -> Result<()> { + self.backend.ensure_synced().await + } + + async fn reconnect(&self) -> Result<()> { + self.backend.reconnect().await?; + // since we have new underlying client, we need to restart subscriptions too + *self.data.grandpa_justifications.lock().await = None; + *self.data.beefy_justifications.lock().await = None; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.backend.genesis_hash() + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.get_or_insert_async( + &self.data.header_hash_by_number_cache, + &number, + self.backend.header_hash_by_number(number), + ) + .await + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.header_by_hash_cache, + &hash, + self.backend.header_by_hash(hash), + ) + .await + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.get_or_insert_async( + &self.data.block_by_hash_cache, + &hash, + self.backend.block_by_hash(hash), + ) + .await + } + + async fn best_finalized_header_hash(&self) -> Result> { + // TODO: after https://github.com/paritytech/parity-bridges-common/issues/2074 we may + // use single-value-cache here, but for now let's just call the backend + self.backend.best_finalized_header_hash().await + } + + async fn best_header(&self) -> Result> { + // TODO: if after https://github.com/paritytech/parity-bridges-common/issues/2074 we'll + // be using subscriptions to get best blocks, we may use single-value-cache here, but for + // now let's just call the backend + self.backend.best_header().await + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + let mut grandpa_justifications = self.data.grandpa_justifications.lock().await; + if let Some(ref grandpa_justifications) = *grandpa_justifications { + grandpa_justifications.subscribe().await + } else { + let subscription = self.backend.subscribe_grandpa_finality_justifications().await?; + *grandpa_justifications = Some(subscription.factory()); + Ok(subscription) + } + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: SetId, + authority_id: AuthorityId, + ) -> Result> { + self.backend + .generate_grandpa_key_ownership_proof(at, set_id, authority_id) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + let 
mut beefy_justifications = self.data.beefy_justifications.lock().await; + if let Some(ref beefy_justifications) = *beefy_justifications { + beefy_justifications.subscribe().await + } else { + let subscription = self.backend.subscribe_beefy_finality_justifications().await?; + *beefy_justifications = Some(subscription.factory()); + Ok(subscription) + } + } + + async fn token_decimals(&self) -> Result> { + self.backend.token_decimals().await + } + + async fn runtime_version(&self) -> Result { + self.backend.runtime_version().await + } + + async fn simple_runtime_version(&self) -> Result { + self.backend.simple_runtime_version().await + } + + fn can_start_version_guard(&self) -> bool { + self.backend.can_start_version_guard() + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.get_or_insert_async( + &self.data.raw_storage_value_cache, + &(at, storage_key.clone()), + self.backend.raw_storage_value(at, storage_key), + ) + .await + } + + async fn pending_extrinsics(&self) -> Result> { + self.backend.pending_extrinsics().await + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + self.backend.submit_unsigned_extrinsic(transaction).await + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend.submit_signed_extrinsic(signer, prepare_extrinsic).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + self.backend + .submit_and_watch_signed_extrinsic(signer, prepare_extrinsic) + .await + .map(|t| t.switch_environment(self.clone())) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.validate_transaction(at, transaction).await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.backend.estimate_extrinsic_weight(at, transaction).await + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = Bytes(arguments.encode()); + self.get_or_insert_async( + &self.data.state_call_cache, + &(at, method.clone(), encoded_arguments), + self.backend.raw_state_call(at, method, arguments), + ) + .await + } + + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result { + self.backend.prove_storage(at, keys).await + } +} diff --git a/bridges/relays/client-substrate/src/client/client.rs b/bridges/relays/client-substrate/src/client/client.rs new file mode 100644 index 0000000000000..9bc843d6e1abb --- /dev/null +++ b/bridges/relays/client-substrate/src/client/client.rs @@ -0,0 +1,225 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +use crate::{ + error::{Error, Result}, + AccountIdOf, AccountKeyPairOf, BlockNumberOf, Chain, ChainWithGrandpa, ChainWithTransactions, + HashOf, HeaderIdOf, HeaderOf, NonceOf, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, +}; + +use async_trait::async_trait; +use bp_runtime::{StorageDoubleMapKeyProvider, StorageMapKeyProvider}; +use codec::{Decode, Encode}; +use frame_support::weights::Weight; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Pair, +}; +use sp_runtime::{traits::Header as _, transaction_validity::TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::fmt::Debug; + +/// Relay uses the `Client` to communicate with the node, connected to Substrate +/// chain `C`. +#[async_trait] +pub trait Client: 'static + Send + Sync + Clone + Debug { + /// Returns error if client has no connected peers or it believes it is far + /// behind the chain tip. + async fn ensure_synced(&self) -> Result<()>; + /// Reconnects the client. + async fn reconnect(&self) -> Result<()>; + + /// Return hash of the genesis block. + fn genesis_hash(&self) -> HashOf; + /// Get header hash by number. + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result>; + /// Get header by hash. + async fn header_by_hash(&self, hash: HashOf) -> Result>; + /// Get header by number. + async fn header_by_number(&self, number: BlockNumberOf) -> Result> { + self.header_by_hash(self.header_hash_by_number(number).await?).await + } + /// Get block by hash. + async fn block_by_hash(&self, hash: HashOf) -> Result>; + + /// Get best finalized header hash. + async fn best_finalized_header_hash(&self) -> Result>; + /// Get best finalized header number. + async fn best_finalized_header_number(&self) -> Result> { + Ok(*self.best_finalized_header().await?.number()) + } + /// Get best finalized header. + async fn best_finalized_header(&self) -> Result> { + self.header_by_hash(self.best_finalized_header_hash().await?).await + } + + /// Get best header. + async fn best_header(&self) -> Result>; + /// Get best header hash. + async fn best_header_hash(&self) -> Result> { + Ok(self.best_header().await?.hash()) + } + + /// Subscribe to GRANDPA finality justifications. + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa; + /// Generates a proof of key ownership for the given authority in the given set. + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result>; + + /// Subscribe to BEEFY finality justifications. + async fn subscribe_beefy_finality_justifications(&self) -> Result>; + + /// Return `tokenDecimals` property from the set of chain properties. + async fn token_decimals(&self) -> Result>; + /// Get runtime version of the connected chain. + async fn runtime_version(&self) -> Result; + /// Get partial runtime version, to use when signing transactions. + async fn simple_runtime_version(&self) -> Result; + /// Returns `true` if version guard can be started. 
+ /// + /// There's no reason to run version guard when version mode is set to `Auto`. It can + /// lead to relay shutdown when chain is upgraded, even though we have explicitly + /// said that we don't want to shutdown. + fn can_start_version_guard(&self) -> bool; + + /// Read raw value from runtime storage. + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result>; + /// Read and decode value from runtime storage. + async fn storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + self.raw_storage_value(at, storage_key.clone()) + .await? + .map(|encoded_value| { + T::decode(&mut &encoded_value.0[..]).map_err(|e| { + Error::failed_to_read_storage_value::(at, storage_key, e.into()) + }) + }) + .transpose() + } + /// Read and decode value from runtime storage map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage map. + async fn storage_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + storage_key: &T::Key, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, storage_key)).await + } + /// Read and decode value from runtime storage double map. + /// + /// `pallet_prefix` is the name of the pallet (used in `construct_runtime`), which + /// "contains" the storage double map. + async fn storage_double_map_value( + &self, + at: HashOf, + pallet_prefix: &str, + key1: &T::Key1, + key2: &T::Key2, + ) -> Result> { + self.storage_value(at, T::final_key(pallet_prefix, key1, key2)).await + } + + /// Returns pending extrinsics from transaction pool. + async fn pending_extrinsics(&self) -> Result>; + /// Submit unsigned extrinsic for inclusion in a block. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result>; + /// Submit an extrinsic signed by given account. + /// + /// All calls of this method are synchronized, so there can't be more than one active + /// `submit_signed_extrinsic()` call. This guarantees that no nonces collision may happen + /// if all client instances are clones of the same initial `Client`. + /// + /// Note: The given transaction needs to be SCALE encoded beforehand. + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Does exactly the same as `submit_signed_extrinsic`, but keeps watching for extrinsic status + /// after submission. + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>; + /// Validate transaction at given block. + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + /// Returns weight of the given transaction. + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result; + + /// Execute runtime call at given block. + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result; + /// Execute runtime call at given block, provided the input and output types. + /// It also performs the input encode and output decode. 
+ async fn state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let encoded_arguments = arguments.encode(); + let encoded_output = self.raw_state_call(at, method.clone(), arguments).await?; + Ret::decode(&mut &encoded_output.0[..]).map_err(|e| { + Error::failed_state_call::(at, method, Bytes(encoded_arguments), e.into()) + }) + } + + /// Returns storage proof of given storage keys. + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result; +} diff --git a/bridges/relays/client-substrate/src/client/mod.rs b/bridges/relays/client-substrate/src/client/mod.rs new file mode 100644 index 0000000000000..a051d15e9ccba --- /dev/null +++ b/bridges/relays/client-substrate/src/client/mod.rs @@ -0,0 +1,93 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Layered Substrate client implementation. + +use crate::{Chain, ConnectionParams}; + +use caching::CachingClient; +use num_traits::Saturating; +use rpc::RpcClient; +use sp_version::RuntimeVersion; + +pub mod caching; +pub mod rpc; + +// don't want to move all the trait code (200+ lines) here and there's no better name +#[allow(clippy::module_inception)] +mod client; +mod rpc_api; +mod subscription; + +pub use client::Client; +pub use subscription::{SharedSubscriptionFactory, Subscription}; + +/// Type of RPC client with caching support. +pub type RpcWithCachingClient = CachingClient>; + +/// Creates new RPC client with caching support. +pub async fn rpc_with_caching(params: ConnectionParams) -> RpcWithCachingClient { + let rpc = rpc::RpcClient::::new(params).await; + caching::CachingClient::new(rpc) +} + +/// The difference between best block number and number of its ancestor, that is enough +/// for us to consider that ancestor an "ancient" block with dropped state. +/// +/// The relay does not assume that it is connected to the archive node, so it always tries +/// to use the best available chain state. But sometimes it still may use state of some +/// old block. If the state of that block is already dropped, relay will see errors when +/// e.g. it tries to prove something. +/// +/// By default Substrate-based nodes are storing state for last 256 blocks. We'll use +/// half of this value. +pub const ANCIENT_BLOCK_THRESHOLD: u32 = 128; + +/// Returns `true` if we think that the state is already discarded for given block. +pub fn is_ancient_block + PartialOrd + Saturating>(block: N, best: N) -> bool { + best.saturating_sub(block) >= N::from(ANCIENT_BLOCK_THRESHOLD) +} + +/// Opaque GRANDPA authorities set. +pub type OpaqueGrandpaAuthoritiesSet = Vec; + +/// A simple runtime version. It only includes the `spec_version` and `transaction_version`. +#[derive(Copy, Clone, Debug)] +pub struct SimpleRuntimeVersion { + /// Version of the runtime specification. 
+ pub spec_version: u32, + /// All existing dispatches are fully compatible when this number doesn't change. + pub transaction_version: u32, +} + +impl SimpleRuntimeVersion { + /// Create a new instance of `SimpleRuntimeVersion` from a `RuntimeVersion`. + pub const fn from_runtime_version(runtime_version: &RuntimeVersion) -> Self { + Self { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + } + } +} + +/// Chain runtime version in client +#[derive(Copy, Clone, Debug)] +pub enum ChainRuntimeVersion { + /// Auto query from chain. + Auto, + /// Custom runtime version, defined by user. + Custom(SimpleRuntimeVersion), +} diff --git a/bridges/relays/client-substrate/src/client/rpc.rs b/bridges/relays/client-substrate/src/client/rpc.rs new file mode 100644 index 0000000000000..ee8dba89d2437 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/rpc.rs @@ -0,0 +1,673 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . + +//! Client implementation that connects to the Substrate node over `ws`/`wss` connection +//! and is using RPC methods to get required data and submit transactions. 
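The `SimpleRuntimeVersion`/`ChainRuntimeVersion` pair above drives the runtime-version guard that `rpc.rs` implements further down in `ensure_correct_runtime_version`. A minimal, self-contained sketch of the intended semantics, using stand-in types rather than the crate's own: `Auto` means "trust whatever the chain reports", while `Custom` compares `spec_version`s and either keeps waiting for the upgrade or aborts.

```rust
use std::cmp::Ordering;

// Stand-ins for the real types defined above; only the fields used here.
#[derive(Copy, Clone, Debug, PartialEq)]
struct SimpleRuntimeVersion {
    spec_version: u32,
    transaction_version: u32,
}

#[derive(Copy, Clone, Debug)]
enum ChainRuntimeVersion {
    Auto,
    Custom(SimpleRuntimeVersion),
}

#[derive(Debug, PartialEq)]
enum VersionCheck {
    Ok,
    WaitForUpgrade,
    Abort,
}

fn check_version(expected: ChainRuntimeVersion, actual: SimpleRuntimeVersion) -> VersionCheck {
    match expected {
        // `Auto`: follow whatever the chain reports, nothing to verify.
        ChainRuntimeVersion::Auto => VersionCheck::Ok,
        ChainRuntimeVersion::Custom(expected) =>
            match actual.spec_version.cmp(&expected.spec_version) {
                // chain not upgraded yet: keep the relay waiting
                Ordering::Less => VersionCheck::WaitForUpgrade,
                Ordering::Equal => VersionCheck::Ok,
                // chain is already ahead of the bundled version: abort
                Ordering::Greater => VersionCheck::Abort,
            },
    }
}
```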
+ +use crate::{ + client::{ + rpc_api::{ + SubstrateAuthorClient, SubstrateBeefyClient, SubstrateChainClient, + SubstrateFrameSystemClient, SubstrateGrandpaClient, SubstrateStateClient, + SubstrateSystemClient, + }, + Client, + }, + error::{Error, Result}, + guard::Environment, + transaction_stall_timeout, AccountIdOf, AccountKeyPairOf, BalanceOf, BlockNumberOf, Chain, + ChainRuntimeVersion, ChainWithGrandpa, ChainWithTransactions, ConnectionParams, HashOf, + HeaderIdOf, HeaderOf, NonceOf, SignParam, SignedBlockOf, SimpleRuntimeVersion, Subscription, + TransactionTracker, UnsignedTransaction, +}; + +use async_std::sync::{Arc, Mutex, RwLock}; +use async_trait::async_trait; +use bp_runtime::HeaderIdProvider; +use codec::Encode; +use frame_support::weights::Weight; +use futures::{TryFutureExt, TryStreamExt}; +use jsonrpsee::ws_client::{WsClient, WsClientBuilder}; +use num_traits::Zero; +use pallet_transaction_payment::RuntimeDispatchInfo; +use relay_utils::{relay_loop::RECONNECT_DELAY, STALL_TIMEOUT}; +use sp_core::{ + storage::{StorageData, StorageKey}, + Bytes, Hasher, Pair, +}; +use sp_runtime::transaction_validity::{TransactionSource, TransactionValidity}; +use sp_trie::StorageProof; +use sp_version::RuntimeVersion; +use std::{cmp::Ordering, future::Future, marker::PhantomData}; + +const MAX_SUBSCRIPTION_CAPACITY: usize = 4096; + +const SUB_API_TXPOOL_VALIDATE_TRANSACTION: &str = "TaggedTransactionQueue_validate_transaction"; +const SUB_API_TX_PAYMENT_QUERY_INFO: &str = "TransactionPaymentApi_query_info"; +const SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF: &str = + "GrandpaApi_generate_key_ownership_proof"; + +/// Client implementation that connects to the Substrate node over `ws`/`wss` connection +/// and is using RPC methods to get required data and submit transactions. +pub struct RpcClient { + // Lock order: `submit_signed_extrinsic_lock`, `data` + /// Client connection params. + params: Arc, + /// If several tasks are submitting their transactions simultaneously using + /// `submit_signed_extrinsic` method, they may get the same transaction nonce. So one of + /// transactions will be rejected from the pool. This lock is here to prevent situations like + /// that. + submit_signed_extrinsic_lock: Arc>, + /// Genesis block hash. + genesis_hash: HashOf, + /// Shared dynamic data. + data: Arc>, + /// Generic arguments dump. + _phantom: PhantomData, +} + +/// Client data, shared by all `RpcClient` clones. +struct ClientData { + /// Tokio runtime handle. + tokio: Arc, + /// Substrate RPC client. + client: Arc, +} + +impl std::fmt::Debug for RpcClient { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.write_fmt(format_args!("RpcClient<{}>", C::NAME)) + } +} + +impl RpcClient { + /// Returns client that is able to call RPCs on Substrate node over websocket connection. + /// + /// This function will keep connecting to given Substrate node until connection is established + /// and is functional. If attempt fail, it will wait for `RECONNECT_DELAY` and retry again. + pub async fn new(params: ConnectionParams) -> Self { + let params = Arc::new(params); + loop { + match Self::try_connect(params.clone()).await { + Ok(client) => return client, + Err(error) => log::error!( + target: "bridge", + "Failed to connect to {} node: {:?}. Going to retry in {}s", + C::NAME, + error, + RECONNECT_DELAY.as_secs(), + ), + } + + async_std::task::sleep(RECONNECT_DELAY).await; + } + } + + /// Try to connect to Substrate node over websocket. 
Returns Substrate RPC client if connection + /// has been established or error otherwise. + async fn try_connect(params: Arc) -> Result { + let (tokio, client) = Self::build_client(¶ms).await?; + + let genesis_hash_client = client.clone(); + let genesis_hash = tokio + .spawn(async move { + SubstrateChainClient::::block_hash(&*genesis_hash_client, Some(Zero::zero())) + .await + }) + .await??; + + let chain_runtime_version = params.chain_runtime_version; + let mut client = Self { + params, + submit_signed_extrinsic_lock: Arc::new(Mutex::new(())), + genesis_hash, + data: Arc::new(RwLock::new(ClientData { tokio, client })), + _phantom: PhantomData, + }; + Self::ensure_correct_runtime_version(&mut client, chain_runtime_version).await?; + Ok(client) + } + + // Check runtime version to understand if we need are connected to expected version, or we + // need to wait for upgrade, we need to abort immediately. + async fn ensure_correct_runtime_version>( + env: &mut E, + expected: ChainRuntimeVersion, + ) -> Result<()> { + // we are only interested if version mode is bundled or passed using CLI + let expected = match expected { + ChainRuntimeVersion::Auto => return Ok(()), + ChainRuntimeVersion::Custom(expected) => expected, + }; + + // we need to wait if actual version is < than expected, we are OK of versions are the + // same and we need to abort if actual version is > than expected + let actual = SimpleRuntimeVersion::from_runtime_version(&env.runtime_version().await?); + match actual.spec_version.cmp(&expected.spec_version) { + Ordering::Less => + Err(Error::WaitingForRuntimeUpgrade { chain: C::NAME.into(), expected, actual }), + Ordering::Equal => Ok(()), + Ordering::Greater => { + log::error!( + target: "bridge", + "The {} client is configured to use runtime version {expected:?} and actual \ + version is {actual:?}. Aborting", + C::NAME, + ); + env.abort().await; + Err(Error::Custom("Aborted".into())) + }, + } + } + + /// Build client to use in connection. + async fn build_client( + params: &ConnectionParams, + ) -> Result<(Arc, Arc)> { + let tokio = tokio::runtime::Runtime::new()?; + let uri = match params.uri { + Some(ref uri) => uri.clone(), + None => { + format!( + "{}://{}:{}{}", + if params.secure { "wss" } else { "ws" }, + params.host, + params.port, + match params.path { + Some(ref path) => format!("/{}", path), + None => String::new(), + }, + ) + }, + }; + log::info!(target: "bridge", "Connecting to {} node at {}", C::NAME, uri); + + let client = tokio + .spawn(async move { + WsClientBuilder::default() + .max_buffer_capacity_per_subscription(MAX_SUBSCRIPTION_CAPACITY) + .build(&uri) + .await + }) + .await??; + + Ok((Arc::new(tokio), Arc::new(client))) + } + + /// Execute jsonrpsee future in tokio context. + async fn jsonrpsee_execute(&self, make_jsonrpsee_future: MF) -> Result + where + MF: FnOnce(Arc) -> F + Send + 'static, + F: Future> + Send + 'static, + T: Send + 'static, + { + let data = self.data.read().await; + let client = data.client.clone(); + data.tokio.spawn(make_jsonrpsee_future(client)).await? + } + + /// Prepare parameters used to sign chain transactions. + async fn build_sign_params(&self, signer: AccountKeyPairOf) -> Result> + where + C: ChainWithTransactions, + { + let runtime_version = self.simple_runtime_version().await?; + Ok(SignParam:: { + spec_version: runtime_version.spec_version, + transaction_version: runtime_version.transaction_version, + genesis_hash: self.genesis_hash, + signer, + }) + } + + /// Get the nonce of the given Substrate account. 
+ pub async fn next_account_index(&self, account: AccountIdOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateFrameSystemClient::::account_next_index(&*client, account).await?) + }) + .await + } +} + +impl Clone for RpcClient { + fn clone(&self) -> Self { + RpcClient { + params: self.params.clone(), + submit_signed_extrinsic_lock: self.submit_signed_extrinsic_lock.clone(), + genesis_hash: self.genesis_hash, + data: self.data.clone(), + _phantom: PhantomData, + } + } +} + +#[async_trait] +impl Client for RpcClient { + async fn ensure_synced(&self) -> Result<()> { + let health = self + .jsonrpsee_execute(|client| async move { + Ok(SubstrateSystemClient::::health(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_get_system_health::(e))?; + + let is_synced = !health.is_syncing && (!health.should_have_peers || health.peers > 0); + if is_synced { + Ok(()) + } else { + Err(Error::ClientNotSynced(health)) + } + } + + async fn reconnect(&self) -> Result<()> { + let mut data = self.data.write().await; + let (tokio, client) = Self::build_client(&self.params).await?; + data.tokio = tokio; + data.client = client; + Ok(()) + } + + fn genesis_hash(&self) -> HashOf { + self.genesis_hash + } + + async fn header_hash_by_number(&self, number: BlockNumberOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block_hash(&*client, Some(number)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_hash_by_number::(number, e)) + } + + async fn header_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::header(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_header_by_hash::(hash, e)) + } + + async fn block_by_hash(&self, hash: HashOf) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateChainClient::::block(&*client, Some(hash)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_block_by_hash::(hash, e)) + } + + async fn best_finalized_header_hash(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::finalized_head(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_finalized_header_hash::(e)) + } + + async fn best_header(&self) -> Result> { + self.jsonrpsee_execute(|client| async move { + Ok(SubstrateChainClient::::header(&*client, None).await?) + }) + .await + .map_err(|e| Error::failed_to_read_best_header::(e)) + } + + async fn subscribe_grandpa_finality_justifications(&self) -> Result> + where + C: ChainWithGrandpa, + { + Subscription::new( + C::NAME.into(), + "GRANDPA justifications".into(), + self.jsonrpsee_execute(move |client| async move { + Ok(Box::new( + SubstrateGrandpaClient::::subscribe_justifications(&*client) + .await? 
+ .map_err(Into::into), + )) + }) + .map_err(|e| Error::failed_to_subscribe_justification::(e)) + .await?, + ) + .await + } + + async fn generate_grandpa_key_ownership_proof( + &self, + at: HashOf, + set_id: sp_consensus_grandpa::SetId, + authority_id: sp_consensus_grandpa::AuthorityId, + ) -> Result> { + self.state_call( + at, + SUB_API_GRANDPA_GENERATE_KEY_OWNERSHIP_PROOF.into(), + (set_id, authority_id), + ) + .await + } + + async fn subscribe_beefy_finality_justifications(&self) -> Result> { + Subscription::new( + C::NAME.into(), + "BEEFY justifications".into(), + self.jsonrpsee_execute(move |client| async move { + Ok(Box::new( + SubstrateBeefyClient::::subscribe_justifications(&*client) + .await? + .map_err(Into::into), + )) + }) + .map_err(|e| Error::failed_to_subscribe_justification::(e)) + .await?, + ) + .await + } + + async fn token_decimals(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let system_properties = SubstrateSystemClient::::properties(&*client).await?; + Ok(system_properties.get("tokenDecimals").and_then(|v| v.as_u64())) + }) + .await + } + + async fn runtime_version(&self) -> Result { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateStateClient::::runtime_version(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_read_runtime_version::(e)) + } + + async fn simple_runtime_version(&self) -> Result { + Ok(match self.params.chain_runtime_version { + ChainRuntimeVersion::Auto => { + let runtime_version = self.runtime_version().await?; + SimpleRuntimeVersion::from_runtime_version(&runtime_version) + }, + ChainRuntimeVersion::Custom(ref version) => *version, + }) + } + + fn can_start_version_guard(&self) -> bool { + !matches!(self.params.chain_runtime_version, ChainRuntimeVersion::Auto) + } + + async fn raw_storage_value( + &self, + at: HashOf, + storage_key: StorageKey, + ) -> Result> { + let cloned_storage_key = storage_key.clone(); + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateStateClient::::storage(&*client, cloned_storage_key, Some(at)).await?) + }) + .await + .map_err(|e| Error::failed_to_read_storage_value::(at, storage_key, e)) + } + + async fn pending_extrinsics(&self) -> Result> { + self.jsonrpsee_execute(move |client| async move { + Ok(SubstrateAuthorClient::::pending_extrinsics(&*client).await?) + }) + .await + .map_err(|e| Error::failed_to_get_pending_extrinsics::(e)) + } + + async fn submit_unsigned_extrinsic(&self, transaction: Bytes) -> Result> { + self.jsonrpsee_execute(move |client| async move { + let tx_hash = SubstrateAuthorClient::::submit_extrinsic(&*client, transaction) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(tx_hash) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn submit_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let signing_data = self.build_sign_params(signer.clone()).await?; + + // By using parent of best block here, we are protecting again best-block reorganizations. 
+ // E.g. transaction may have been submitted when the best block was `A[num=100]`. Then it + // has been changed to `B[num=100]`. Hash of `A` has been included into transaction + // signature payload. So when signature will be checked, the check will fail and transaction + // will be dropped from the pool. + let best_header_id = best_header.parent_id().unwrap_or_else(|| best_header.id()); + + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + self.submit_unsigned_extrinsic(Bytes(signed_extrinsic)).await + } + + async fn submit_and_watch_signed_extrinsic( + &self, + signer: &AccountKeyPairOf, + prepare_extrinsic: impl FnOnce(HeaderIdOf, NonceOf) -> Result> + + Send + + 'static, + ) -> Result> + where + C: ChainWithTransactions, + AccountIdOf: From< as Pair>::Public>, + { + let self_clone = self.clone(); + let signing_data = self.build_sign_params(signer.clone()).await?; + let _guard = self.submit_signed_extrinsic_lock.lock().await; + let transaction_nonce = self.next_account_index(signer.public().into()).await?; + let best_header = self.best_header().await?; + let best_header_id = best_header.id(); + self.jsonrpsee_execute(move |client| async move { + let extrinsic = prepare_extrinsic(best_header_id, transaction_nonce)?; + let stall_timeout = transaction_stall_timeout( + extrinsic.era.mortality_period(), + C::AVERAGE_BLOCK_INTERVAL, + STALL_TIMEOUT, + ); + let signed_extrinsic = C::sign_transaction(signing_data, extrinsic)?.encode(); + let tx_hash = C::Hasher::hash(&signed_extrinsic); + let subscription = SubstrateAuthorClient::::submit_and_watch_extrinsic( + &*client, + Bytes(signed_extrinsic), + ) + .await + .map_err(|e| { + log::error!(target: "bridge", "Failed to send transaction to {} node: {:?}", C::NAME, e); + e + })?; + log::trace!(target: "bridge", "Sent transaction to {} node: {:?}", C::NAME, tx_hash); + Ok(TransactionTracker::new( + self_clone, + stall_timeout, + tx_hash, + Subscription::new( + C::NAME.into(), + "transaction events".into(), + Box::new(subscription.map_err(Into::into)), + ) + .await?, + )) + }) + .await + .map_err(|e| Error::failed_to_submit_transaction::(e)) + } + + async fn validate_transaction( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + self.state_call( + at, + SUB_API_TXPOOL_VALIDATE_TRANSACTION.into(), + (TransactionSource::External, transaction, at), + ) + .await + } + + async fn estimate_extrinsic_weight( + &self, + at: HashOf, + transaction: SignedTransaction, + ) -> Result { + let transaction_len = transaction.encoded_size() as u32; + let dispatch_info: RuntimeDispatchInfo> = self + .state_call(at, SUB_API_TX_PAYMENT_QUERY_INFO.into(), (transaction, transaction_len)) + .await?; + + Ok(dispatch_info.weight) + } + + async fn raw_state_call( + &self, + at: HashOf, + method: String, + arguments: Args, + ) -> Result { + let arguments = Bytes(arguments.encode()); + let arguments_clone = arguments.clone(); + let method_clone = method.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::call(&*client, method, arguments, Some(at)) + .await + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_state_call::(at, method_clone, arguments_clone, e)) + } + + async fn prove_storage(&self, at: HashOf, keys: Vec) -> Result { + let keys_clone = keys.clone(); + self.jsonrpsee_execute(move |client| async move { + SubstrateStateClient::::prove_storage(&*client, keys, Some(at)) + .await + 
.map(|proof| StorageProof::new(proof.proof.into_iter().map(|b| b.0))) + .map_err(Into::into) + }) + .await + .map_err(|e| Error::failed_to_prove_storage::(at, keys_clone, e)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{guard::tests::TestEnvironment, test_chain::TestChain}; + use futures::{channel::mpsc::unbounded, FutureExt, SinkExt, StreamExt}; + + async fn run_ensure_correct_runtime_version( + expected: ChainRuntimeVersion, + actual: RuntimeVersion, + ) -> Result<()> { + let ( + (mut runtime_version_tx, runtime_version_rx), + (slept_tx, _slept_rx), + (aborted_tx, mut aborted_rx), + ) = (unbounded(), unbounded(), unbounded()); + runtime_version_tx.send(actual).await.unwrap(); + let mut env = TestEnvironment { runtime_version_rx, slept_tx, aborted_tx }; + + let ensure_correct_runtime_version = + RpcClient::::ensure_correct_runtime_version(&mut env, expected).boxed(); + let aborted = aborted_rx.next().map(|_| Err(Error::Custom("".into()))).boxed(); + futures::pin_mut!(ensure_correct_runtime_version, aborted); + futures::future::select(ensure_correct_runtime_version, aborted) + .await + .into_inner() + .0 + } + + #[async_std::test] + async fn ensure_correct_runtime_version_works() { + // when we are configured to use auto version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Auto, + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual == expected + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 100, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Ok(()), + )); + // when actual spec version < expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { spec_version: 99, transaction_version: 100, ..Default::default() }, + ) + .await, + Err(Error::WaitingForRuntimeUpgrade { + expected: SimpleRuntimeVersion { spec_version: 100, transaction_version: 100 }, + actual: SimpleRuntimeVersion { spec_version: 99, transaction_version: 100 }, + .. + }), + )); + // when actual spec version > expected spec version + assert!(matches!( + run_ensure_correct_runtime_version( + ChainRuntimeVersion::Custom(SimpleRuntimeVersion { + spec_version: 100, + transaction_version: 100 + }), + RuntimeVersion { + spec_version: 101, + transaction_version: 100, + ..Default::default() + }, + ) + .await, + Err(Error::Custom(_)), + )); + } +} diff --git a/bridges/relays/client-substrate/src/rpc.rs b/bridges/relays/client-substrate/src/client/rpc_api.rs similarity index 80% rename from bridges/relays/client-substrate/src/rpc.rs rename to bridges/relays/client-substrate/src/client/rpc_api.rs index 60c29cdeb5c77..5af4034064262 100644 --- a/bridges/relays/client-substrate/src/rpc.rs +++ b/bridges/relays/client-substrate/src/client/rpc_api.rs @@ -16,15 +16,9 @@ //! The most generic Substrate node RPC interface. 
-use async_trait::async_trait; - use crate::{Chain, ChainWithGrandpa, TransactionStatusOf}; -use jsonrpsee::{ - core::{client::Subscription, ClientError}, - proc_macros::rpc, - ws_client::WsClient, -}; +use jsonrpsee::proc_macros::rpc; use pallet_transaction_payment_rpc_runtime_api::FeeDetails; use sc_rpc_api::{state::ReadProof, system::Health}; use sp_core::{ @@ -106,15 +100,6 @@ pub(crate) trait SubstrateState { ) -> RpcResult>; } -/// RPC methods that we are using for a certain finality gadget. -#[async_trait] -pub trait SubstrateFinalityClient { - /// Subscribe to finality justifications. - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError>; -} - /// RPC methods of Substrate `grandpa` namespace, that we are using. #[rpc(client, client_bounds(C: ChainWithGrandpa), namespace = "grandpa")] pub(crate) trait SubstrateGrandpa { @@ -123,17 +108,6 @@ pub(crate) trait SubstrateGrandpa { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `grandpa` namespace, that we are using. -pub struct SubstrateGrandpaFinalityClient; -#[async_trait] -impl SubstrateFinalityClient for SubstrateGrandpaFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateGrandpaClient::::subscribe_justifications(client).await - } -} - // TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged /// RPC methods of Substrate `beefy` namespace, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "beefy")] @@ -143,18 +117,6 @@ pub(crate) trait SubstrateBeefy { async fn subscribe_justifications(&self); } -/// RPC finality methods of Substrate `beefy` namespace, that we are using. -pub struct SubstrateBeefyFinalityClient; -// TODO: Use `ChainWithBeefy` instead of `Chain` after #1606 is merged -#[async_trait] -impl SubstrateFinalityClient for SubstrateBeefyFinalityClient { - async fn subscribe_justifications( - client: &WsClient, - ) -> Result, ClientError> { - SubstrateBeefyClient::::subscribe_justifications(client).await - } -} - /// RPC methods of Substrate `system` frame pallet, that we are using. #[rpc(client, client_bounds(C: Chain), namespace = "system")] pub(crate) trait SubstrateFrameSystem { diff --git a/bridges/relays/client-substrate/src/client/subscription.rs b/bridges/relays/client-substrate/src/client/subscription.rs new file mode 100644 index 0000000000000..643376ac65c18 --- /dev/null +++ b/bridges/relays/client-substrate/src/client/subscription.rs @@ -0,0 +1,228 @@ +// Copyright 2019-2021 Parity Technologies (UK) Ltd. +// This file is part of Parity Bridges Common. + +// Parity Bridges Common is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity Bridges Common is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity Bridges Common. If not, see . 
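The `subscription.rs` module added below multiplexes one underlying `jsonrpsee` subscription to any number of subscribers over bounded channels; a subscriber whose channel is full or closed is simply dropped (see `notify_subscribers`, which `try_send`s and `swap_remove`s on failure). A self-contained sketch of that fan-out step, shown with plain `std` sync channels instead of the module's async ones:

```rust
use std::sync::mpsc::SyncSender;

/// Push one item to every live subscriber, dropping subscribers whose channel
/// is closed or already full (the same policy as `notify_subscribers` below).
fn fan_out<T: Clone>(subscribers: &mut Vec<SyncSender<T>>, item: T) {
    let mut i = 0;
    while i < subscribers.len() {
        match subscribers[i].try_send(item.clone()) {
            Ok(()) => i += 1,
            // full or disconnected: forget this subscriber, keep the rest
            Err(_) => {
                subscribers.swap_remove(i);
            },
        }
    }
}
```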
+ +use crate::error::Result; + +use async_std::{ + channel::{bounded, Receiver, Sender}, + stream::StreamExt, +}; +use futures::{future::FutureExt, Stream}; +use sp_runtime::DeserializeOwned; +use std::future::Future; + +/// Once channel reaches this capacity, the subscription breaks. +const CHANNEL_CAPACITY: usize = 128; + +/// Underlying subscription type. +pub type UnderlyingSubscription = Box> + Unpin + Send>; + +/// Subscription factory that produces subscriptions, sharing the same background thread. +#[derive(Clone)] +pub struct SharedSubscriptionFactory { + subscribers_sender: Sender>>, +} + +impl SharedSubscriptionFactory { + /// Create new subscription factory. + pub async fn new( + chain_name: String, + item_type: String, + subscribe: impl Future>> + Send + 'static, + ) -> Self { + let (subscribers_sender, subscribers_receiver) = bounded(CHANNEL_CAPACITY); + async_std::task::spawn(background_worker( + chain_name, + item_type, + subscribe, + subscribers_receiver, + )); + Self { subscribers_sender } + } + + /// Produce new subscription. + pub async fn subscribe(&self) -> Result> { + let (items_sender, items_receiver) = bounded(CHANNEL_CAPACITY); + self.subscribers_sender.try_send(items_sender)?; + + Ok(Subscription { items_receiver, subscribers_sender: self.subscribers_sender.clone() }) + } +} + +/// Subscription to some chain events. +pub struct Subscription { + items_receiver: Receiver>, + subscribers_sender: Sender>>, +} + +impl Subscription { + /// Create new subscription. + pub async fn new( + chain_name: String, + item_type: String, + subscription: UnderlyingSubscription, + ) -> Result { + SharedSubscriptionFactory::::new( + chain_name, + item_type, + futures::future::ready(Ok(subscription)), + ) + .await + .subscribe() + .await + } + + /// Return subscription factory for this subscription. + pub fn factory(&self) -> SharedSubscriptionFactory { + SharedSubscriptionFactory { subscribers_sender: self.subscribers_sender.clone() } + } + + /// Consumes subscription and returns future items stream. + pub fn into_stream(self) -> impl futures::Stream { + futures::stream::unfold(self, |mut this| async { + let item = this.items_receiver.next().await.unwrap_or(None); + item.map(|i| (i, this)) + }) + } + + /// Return next item from the subscription. + pub async fn next(&self) -> Result> { + Ok(self.items_receiver.recv().await?) + } +} + +/// Background worker that is executed in tokio context as `jsonrpsee` requires. +/// +/// This task may exit under some circumstances. It'll send the correspondent +/// message (`Err` or `None`) to all known listeners. Also, when it stops, all +/// subsequent reads and new subscribers will get the connection error (`ChannelError`). +async fn background_worker( + chain_name: String, + item_type: String, + subscribe: impl Future>> + Send + 'static, + mut subscribers_receiver: Receiver>>, +) { + fn log_task_exit(chain_name: &str, item_type: &str, reason: &str) { + log::debug!( + target: "bridge", + "Background task of {} subscription of {} has stopped: {}", + item_type, + chain_name, + reason, + ); + } + + async fn notify_subscribers( + chain_name: &str, + item_type: &str, + subscribers: &mut Vec>>, + result: Option>, + ) { + let result_to_send = match result { + Some(Ok(item)) => Some(item), + Some(Err(e)) => { + log::debug!( + target: "bridge", + "{} stream of {} has returned error: {:?}. It may need to be restarted", + item_type, + chain_name, + e, + ); + None + }, + None => { + log::debug!( + target: "bridge", + "{} stream of {} has returned `None`. 
It may need to be restarted", + item_type, + chain_name, + ); + None + }, + }; + + let mut i = 0; + while i < subscribers.len() { + let result_to_send = result_to_send.clone(); + let send_result = subscribers[i].try_send(result_to_send); + match send_result { + Ok(_) => { + i += 1; + }, + Err(_) => { + subscribers.swap_remove(i); + }, + } + } + } + + log::trace!( + target: "bridge", + "Starting background task for {} {} subscription stream.", + chain_name, + item_type, + ); + + // wait for first subscriber until actually starting subscription + let subscriber = match subscribers_receiver.next().await { + Some(subscriber) => subscriber, + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(&chain_name, &item_type, "client has stopped") + }, + }; + + // actually subscribe + let mut subscribers = vec![subscriber]; + let mut jsonrpsee_subscription = match subscribe.await { + Ok(jsonrpsee_subscription) => jsonrpsee_subscription, + Err(e) => { + let reason = format!("failed to subscribe: {:?}", e); + notify_subscribers(&chain_name, &item_type, &mut subscribers, Some(Err(e))).await; + + // we cant't do anything without underlying subscription, so let's exit + return log_task_exit(&chain_name, &item_type, &reason) + }, + }; + + // start listening for new items and receivers + loop { + futures::select! { + subscriber = subscribers_receiver.next().fuse() => { + match subscriber { + Some(subscriber) => subscribers.push(subscriber), + None => { + // it means that the last subscriber/factory has been dropped, so we need to + // exit too + return log_task_exit(&chain_name, &item_type, "client has stopped") + }, + } + }, + item = jsonrpsee_subscription.next().fuse() => { + let is_stream_finished = item.is_none(); + let item = item.map(|r| r.map_err(Into::into)); + notify_subscribers(&chain_name, &item_type, &mut subscribers, item).await; + + // it means that the underlying client has dropped, so we can't do anything here + // and need to stop the task + if is_stream_finished { + return log_task_exit(&chain_name, &item_type, "stream has finished"); + } + }, + } + } +} diff --git a/bridges/relays/client-substrate/src/error.rs b/bridges/relays/client-substrate/src/error.rs index 0b44668181887..4be65864eb440 100644 --- a/bridges/relays/client-substrate/src/error.rs +++ b/bridges/relays/client-substrate/src/error.rs @@ -16,12 +16,13 @@ //! Substrate node RPC errors. -use crate::SimpleRuntimeVersion; +use crate::{BlockNumberOf, Chain, HashOf, SimpleRuntimeVersion}; + use bp_polkadot_core::parachains::ParaId; use jsonrpsee::core::ClientError as RpcError; use relay_utils::MaybeConnectionError; use sc_rpc_api::system::Health; -use sp_core::storage::StorageKey; +use sp_core::{storage::StorageKey, Bytes}; use sp_runtime::transaction_validity::TransactionValidityError; use thiserror::Error; @@ -42,12 +43,10 @@ pub enum Error { /// The response from the server could not be SCALE decoded. #[error("Response parse failed: {0}")] ResponseParseFailed(#[from] codec::Error), - /// Account does not exist on the chain. - #[error("Account does not exist on the chain.")] - AccountDoesNotExist, - /// Runtime storage is missing some mandatory value. - #[error("Mandatory storage value is missing from the runtime storage.")] - MissingMandatoryStorageValue, + /// Internal channel error - communication channel is either closed, or full. + /// It can be solved with reconnect. 
+ #[error("Internal communication channel error: {0:?}.")] + ChannelError(String), /// Required parachain head is not present at the relay chain. #[error("Parachain {0:?} head {1} is missing from the relay chain storage.")] MissingRequiredParachainHead(ParaId, u64), @@ -57,6 +56,14 @@ pub enum Error { /// The client we're connected to is not synced, so we can't rely on its state. #[error("Substrate client is not synced {0}.")] ClientNotSynced(Health), + /// Failed to get system health. + #[error("Failed to get system health of {chain} node: {error:?}.")] + FailedToGetSystemHealth { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, /// Failed to read best finalized header hash from given chain. #[error("Failed to read best finalized header hash of {chain}: {error:?}.")] FailedToReadBestFinalizedHeaderHash { @@ -73,6 +80,16 @@ pub enum Error { /// Underlying error. error: Box, }, + /// Failed to read header hash by number from given chain. + #[error("Failed to read header hash by number {number} of {chain}: {error:?}.")] + FailedToReadHeaderHashByNumber { + /// Name of the chain where the error has happened. + chain: String, + /// Number of the header we've tried to read. + number: String, + /// Underlying error. + error: Box, + }, /// Failed to read header by hash from given chain. #[error("Failed to read header {hash} of {chain}: {error:?}.")] FailedToReadHeaderByHash { @@ -83,35 +100,92 @@ pub enum Error { /// Underlying error. error: Box, }, - /// Failed to execute runtime call at given chain. - #[error("Failed to execute runtime call {method} at {chain}: {error:?}.")] - ErrorExecutingRuntimeCall { + /// Failed to read block by hash from given chain. + #[error("Failed to read block {hash} of {chain}: {error:?}.")] + FailedToReadBlockByHash { /// Name of the chain where the error has happened. chain: String, - /// Runtime method name. - method: String, + /// Hash of the header we've tried to read. + hash: String, /// Underlying error. error: Box, }, /// Failed to read sotrage value at given chain. #[error("Failed to read storage value {key:?} at {chain}: {error:?}.")] - FailedToReadRuntimeStorageValue { + FailedToReadStorageValue { /// Name of the chain where the error has happened. chain: String, + /// Hash of the block we've tried to read value from. + hash: String, /// Runtime storage key key: StorageKey, /// Underlying error. error: Box, }, + /// Failed to read runtime version of given chain. + #[error("Failed to read runtime version of {chain}: {error:?}.")] + FailedToReadRuntimeVersion { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to get pending extrinsics. + #[error("Failed to get pending extrinsics of {chain}: {error:?}.")] + FailedToGetPendingExtrinsics { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Failed to submit transaction. + #[error("Failed to submit {chain} transaction: {error:?}.")] + FailedToSubmitTransaction { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, + /// Runtime call has failed. + #[error("Runtime call {method} with arguments {arguments:?} of chain {chain} at {hash} has failed: {error:?}.")] + FailedStateCall { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to call at. + hash: String, + /// Runtime API method. 
+ method: String, + /// Encoded method arguments. + arguments: Bytes, + /// Underlying error. + error: Box, + }, + /// Failed to prove storage keys. + #[error("Failed to prove storage keys {storage_keys:?} of {chain} at {hash}: {error:?}.")] + FailedToProveStorage { + /// Name of the chain where the error has happened. + chain: String, + /// Hash of the block we've tried to prove keys at. + hash: String, + /// Storage keys we have tried to prove. + storage_keys: Vec, + /// Underlying error. + error: Box, + }, + /// Failed to subscribe to GRANDPA justifications stream. + #[error("Failed to subscribe to {chain} justifications: {error:?}.")] + FailedToSubscribeJustifications { + /// Name of the chain where the error has happened. + chain: String, + /// Underlying error. + error: Box, + }, /// The bridge pallet is halted and all transactions will be rejected. #[error("Bridge pallet is halted.")] BridgePalletIsHalted, /// The bridge pallet is not yet initialized and all transactions will be rejected. #[error("Bridge pallet is not initialized.")] BridgePalletIsNotInitialized, - /// There's no best head of the parachain at the `pallet-bridge-parachains` at the target side. - #[error("No head of the ParaId({0}) at the bridge parachains pallet at {1}.")] - NoParachainHeadAtTarget(u32, String), /// An error has happened when we have tried to parse storage proof. #[error("Error when parsing storage proof: {0:?}.")] StorageProofError(bp_runtime::StorageProofError), @@ -136,7 +210,19 @@ pub enum Error { impl From for Error { fn from(error: tokio::task::JoinError) -> Self { - Error::Custom(format!("Failed to wait tokio task: {error}")) + Error::ChannelError(format!("failed to wait tokio task: {error}")) + } +} + +impl From> for Error { + fn from(error: async_std::channel::TrySendError) -> Self { + Error::ChannelError(format!("`try_send` has failed: {error:?}")) + } +} + +impl From for Error { + fn from(error: async_std::channel::RecvError) -> Self { + Error::ChannelError(format!("`recv` has failed: {error:?}")) } } @@ -145,21 +231,145 @@ impl Error { pub fn boxed(self) -> Box { Box::new(self) } + + /// Returns nested error reference. + pub fn nested(&self) -> Option<&Self> { + match *self { + Self::FailedToReadBestFinalizedHeaderHash { ref error, .. } => Some(&**error), + Self::FailedToReadBestHeader { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderHashByNumber { ref error, .. } => Some(&**error), + Self::FailedToReadHeaderByHash { ref error, .. } => Some(&**error), + Self::FailedToReadBlockByHash { ref error, .. } => Some(&**error), + Self::FailedToReadStorageValue { ref error, .. } => Some(&**error), + Self::FailedToReadRuntimeVersion { ref error, .. } => Some(&**error), + Self::FailedToGetPendingExtrinsics { ref error, .. } => Some(&**error), + Self::FailedToSubmitTransaction { ref error, .. } => Some(&**error), + Self::FailedStateCall { ref error, .. } => Some(&**error), + Self::FailedToProveStorage { ref error, .. } => Some(&**error), + Self::FailedToGetSystemHealth { ref error, .. } => Some(&**error), + Self::FailedToSubscribeJustifications { ref error, .. } => Some(&**error), + _ => None, + } + } + + /// Constructs `FailedToReadHeaderHashByNumber` variant. + pub fn failed_to_read_header_hash_by_number( + number: BlockNumberOf, + e: Error, + ) -> Self { + Error::FailedToReadHeaderHashByNumber { + chain: C::NAME.into(), + number: format!("{number}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadHeaderByHash` variant. 
+ pub fn failed_to_read_header_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBlockByHash` variant. + pub fn failed_to_read_block_by_hash(hash: HashOf, e: Error) -> Self { + Error::FailedToReadHeaderByHash { + chain: C::NAME.into(), + hash: format!("{hash}"), + error: e.boxed(), + } + } + + /// Constructs `FailedToReadBestFinalizedHeaderHash` variant. + pub fn failed_to_read_best_finalized_header_hash(e: Error) -> Self { + Error::FailedToReadBestFinalizedHeaderHash { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadBestHeader` variant. + pub fn failed_to_read_best_header(e: Error) -> Self { + Error::FailedToReadBestHeader { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadRuntimeVersion` variant. + pub fn failed_to_read_runtime_version(e: Error) -> Self { + Error::FailedToReadRuntimeVersion { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToReadStorageValue` variant. + pub fn failed_to_read_storage_value( + at: HashOf, + key: StorageKey, + e: Error, + ) -> Self { + Error::FailedToReadStorageValue { + chain: C::NAME.into(), + hash: format!("{at}"), + key, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetPendingExtrinsics` variant. + pub fn failed_to_get_pending_extrinsics(e: Error) -> Self { + Error::FailedToGetPendingExtrinsics { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubmitTransaction` variant. + pub fn failed_to_submit_transaction(e: Error) -> Self { + Error::FailedToSubmitTransaction { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedStateCall` variant. + pub fn failed_state_call( + at: HashOf, + method: String, + arguments: Bytes, + e: Error, + ) -> Self { + Error::FailedStateCall { + chain: C::NAME.into(), + hash: format!("{at}"), + method, + arguments, + error: e.boxed(), + } + } + + /// Constructs `FailedToProveStorage` variant. + pub fn failed_to_prove_storage( + at: HashOf, + storage_keys: Vec, + e: Error, + ) -> Self { + Error::FailedToProveStorage { + chain: C::NAME.into(), + hash: format!("{at}"), + storage_keys, + error: e.boxed(), + } + } + + /// Constructs `FailedToGetSystemHealth` variant. + pub fn failed_to_get_system_health(e: Error) -> Self { + Error::FailedToGetSystemHealth { chain: C::NAME.into(), error: e.boxed() } + } + + /// Constructs `FailedToSubscribeJustifications` variant. + pub fn failed_to_subscribe_justification(e: Error) -> Self { + Error::FailedToSubscribeJustifications { chain: C::NAME.into(), error: e.boxed() } + } } impl MaybeConnectionError for Error { fn is_connection_error(&self) -> bool { match *self { - Error::RpcError(RpcError::Transport(_)) | - Error::RpcError(RpcError::RestartNeeded(_)) | + Error::ChannelError(_) => true, + Error::RpcError(ref e) => + matches!(*e, RpcError::Transport(_) | RpcError::RestartNeeded(_),), Error::ClientNotSynced(_) => true, - Error::FailedToReadBestFinalizedHeaderHash { ref error, .. } => - error.is_connection_error(), - Error::FailedToReadBestHeader { ref error, .. } => error.is_connection_error(), - Error::FailedToReadHeaderByHash { ref error, .. } => error.is_connection_error(), - Error::ErrorExecutingRuntimeCall { ref error, .. } => error.is_connection_error(), - Error::FailedToReadRuntimeStorageValue { ref error, .. 
} => error.is_connection_error(), - _ => false, + _ => self.nested().map(|e| e.is_connection_error()).unwrap_or(false), } } } diff --git a/bridges/relays/client-substrate/src/guard.rs b/bridges/relays/client-substrate/src/guard.rs index 47454892cd039..3dbf95bff8e10 100644 --- a/bridges/relays/client-substrate/src/guard.rs +++ b/bridges/relays/client-substrate/src/guard.rs @@ -98,7 +98,7 @@ fn conditions_check_delay() -> Duration { } #[async_trait] -impl Environment for Client { +impl> Environment for Clnt { type Error = Error; async fn runtime_version(&mut self) -> Result { diff --git a/bridges/relays/client-substrate/src/lib.rs b/bridges/relays/client-substrate/src/lib.rs index d5b8d4dcced2d..65647889c6f8b 100644 --- a/bridges/relays/client-substrate/src/lib.rs +++ b/bridges/relays/client-substrate/src/lib.rs @@ -21,7 +21,6 @@ mod chain; mod client; mod error; -mod rpc; mod sync_header; mod transaction_tracker; @@ -37,14 +36,15 @@ pub use crate::{ AccountKeyPairOf, BlockWithJustification, CallOf, Chain, ChainWithBalances, ChainWithGrandpa, ChainWithMessages, ChainWithRuntimeVersion, ChainWithTransactions, ChainWithUtilityPallet, FullRuntimeUtilityPallet, MockedRuntimeUtilityPallet, Parachain, - RelayChain, SignParam, TransactionStatusOf, UnsignedTransaction, UtilityPallet, + RelayChain, SignParam, SignedBlockOf, TransactionStatusOf, UnsignedTransaction, + UtilityPallet, }, client::{ - is_ancient_block, ChainRuntimeVersion, Client, OpaqueGrandpaAuthoritiesSet, - SimpleRuntimeVersion, Subscription, ANCIENT_BLOCK_THRESHOLD, + is_ancient_block, rpc_with_caching as new, ChainRuntimeVersion, Client, + OpaqueGrandpaAuthoritiesSet, RpcWithCachingClient, SimpleRuntimeVersion, Subscription, + ANCIENT_BLOCK_THRESHOLD, }, error::{Error, Result}, - rpc::{SubstrateBeefyFinalityClient, SubstrateFinalityClient, SubstrateGrandpaFinalityClient}, sync_header::SyncHeader, transaction_tracker::TransactionTracker, }; diff --git a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs index 7bb92693b38d2..27c9d8cd7a8b6 100644 --- a/bridges/relays/client-substrate/src/metrics/float_storage_value.rs +++ b/bridges/relays/client-substrate/src/metrics/float_storage_value.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Bridges Common. If not, see . -use crate::{chain::Chain, client::Client, Error as SubstrateError}; +use crate::{Chain, Client, Error as SubstrateError}; use async_std::sync::{Arc, RwLock}; use async_trait::async_trait; @@ -66,20 +66,20 @@ impl FloatStorageValue for FixedU128OrOne { /// Metric that represents fixed-point runtime storage value as float gauge. #[derive(Clone, Debug)] -pub struct FloatStorageValueMetric { +pub struct FloatStorageValueMetric { value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, metric: Gauge, shared_value_ref: F64SharedRef, - _phantom: PhantomData, + _phantom: PhantomData<(C, V)>, } -impl FloatStorageValueMetric { +impl FloatStorageValueMetric { /// Create new metric. 
pub fn new( value_converter: V, - client: Client, + client: Clnt, storage_key: StorageKey, name: String, help: String, @@ -101,32 +101,39 @@ impl FloatStorageValueMetric { } } -impl Metric for FloatStorageValueMetric { +impl, V: FloatStorageValue> Metric + for FloatStorageValueMetric +{ fn register(&self, registry: &Registry) -> Result<(), PrometheusError> { register(self.metric.clone(), registry).map(drop) } } #[async_trait] -impl StandaloneMetric for FloatStorageValueMetric { +impl, V: FloatStorageValue> StandaloneMetric + for FloatStorageValueMetric +{ fn update_interval(&self) -> Duration { C::AVERAGE_BLOCK_INTERVAL * UPDATE_INTERVAL_IN_BLOCKS } async fn update(&self) { - let value = self - .client - .raw_storage_value(self.storage_key.clone(), None) - .await - .and_then(|maybe_storage_value| { - self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { - maybe_fixed_point_value.map(|fixed_point_value| { - fixed_point_value.into_inner().unique_saturated_into() as f64 / - V::Value::DIV.unique_saturated_into() as f64 - }) + let value = async move { + let best_header_hash = self.client.best_header_hash().await?; + let maybe_storage_value = self + .client + .raw_storage_value(best_header_hash, self.storage_key.clone()) + .await?; + self.value_converter.decode(maybe_storage_value).map(|maybe_fixed_point_value| { + maybe_fixed_point_value.map(|fixed_point_value| { + fixed_point_value.into_inner().unique_saturated_into() as f64 / + V::Value::DIV.unique_saturated_into() as f64 }) }) - .map_err(|e| e.to_string()); + } + .await + .map_err(|e| e.to_string()); + relay_utils::metrics::set_gauge_value(&self.metric, value.clone()); *self.shared_value_ref.write().await = value.ok().and_then(|x| x); } diff --git a/bridges/relays/client-substrate/src/transaction_tracker.rs b/bridges/relays/client-substrate/src/transaction_tracker.rs index b181a945c2c15..14a97b7a049d7 100644 --- a/bridges/relays/client-substrate/src/transaction_tracker.rs +++ b/bridges/relays/client-substrate/src/transaction_tracker.rs @@ -16,7 +16,7 @@ //! Helper for tracking transaction invalidation events. -use crate::{Chain, Client, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; +use crate::{Chain, Error, HashOf, HeaderIdOf, Subscription, TransactionStatusOf}; use async_trait::async_trait; use futures::{future::Either, Future, FutureExt, Stream, StreamExt}; @@ -31,8 +31,10 @@ pub trait Environment: Send + Sync { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error>; } +// TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove `Environment` trait +// after test client is implemented #[async_trait] -impl Environment for Client { +impl> Environment for T { async fn header_id_by_hash(&self, hash: HashOf) -> Result, Error> { self.header_by_hash(hash).await.map(|h| HeaderId(*h.number(), hash)) } @@ -76,6 +78,21 @@ impl> TransactionTracker { Self { environment, stall_timeout, transaction_hash, subscription } } + // TODO (https://github.com/paritytech/parity-bridges-common/issues/2133): remove me after + // test client is implemented + /// Converts self into tracker with different environment. + pub fn switch_environment>( + self, + environment: NewE, + ) -> TransactionTracker { + TransactionTracker { + environment, + stall_timeout: self.stall_timeout, + transaction_hash: self.transaction_hash, + subscription: self.subscription, + } + } + /// Wait for final transaction status and return it along with last known internal invalidation /// status. 
async fn do_wait( @@ -306,22 +323,26 @@ mod tests { TrackedTransactionStatus>, InvalidationStatus>, )> { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (mut sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new("test".into(), "test".into(), Box::new(receiver)) + .await + .unwrap(), ); - let wait_for_stall_timeout = futures::future::pending(); + // we can't do `.now_or_never()` on `do_wait()` call, because `Subscription` has its own + // background thread, which may cause additional async task switches => let's leave some + // relatively small timeout here + let wait_for_stall_timeout = async_std::task::sleep(std::time::Duration::from_millis(100)); let wait_for_stall_timeout_rest = futures::future::ready(()); - sender.send(Some(status)).await.unwrap(); - tx_tracker - .do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest) - .now_or_never() - .map(|(ts, is)| (ts, is.unwrap())) + sender.send(Ok(status)).await.unwrap(); + + let (ts, is) = + tx_tracker.do_wait(wait_for_stall_timeout, wait_for_stall_timeout_rest).await; + is.map(|is| (ts, is)) } #[async_std::test] @@ -429,13 +450,14 @@ mod tests { #[async_std::test] async fn lost_on_timeout_when_waiting_for_invalidation_status() { - let (cancel_sender, _cancel_receiver) = futures::channel::oneshot::channel(); let (_sender, receiver) = futures::channel::mpsc::channel(1); let tx_tracker = TransactionTracker::::new( TestEnvironment(Ok(HeaderId(0, Default::default()))), Duration::from_secs(0), Default::default(), - Subscription(async_std::sync::Mutex::new(receiver), cancel_sender), + Subscription::new("test".into(), "test".into(), Box::new(receiver)) + .await + .unwrap(), ); let wait_for_stall_timeout = futures::future::ready(()).shared(); diff --git a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs index 6246bdbf01515..d985d35c9e802 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/chain_schema.rs @@ -123,11 +123,11 @@ macro_rules! declare_chain_connection_params_cli_schema { #[allow(dead_code)] pub async fn into_client( self, - ) -> anyhow::Result> { + ) -> anyhow::Result<$crate::cli::DefaultClient> { let chain_runtime_version = self .[<$chain_prefix _runtime_version>] .into_runtime_version(Chain::RUNTIME_VERSION)?; - Ok(relay_substrate_client::Client::new(relay_substrate_client::ConnectionParams { + Ok(relay_substrate_client::new(relay_substrate_client::ConnectionParams { uri: self.[<$chain_prefix _uri>], host: self.[<$chain_prefix _host>], port: self.[<$chain_prefix _port>], diff --git a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs index b98e41b2a43e4..3921685d9e8ad 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/detect_equivocations.rs @@ -23,7 +23,7 @@ use crate::{ }; use async_trait::async_trait; -use relay_substrate_client::ChainWithTransactions; +use relay_substrate_client::{ChainWithTransactions, Client}; use structopt::StructOpt; /// Start equivocation detection loop. 
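The next hunk introduces `DefaultClient`, an alias for `RpcWithCachingClient`, which per `client/mod.rs` is a `CachingClient` layered over the `RpcClient`. The caching module itself is not part of this excerpt, so the following is only a toy sketch of the decorator shape with invented, synchronous types, assuming the cache holds data that is immutable once keyed by block hash and therefore never needs invalidation:

```rust
use std::{cell::RefCell, collections::HashMap};

// Invented, minimal stand-ins for the real async `Client` trait and chain types.
trait Client {
    fn header_by_hash(&self, hash: u64) -> String;
}

struct RpcClient;

impl Client for RpcClient {
    fn header_by_hash(&self, hash: u64) -> String {
        // pretend this is a websocket round-trip to the node
        format!("header-{hash}")
    }
}

/// Caching decorator: same interface, delegates to the inner client and
/// memoizes answers that are keyed by block hash and never change.
struct CachingClient<B: Client> {
    backend: B,
    headers: RefCell<HashMap<u64, String>>,
}

impl<B: Client> Client for CachingClient<B> {
    fn header_by_hash(&self, hash: u64) -> String {
        if let Some(header) = self.headers.borrow().get(&hash) {
            return header.clone();
        }
        let header = self.backend.header_by_hash(hash);
        self.headers.borrow_mut().insert(hash, header.clone());
        header
    }
}
```

The real client is cloned and shared across async tasks, so its cache would need a thread-safe structure rather than the `RefCell` used in this sketch.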
diff --git a/bridges/relays/lib-substrate-relay/src/cli/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/mod.rs index 270608bf6ed8e..ddb3e416dc326 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/mod.rs @@ -35,6 +35,11 @@ pub mod relay_parachains; /// The target that will be used when publishing logs related to this pallet. pub const LOG_TARGET: &str = "bridge"; +/// Default Substrate client type that we are using. We'll use it all over the glue CLI code +/// to avoid multiple level generic arguments and constraints. We still allow usage of other +/// clients in the **core logic code**. +pub type DefaultClient = relay_substrate_client::RpcWithCachingClient; + /// Lane id. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HexLaneId(pub [u8; 4]); diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs index 093f98ef21ed2..ea92a0c9acce1 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers.rs @@ -29,6 +29,7 @@ use crate::{ finality::SubstrateFinalitySyncPipeline, HeadersToRelay, }; +use relay_substrate_client::Client; /// Chain headers relaying params. #[derive(StructOpt)] diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs index a796df6721b8c..05a061c2ea606 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/mod.rs @@ -37,7 +37,7 @@ use structopt::StructOpt; use futures::{FutureExt, TryFutureExt}; use crate::{ - cli::{bridge::MessagesCliBridge, HexLaneId, PrometheusParams}, + cli::{bridge::MessagesCliBridge, DefaultClient, HexLaneId, PrometheusParams}, messages_lane::{MessagesRelayLimits, MessagesRelayParams}, on_demand::OnDemandRelay, HeadersToRelay, TaggedAccount, TransactionParams, @@ -46,7 +46,7 @@ use bp_messages::LaneId; use bp_runtime::BalanceOf; use relay_substrate_client::{ AccountIdOf, AccountKeyPairOf, Chain, ChainWithBalances, ChainWithMessages, - ChainWithRuntimeVersion, ChainWithTransactions, Client, + ChainWithRuntimeVersion, ChainWithTransactions, }; use relay_utils::metrics::MetricsParams; use sp_core::Pair; @@ -118,7 +118,7 @@ impl< /// Parameters that are associated with one side of the bridge. pub struct BridgeEndCommonParams { /// Chain client. - pub client: Client, + pub client: DefaultClient, /// Params used for sending transactions to the chain. pub tx_params: TransactionParams>, /// Accounts, which balances are exposed as metrics by the relay process. 
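`DefaultClient` exists, per its doc comment, to keep the glue CLI code free of multi-level generic arguments while the core logic stays generic over any client implementation. A toy illustration of that split, with an invented trait and types:

```rust
// Invented trait and types, only to show the split between generic core logic
// and glue code that pins one concrete client alias.
trait Client {
    fn name(&self) -> &'static str;
}

struct RpcWithCaching;

impl Client for RpcWithCaching {
    fn name(&self) -> &'static str {
        "rpc-with-caching"
    }
}

/// Glue-level alias, mirroring `DefaultClient` in `cli/mod.rs`.
type DefaultClient = RpcWithCaching;

/// Core logic stays generic: it can run against any client, including mocks.
fn log_connected<C: Client>(client: &C) {
    println!("connected via {}", client.name());
}

fn main() {
    // CLI glue only ever names `DefaultClient`, keeping signatures short.
    let client: DefaultClient = RpcWithCaching;
    log_connected(&client);
}
```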
@@ -165,7 +165,7 @@ where target_to_source_headers_relay: Arc>, lane_id: LaneId, maybe_limits: Option, - ) -> MessagesRelayParams { + ) -> MessagesRelayParams, DefaultClient> { MessagesRelayParams { source_client: self.source.client.clone(), source_transaction_params: self.source.tx_params.clone(), @@ -317,28 +317,30 @@ where // Need 2x capacity since we consider both directions for each lane let mut message_relays = Vec::with_capacity(lanes.len() * 2); for lane in lanes { - let left_to_right_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.left_to_right().messages_relay_params( - left_to_right_on_demand_headers.clone(), - right_to_left_on_demand_headers.clone(), - lane, - Self::L2R::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let left_to_right_messages = + crate::messages_lane::run::<::MessagesLane, _, _>( + self.left_to_right().messages_relay_params( + left_to_right_on_demand_headers.clone(), + right_to_left_on_demand_headers.clone(), + lane, + Self::L2R::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(left_to_right_messages); - let right_to_left_messages = crate::messages_lane::run::< - ::MessagesLane, - >(self.right_to_left().messages_relay_params( - right_to_left_on_demand_headers.clone(), - left_to_right_on_demand_headers.clone(), - lane, - Self::R2L::maybe_messages_limits(), - )) - .map_err(|e| anyhow::format_err!("{}", e)) - .boxed(); + let right_to_left_messages = + crate::messages_lane::run::<::MessagesLane, _, _>( + self.right_to_left().messages_relay_params( + right_to_left_on_demand_headers.clone(), + left_to_right_on_demand_headers.clone(), + lane, + Self::R2L::maybe_messages_limits(), + ), + ) + .map_err(|e| anyhow::format_err!("{}", e)) + .boxed(); message_relays.push(right_to_left_messages); } diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs index 7f6f407778236..8104be7af807a 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/parachain_to_parachain.rs @@ -23,6 +23,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, MessagesCliBridge, ParachainToRelayHeadersCliBridge}, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -52,9 +53,9 @@ pub struct ParachainToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the left relay chain. - pub left_relay: Client<::SourceRelay>, + pub left_relay: DefaultClient<::SourceRelay>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to parachain-to-parachain relayer. 
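The call sites above change from `run::<Pipeline>(params)` to `run::<Pipeline, _, _>(params)` because `messages_lane::run` (and the on-demand relay constructors) now take two extra client type parameters; those are inferable from the params value, so only the pipeline still has to be spelled out. A self-contained illustration of that inference pattern, with invented names:

```rust
// Invented stand-ins: a pipeline marker plus params generic over two client types.
struct Params<SC, TC> {
    source: SC,
    target: TC,
}

trait Pipeline {
    const NAME: &'static str;
}

struct MyLane;

impl Pipeline for MyLane {
    const NAME: &'static str = "my-lane";
}

fn run<P: Pipeline, SC, TC>(_params: Params<SC, TC>) -> &'static str {
    P::NAME
}

fn main() {
    let params = Params { source: "left-client", target: "right-client" };
    // `P` cannot be inferred and must be named; `SC`/`TC` come from `params`,
    // hence the `_`, `_` in the turbofish.
    assert_eq!(run::<MyLane, _, _>(params), "my-lane");
}
```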
@@ -175,25 +176,33 @@ where ) .await?; - let left_relay_to_right_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.left_relay.clone(), - self.common.right.client.clone(), - self.common.right.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let left_relay_to_right_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.left_relay.clone(), + self.common.right.client.clone(), + self.common.right.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let left_to_right_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.left_relay.clone(), self.common.right.client.clone(), @@ -202,6 +211,8 @@ where ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs index 5911fe49df4ad..6c078973fedc0 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_parachain.rs @@ -26,6 +26,7 @@ use crate::{ RelayToRelayHeadersCliBridge, }, relay_headers_and_messages::{Full2WayBridgeBase, Full2WayBridgeCommonParams}, + DefaultClient, }, finality::SubstrateFinalitySyncPipeline, on_demand::{ @@ -54,7 +55,7 @@ pub struct RelayToParachainBridge< pub common: Full2WayBridgeCommonParams<::Target, ::Target>, /// Client of the right relay chain. - pub right_relay: Client<::SourceRelay>, + pub right_relay: DefaultClient<::SourceRelay>, } /// Create set of configuration objects specific to relay-to-parachain relayer. 
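The on-demand relay constructions above follow the same shape: the relay structs now carry the source and target client types as extra generic parameters, so only the pipeline is written in the turbofish (`OnDemandHeadersRelay::<…, _, _>::new(…)`) and the client types are inferred from the constructor arguments. A minimal stand-in sketch of that pattern; `Pipeline`, `DemoPipeline` and the struct body are hypothetical, not the real `lib-substrate-relay` definitions.

use std::marker::PhantomData;

trait Pipeline {
	const TASK: &'static str;
}

struct DemoPipeline;
impl Pipeline for DemoPipeline {
	const TASK: &'static str = "demo-headers-relay";
}

#[allow(dead_code)]
struct OnDemandHeadersRelay<P, SourceClnt, TargetClnt> {
	source_client: SourceClnt,
	target_client: TargetClnt,
	_pipeline: PhantomData<P>,
}

impl<P: Pipeline, SourceClnt, TargetClnt> OnDemandHeadersRelay<P, SourceClnt, TargetClnt> {
	fn new(source_client: SourceClnt, target_client: TargetClnt) -> Self {
		println!("starting {}", P::TASK);
		OnDemandHeadersRelay { source_client, target_client, _pipeline: PhantomData }
	}
}

fn main() {
	// Only the pipeline is named; both client types are inferred from the arguments.
	let _relay = OnDemandHeadersRelay::<DemoPipeline, _, _>::new("left-rpc", "right-rpc");
}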
@@ -167,23 +168,28 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), self.common.shared.headers_to_relay(), None, ); - let right_relay_to_left_on_demand_headers = - OnDemandHeadersRelay::<::RelayFinality>::new( - self.right_relay.clone(), - self.common.left.client.clone(), - self.common.left.tx_params.clone(), - self.common.shared.headers_to_relay(), - Some(self.common.metrics_params.clone()), - ); + let right_relay_to_left_on_demand_headers = OnDemandHeadersRelay::< + ::RelayFinality, + _, + _, + >::new( + self.right_relay.clone(), + self.common.left.client.clone(), + self.common.left.tx_params.clone(), + self.common.shared.headers_to_relay(), + Some(self.common.metrics_params.clone()), + ); let right_to_left_on_demand_parachains = OnDemandParachainsRelay::< ::ParachainFinality, + _, + _, >::new( self.right_relay.clone(), self.common.left.client.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs index 832df4ae4003c..3f8c8bb40c99c 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_headers_and_messages/relay_to_relay.rs @@ -32,7 +32,7 @@ use crate::{ on_demand::{headers::OnDemandHeadersRelay, OnDemandRelay}, }; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, ChainWithRuntimeVersion, ChainWithTransactions, Client, }; use sp_core::Pair; @@ -148,7 +148,7 @@ where .await?; let left_to_right_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.left.client.clone(), self.common.right.client.clone(), self.common.right.tx_params.clone(), @@ -156,7 +156,7 @@ where None, ); let right_to_left_on_demand_headers = - OnDemandHeadersRelay::<::Finality>::new( + OnDemandHeadersRelay::<::Finality, _, _>::new( self.common.right.client.clone(), self.common.left.client.clone(), self.common.left.tx_params.clone(), diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs index e5b07b241581e..4efd3eb23a32b 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_messages.rs @@ -29,7 +29,8 @@ use structopt::StructOpt; use bp_messages::MessageNonce; use bp_runtime::HeaderIdProvider; use relay_substrate_client::{ - AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, ChainWithTransactions, + AccountIdOf, AccountKeyPairOf, BalanceOf, Chain, ChainWithRuntimeVersion, + ChainWithTransactions, Client, }; use relay_utils::UniqueSaturatedInto; @@ -98,7 +99,7 @@ where let target_sign = data.target_sign.to_keypair::()?; let target_transactions_mortality = data.target_sign.transactions_mortality()?; - crate::messages_lane::run::(MessagesRelayParams { + crate::messages_lane::run::(MessagesRelayParams { source_client, source_transaction_params: TransactionParams { signer: source_sign, diff --git a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs index 
00f8cf79ef1fb..77cd395ff7225 100644 --- a/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/cli/relay_parachains.rs @@ -21,7 +21,7 @@ use async_trait::async_trait; use bp_polkadot_core::BlockNumber as RelayBlockNumber; use bp_runtime::HeaderIdProvider; use parachains_relay::parachains_loop::{AvailableHeader, SourceClient, TargetClient}; -use relay_substrate_client::Parachain; +use relay_substrate_client::{Client, Parachain}; use relay_utils::metrics::{GlobalMetrics, StandaloneMetric}; use std::sync::Arc; use structopt::StructOpt; @@ -30,7 +30,7 @@ use crate::{ cli::{ bridge::{CliBridgeBase, ParachainToRelayHeadersCliBridge}, chain_schema::*, - PrometheusParams, + DefaultClient, PrometheusParams, }, parachains::{source::ParachainsSource, target::ParachainsTarget, ParachainsPipelineAdapter}, TransactionParams, @@ -72,16 +72,19 @@ pub struct RelayParachainHeadParams { #[async_trait] pub trait ParachainsRelayer: ParachainToRelayHeadersCliBridge where - ParachainsSource: + ParachainsSource>: SourceClient>, - ParachainsTarget: - TargetClient>, + ParachainsTarget< + Self::ParachainFinality, + DefaultClient, + DefaultClient, + >: TargetClient>, ::Source: Parachain, { /// Start relaying parachains finality. async fn relay_parachains(data: RelayParachainsParams) -> anyhow::Result<()> { let source_chain_client = data.source.into_client::().await?; - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -91,7 +94,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, @@ -121,7 +124,7 @@ where .map_err(|e| anyhow::format_err!("{}", e))? .id(); - let source_client = ParachainsSource::::new( + let source_client = ParachainsSource::::new( source_chain_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -131,7 +134,7 @@ where mortality: data.target_sign.target_transactions_mortality, }; let target_chain_client = data.target.into_client::().await?; - let target_client = ParachainsTarget::::new( + let target_client = ParachainsTarget::::new( source_chain_client, target_chain_client, target_transaction_params, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs index f6d58cbaa4ab4..f8077923b8202 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/mod.rs @@ -69,7 +69,7 @@ pub trait SubstrateEquivocationDetectionPipeline: /// Add relay guards if required. async fn start_relay_guards( - source_client: &Client, + source_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -199,8 +199,8 @@ macro_rules! generate_report_equivocation_call_builder { /// Run Substrate-to-Substrate equivocations detection loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, metrics_params: MetricsParams, ) -> anyhow::Result<()> { @@ -212,8 +212,8 @@ pub async fn run( ); equivocation_detector::run( - SubstrateEquivocationSource::
<P>
::new(source_client, source_transaction_params), - SubstrateEquivocationTarget::
<P>
::new(target_client), + SubstrateEquivocationSource::::new(source_client, source_transaction_params), + SubstrateEquivocationTarget::::new(target_client), P::TargetChain::AVERAGE_BLOCK_INTERVAL, metrics_params, futures::future::pending(), diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs index a0c7dcf5cbc32..66d651600a1ec 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/source.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/source.rs @@ -35,29 +35,35 @@ use relay_substrate_client::{ use relay_utils::relay_loop::Client as RelayClient; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationSource { - client: Client, +pub struct SubstrateEquivocationSource { + client: SourceClnt, transaction_params: TransactionParams>, } -impl SubstrateEquivocationSource
<P>
{ +impl> + SubstrateEquivocationSource +{ /// Create new instance of `SubstrateEquivocationSource`. pub fn new( - client: Client, + client: SourceClnt, transaction_params: TransactionParams>, ) -> Self { Self { client, transaction_params } } } -impl Clone for SubstrateEquivocationSource
<P>
{ +impl> Clone + for SubstrateEquivocationSource +{ fn clone(&self) -> Self { Self { client: self.client.clone(), transaction_params: self.transaction_params.clone() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationSource
<P>
{ +impl> RelayClient + for SubstrateEquivocationSource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -66,8 +72,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - SourceClientBase> for SubstrateEquivocationSource
<P>
+impl> + SourceClientBase> + for SubstrateEquivocationSource { type FinalityProofsStream = SubstrateFinalityProofsStream
<P>
; @@ -77,10 +84,11 @@ impl } #[async_trait] -impl - SourceClient> for SubstrateEquivocationSource
<P>
+impl> + SourceClient> + for SubstrateEquivocationSource { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn report_equivocation( &self, diff --git a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs index 6eee2ab91d45b..7d054e843d0db 100644 --- a/bridges/relays/lib-substrate-relay/src/equivocation/target.rs +++ b/bridges/relays/lib-substrate-relay/src/equivocation/target.rs @@ -34,27 +34,33 @@ use sp_runtime::traits::Header; use std::marker::PhantomData; /// Substrate node as equivocation source. -pub struct SubstrateEquivocationTarget { - client: Client, +pub struct SubstrateEquivocationTarget { + client: TargetClnt, _phantom: PhantomData
<P>
, } -impl SubstrateEquivocationTarget
<P>
{ +impl> + SubstrateEquivocationTarget +{ /// Create new instance of `SubstrateEquivocationTarget`. - pub fn new(client: Client) -> Self { + pub fn new(client: TargetClnt) -> Self { Self { client, _phantom: Default::default() } } } -impl Clone for SubstrateEquivocationTarget
<P>
{ +impl> Clone + for SubstrateEquivocationTarget +{ fn clone(&self) -> Self { Self { client: self.client.clone(), _phantom: Default::default() } } } #[async_trait] -impl RelayClient for SubstrateEquivocationTarget
<P>
{ +impl> RelayClient + for SubstrateEquivocationTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -63,8 +69,9 @@ impl RelayClient for SubstrateEquivoc } #[async_trait] -impl - TargetClient> for SubstrateEquivocationTarget
<P>
+impl> + TargetClient> + for SubstrateEquivocationTarget { async fn best_finalized_header_number( &self, diff --git a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs index 5dde46c39dd67..a972f743e117c 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/initialize.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/initialize.rs @@ -39,8 +39,8 @@ pub async fn initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, @@ -101,8 +101,8 @@ async fn do_initialize< TargetChain: ChainWithTransactions, F, >( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_signer: AccountKeyPairOf, prepare_initialize_transaction: F, dry_run: bool, diff --git a/bridges/relays/lib-substrate-relay/src/finality/mod.rs b/bridges/relays/lib-substrate-relay/src/finality/mod.rs index 0293e1da224a6..a2379eb4812e2 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/mod.rs @@ -77,7 +77,7 @@ pub trait SubstrateFinalitySyncPipeline: BaseSubstrateFinalitySyncPipeline { /// Add relay guards if required. async fn start_relay_guards( - target_client: &Client, + target_client: &impl Client, enable_version_guard: bool, ) -> relay_substrate_client::Result<()> { if enable_version_guard { @@ -240,8 +240,8 @@ macro_rules! generate_submit_finality_proof_ex_call_builder { /// Run Substrate-to-Substrate finality sync loop. pub async fn run( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, headers_to_relay: HeadersToRelay, transaction_params: TransactionParams>, metrics_params: MetricsParams, @@ -255,8 +255,8 @@ pub async fn run( ); finality_relay::run( - SubstrateFinalitySource::
<P>
::new(source_client, None), - SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params.clone()), + SubstrateFinalitySource::::new(source_client, None), + SubstrateFinalityTarget::::new(target_client, transaction_params.clone()), finality_relay::FinalitySyncParams { tick: std::cmp::max( P::SourceChain::AVERAGE_BLOCK_INTERVAL, @@ -279,12 +279,12 @@ pub async fn run( /// Relay single header. No checks are made to ensure that transaction will succeed. pub async fn relay_single_header( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, transaction_params: TransactionParams>, header_number: BlockNumberOf, ) -> anyhow::Result<()> { - let finality_source = SubstrateFinalitySource::
<P>
::new(source_client, None); + let finality_source = SubstrateFinalitySource::::new(source_client, None); let (header, proof) = finality_source.header_and_finality_proof(header_number).await?; let Some(proof) = proof else { return Err(anyhow::format_err!( @@ -295,7 +295,7 @@ pub async fn relay_single_header( )); }; - let finality_target = SubstrateFinalityTarget::
<P>
::new(target_client, transaction_params); + let finality_target = SubstrateFinalityTarget::::new(target_client, transaction_params); let tx_tracker = finality_target.submit_finality_proof(header, proof, false).await?; match tx_tracker.wait().await { TrackedTransactionStatus::Finalized(_) => Ok(()), diff --git a/bridges/relays/lib-substrate-relay/src/finality/source.rs b/bridges/relays/lib-substrate-relay/src/finality/source.rs index c94af6108957a..f6fa5c24add50 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/source.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/source.rs @@ -40,22 +40,24 @@ use relay_utils::{relay_loop::Client as RelayClient, UniqueSaturatedInto}; pub type RequiredHeaderNumberRef = Arc::BlockNumber>>; /// Substrate node as finality source. -pub struct SubstrateFinalitySource { - client: Client, +pub struct SubstrateFinalitySource { + client: SourceClnt, maximal_header_number: Option>, } -impl SubstrateFinalitySource
<P>
{ +impl> + SubstrateFinalitySource +{ /// Create new headers source using given client. pub fn new( - client: Client, + client: SourceClnt, maximal_header_number: Option>, ) -> Self { SubstrateFinalitySource { client, maximal_header_number } } /// Returns reference to the underlying RPC client. - pub fn client(&self) -> &Client { + pub fn client(&self) -> &SourceClnt { &self.client } @@ -174,7 +176,9 @@ impl SubstrateFinalitySource
<P>
{ } } -impl Clone for SubstrateFinalitySource
<P>
{ +impl Clone + for SubstrateFinalitySource +{ fn clone(&self) -> Self { SubstrateFinalitySource { client: self.client.clone(), @@ -184,7 +188,9 @@ impl Clone for SubstrateFinalitySource
<P>
{ } #[async_trait] -impl RelayClient for SubstrateFinalitySource
<P>
{ +impl> RelayClient + for SubstrateFinalitySource +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -193,8 +199,8 @@ impl RelayClient for SubstrateFinalitySource

SourceClientBase> - for SubstrateFinalitySource
<P>
+impl> + SourceClientBase> for SubstrateFinalitySource { type FinalityProofsStream = SubstrateFinalityProofsStream
<P>
; @@ -204,8 +210,8 @@ impl SourceClientBase SourceClient> - for SubstrateFinalitySource
<P>
+impl> + SourceClient> for SubstrateFinalitySource { async fn best_finalized_block_number(&self) -> Result, Error> { let mut finalized_header_number = self.on_chain_best_finalized_block_number().await?; @@ -235,7 +241,7 @@ impl SourceClient( - client: &Client, + client: &impl Client, number: BlockNumberOf, ) -> Result< ( @@ -244,8 +250,8 @@ async fn header_and_finality_proof( ), Error, > { - let header_hash = client.block_hash_by_number(number).await?; - let signed_block = client.get_block(Some(header_hash)).await?; + let header_hash = client.header_hash_by_number(number).await?; + let signed_block = client.block_by_hash(header_hash).await?; let justification = signed_block .justification(P::FinalityEngine::ID) diff --git a/bridges/relays/lib-substrate-relay/src/finality/target.rs b/bridges/relays/lib-substrate-relay/src/finality/target.rs index 0874fa53549c5..cb09bf99b2983 100644 --- a/bridges/relays/lib-substrate-relay/src/finality/target.rs +++ b/bridges/relays/lib-substrate-relay/src/finality/target.rs @@ -28,22 +28,25 @@ use async_trait::async_trait; use bp_runtime::BlockNumberOf; use finality_relay::TargetClient; use relay_substrate_client::{ - AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, TransactionEra, - TransactionTracker, UnsignedTransaction, + AccountIdOf, AccountKeyPairOf, Chain, Client, Error, HeaderIdOf, HeaderOf, SyncHeader, + TransactionEra, TransactionTracker, UnsignedTransaction, }; use relay_utils::relay_loop::Client as RelayClient; +use sp_core::Pair; use sp_runtime::traits::Header; /// Substrate client as Substrate finality target. -pub struct SubstrateFinalityTarget { - client: Client, +pub struct SubstrateFinalityTarget { + client: TargetClnt, transaction_params: TransactionParams>, } -impl SubstrateFinalityTarget
<P>
{ +impl> + SubstrateFinalityTarget +{ /// Create new Substrate headers target. pub fn new( - client: Client, + client: TargetClnt, transaction_params: TransactionParams>, ) -> Self { SubstrateFinalityTarget { client, transaction_params } @@ -65,7 +68,9 @@ impl SubstrateFinalityTarget
<P>
{ } } -impl Clone for SubstrateFinalityTarget
<P>
{ +impl Clone + for SubstrateFinalityTarget +{ fn clone(&self) -> Self { SubstrateFinalityTarget { client: self.client.clone(), @@ -75,7 +80,9 @@ impl Clone for SubstrateFinalityTarget
<P>
{ } #[async_trait] -impl RelayClient for SubstrateFinalityTarget
<P>
{ +impl> RelayClient + for SubstrateFinalityTarget +{ type Error = Error; async fn reconnect(&mut self) -> Result<(), Error> { @@ -84,10 +91,12 @@ impl RelayClient for SubstrateFinalityTarget

TargetClient> - for SubstrateFinalityTarget
<P>
+impl> + TargetClient> for SubstrateFinalityTarget +where + AccountIdOf: From< as Pair>::Public>, { - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn best_finalized_source_block_id(&self) -> Result, Error> { // we can't continue to relay finality if target node is out of sync, because @@ -109,10 +118,10 @@ impl TargetClient Result>, Self::Error> { Ok(self .client - .typed_state_call( + .state_call( + self.client.best_header().await?.hash(), P::SourceChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), - Some(self.client.best_header().await?.hash()), ) .await .unwrap_or_else(|e| { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs index e517b0fd9b9ab..32f2fd0080fbb 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/engine.rs @@ -32,7 +32,7 @@ use codec::{Decode, Encode}; use num_traits::{One, Zero}; use relay_substrate_client::{ BlockNumberOf, Chain, ChainWithGrandpa, Client, Error as SubstrateError, HashOf, HeaderOf, - Subscription, SubstrateFinalityClient, SubstrateGrandpaFinalityClient, + Subscription, }; use sp_consensus_grandpa::{AuthorityList as GrandpaAuthoritiesSet, GRANDPA_ENGINE_ID}; use sp_core::{storage::StorageKey, Bytes}; @@ -59,8 +59,6 @@ pub trait Engine: Send { const ID: ConsensusEngineId; /// A reader that can extract the consensus log from the header digest and interpret it. type ConsensusLogReader: ConsensusLogReader; - /// Type of Finality RPC client used by this engine. - type FinalityClient: SubstrateFinalityClient; /// Type of finality proofs, used by consensus engine. type FinalityProof: FinalityProof, BlockNumberOf> + Decode + Encode; /// The context needed for verifying finality proofs. @@ -88,10 +86,10 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain has already been initialized. async fn is_initialized( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .raw_storage_value(Self::is_initialized_key(), None) + .raw_storage_value(target_client.best_header_hash().await?, Self::is_initialized_key()) .await? .is_some()) } @@ -102,10 +100,13 @@ pub trait Engine: Send { /// Returns `Ok(true)` if finality pallet at the bridged chain is halted. async fn is_halted( - target_client: &Client, + target_client: &impl Client, ) -> Result { Ok(target_client - .storage_value::(Self::pallet_operating_mode_key(), None) + .storage_value::( + target_client.best_header_hash().await?, + Self::pallet_operating_mode_key(), + ) .await? .map(|operating_mode| operating_mode.is_halted()) .unwrap_or(false)) @@ -113,17 +114,15 @@ pub trait Engine: Send { /// A method to subscribe to encoded finality proofs, given source client. async fn source_finality_proofs( - source_client: &Client, - ) -> Result, SubstrateError> { - source_client.subscribe_finality_justifications::().await - } + source_client: &impl Client, + ) -> Result, SubstrateError>; /// Verify and optimize finality proof before sending it to the target node. /// /// Apart from optimization, we expect this method to perform all required checks /// that the `header` and `proof` are valid at the current state of the target chain. 
async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result; @@ -138,19 +137,19 @@ pub trait Engine: Send { /// Prepare initialization data for the finality bridge pallet. async fn prepare_initialization_data( - client: Client, + client: impl Client, ) -> Result, BlockNumberOf>>; /// Get the context needed for validating a finality proof. async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result; /// Returns the finality info associated to the source headers synced with the target /// at the provided block. async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result< Vec>, @@ -159,7 +158,7 @@ pub trait Engine: Send { /// Generate key ownership proof for the provided equivocation. async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result; @@ -171,7 +170,7 @@ pub struct Grandpa(PhantomData); impl Grandpa { /// Read header by hash from the source client. async fn source_header( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { source_client @@ -182,15 +181,15 @@ impl Grandpa { /// Read GRANDPA authorities set at given header. async fn source_authorities_set( - source_client: &Client, + source_client: &impl Client, header_hash: C::Hash, ) -> Result, BlockNumberOf>> { - let raw_authorities_set = source_client - .grandpa_authorities_set(header_hash) + const SUB_API_GRANDPA_AUTHORITIES: &str = "GrandpaApi_grandpa_authorities"; + + source_client + .state_call(header_hash, SUB_API_GRANDPA_AUTHORITIES.to_string(), ()) .await - .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err))?; - GrandpaAuthoritiesSet::decode(&mut &raw_authorities_set[..]) - .map_err(|err| Error::DecodeAuthorities(C::NAME, header_hash, err)) + .map_err(|err| Error::RetrieveAuthorities(C::NAME, header_hash, err)) } } @@ -198,7 +197,6 @@ impl Grandpa { impl Engine for Grandpa { const ID: ConsensusEngineId = GRANDPA_ENGINE_ID; type ConsensusLogReader = GrandpaConsensusLogReader<::Number>; - type FinalityClient = SubstrateGrandpaFinalityClient; type FinalityProof = GrandpaJustification>; type FinalityVerificationContext = JustificationVerificationContext; type EquivocationProof = sp_consensus_grandpa::EquivocationProof, BlockNumberOf>; @@ -215,8 +213,14 @@ impl Engine for Grandpa { bp_header_chain::storage_keys::pallet_operating_mode_key(C::WITH_CHAIN_GRANDPA_PALLET_NAME) } + async fn source_finality_proofs( + client: &impl Client, + ) -> Result, SubstrateError> { + client.subscribe_grandpa_finality_justifications().await + } + async fn verify_and_optimize_proof( - target_client: &Client, + target_client: &impl Client, header: &C::Header, proof: &mut Self::FinalityProof, ) -> Result { @@ -265,7 +269,7 @@ impl Engine for Grandpa { /// Prepare initialization data for the GRANDPA verifier pallet. 
async fn prepare_initialization_data( - source_client: Client, + source_client: impl Client, ) -> Result, BlockNumberOf>> { // In ideal world we just need to get best finalized header and then to read GRANDPA // authorities set (`pallet_grandpa::CurrentSetId` + `GrandpaApi::grandpa_authorities()`) at @@ -385,14 +389,14 @@ impl Engine for Grandpa { } async fn finality_verification_context( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result { let current_authority_set_key = bp_header_chain::storage_keys::current_authority_set_key( C::WITH_CHAIN_GRANDPA_PALLET_NAME, ); let authority_set: AuthoritySet = target_client - .storage_value(current_authority_set_key, Some(at)) + .storage_value(at, current_authority_set_key) .await? .map(Ok) .unwrap_or(Err(SubstrateError::Custom(format!( @@ -411,11 +415,11 @@ impl Engine for Grandpa { } async fn synced_headers_finality_info( - target_client: &Client, + target_client: &impl Client, at: TargetChain::Hash, ) -> Result>>, SubstrateError> { let stored_headers_grandpa_info: Vec>> = target_client - .typed_state_call(C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), (), Some(at)) + .state_call(at, C::SYNCED_HEADERS_GRANDPA_INFO_METHOD.to_string(), ()) .await?; let mut headers_grandpa_info = vec![]; @@ -433,7 +437,7 @@ impl Engine for Grandpa { } async fn generate_source_key_ownership_proof( - source_client: &Client, + source_client: &impl Client, at: C::Hash, equivocation: &Self::EquivocationProof, ) -> Result { diff --git a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs index 825960b1b3ef2..2e0a7089a931d 100644 --- a/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs +++ b/bridges/relays/lib-substrate-relay/src/finality_base/mod.rs @@ -50,7 +50,7 @@ pub type SubstrateFinalityProofsStream
<P>
= /// Subscribe to new finality proofs. pub async fn finality_proofs( - client: &Client, + client: &impl Client, ) -> Result, Error> { Ok(unfold( P::FinalityEngine::source_finality_proofs(client).await?, @@ -93,7 +93,7 @@ pub async fn finality_proofs( /// /// The runtime API method should be `FinalityApi::best_finalized()`. pub async fn best_synced_header_id( - target_client: &Client, + target_client: &impl Client, at: HashOf, ) -> Result>, Error> where @@ -102,6 +102,6 @@ where { // now let's read id of best finalized peer header at our best finalized block target_client - .typed_state_call(SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), Some(at)) + .state_call(at, SourceChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), ()) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_lane.rs b/bridges/relays/lib-substrate-relay/src/messages_lane.rs index a34b165289b2d..963a0afebfec1 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_lane.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_lane.rs @@ -88,13 +88,13 @@ impl MessageLane for MessageLaneAdapter
<P>
{ } /// Substrate <-> Substrate messages relay parameters. -pub struct MessagesRelayParams { +pub struct MessagesRelayParams { /// Messages source client. - pub source_client: Client, + pub source_client: SourceClnt, /// Source transaction params. pub source_transaction_params: TransactionParams>, /// Messages target client. - pub target_client: Client, + pub target_client: TargetClnt, /// Target transaction params. pub target_transaction_params: TransactionParams>, /// Optional on-demand source to target headers relay. @@ -179,8 +179,13 @@ impl>> } /// Run Substrate-to-Substrate messages sync loop. -pub async fn run(params: MessagesRelayParams
<P>
) -> anyhow::Result<()> +pub async fn run( + params: MessagesRelayParams, +) -> anyhow::Result<()> where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, @@ -190,7 +195,7 @@ where let limits = match params.limits { Some(limits) => limits, None => - select_delivery_transaction_limits_rpc::
<P>
( + select_delivery_transaction_limits_rpc( ¶ms, P::TargetChain::max_extrinsic_weight(), P::SourceChain::MAX_UNREWARDED_RELAYERS_IN_CONFIRMATION_TX, @@ -250,14 +255,14 @@ where max_messages_size_in_single_batch, }, }, - SubstrateMessagesSource::
<P>
::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), params.lane_id, params.source_transaction_params, params.target_to_source_headers_relay, ), - SubstrateMessagesTarget::
<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, params.lane_id, @@ -278,8 +283,8 @@ where /// Deliver range of Substrate-to-Substrate messages. No checks are made to ensure that transaction /// will succeed. pub async fn relay_messages_range( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, source_transaction_params: TransactionParams>, target_transaction_params: TransactionParams>, at_source_block: HeaderIdOf, @@ -295,14 +300,14 @@ where let relayer_id_at_source: AccountIdOf = source_transaction_params.signer.public().into(); messages_relay::relay_messages_range( - SubstrateMessagesSource::
<P>
::new( + SubstrateMessagesSource::::new( source_client.clone(), target_client.clone(), lane_id, source_transaction_params, None, ), - SubstrateMessagesTarget::
<P>
::new( + SubstrateMessagesTarget::::new( target_client, source_client, lane_id, @@ -508,12 +513,15 @@ macro_rules! generate_receive_message_delivery_proof_call_builder { } /// Returns maximal number of messages and their maximal cumulative dispatch weight. -async fn select_delivery_transaction_limits_rpc( - params: &MessagesRelayParams
<P>
, +async fn select_delivery_transaction_limits_rpc( + params: &MessagesRelayParams, max_extrinsic_weight: Weight, max_unconfirmed_messages_at_inbound_lane: MessageNonce, ) -> anyhow::Result where + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, AccountIdOf: From< as Pair>::Public>, { // We may try to guess accurate value, based on maximal number of messages and per-message @@ -529,20 +537,21 @@ where let weight_for_messages_dispatch = max_extrinsic_weight - weight_for_delivery_tx; // weight of empty message delivery with outbound lane state - let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::
<P>
(params, 0)?; + let best_target_block_hash = params.target_client.best_header_hash().await?; + let delivery_tx_with_zero_messages = dummy_messages_delivery_transaction::(params, 0)?; let delivery_tx_with_zero_messages_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_zero_messages) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_zero_messages) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) })?; // weight of single message delivery with outbound lane state - let delivery_tx_with_one_message = dummy_messages_delivery_transaction::
<P>
(params, 1)?; + let delivery_tx_with_one_message = dummy_messages_delivery_transaction::(params, 1)?; let delivery_tx_with_one_message_weight = params .target_client - .extimate_extrinsic_weight(delivery_tx_with_one_message) + .estimate_extrinsic_weight(best_target_block_hash, delivery_tx_with_one_message) .await .map_err(|e| { anyhow::format_err!("Failed to estimate delivery extrinsic weight: {:?}", e) @@ -577,8 +586,8 @@ where } /// Returns dummy message delivery transaction with zero messages and `1kb` proof. -fn dummy_messages_delivery_transaction( - params: &MessagesRelayParams
<P>
, +fn dummy_messages_delivery_transaction( + params: &MessagesRelayParams, messages: u32, ) -> anyhow::Result<::SignedTransaction> where diff --git a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs index b30e75bd8bacb..8845f43dcb62a 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_metrics.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_metrics.rs @@ -36,7 +36,7 @@ use std::{fmt::Debug, marker::PhantomData}; /// Add relay accounts balance metrics. pub async fn add_relay_balances_metrics( - client: Client, + client: impl Client, metrics: &MetricsParams, relay_accounts: &Vec>>, lanes: &[LaneId], diff --git a/bridges/relays/lib-substrate-relay/src/messages_source.rs b/bridges/relays/lib-substrate-relay/src/messages_source.rs index 49deff046f9ca..1f597e278da40 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_source.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_source.rs @@ -63,19 +63,21 @@ pub type SubstrateMessagesProof = (Weight, FromBridgedChainMessagesProof = Vec<(MessagePayload, &'a mut OutboundMessageDetails)>; /// Substrate client as Substrate messages source. -pub struct SubstrateMessagesSource { - source_client: Client, - target_client: Client, +pub struct SubstrateMessagesSource { + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option>>, } -impl SubstrateMessagesSource
<P>
{ +impl, TargetClnt> + SubstrateMessagesSource +{ /// Create new Substrate headers source. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, lane_id: LaneId, transaction_params: TransactionParams>, target_to_source_headers_relay: Option< @@ -98,22 +100,25 @@ impl SubstrateMessagesSource
<P>
{ ) -> Result, SubstrateError> { self.source_client .storage_value( + id.hash(), outbound_lane_data_key( P::TargetChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at source chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.source_client).await + ensure_messages_pallet_active::(&self.source_client) + .await } } -impl Clone for SubstrateMessagesSource
<P>
{ +impl Clone + for SubstrateMessagesSource +{ fn clone(&self) -> Self { Self { source_client: self.source_client.clone(), @@ -126,7 +131,12 @@ impl Clone for SubstrateMessagesSource
<P>
{ } #[async_trait] -impl RelayClient for SubstrateMessagesSource
<P>
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesSource +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -150,13 +160,17 @@ impl RelayClient for SubstrateMessagesSource
<P>
{ } #[async_trait] -impl SourceClient> for SubstrateMessagesSource
<P>
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > SourceClient> for SubstrateMessagesSource where AccountIdOf: From< as Pair>::Public>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -169,7 +183,7 @@ where // we can't relay confirmations if messages pallet at source chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.source_client, Some(&self.target_client)).await + read_client_state_from_both_chains(&self.source_client, &self.target_client).await } async fn latest_generated_nonce( @@ -203,12 +217,12 @@ where id: SourceHeaderIdOf>, nonces: RangeInclusive, ) -> Result>, SubstrateError> { - let mut out_msgs_details = self + let mut out_msgs_details: Vec<_> = self .source_client - .typed_state_call::<_, Vec<_>>( + .state_call::<_, Vec<_>>( + id.hash(), P::TargetChain::TO_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, *nonces.start(), *nonces.end()), - Some(id.1), ) .await?; validate_out_msgs_details::(&out_msgs_details, nonces)?; @@ -226,7 +240,7 @@ where out_msg_details.nonce, ); let msg_payload: MessagePayload = - self.source_client.storage_value(msg_key, Some(id.1)).await?.ok_or_else(|| { + self.source_client.storage_value(id.hash(), msg_key).await?.ok_or_else(|| { SubstrateError::Custom(format!( "Message to {} {:?}/{} is missing from runtime the storage of {} at {:?}", P::TargetChain::NAME, @@ -240,15 +254,16 @@ where msgs_to_refine.push((msg_payload, out_msg_details)); } + let best_target_header_hash = self.target_client.best_header_hash().await?; for mut msgs_to_refine_batch in split_msgs_to_refine::(self.lane_id, msgs_to_refine)? { let in_msgs_details = self .target_client - .typed_state_call::<_, Vec>( + .state_call::<_, Vec>( + best_target_header_hash, P::SourceChain::FROM_CHAIN_MESSAGE_DETAILS_METHOD.into(), (self.lane_id, &msgs_to_refine_batch), - None, ) .await?; if in_msgs_details.len() != msgs_to_refine_batch.len() { @@ -326,7 +341,7 @@ where let proof = self .source_client - .prove_storage(storage_keys, id.1) + .prove_storage(id.1, storage_keys) .await? .into_iter_nodes() .collect(); @@ -387,15 +402,19 @@ where } /// Ensure that the messages pallet at source chain is active. -pub(crate) async fn ensure_messages_pallet_active( - client: &Client, +pub(crate) async fn ensure_messages_pallet_active( + client: &AtChainClient, ) -> Result<(), SubstrateError> where AtChain: ChainWithMessages, WithChain: ChainWithMessages, + AtChainClient: Client, { let operating_mode = client - .storage_value(operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), None) + .storage_value( + client.best_header_hash().await?, + operating_mode_key(WithChain::WITH_CHAIN_MESSAGES_PALLET_NAME), + ) .await?; let is_halted = operating_mode == Some(MessagesOperatingMode::Basic(BasicOperatingMode::Halted)); @@ -412,11 +431,10 @@ where /// bridge GRANDPA pallet deployed and it provides `best_finalized_header_id_method_name` /// runtime API to read the best finalized Bridged chain header. /// -/// If `peer_client` is `None`, the value of `actual_best_finalized_peer_at_best_self` will -/// always match the `best_finalized_peer_at_best_self`. +/// The value of `actual_best_finalized_peer_at_best_self` will always match +/// the `best_finalized_peer_at_best_self`. 
pub async fn read_client_state( - self_client: &Client, - peer_client: Option<&Client>, + self_client: &impl Client, ) -> Result, HeaderIdOf>, SubstrateError> where SelfChain: Chain, @@ -431,30 +449,42 @@ where let peer_on_self_best_finalized_id = best_synced_header_id::(self_client, self_best_id.hash()).await?; - // read actual header, matching the `peer_on_self_best_finalized_id` from the peer chain - let actual_peer_on_self_best_finalized_id = - match (peer_client, peer_on_self_best_finalized_id.as_ref()) { - (Some(peer_client), Some(peer_on_self_best_finalized_id)) => { - let actual_peer_on_self_best_finalized = - peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; - Some(actual_peer_on_self_best_finalized.id()) - }, - _ => peer_on_self_best_finalized_id, - }; - Ok(ClientState { best_self: self_best_id, best_finalized_self: self_best_finalized_id, best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, - actual_best_finalized_peer_at_best_self: actual_peer_on_self_best_finalized_id, + actual_best_finalized_peer_at_best_self: peer_on_self_best_finalized_id, }) } +/// Does the same stuff as `read_client_state`, but properly fills the +/// `actual_best_finalized_peer_at_best_self` field of the result. +pub async fn read_client_state_from_both_chains( + self_client: &impl Client, + peer_client: &impl Client, +) -> Result, HeaderIdOf>, SubstrateError> +where + SelfChain: Chain, + PeerChain: Chain, +{ + let mut client_state = read_client_state::(self_client).await?; + client_state.actual_best_finalized_peer_at_best_self = + match client_state.best_finalized_peer_at_best_self.as_ref() { + Some(peer_on_self_best_finalized_id) => { + let actual_peer_on_self_best_finalized = + peer_client.header_by_number(peer_on_self_best_finalized_id.number()).await?; + Some(actual_peer_on_self_best_finalized.id()) + }, + _ => client_state.best_finalized_peer_at_best_self, + }; + Ok(client_state) +} + /// Reads best `PeerChain` header known to the `SelfChain` using provided runtime API method. /// /// Method is supposed to be the `FinalityApi::best_finalized()` method. pub async fn best_finalized_peer_header_at_self( - self_client: &Client, + self_client: &impl Client, at_self_hash: HashOf, ) -> Result>, SubstrateError> where @@ -463,10 +493,10 @@ where { // now let's read id of best finalized peer header at our best finalized block self_client - .typed_state_call::<_, Option<_>>( + .state_call::<_, Option<_>>( + at_self_hash, PeerChain::BEST_FINALIZED_HEADER_ID_METHOD.into(), (), - Some(at_self_hash), ) .await } diff --git a/bridges/relays/lib-substrate-relay/src/messages_target.rs b/bridges/relays/lib-substrate-relay/src/messages_target.rs index 633b11f0b8028..a0b06783cc36b 100644 --- a/bridges/relays/lib-substrate-relay/src/messages_target.rs +++ b/bridges/relays/lib-substrate-relay/src/messages_target.rs @@ -23,7 +23,9 @@ use crate::{ BatchProofTransaction, MessageLaneAdapter, ReceiveMessagesProofCallBuilder, SubstrateMessageLane, }, - messages_source::{ensure_messages_pallet_active, read_client_state, SubstrateMessagesProof}, + messages_source::{ + ensure_messages_pallet_active, read_client_state_from_both_chains, SubstrateMessagesProof, + }, on_demand::OnDemandRelay, TransactionParams, }; @@ -52,20 +54,24 @@ pub type SubstrateMessagesDeliveryProof = (UnrewardedRelayersState, FromBridgedChainMessagesDeliveryProof>); /// Substrate client as Substrate messages target. 
-pub struct SubstrateMessagesTarget { - target_client: Client, - source_client: Client, +pub struct SubstrateMessagesTarget { + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: TransactionParams>, source_to_target_headers_relay: Option>>, } -impl SubstrateMessagesTarget
<P>
{ +impl SubstrateMessagesTarget +where + P: SubstrateMessageLane, + TargetClnt: Client, +{ /// Create new Substrate headers target. pub fn new( - target_client: Client, - source_client: Client, + target_client: TargetClnt, + source_client: SourceClnt, lane_id: LaneId, relayer_id_at_source: AccountIdOf, transaction_params: TransactionParams>, @@ -90,22 +96,25 @@ impl SubstrateMessagesTarget
<P>
{ ) -> Result>>, SubstrateError> { self.target_client .storage_value( + id.hash(), inbound_lane_data_key( P::SourceChain::WITH_CHAIN_MESSAGES_PALLET_NAME, &self.lane_id, ), - Some(id.1), ) .await } /// Ensure that the messages pallet at target chain is active. async fn ensure_pallet_active(&self) -> Result<(), SubstrateError> { - ensure_messages_pallet_active::(&self.target_client).await + ensure_messages_pallet_active::(&self.target_client) + .await } } -impl Clone for SubstrateMessagesTarget
<P>
{ +impl Clone + for SubstrateMessagesTarget +{ fn clone(&self) -> Self { Self { target_client: self.target_client.clone(), @@ -119,7 +128,12 @@ impl Clone for SubstrateMessagesTarget
<P>
{ } #[async_trait] -impl RelayClient for SubstrateMessagesTarget
<P>
{ +impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > RelayClient for SubstrateMessagesTarget +{ type Error = SubstrateError; async fn reconnect(&mut self) -> Result<(), SubstrateError> { @@ -143,14 +157,18 @@ impl RelayClient for SubstrateMessagesTarget
<P>
{ } #[async_trait] -impl TargetClient> for SubstrateMessagesTarget
<P>
+impl< + P: SubstrateMessageLane, + SourceClnt: Client, + TargetClnt: Client, + > TargetClient> for SubstrateMessagesTarget where AccountIdOf: From< as Pair>::Public>, BalanceOf: TryFrom>, { type BatchTransaction = BatchProofTransaction; - type TransactionTracker = TransactionTracker>; + type TransactionTracker = TransactionTracker; async fn state(&self) -> Result>, SubstrateError> { // we can't continue to deliver confirmations if source node is out of sync, because @@ -163,7 +181,7 @@ where // we can't relay messages if messages pallet at target chain is halted self.ensure_pallet_active().await?; - read_client_state(&self.target_client, Some(&self.source_client)).await + read_client_state_from_both_chains(&self.target_client, &self.source_client).await } async fn latest_received_nonce( @@ -219,7 +237,7 @@ where ); let proof = self .target_client - .prove_storage(vec![inbound_data_key], id.1) + .prove_storage(id.hash(), vec![inbound_data_key]) .await? .into_iter_nodes() .collect(); diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs index 74f3a70c5e81b..a536e468c10bd 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/headers.rs @@ -55,25 +55,30 @@ use crate::{ /// relay) needs it to continue its regular work. When enough headers are relayed, on-demand stops /// syncing headers. #[derive(Clone)] -pub struct OnDemandHeadersRelay { +pub struct OnDemandHeadersRelay { /// Relay task name. relay_task_name: String, /// Shared reference to maximal required finalized header number. required_header_number: RequiredHeaderNumberRef, /// Client of the source chain. - source_client: Client, + source_client: SourceClnt, /// Client of the target chain. - target_client: Client, + target_client: TargetClnt, } -impl OnDemandHeadersRelay
<P>
{ +impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandHeadersRelay +{ /// Create new on-demand headers relay. /// /// If `metrics_params` is `Some(_)`, the metrics of the finality relay are registered. /// Otherwise, all required metrics must be exposed outside of this method. pub fn new( - source_client: Client, - target_client: Client, + source_client: SourceClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, metrics_params: Option, @@ -106,8 +111,12 @@ impl OnDemandHeadersRelay
<P>
{ } #[async_trait] -impl OnDemandRelay - for OnDemandHeadersRelay
<P>
+impl< + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, + TargetClnt: Client, + > OnDemandRelay + for OnDemandHeadersRelay { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -141,7 +150,7 @@ impl OnDemandRelay::new(self.source_client.clone(), None); + SubstrateFinalitySource::::new(self.source_client.clone(), None); let (header, mut proof) = finality_source.prove_block_finality(current_required_header).await?; let header_id = header.id(); @@ -201,8 +210,8 @@ impl OnDemandRelay( - source_client: Client, - target_client: Client, + source_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, headers_to_relay: HeadersToRelay, required_header_number: RequiredHeaderNumberRef, @@ -212,7 +221,7 @@ async fn background_task( { let relay_task_name = on_demand_headers_relay_name::(); let target_transactions_mortality = target_transaction_params.mortality; - let mut finality_source = SubstrateFinalitySource::
<P>
::new( + let mut finality_source = SubstrateFinalitySource::::new( source_client.clone(), Some(required_header_number.clone()), ); @@ -249,7 +258,8 @@ async fn background_task( // read best finalized source header number from target let best_finalized_source_header_at_target = - best_finalized_source_header_at_target::
<P>
(&finality_target, &relay_task_name).await; + best_finalized_source_header_at_target::(&finality_target, &relay_task_name) + .await; if matches!(best_finalized_source_header_at_target, Err(ref e) if e.is_connection_error()) { relay_utils::relay_loop::reconnect_failed_client( FailedClient::Target, @@ -413,13 +423,17 @@ async fn mandatory_headers_scan_range( /// it. /// /// Returns `true` if header was found and (asked to be) relayed and `false` otherwise. -async fn relay_mandatory_header_from_range( - finality_source: &SubstrateFinalitySource
<P>
, +async fn relay_mandatory_header_from_range( + finality_source: &SubstrateFinalitySource, required_header_number: &RequiredHeaderNumberRef, best_finalized_source_header_at_target: String, range: (BlockNumberOf, BlockNumberOf), relay_task_name: &str, -) -> Result { +) -> Result +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ // search for mandatory header first let mandatory_source_header_number = find_mandatory_header_in_range(finality_source, range).await?; @@ -454,10 +468,14 @@ async fn relay_mandatory_header_from_range( /// Read best finalized source block number from source client. /// /// Returns `None` if we have failed to read the number. -async fn best_finalized_source_header_at_source( - finality_source: &SubstrateFinalitySource
<P>
, +async fn best_finalized_source_header_at_source( + finality_source: &SubstrateFinalitySource, relay_task_name: &str, -) -> Result, relay_substrate_client::Error> { +) -> Result, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ finality_source.on_chain_best_finalized_block_number().await.map_err(|error| { log::error!( target: "bridge", @@ -473,11 +491,16 @@ async fn best_finalized_source_header_at_source( - finality_target: &SubstrateFinalityTarget
<P>
, +async fn best_finalized_source_header_at_target( + finality_target: &SubstrateFinalityTarget, relay_task_name: &str, -) -> Result, as RelayClient>::Error> +) -> Result< + BlockNumberOf, + as RelayClient>::Error, +> where + P: SubstrateFinalitySyncPipeline, + TargetClnt: Client, AccountIdOf: From< as sp_core::Pair>::Public>, { finality_target @@ -499,10 +522,14 @@ where /// Read first mandatory header in given inclusive range. /// /// Returns `Ok(None)` if there were no mandatory headers in the range. -async fn find_mandatory_header_in_range( - finality_source: &SubstrateFinalitySource
<P>
, +async fn find_mandatory_header_in_range( + finality_source: &SubstrateFinalitySource, range: (BlockNumberOf, BlockNumberOf), -) -> Result>, relay_substrate_client::Error> { +) -> Result>, relay_substrate_client::Error> +where + P: SubstrateFinalitySyncPipeline, + SourceClnt: Client, +{ let mut current = range.0; while current <= range.1 { let header = finality_source.client().header_by_number(current).await?; diff --git a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs index 966bdc3107203..654cb6628d5f0 100644 --- a/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs +++ b/bridges/relays/lib-substrate-relay/src/on_demand/parachains.rs @@ -53,29 +53,34 @@ use std::fmt::Debug; /// (e.g. messages relay) needs it to continue its regular work. When enough parachain headers /// are relayed, on-demand stops syncing headers. #[derive(Clone)] -pub struct OnDemandParachainsRelay { +pub struct OnDemandParachainsRelay { /// Relay task name. relay_task_name: String, /// Channel used to communicate with background task and ask for relay of parachain heads. required_header_number_sender: Sender>, /// Source relay chain client. - source_relay_client: Client, + source_relay_client: SourceRelayClnt, /// Target chain client. - target_client: Client, + target_client: TargetClnt, /// On-demand relay chain relay. on_demand_source_relay_to_target_headers: Arc>, } -impl OnDemandParachainsRelay
<P>
{ +impl< + P: SubstrateParachainsPipeline, + SourceRelayClnt: Client, + TargetClnt: Client, + > OnDemandParachainsRelay +{ /// Create new on-demand parachains relay. /// /// Note that the argument is the source relay chain client, not the parachain client. /// That's because parachain finality is determined by the relay chain and we don't /// need to connect to the parachain itself here. pub fn new( - source_relay_client: Client, - target_client: Client, + source_relay_client: SourceRelayClnt, + target_client: TargetClnt, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -114,10 +119,13 @@ impl OnDemandParachainsRelay
<P>
{ } #[async_trait] -impl OnDemandRelay - for OnDemandParachainsRelay
<P>
+impl + OnDemandRelay + for OnDemandParachainsRelay where P::SourceParachain: Chain, + SourceRelayClnt: Client, + TargetClnt: Client, { async fn reconnect(&self) -> Result<(), SubstrateError> { // using clone is fine here (to avoid mut requirement), because clone on Client clones @@ -147,7 +155,7 @@ where required_parachain_header: BlockNumberOf, ) -> Result<(HeaderIdOf, Vec>), SubstrateError> { // select headers to prove - let parachains_source = ParachainsSource::
<P>
::new( + let parachains_source = ParachainsSource::::new( self.source_relay_client.clone(), Arc::new(Mutex::new(AvailableHeader::Missing)), ); @@ -231,8 +239,8 @@ where /// Background task that is responsible for starting parachain headers relay. async fn background_task( - source_relay_client: Client, - target_client: Client, + source_relay_client: impl Client, + target_client: impl Client, target_transaction_params: TransactionParams>, on_demand_source_relay_to_target_headers: Arc< dyn OnDemandRelay, @@ -255,9 +263,11 @@ async fn background_task( let parachains_relay_task = futures::future::Fuse::terminated(); futures::pin_mut!(parachains_relay_task); - let mut parachains_source = - ParachainsSource::
<P>
::new(source_relay_client.clone(), required_para_header_ref.clone()); - let mut parachains_target = ParachainsTarget::
<P>
::new( + let mut parachains_source = ParachainsSource::::new( + source_relay_client.clone(), + required_para_header_ref.clone(), + ); + let mut parachains_target = ParachainsTarget::::new( source_relay_client.clone(), target_client.clone(), target_transaction_params.clone(), @@ -446,9 +456,9 @@ struct RelayData { } /// Read required data from source and target clients. -async fn read_relay_data( - source: &ParachainsSource

, - target: &ParachainsTarget

, +async fn read_relay_data( + source: &ParachainsSource, + target: &ParachainsTarget, required_header_number: BlockNumberOf, ) -> Result< RelayData< @@ -459,7 +469,9 @@ async fn read_relay_data( FailedClient, > where - ParachainsTarget

: + SourceRelayClnt: Client, + TargetClnt: Client, + ParachainsTarget: TargetClient> + RelayClient, { let map_target_err = |e| { @@ -642,13 +654,19 @@ trait SelectHeadersToProveEnvironment { } #[async_trait] -impl<'a, P: SubstrateParachainsPipeline> +impl<'a, P: SubstrateParachainsPipeline, SourceRelayClnt, TargetClnt> SelectHeadersToProveEnvironment< BlockNumberOf, HashOf, BlockNumberOf, HashOf, - > for (&'a OnDemandParachainsRelay

, &'a ParachainsSource

) + > + for ( + &'a OnDemandParachainsRelay, + &'a ParachainsSource, + ) where + SourceRelayClnt: Client, + TargetClnt: Client, { fn parachain_id(&self) -> ParaId { ParaId(P::SourceParachain::PARACHAIN_ID) @@ -665,7 +683,6 @@ impl<'a, P: SubstrateParachainsPipeline> ) -> Result, SubstrateError> { Ok(crate::messages_source::read_client_state::( &self.0.target_client, - None, ) .await? .best_finalized_peer_at_best_self diff --git a/bridges/relays/lib-substrate-relay/src/parachains/source.rs b/bridges/relays/lib-substrate-relay/src/parachains/source.rs index 4cc512b9d9b45..11b9d6dbf5bd3 100644 --- a/bridges/relays/lib-substrate-relay/src/parachains/source.rs +++ b/bridges/relays/lib-substrate-relay/src/parachains/source.rs @@ -37,22 +37,24 @@ pub type RequiredHeaderIdRef = Arc>>>; /// Substrate client as parachain heads source. #[derive(Clone)] -pub struct ParachainsSource { - client: Client, +pub struct ParachainsSource { + client: SourceRelayClnt, max_head_id: RequiredHeaderIdRef, } -impl ParachainsSource
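The pattern in the hunks above repeats throughout the rest of this patch: structs that previously stored the concrete `relay_substrate_client::Client<C>` struct become generic over any type implementing the `Client<C>` trait (or take `impl Client<C>` in argument position, as `background_task` now does). A minimal self-contained sketch of that shape, using invented stand-in names (`HeadsSource`, `MockClient`, `SourceRelayChain`) rather than the real relay types:

```rust
// Simplified sketch of the refactoring pattern, not actual crate code: a struct
// that used to own a concrete client struct becomes generic over a client trait.
use std::marker::PhantomData;

/// Stand-in for a chain definition (the real code uses the `Chain` trait).
pub trait Chain {
	const NAME: &'static str;
}

/// Stand-in for the `Client<C>` trait that RPC clients implement after the change.
pub trait Client<C: Chain>: Clone + Send + Sync {
	fn chain_name(&self) -> &'static str {
		C::NAME
	}
}

/// Analogue of `ParachainsSource<P, SourceRelayClnt>`: the client is a type parameter.
#[derive(Clone)]
pub struct HeadsSource<C: Chain, Clnt: Client<C>> {
	client: Clnt,
	_chain: PhantomData<C>,
}

impl<C: Chain, Clnt: Client<C>> HeadsSource<C, Clnt> {
	/// Creates a new heads source over any client implementation.
	pub fn new(client: Clnt) -> Self {
		HeadsSource { client, _chain: PhantomData }
	}

	/// Returns reference to the underlying RPC client.
	pub fn client(&self) -> &Clnt {
		&self.client
	}
}

// Any client implementation (real RPC client, caching wrapper, test mock) now fits.
#[derive(Clone)]
pub struct MockClient;

pub struct SourceRelayChain;

impl Chain for SourceRelayChain {
	const NAME: &'static str = "SourceRelayChain";
}

impl Client<SourceRelayChain> for MockClient {}

fn main() {
	let source = HeadsSource::<SourceRelayChain, _>::new(MockClient);
	println!("heads source talks to {}", source.client().chain_name());
}
```

Because the client is a type parameter, a caching wrapper or a test double can be plugged into the same code path without touching the relay logic itself.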

diff --git a/bridges/relays/lib-substrate-relay/src/parachains/source.rs b/bridges/relays/lib-substrate-relay/src/parachains/source.rs
index 4cc512b9d9b45..11b9d6dbf5bd3 100644
--- a/bridges/relays/lib-substrate-relay/src/parachains/source.rs
+++ b/bridges/relays/lib-substrate-relay/src/parachains/source.rs
@@ -37,22 +37,24 @@ pub type RequiredHeaderIdRef<C> = Arc<Mutex<AvailableHeader<HeaderIdOf<C>>>>;
 
 /// Substrate client as parachain heads source.
 #[derive(Clone)]
-pub struct ParachainsSource<P: SubstrateParachainsPipeline> {
-	client: Client<P::SourceRelayChain>,
+pub struct ParachainsSource<P: SubstrateParachainsPipeline, SourceRelayClnt> {
+	client: SourceRelayClnt,
 	max_head_id: RequiredHeaderIdRef<P::SourceParachain>,
 }
 
-impl<P: SubstrateParachainsPipeline> ParachainsSource<P> {
+impl<P: SubstrateParachainsPipeline, SourceRelayClnt: Client<P::SourceRelayChain>>
+	ParachainsSource<P, SourceRelayClnt>
+{
 	/// Creates new parachains source client.
 	pub fn new(
-		client: Client<P::SourceRelayChain>,
+		client: SourceRelayClnt,
 		max_head_id: RequiredHeaderIdRef<P::SourceParachain>,
 	) -> Self {
 		ParachainsSource { client, max_head_id }
 	}
 
 	/// Returns reference to the underlying RPC client.
-	pub fn client(&self) -> &Client<P::SourceRelayChain> {
+	pub fn client(&self) -> &SourceRelayClnt {
 		&self.client
 	}
 
@@ -64,8 +66,8 @@ impl<P: SubstrateParachainsPipeline> ParachainsSource<P> {
 		let para_id = ParaId(P::SourceParachain::PARACHAIN_ID);
 		let storage_key =
 			parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, para_id);
-		let para_head = self.client.raw_storage_value(storage_key, Some(at_block.1)).await?;
-		let para_head = para_head.map(|h| ParaHead::decode(&mut &h.0[..])).transpose()?;
+		let para_head: Option<ParaHead> =
+			self.client.storage_value(at_block.hash(), storage_key).await?;
 		let para_head = match para_head {
 			Some(para_head) => para_head,
 			None => return Ok(None),
@@ -76,7 +78,9 @@ impl<P: SubstrateParachainsPipeline> ParachainsSource<P> {
 }
 
 #[async_trait]
-impl<P: SubstrateParachainsPipeline> RelayClient for ParachainsSource<P> {
+impl<P: SubstrateParachainsPipeline, SourceRelayClnt: Client<P::SourceRelayChain>> RelayClient
+	for ParachainsSource<P, SourceRelayClnt>
+{
 	type Error = SubstrateError;
 
 	async fn reconnect(&mut self) -> Result<(), SubstrateError> {
@@ -85,8 +89,8 @@ impl<P: SubstrateParachainsPipeline> RelayClient for ParachainsSource<P> {
 }
 
 #[async_trait]
-impl<P: SubstrateParachainsPipeline> SourceClient<ParachainsPipelineAdapter<P>>
-	for ParachainsSource<P>
+impl<P: SubstrateParachainsPipeline, SourceRelayClnt: Client<P::SourceRelayChain>>
+	SourceClient<ParachainsPipelineAdapter<P>> for ParachainsSource<P, SourceRelayClnt>
 where
 	P::SourceParachain: Chain<Hash = ParaHash>,
 {
@@ -151,7 +155,7 @@ where
 			parachain_head_storage_key_at_source(P::SourceRelayChain::PARAS_PALLET_NAME, parachain);
 		let parachain_heads_proof = self
 			.client
-			.prove_storage(vec![storage_key.clone()], at_block.1)
+			.prove_storage(at_block.hash(), vec![storage_key.clone()])
 			.await?
 			.into_iter_nodes()
 			.collect();
@@ -165,10 +169,8 @@ where
 		// rereading actual value here
 		let parachain_head = self
 			.client
-			.raw_storage_value(storage_key, Some(at_block.1))
+			.storage_value::<ParaHead>(at_block.hash(), storage_key)
 			.await?
-			.map(|h| ParaHead::decode(&mut &h.0[..]))
-			.transpose()?
 			.ok_or_else(|| {
 				SubstrateError::Custom(format!(
 					"Failed to read expected parachain {parachain:?} head at {at_block:?}"
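The other recurring change, visible in the `parachains/source.rs` hunks above and in `parachains/target.rs` below, is the storage-read API: the block hash moves to the first argument, and the typed `storage_value` call replaces the `raw_storage_value` plus manual `ParaHead::decode` pair. A rough standalone sketch of why the typed getter removes boilerplate, assuming `parity-scale-codec` with the `derive` feature; `StorageData` and `ParaHead` here are local stand-ins, not the real types:

```rust
// Rough sketch, not the relay client's real API: a typed storage getter moves
// SCALE decoding out of every call site.
use parity_scale_codec::{Decode, Encode};

/// Local stand-in for raw SCALE-encoded storage bytes.
pub struct StorageData(pub Vec<u8>);

/// Old shape: return raw bytes and let every caller decode them.
fn raw_storage_value(raw: Option<Vec<u8>>) -> Option<StorageData> {
	raw.map(StorageData)
}

/// New shape: decode into the requested type and surface decode errors here, once.
fn storage_value<T: Decode>(raw: Option<Vec<u8>>) -> Result<Option<T>, parity_scale_codec::Error> {
	raw.map(|bytes| T::decode(&mut &bytes[..])).transpose()
}

/// Local stand-in for a parachain head value stored on the relay chain.
#[derive(Encode, Decode, Debug, PartialEq)]
pub struct ParaHead(pub Vec<u8>);

fn main() {
	let stored = ParaHead(vec![1, 2, 3]).encode();

	// Old call-site pattern: fetch raw bytes, then decode + transpose manually.
	let old: Option<ParaHead> = raw_storage_value(Some(stored.clone()))
		.map(|h| ParaHead::decode(&mut &h.0[..]))
		.transpose()
		.expect("valid encoding");

	// New call-site pattern: one typed call.
	let new: Option<ParaHead> = storage_value::<ParaHead>(Some(stored)).expect("valid encoding");

	assert_eq!(old, new);
}
```

With the typed helper, the call sites in the diff shrink to a single `storage_value::<ParaHead>(at_block.hash(), storage_key)` call.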

diff --git a/bridges/relays/lib-substrate-relay/src/parachains/target.rs b/bridges/relays/lib-substrate-relay/src/parachains/target.rs
index 531d55b532236..f66b193340c1a 100644
--- a/bridges/relays/lib-substrate-relay/src/parachains/target.rs
+++ b/bridges/relays/lib-substrate-relay/src/parachains/target.rs
@@ -42,31 +42,42 @@ use relay_substrate_client::{
 };
 use relay_utils::relay_loop::Client as RelayClient;
 use sp_core::Pair;
+use sp_runtime::traits::Header;
 
 /// Substrate client as parachain heads source.
-pub struct ParachainsTarget<P: SubstrateParachainsPipeline> {
-	source_client: Client<P::SourceRelayChain>,
-	target_client: Client<P::TargetChain>,
+pub struct ParachainsTarget<P: SubstrateParachainsPipeline, SourceClnt, TargetClnt> {
+	source_client: SourceClnt,
+	target_client: TargetClnt,
 	transaction_params: TransactionParams<AccountKeyPairOf<P::TargetChain>>,
 }
 
-impl<P: SubstrateParachainsPipeline> ParachainsTarget<P> {
+impl<
+		P: SubstrateParachainsPipeline,
+		SourceClnt: Client<P::SourceRelayChain>,
+		TargetClnt: Client<P::TargetChain>,
+	> ParachainsTarget<P, SourceClnt, TargetClnt>
+{
 	/// Creates new parachains target client.
 	pub fn new(
-		source_client: Client<P::SourceRelayChain>,
-		target_client: Client<P::TargetChain>,
+		source_client: SourceClnt,
+		target_client: TargetClnt,
 		transaction_params: TransactionParams<AccountKeyPairOf<P::TargetChain>>,
 	) -> Self {
 		ParachainsTarget { source_client, target_client, transaction_params }
 	}
 
 	/// Returns reference to the underlying RPC client.
-	pub fn target_client(&self) -> &Client<P::TargetChain> {
+	pub fn target_client(&self) -> &TargetClnt {
 		&self.target_client
 	}
 }
 
-impl<P: SubstrateParachainsPipeline> Clone for ParachainsTarget<P> {
+impl<
+		P: SubstrateParachainsPipeline,
+		SourceClnt: Client<P::SourceRelayChain>,
+		TargetClnt: Clone,
+	> Clone for ParachainsTarget<P, SourceClnt, TargetClnt>
+{
 	fn clone(&self) -> Self {
 		ParachainsTarget {
 			source_client: self.source_client.clone(),
@@ -77,7 +88,12 @@ impl<P: SubstrateParachainsPipeline> Clone for ParachainsTarget<P> {
 }
 
 #[async_trait]
-impl<P: SubstrateParachainsPipeline> RelayClient for ParachainsTarget<P> {
+impl<
+		P: SubstrateParachainsPipeline,
+		SourceClnt: Client<P::SourceRelayChain>,
+		TargetClnt: Client<P::TargetChain>,
+	> RelayClient for ParachainsTarget<P, SourceClnt, TargetClnt>
+{
 	type Error = SubstrateError;
 
 	async fn reconnect(&mut self) -> Result<(), SubstrateError> {
@@ -88,14 +104,17 @@ impl<P: SubstrateParachainsPipeline> RelayClient for ParachainsTarget<P> {
 }
 
 #[async_trait]
-impl<P> TargetClient<ParachainsPipelineAdapter<P>> for ParachainsTarget<P>
+impl<P, SourceClnt, TargetClnt> TargetClient<ParachainsPipelineAdapter<P>>
+	for ParachainsTarget<P, SourceClnt, TargetClnt>
 where
 	P: SubstrateParachainsPipeline,
+	SourceClnt: Client<P::SourceRelayChain>,
+	TargetClnt: Client<P::TargetChain>,
 	AccountIdOf<P::TargetChain>: From<<AccountKeyPairOf<P::TargetChain> as Pair>::Public>,
 	P::SourceParachain: ChainBase<Hash = ParaHash>,
 	P::SourceRelayChain: ChainBase<Hash = RelayBlockHash, BlockNumber = RelayBlockNumber>,
 {
-	type TransactionTracker = TransactionTracker<P::TargetChain, Client<P::TargetChain>>;
+	type TransactionTracker = TransactionTracker<P::TargetChain, TargetClnt>;
 
 	async fn best_block(&self) -> Result<HeaderIdOf<P::TargetChain>, Self::Error> {
 		let best_header = self.target_client.best_header().await?;
@@ -109,10 +128,10 @@ where
 		at_block: &HeaderIdOf<P::TargetChain>,
 	) -> Result<HeaderIdOf<P::SourceRelayChain>, Self::Error> {
 		self.target_client
-			.typed_state_call::<_, Option<HeaderIdOf<P::SourceRelayChain>>>(
+			.state_call::<_, Option<HeaderIdOf<P::SourceRelayChain>>>(
+				at_block.hash(),
 				P::SourceRelayChain::BEST_FINALIZED_HEADER_ID_METHOD.into(),
 				(),
-				Some(at_block.1),
 			)
 			.await?
 			.map(Ok)
@@ -124,7 +143,11 @@ where
 	) -> Result<Option<BlockNumberOf<P::SourceRelayChain>>, Self::Error> {
 		Ok(self
 			.target_client
-			.typed_state_call(P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(), (), None)
+			.state_call(
+				self.target_client.best_header().await?.hash(),
+				P::SourceRelayChain::FREE_HEADERS_INTERVAL_METHOD.into(),
+				(),
+			)
 			.await
 			.unwrap_or_else(|e| {
 				log::info!(
@@ -151,7 +174,7 @@ where
 			&P::SourceParachain::PARACHAIN_ID.into(),
 		);
 		let storage_value: Option<ParaInfo> =
-			self.target_client.storage_value(storage_key, Some(at_block.hash())).await?;
+			self.target_client.storage_value(at_block.hash(), storage_key).await?;
 		let para_info = match storage_value {
 			Some(para_info) => para_info,
 			None => return Ok(None),
@@ -172,7 +195,7 @@ where
 			&para_info.best_head_hash.head_hash,
 		);
 		let storage_value: Option<ParaHead> =
-			self.target_client.storage_value(storage_key, Some(at_block.hash())).await?;
+			self.target_client.storage_value(at_block.hash(), storage_key).await?;
 		let para_head_number = match storage_value {
 			Some(para_head_data) =>
 				para_head_data.decode_parachain_head_data::<P::SourceParachain>()?.number,