This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

companion for #8783 - jsonrpsee (#4344)
* add jsonrpsee glue code

* diener --substrate dp-jsonrpsee-integration-2

* cargo fmt

* update substrate

* fix build

* update substrate

* fix tests

* update substrate

* update substrate

* revert Cargo.toml

* revert changes in Cargo.toml

* jsonrpsee v0.11

* fix staking miner

* chore: update jsonrpsee v0.12

* update companion

* update companion

* fix changes in substrate

* revert requires_full_sync removal

* fix: read WS address from polkadot output

* fit nits

* fix more nits

* update lockfile for {"substrate"}

* cargo fmt

Co-authored-by: parity-processbot <>
niklasad1 authored May 10, 2022
1 parent b0b9366 commit e7c4ac3
Showing 20 changed files with 495 additions and 749 deletions.
1,012 changes: 347 additions & 665 deletions Cargo.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion node/metrics/src/tests.rs
@@ -25,7 +25,7 @@ use std::collections::HashMap;

const DEFAULT_PROMETHEUS_PORT: u16 = 9616;

#[substrate_test_utils::test]
#[substrate_test_utils::test(flavor = "multi_thread")]
async fn runtime_can_publish_metrics() {
let mut alice_config =
node_config(|| {}, tokio::runtime::Handle::current(), Alice, Vec::new(), true);
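The diff above (and several others in this commit) switches #[substrate_test_utils::test] to #[substrate_test_utils::test(flavor = "multi_thread")]. The flavor argument uses tokio's test-macro syntax and selects the multi-threaded runtime, so tasks spawned by the node under test (such as the jsonrpsee RPC server) can make progress while the test body is awaiting. A minimal sketch in plain tokio terms; the test below is illustrative, not taken from the repository, and assumes tokio as a dev-dependency:

// Illustrative only: a multi-threaded tokio test, roughly what the attribute change selects.
#[tokio::test(flavor = "multi_thread")]
async fn background_tasks_make_progress() {
    // Under the multi-threaded runtime this spawned task can run on another
    // worker thread while the test body is suspended on `.await`.
    let handle = tokio::spawn(async { 21 * 2 });
    assert_eq!(handle.await.unwrap(), 42);
}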
2 changes: 1 addition & 1 deletion node/network/protocol/Cargo.toml
@@ -21,4 +21,4 @@ rand = "0.8"
derive_more = "0.99"

[dev-dependencies]
rand_chacha = "0.3.1"
rand_chacha = "0.3.1"
2 changes: 1 addition & 1 deletion node/service/Cargo.toml
@@ -199,4 +199,4 @@ runtime-metrics = [
"polkadot-runtime-parachains/runtime-metrics"
]

staging-client = ["polkadot-node-core-provisioner/staging-client"]
staging-client = ["polkadot-node-core-provisioner/staging-client"]
7 changes: 5 additions & 2 deletions node/service/src/lib.rs
@@ -444,7 +444,10 @@ fn new_partial<RuntimeApi, ExecutorDispatch, ChainSelection>(
sc_consensus::DefaultImportQueue<Block, FullClient<RuntimeApi, ExecutorDispatch>>,
sc_transaction_pool::FullPool<Block, FullClient<RuntimeApi, ExecutorDispatch>>,
(
impl service::RpcExtensionBuilder,
impl Fn(
polkadot_rpc::DenyUnsafe,
polkadot_rpc::SubscriptionTaskExecutor,
) -> Result<polkadot_rpc::RpcExtension, SubstrateServiceError>,
(
babe::BabeBlockImport<
Block,
@@ -938,7 +941,7 @@ where
client: client.clone(),
keystore: keystore_container.sync_keystore(),
network: network.clone(),
rpc_extensions_builder: Box::new(rpc_extensions_builder),
rpc_builder: Box::new(rpc_extensions_builder),
transaction_pool: transaction_pool.clone(),
task_manager: &mut task_manager,
system_rpc_tx,
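In the hunk above, new_partial's return type drops impl service::RpcExtensionBuilder in favour of a plain closure taking polkadot_rpc::DenyUnsafe and polkadot_rpc::SubscriptionTaskExecutor and returning the jsonrpsee-based RpcExtension, and the service field it is passed to is renamed from rpc_extensions_builder to rpc_builder. A self-contained sketch of that shape, using stand-in types defined here for illustration rather than the real Substrate/Polkadot definitions:

// Hedged sketch of the new builder shape; every type below is a local stand-in.
#[derive(Clone, Copy)]
struct DenyUnsafe(bool);
#[derive(Clone, Copy)]
struct SubscriptionTaskExecutor;
struct RpcExtension {
    methods: Vec<&'static str>,
}
#[derive(Debug)]
struct ServiceError(String);

fn rpc_builder(
) -> impl Fn(DenyUnsafe, SubscriptionTaskExecutor) -> Result<RpcExtension, ServiceError> {
    move |deny_unsafe, _executor| {
        // The real builder threads deny_unsafe through to each RPC module it merges.
        let mut methods = vec!["chain_getBlock", "system_health"];
        if !deny_unsafe.0 {
            methods.push("author_rotateKeys");
        }
        Ok(RpcExtension { methods })
    }
}

fn main() {
    let build = rpc_builder();
    let rpc = build(DenyUnsafe(false), SubscriptionTaskExecutor).unwrap();
    println!("registered {} RPC methods", rpc.methods.len());
}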
4 changes: 4 additions & 0 deletions node/test/service/src/lib.rs
@@ -190,9 +190,13 @@ pub fn node_config(
rpc_ws: None,
rpc_ipc: None,
rpc_max_payload: None,
rpc_max_request_size: None,
rpc_max_response_size: None,
rpc_ws_max_connections: None,
rpc_cors: None,
rpc_methods: Default::default(),
rpc_id_provider: None,
rpc_max_subs_per_conn: None,
ws_max_out_buffer_capacity: None,
prometheus_config: None,
telemetry_endpoints: None,
2 changes: 1 addition & 1 deletion node/test/service/tests/build-blocks.rs
@@ -18,7 +18,7 @@ use futures::{future, pin_mut, select, FutureExt};
use polkadot_test_service::*;
use sp_keyring::Sr25519Keyring;

#[substrate_test_utils::test]
#[substrate_test_utils::test(flavor = "multi_thread")]
async fn ensure_test_service_build_blocks() {
let mut builder = sc_cli::LoggerBuilder::new("");
builder.with_colors(false);
4 changes: 2 additions & 2 deletions node/test/service/tests/call-function.rs
@@ -17,7 +17,7 @@
use polkadot_test_service::*;
use sp_keyring::Sr25519Keyring::{Alice, Bob, Charlie};

#[substrate_test_utils::test]
#[substrate_test_utils::test(flavor = "multi_thread")]
async fn call_function_actually_work() {
let alice_config =
node_config(|| {}, tokio::runtime::Handle::current(), Alice, Vec::new(), true);
@@ -30,7 +30,7 @@ async fn call_function_actually_work() {
});
let output = alice.send_extrinsic(function, Bob).await.unwrap();

let res = output.result.expect("return value expected");
let res = output.result;
let json = serde_json::from_str::<serde_json::Value>(res.as_str()).expect("valid JSON");
let object = json.as_object().expect("JSON is an object");
assert!(object.contains_key("jsonrpc"), "key jsonrpc exists");
@@ -20,7 +20,8 @@
const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_adder_collator_puppet_worker");

// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled.
#[substrate_test_utils::test]

#[substrate_test_utils::test(flavor = "multi_thread")]
async fn collating_using_adder_collator() {
use polkadot_primitives::v2::Id as ParaId;
use sp_keyring::AccountKeyring::*;
@@ -20,7 +20,7 @@
const PUPPET_EXE: &str = env!("CARGO_BIN_EXE_undying_collator_puppet_worker");

// If this test is failing, make sure to run all tests with the `real-overseer` feature being enabled.
#[substrate_test_utils::test]
#[substrate_test_utils::test(flavor = "multi_thread")]
async fn collating_using_undying_collator() {
use polkadot_primitives::v2::Id as ParaId;
use sp_keyring::AccountKeyring::*;
2 changes: 1 addition & 1 deletion rpc/Cargo.toml
@@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
edition = "2021"

[dependencies]
jsonrpc-core = "18.0.0"
jsonrpsee = { version = "0.12.0", features = ["server"] }
polkadot-primitives = { path = "../primitives" }
sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master" }
93 changes: 51 additions & 42 deletions rpc/src/lib.rs
@@ -20,12 +20,12 @@

use std::sync::Arc;

use jsonrpsee::RpcModule;
use polkadot_primitives::v2::{AccountId, Balance, Block, BlockNumber, Hash, Nonce};
use sc_client_api::AuxStore;
use sc_consensus_babe::Epoch;
use sc_finality_grandpa::FinalityProofProvider;
pub use sc_rpc::{DenyUnsafe, SubscriptionTaskExecutor};
use sc_sync_state_rpc::{SyncStateRpcApi, SyncStateRpcHandler};
use sp_api::ProvideRuntimeApi;
use sp_block_builder::BlockBuilder;
use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata};
@@ -35,7 +35,7 @@ use sp_keystore::SyncCryptoStorePtr;
use txpool_api::TransactionPool;

/// A type representing all RPC extensions.
pub type RpcExtension = jsonrpc_core::IoHandler<sc_rpc::Metadata>;
pub type RpcExtension = RpcModule<()>;

/// Extra dependencies for BABE.
pub struct BabeDeps {
@@ -115,13 +115,16 @@ where
B: sc_client_api::Backend<Block> + Send + Sync + 'static,
B::State: sc_client_api::StateBackend<sp_runtime::traits::HashFor<Block>>,
{
use frame_rpc_system::{FullSystem, SystemApi};
use pallet_mmr_rpc::{Mmr, MmrApi};
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi};
use sc_consensus_babe_rpc::BabeRpcHandler;
use sc_finality_grandpa_rpc::{GrandpaApi, GrandpaRpcHandler};

let mut io = jsonrpc_core::IoHandler::default();
use beefy_gadget_rpc::{BeefyApiServer, BeefyRpcHandler};
use frame_rpc_system::{SystemApiServer, SystemRpc};
use pallet_mmr_rpc::{MmrApiServer, MmrRpc};
use pallet_transaction_payment_rpc::{TransactionPaymentApiServer, TransactionPaymentRpc};
use sc_consensus_babe_rpc::{BabeApiServer, BabeRpc};
use sc_finality_grandpa_rpc::{GrandpaApiServer, GrandpaRpc};
use sc_sync_state_rpc::{SyncStateRpc, SyncStateRpcApiServer};
use substrate_state_trie_migration_rpc::StateMigrationApiServer;

let mut io = RpcModule::new(());
let FullDeps { client, pool, select_chain, chain_spec, deny_unsafe, babe, grandpa, beefy } =
deps;
let BabeDeps { keystore, babe_config, shared_epoch_changes } = babe;
@@ -133,41 +136,47 @@ where
finality_provider,
} = grandpa;

io.extend_with(substrate_state_trie_migration_rpc::StateMigrationApi::to_delegate(
substrate_state_trie_migration_rpc::MigrationRpc::new(client.clone(), backend, deny_unsafe),
));

io.extend_with(SystemApi::to_delegate(FullSystem::new(client.clone(), pool, deny_unsafe)));
io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())));
io.extend_with(MmrApi::to_delegate(Mmr::new(client.clone())));
io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate(BabeRpcHandler::new(
client.clone(),
shared_epoch_changes.clone(),
keystore,
babe_config,
select_chain,
deny_unsafe,
)));
io.extend_with(GrandpaApi::to_delegate(GrandpaRpcHandler::new(
shared_authority_set.clone(),
shared_voter_state,
justification_stream,
subscription_executor,
finality_provider,
)));
io.extend_with(SyncStateRpcApi::to_delegate(SyncStateRpcHandler::new(
chain_spec,
client,
shared_authority_set,
shared_epoch_changes,
)?));
io.merge(
substrate_state_trie_migration_rpc::MigrationRpc::new(client.clone(), backend, deny_unsafe)
.into_rpc(),
)?;
io.merge(SystemRpc::new(client.clone(), pool.clone(), deny_unsafe).into_rpc())?;
io.merge(TransactionPaymentRpc::new(client.clone()).into_rpc())?;
io.merge(MmrRpc::new(client.clone()).into_rpc())?;
io.merge(
BabeRpc::new(
client.clone(),
shared_epoch_changes.clone(),
keystore,
babe_config,
select_chain,
deny_unsafe,
)
.into_rpc(),
)?;
io.merge(
GrandpaRpc::new(
subscription_executor,
shared_authority_set.clone(),
shared_voter_state,
justification_stream,
finality_provider,
)
.into_rpc(),
)?;
io.merge(
SyncStateRpc::new(chain_spec, client, shared_authority_set, shared_epoch_changes)?
.into_rpc(),
)?;

let handler: beefy_gadget_rpc::BeefyRpcHandler<Block> = beefy_gadget_rpc::BeefyRpcHandler::new(
beefy.beefy_commitment_stream,
beefy.beefy_best_block_stream,
beefy.subscription_executor,
io.merge(
BeefyRpcHandler::<Block>::new(
beefy.beefy_commitment_stream,
beefy.beefy_best_block_stream,
beefy.subscription_executor,
)?
.into_rpc(),
)?;
io.extend_with(beefy_gadget_rpc::BeefyApi::to_delegate(handler));

Ok(io)
}
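The rewritten create_full above is the heart of the jsonrpc-core to jsonrpsee migration: instead of io.extend_with(Api::to_delegate(handler)) on an IoHandler, each server implementation is converted with into_rpc() and combined via RpcModule::merge, which errors if two modules register the same method name (hence the ? on every merge). A minimal sketch of the same composition, assuming a jsonrpsee release in the 0.12 line with the "server" feature; the method name registered below is made up for illustration:

// Hedged sketch of RpcModule composition; assumes jsonrpsee ~0.12 with "server".
use jsonrpsee::RpcModule;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut root = RpcModule::new(());

    // Macro-generated servers would be merged after .into_rpc(); a hand-registered
    // method keeps this sketch self-contained.
    let mut extra = RpcModule::new(());
    extra.register_method("sketch_version", |_params, _ctx| {
        Ok::<_, jsonrpsee::core::Error>("0.0.0-sketch")
    })?;

    // merge returns an error on duplicate method names.
    root.merge(extra)?;
    Ok(())
}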
2 changes: 1 addition & 1 deletion runtime/parachains/Cargo.toml
@@ -109,4 +109,4 @@ try-runtime = [
"pallet-vesting/try-runtime",
]
runtime-metrics = ["sp-tracing/with-tracing", "polkadot-runtime-metrics/runtime-metrics"]
vstaging = []
vstaging = []
13 changes: 11 additions & 2 deletions tests/benchmark_block_works.rs
@@ -22,7 +22,12 @@ use nix::{
sys::signal::{kill, Signal::SIGINT},
unistd::Pid,
};
use std::{path::Path, process::Command, result::Result, time::Duration};
use std::{
path::Path,
process::{self, Command},
result::Result,
time::Duration,
};
use tempfile::tempdir;

pub mod common;
@@ -47,6 +52,8 @@ async fn benchmark_block_works() {
/// Builds a chain with one block for the given runtime and base path.
async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> {
let mut cmd = Command::new(cargo_bin("polkadot"))
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(["--chain", &runtime, "--force-authoring", "--alice"])
.arg("-d")
.arg(base_path)
@@ -55,8 +62,10 @@
.spawn()
.unwrap();

let (ws_url, _) = common::find_ws_url_from_output(cmd.stderr.take().unwrap());

// Wait for the chain to produce one block.
let ok = common::wait_n_finalized_blocks(1, Duration::from_secs(60)).await;
let ok = common::wait_n_finalized_blocks(1, Duration::from_secs(60), &ws_url).await;
// Send SIGINT to node.
kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap();
// Wait for the node to handle it and exit.
35 changes: 32 additions & 3 deletions tests/common.rs
@@ -17,14 +17,13 @@
use polkadot_core_primitives::Block;
use remote_externalities::rpc_api::get_finalized_head;
use std::{
io::{BufRead, BufReader, Read},
process::{Child, ExitStatus},
thread,
time::Duration,
};
use tokio::time::timeout;

static LOCALHOST_WS: &str = "ws://127.0.0.1:9944/";

/// Wait for the given `child` the given amount of `secs`.
///
/// Returns the `Some(exit status)` or `None` if the process did not finish in the given time.
@@ -46,8 +45,9 @@ pub fn wait_for(child: &mut Child, secs: usize) -> Option<ExitStatus> {
pub async fn wait_n_finalized_blocks(
n: usize,
timeout_duration: Duration,
url: &str,
) -> Result<(), tokio::time::error::Elapsed> {
timeout(timeout_duration, wait_n_finalized_blocks_from(n, LOCALHOST_WS)).await
timeout(timeout_duration, wait_n_finalized_blocks_from(n, url)).await
}

/// Wait for at least `n` blocks to be finalized from a specified node.
@@ -65,3 +65,32 @@ async fn wait_n_finalized_blocks_from(n: usize, url: &str) {
interval.tick().await;
}
}

/// Read the WS address from the output.
///
/// This is a hack to get the actual bound socket address, because
/// polkadot assigns a random port if the specified port was already bound.
///
/// You must call `Command::new("cmd").stdout(process::Stdio::piped()).stderr(process::Stdio::piped())`
/// for this to work.
pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) {
let mut data = String::new();

let ws_url = BufReader::new(read)
.lines()
.find_map(|line| {
let line = line.expect("failed to obtain next line from stdout for port discovery");

data.push_str(&line);

// does the line contain our port (we expect this specific output from substrate).
let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") {
None => return None,
Some((_, after)) => after.split_once(",").unwrap().0,
};

Some(format!("ws://{}", sock_addr))
})
.expect("We should get a WebSocket address");
(ws_url, data)
}