Switch to wasm only, port paritytech/cumulus#1054
jasl committed Mar 28, 2022
1 parent bdc99e4 commit 1e17ef1
Showing 9 changed files with 133 additions and 120 deletions.
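In substance, this commit ports paritytech/cumulus#1054: the node drops Substrate's native-else-wasm executor in favor of wasm-only execution, which removes the per-runtime Executor type parameter from every service signature. A minimal before/after sketch of the client type, assuming the sc-executor and sc-service APIs of the polkadot-v0.9.18 branch used throughout this diff (the alias names are illustrative only):

use sc_executor::{NativeElseWasmExecutor, WasmExecutor};
use sc_service::TFullClient;

// Before: execution prefers a natively compiled runtime and falls back to
// wasm, so every signature carries a per-runtime dispatch type `Executor`.
type OldClient<Block, RuntimeApi, Executor> =
    TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>;

// After: execution is wasm-only and the client is generic over the host
// functions alone; the `Executor` parameter disappears everywhere.
type NewClient<Block, RuntimeApi, HostFunctions> =
    TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>;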
Cargo.lock: 2 changes (1 addition, 1 deletion)

Some generated files are not rendered by default.

node/Cargo.toml: 2 changes (1 addition, 1 deletion)
@@ -68,6 +68,7 @@ sc-transaction-pool-api = { git = "https://github.com/paritytech/substrate", bra
sc-tracing = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.18" }

# Substrate Primitive Dependencies
sp-io = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.18" }
sp-api = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.18" }
sp-block-builder = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.18" }
sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.18" }
@@ -84,7 +85,6 @@ sp-transaction-pool = { git = "https://github.com/paritytech/substrate", branch

# Cumulus dependencies
cumulus-client-cli = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.18" }
cumulus-client-collator = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.18" }
cumulus-client-consensus-aura = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.18" }
cumulus-client-consensus-relay-chain = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.18" }
cumulus-client-consensus-common = { git = "https://github.com/paritytech/cumulus", branch = "polkadot-v0.9.18" }
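The manifest addition tracks the code: sp-io becomes a direct dependency because the service now names sp_io::SubstrateHostFunctions itself. A sketch of that connection, mirroring the alias introduced in node/src/service/mod.rs later in this diff (types from the polkadot-v0.9.18 branch):

// The wasm-only executor is generic over host functions, which the node
// must now name directly, hence the new sp-io dependency above.
use sc_executor::WasmExecutor;

type HostFunctions = sp_io::SubstrateHostFunctions;
type NodeExecutor = WasmExecutor<HostFunctions>;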
node/src/command.rs: 10 changes (5 additions, 5 deletions)
@@ -354,7 +354,7 @@ macro_rules! construct_async_run {
#[cfg(feature = "phala-native")]
if runner.config().chain_spec.is_phala() {
return runner.async_run(|$config| {
let $components = new_partial::<phala_parachain_runtime::RuntimeApi, PhalaParachainRuntimeExecutor, _>(
let $components = new_partial::<phala_parachain_runtime::RuntimeApi, _>(
&$config,
crate::service::phala::parachain_build_import_queue,
)?;
@@ -366,7 +366,7 @@ macro_rules! construct_async_run {
#[cfg(feature = "khala-native")]
if runner.config().chain_spec.is_khala() {
return runner.async_run(|$config| {
let $components = new_partial::<khala_parachain_runtime::RuntimeApi, KhalaParachainRuntimeExecutor, _>(
let $components = new_partial::<khala_parachain_runtime::RuntimeApi, _>(
&$config,
crate::service::khala::parachain_build_import_queue,
)?;
@@ -378,7 +378,7 @@ macro_rules! construct_async_run {
#[cfg(feature = "rhala-native")]
if runner.config().chain_spec.is_rhala() {
return runner.async_run(|$config| {
let $components = new_partial::<rhala_parachain_runtime::RuntimeApi, RhalaParachainRuntimeExecutor, _>(
let $components = new_partial::<rhala_parachain_runtime::RuntimeApi, _>(
&$config,
crate::service::rhala::parachain_build_import_queue,
)?;
@@ -390,7 +390,7 @@ macro_rules! construct_async_run {
#[cfg(feature = "thala-native")]
if runner.config().chain_spec.is_thala() {
return runner.async_run(|$config| {
let $components = new_partial::<thala_parachain_runtime::RuntimeApi, ThalaParachainRuntimeExecutor, _>(
let $components = new_partial::<thala_parachain_runtime::RuntimeApi, _>(
&$config,
crate::service::thala::parachain_build_import_queue,
)?;
@@ -402,7 +402,7 @@ macro_rules! construct_async_run {
#[cfg(feature = "shell-native")]
if runner.config().chain_spec.is_shell() {
return runner.async_run(|$config| {
let $components = new_partial::<shell_parachain_runtime::RuntimeApi, ShellParachainRuntimeExecutor, _>(
let $components = new_partial::<shell_parachain_runtime::RuntimeApi, _>(
&$config,
crate::service::shell::parachain_build_import_queue,
)?;
node/src/service/khala.rs: 24 changes (13 additions, 11 deletions)
@@ -7,9 +7,8 @@ use cumulus_client_consensus_aura::{
use cumulus_primitives_core::ParaId;

pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Index as Nonce};
use sc_executor::NativeElseWasmExecutor;
use sc_executor::WasmExecutor;

use sc_client_api::ExecutorProvider;
use sc_service::{
Configuration, TFullClient, TaskManager,
};
@@ -34,14 +33,14 @@ impl sc_executor::NativeExecutionDispatch for RuntimeExecutor {
/// Build the import queue for the parachain runtime.
#[allow(clippy::type_complexity)]
pub fn parachain_build_import_queue(
client: Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<RuntimeExecutor>>>,
client: Arc<TFullClient<Block, RuntimeApi, WasmExecutor<crate::service::HostFunctions>>>,
config: &Configuration,
telemetry: Option<TelemetryHandle>,
task_manager: &TaskManager,
) -> Result<
sc_consensus::DefaultImportQueue<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<RuntimeExecutor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<crate::service::HostFunctions>>,
>,
sc_service::Error,
> {
@@ -70,7 +69,7 @@ pub fn parachain_build_import_queue(
Ok((time, slot))
},
registry: config.prometheus_registry(),
can_author_with: sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()),
can_author_with: sp_consensus::AlwaysCanAuthor,
spawner: &task_manager.spawn_essential_handle(),
telemetry,
})
@@ -85,9 +84,9 @@ pub async fn start_parachain_node(
id: ParaId,
) -> sc_service::error::Result<(
TaskManager,
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<RuntimeExecutor>>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<crate::service::HostFunctions>>>,
)> {
crate::service::start_node_impl::<RuntimeApi, RuntimeExecutor, _, _, _>(
crate::service::start_node_impl::<RuntimeApi, _, _, _>(
parachain_config,
polkadot_config,
collator_options,
@@ -118,6 +117,7 @@ pub async fn start_parachain_node(
proposer_factory,
create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
let relay_chain_interface = relay_chain_interface.clone();

async move {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
@@ -126,11 +126,12 @@ pub async fn start_parachain_node(
&validation_data,
id,
).await;
let time = sp_timestamp::InherentDataProvider::from_system_time();

let timestamp = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
*time,
*timestamp,
slot_duration,
);

@@ -139,11 +140,12 @@ pub async fn start_parachain_node(
"Failed to create parachain inherent",
)
})?;
Ok((time, slot, parachain_inherent))

Ok((timestamp, slot, parachain_inherent))
}
},
block_import: client.clone(),
para_client: client,
para_client: client.clone(),
backoff_authoring_blocks: Option::<()>::None,
sync_oracle,
keystore,
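A behavioral note on this file: with no native runtime whose version could diverge from the on-chain wasm, the pre-authoring check CanAuthorWithNativeVersion loses its purpose, so the import queue passes sp_consensus::AlwaysCanAuthor instead. A small sketch of what that value does, assuming the sp-consensus API of this branch:

use sp_consensus::{AlwaysCanAuthor, CanAuthorWith};
use sp_runtime::generic::BlockId;
use sp_runtime::traits::Block as BlockT;

// A wasm-only node always executes the on-chain runtime, so the question
// "may this node author on top of `at`?" is unconditionally answered yes.
fn can_author<B: BlockT>(at: &BlockId<B>) -> Result<(), String> {
    AlwaysCanAuthor.can_author_with(at)
}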
node/src/service/mod.rs: 60 changes (33 additions, 27 deletions)
@@ -28,7 +28,7 @@ use cumulus_relay_chain_rpc_interface::RelayChainRPCInterface;
use polkadot_service::CollatorPair;

pub use parachains_common::{AccountId, Balance, Block, Hash, Header, Index as Nonce};
use sc_executor::NativeElseWasmExecutor;
use sc_executor::WasmExecutor;

use sc_network::NetworkService;
use sc_service::{
@@ -51,6 +51,13 @@ pub mod thala;
#[cfg(feature = "shell-native")]
pub mod shell;

#[cfg(not(feature = "runtime-benchmarks"))]
type HostFunctions = sp_io::SubstrateHostFunctions;

#[cfg(feature = "runtime-benchmarks")]
type HostFunctions =
(sp_io::SubstrateHostFunctions, frame_benchmarking::benchmarking::HostFunctions);

async fn build_relay_chain_interface(
polkadot_config: Configuration,
parachain_config: &Configuration,
@@ -75,28 +82,28 @@ async fn build_relay_chain_interface(
/// Use this macro if you don't actually need the full service, but just the builder in order to
/// be able to perform chain operations.
#[allow(clippy::type_complexity)]
pub fn new_partial<RuntimeApi, Executor, BIQ>(
pub fn new_partial<RuntimeApi, BIQ>(
config: &Configuration,
build_import_queue: BIQ,
) -> Result<
PartialComponents<
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
TFullBackend<Block>,
(),
sc_consensus::DefaultImportQueue<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
>,
sc_transaction_pool::FullPool<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
>,
(Option<Telemetry>, Option<TelemetryWorkerHandle>),
>,
sc_service::Error,
>
where
RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>
RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>
+ Send
+ Sync
+ 'static,
@@ -109,16 +116,15 @@ pub fn new_partial<RuntimeApi, Executor, BIQ>(
> + sp_offchain::OffchainWorkerApi<Block>
+ sp_block_builder::BlockBuilder<Block>,
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
Executor: sc_executor::NativeExecutionDispatch + 'static,
BIQ: FnOnce(
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>,
&Configuration,
Option<TelemetryHandle>,
&TaskManager,
) -> Result<
sc_consensus::DefaultImportQueue<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
>,
sc_service::Error,
>,
@@ -134,16 +140,17 @@ pub fn new_partial<RuntimeApi, Executor, BIQ>(
})
.transpose()?;

let executor = sc_executor::NativeElseWasmExecutor::<Executor>::new(
let executor = sc_executor::WasmExecutor::<HostFunctions>::new(
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
None,
config.runtime_cache_size,
);

let (client, backend, keystore_container, task_manager) =
sc_service::new_full_parts::<Block, RuntimeApi, _>(
config,
&config,
telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
executor,
)?;
@@ -189,7 +196,7 @@ pub fn new_partial<RuntimeApi, Executor, BIQ>(
///
/// This is the actual implementation that is abstract over the executor and the runtime api.
#[sc_tracing::logging::prefix_logs_with("Parachain")]
async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
async fn start_node_impl<RuntimeApi, RB, BIQ, BIC>(
parachain_config: Configuration,
polkadot_config: Configuration,
collator_options: CollatorOptions,
@@ -199,10 +206,10 @@ async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
build_consensus: BIC,
) -> sc_service::error::Result<(
TaskManager,
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>,
)>
where
RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>
RuntimeApi: ConstructRuntimeApi<Block, TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>
+ Send
+ Sync
+ 'static,
@@ -219,34 +226,33 @@ async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
+ substrate_frame_rpc_system::AccountNonceApi<Block, AccountId, Nonce>
+ pallet_mq_runtime_api::MqApi<Block>,
sc_client_api::StateBackendFor<TFullBackend<Block>, Block>: sp_api::StateBackend<BlakeTwo256>,
Executor: sc_executor::NativeExecutionDispatch + 'static,
RB: Fn(
Arc<TFullClient<Block, RuntimeApi, Executor>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>,
) -> Result<jsonrpc_core::IoHandler<sc_rpc::Metadata>, sc_service::Error>
+ Send
+ 'static,
BIQ: FnOnce(
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>,
&Configuration,
Option<TelemetryHandle>,
&TaskManager,
) -> Result<
sc_consensus::DefaultImportQueue<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
>,
sc_service::Error,
> + 'static,
BIC: FnOnce(
Arc<TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>>,
Arc<TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>>,
Option<&Registry>,
Option<TelemetryHandle>,
&TaskManager,
Arc<dyn RelayChainInterface>,
Arc<
sc_transaction_pool::FullPool<
Block,
TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<Executor>>,
TFullClient<Block, RuntimeApi, WasmExecutor<HostFunctions>>,
>,
>,
Arc<NetworkService<Block, Hash>>,
@@ -260,7 +266,7 @@ async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(

let parachain_config = prepare_node_config(parachain_config);

let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
let params = new_partial::<RuntimeApi, BIQ>(&parachain_config, build_import_queue)?;
let (mut telemetry, telemetry_worker_handle) = params.other;

let client = params.client.clone();
Expand All @@ -274,11 +280,11 @@ async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
&mut task_manager,
collator_options.clone(),
)
.await
.map_err(|e| match e {
RelayChainError::ServiceError(polkadot_service::Error::Sub(x)) => x,
s => s.to_string().into(),
})?;
.await
.map_err(|e| match e {
RelayChainError::ServiceError(polkadot_service::Error::Sub(x)) => x,
s => s.to_string().into(),
})?;

let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id);

@@ -363,7 +369,7 @@ async fn start_node_impl<RuntimeApi, Executor, RB, BIQ, BIC>(
announce_block,
client: client.clone(),
task_manager: &mut task_manager,
relay_chain_interface,
relay_chain_interface: relay_chain_interface.clone(),
spawner,
parachain_consensus,
import_queue,
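The heart of the change sits in new_partial above: NativeElseWasmExecutor::<Executor>::new becomes WasmExecutor::<HostFunctions>::new, which takes no native dispatch type but does take a cache path for compiled artifacts (passed as None here). An annotated sketch of that call; the parameter meanings are assumptions read off the sc-executor API of this branch, not part of the diff:

// Wasm-only executor construction, as used by new_partial above.
let executor = sc_executor::WasmExecutor::<HostFunctions>::new(
    config.wasm_method,           // interpreted vs. compiled wasm execution
    config.default_heap_pages,    // heap pages allotted to each runtime instance
    config.max_runtime_instances, // size of the pooled-instances cache
    None,                         // optional on-disk cache path for compiled wasm
    config.runtime_cache_size,    // number of distinct runtimes kept in memory
);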