pruntime: Detect checkpoint breaking changes in unittest #1381

Merged — 3 commits, merged on Sep 20, 2023
Changes from all commits
2 changes: 2 additions & 0 deletions Cargo.lock

Generated file; diff not rendered by default.

1 change: 1 addition & 0 deletions crates/phactory/Cargo.toml
@@ -80,6 +80,7 @@ libc = "0.2"
environmental = "1"
once_cell = "1"
im = "15"
scale-info = { version = "2.9", default-features = false, features = ["derive"] }

[dev-dependencies]
insta = "1.7.2"
8 changes: 8 additions & 0 deletions crates/phactory/api/build.rs
@@ -23,6 +23,14 @@ fn main() {
".pruntime_rpc",
"#[derive(::serde::Serialize, ::serde::Deserialize)]",
);
for name in [
"AttestationReport",
"InitRuntimeResponse",
"Attestation",
"NetworkConfig",
] {
builder = builder.type_attribute(name, "#[derive(::scale_info::TypeInfo)]");
}
builder = builder.field_attribute("InitRuntimeResponse.attestation", "#[serde(skip,default)]");
for field in [
"GetContractInfoRequest.contracts",
10 changes: 6 additions & 4 deletions crates/phactory/api/src/storage_sync.rs
@@ -98,7 +98,7 @@ pub trait StorageSynchronizer {
fn state_validated(&self) -> bool;
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct BlockSyncState<Validator> {
validator: Validator,
main_bridge: u64,
@@ -272,9 +272,10 @@ pub struct Counters {
pub waiting_for_paraheaders: bool,
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct SolochainSynchronizer<Validator> {
sync_state: BlockSyncState<Validator>,
#[codec(skip)]
state_roots: VecDeque<Hash>,
}

@@ -344,11 +345,12 @@ impl<Validator: BlockValidator> StorageSynchronizer for SolochainSynchronizer<Va
}
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct ParachainSynchronizer<Validator> {
sync_state: BlockSyncState<Validator>,
last_relaychain_state_root: Option<Hash>,
para_header_number_next: chain::BlockNumber,
#[codec(skip)]
para_state_roots: VecDeque<Hash>,
}

@@ -473,7 +475,7 @@ impl<Validator: BlockValidator> StorageSynchronizer for ParachainSynchronizer<Va

// We create this new type to help serialize the original dyn StorageSynchronizer.
// Because it is impossible to impl Serialize/Deserialize for dyn StorageSynchronizer.
#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub enum Synchronizer<Validator> {
Solo(SolochainSynchronizer<Validator>),
Para(ParachainSynchronizer<Validator>),
4 changes: 2 additions & 2 deletions crates/phactory/src/contracts/pink.rs
@@ -37,14 +37,14 @@ pub use phala_types::contract::InkCommand;

pub(crate) mod http_counters;

#[derive(Serialize, Deserialize, Default, Clone)]
#[derive(Serialize, Deserialize, Default, Clone, ::scale_info::TypeInfo)]
pub struct ClusterConfig {
pub log_handler: Option<AccountId>,
pub runtime_version: (u32, u32),
pub secret_salt: [u8; 32],
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct Cluster {
pub id: ContractClusterId,
pub config: ClusterConfig,
9 changes: 6 additions & 3 deletions crates/phactory/src/contracts/support.rs
@@ -102,12 +102,13 @@ impl<'de> Deserialize<'de> for SidevmHandle {
}
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
struct SidevmInfo {
code: Vec<u8>,
code_hash: H256,
start_time: String,
auto_restart: bool,
#[codec(skip)]
handle: Arc<Mutex<SidevmHandle>>,
}

@@ -116,10 +117,12 @@ pub(crate) enum SidevmCode {
Code(Vec<u8>),
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct Contract {
send_mq: SignedMessageChannel,
#[codec(skip)]
cmd_rcv_mq: SecretReceiver<RawData>,
#[codec(skip)]
#[serde(with = "crate::secret_channel::ecdh_serde")]
ecdh_key: KeyPair,
cluster_id: phala_mq::ContractClusterId,
@@ -130,7 +133,7 @@ pub struct Contract {
on_block_end: Option<OnBlockEnd>,
}

#[derive(Copy, Clone, Serialize, Deserialize)]
#[derive(Copy, Clone, Serialize, Deserialize, ::scale_info::TypeInfo)]
struct OnBlockEnd {
selector: u32,
gas_limit: u64,
20 changes: 11 additions & 9 deletions crates/phactory/src/contracts/support/keeper.rs
@@ -1,15 +1,16 @@
use im::OrdMap as BTreeMap;
use pink::types::AccountId;
use serde::{Deserialize, Serialize};
use sidevm::service::Spawner;

use crate::{contracts::Contract, im_helpers::ordmap_for_each_mut};
use crate::{contracts::Contract, im_helpers::{ordmap_for_each_mut, OrdMap}};

type ContractMap = BTreeMap<AccountId, Contract>;
type ContractMap = OrdMap<AccountId, Contract>;

#[derive(Default, Serialize, Deserialize, Clone)]
#[derive(Default, Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
pub struct ContractsKeeper {
#[cfg_attr(not(test), codec(skip))]
contracts: ContractMap,
#[codec(skip)]
#[serde(skip)]
pub(crate) weight_changed: bool,
}
@@ -45,6 +46,7 @@ impl ContractsKeeper {
}

pub fn drain(&mut self) -> impl Iterator<Item = Contract> {
#[allow(clippy::iter_kv_map)]
std::mem::take(&mut self.contracts)
.into_iter()
.map(|(_, v)| v)
@@ -71,7 +73,7 @@ impl ToWeight for Contract {
}

pub(super) fn calc_cache_quotas<K: AsRef<[u8]> + Ord, C: ToWeight>(
contracts: &BTreeMap<K, C>,
contracts: &OrdMap<K, C>,
) -> impl Iterator<Item = (&[u8], usize)> {
let total_weight = contracts
.values()
@@ -97,7 +99,7 @@ mod tests {

#[test]
fn zero_quotas_works() {
let mut contracts = BTreeMap::new();
let mut contracts = OrdMap::new();
contracts.insert(b"foo", 0_u32);
contracts.insert(b"bar", 0_u32);

@@ -107,7 +109,7 @@

#[test]
fn little_quotas_works() {
let mut contracts = BTreeMap::new();
let mut contracts = OrdMap::new();
contracts.insert(b"foo", 0_u32);
contracts.insert(b"bar", 1_u32);

@@ -120,7 +122,7 @@

#[test]
fn it_wont_overflow() {
let mut contracts = BTreeMap::new();
let mut contracts = OrdMap::new();
contracts.insert(b"foo", 0_u32);
contracts.insert(b"bar", u32::MAX);
contracts.insert(b"baz", u32::MAX);
@@ -138,7 +140,7 @@

#[test]
fn fraction_works() {
let mut contracts = BTreeMap::new();
let mut contracts = OrdMap::new();
contracts.insert(b"foo", 0_u32);
contracts.insert(b"bar", 1);
contracts.insert(b"baz", u32::MAX);
7 changes: 6 additions & 1 deletion crates/phactory/src/im_helpers.rs
@@ -1,5 +1,10 @@
#[cfg(not(test))]
pub use im::OrdMap;
#[cfg(test)]
pub use std::collections::BTreeMap as OrdMap;

pub fn ordmap_for_each_mut<K: Ord + Clone, V: Clone>(
map: &mut im::OrdMap<K, V>,
map: &mut OrdMap<K, V>,
mut f: impl FnMut((&K, &mut V)),
) {
let snapshot = map.clone();
58 changes: 40 additions & 18 deletions crates/phactory/src/lib.rs
@@ -30,6 +30,7 @@ use core::convert::TryInto;
use parity_scale_codec::{Decode, Encode};
use phala_types::{AttestationProvider, HandoverChallenge};
use ring::rand::SecureRandom;
use scale_info::TypeInfo;
use serde_json::{json, Value};
use sp_core::{crypto::Pair, sr25519, H256};

@@ -79,17 +80,21 @@ mod types;
// runtime definition locally.
type RuntimeHasher = <chain::Runtime as frame_system::Config>::Hashing;

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, ::scale_info::TypeInfo)]
struct RuntimeState {
#[codec(skip)]
send_mq: MessageSendQueue,

#[serde(skip)]
#[codec(skip)]
recv_mq: MessageDispatcher,

// chain storage synchronizing
#[cfg_attr(not(test), codec(skip))]
storage_synchronizer: Synchronizer<LightValidation<chain::Runtime>>,

// TODO.kevin: use a better serialization approach
#[codec(skip)]
chain_storage: ChainStorage,

#[serde(with = "more::scale_bytes")]
@@ -218,11 +223,12 @@ enum RuntimeDataSeal {
V1(PersistentRuntimeData),
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, TypeInfo)]
#[serde(bound(deserialize = "Platform: Deserialize<'de>"))]
pub struct Phactory<Platform> {
platform: Platform,
#[serde(skip)]
#[codec(skip)]
pub args: Arc<InitArgs>,
dev_mode: bool,
attestation_provider: Option<AttestationProvider>,
@@ -231,47 +237,70 @@ pub struct Phactory<Platform> {
runtime_state: Option<RuntimeState>,
endpoints: BTreeMap<EndpointType, String>,
#[serde(skip)]
#[codec(skip)]
signed_endpoints: Option<GetEndpointResponse>,
// The deserialization of system requires the mq, which is inside the runtime_state, to be ready.
#[serde(skip)]
system: Option<system::System<Platform>>,

// tmp key for WorkerKey handover encryption
#[codec(skip)]
#[serde(skip)]
pub(crate) handover_ecdh_key: Option<EcdhKey>,

#[codec(skip)]
#[serde(skip)]
handover_last_challenge: Option<HandoverChallenge<chain::BlockNumber>>,

#[codec(skip)]
#[serde(skip)]
#[serde(default = "Instant::now")]
last_checkpoint: Instant,

#[codec(skip)]
#[serde(skip)]
query_scheduler: RequestScheduler<AccountId>,

#[serde(default)]
netconfig: Option<NetworkConfig>,

#[codec(skip)]
#[serde(skip)]
can_load_chain_state: bool,

#[codec(skip)]
#[serde(skip)]
trusted_sk: bool,

#[codec(skip)]
#[serde(skip)]
pub(crate) rcu_dispatching: bool,

#[codec(skip)]
#[serde(skip)]
pub(crate) pending_effects: Vec<::pink::types::ExecSideEffects>,

#[codec(skip)]
#[serde(skip)]
#[serde(default = "Instant::now")]
started_at: Instant,

#[codec(skip)]
#[serde(skip)]
pub(crate) cluster_state_to_apply: Option<ClusterState<'static>>,
}

#[test]
fn show_type_changes_that_affect_the_checkpoint() {
fn travel_types<T: TypeInfo>() -> String {
use scale_info::{IntoPortable, PortableRegistry};
let mut registry = Default::default();
let _ = T::type_info().into_portable(&mut registry);
serde_json::to_string_pretty(&PortableRegistry::from(registry).types).unwrap()
}
insta::assert_display_snapshot!(travel_types::<Phactory<()>>());
}

#[derive(Serialize, Deserialize, Clone)]
struct ClusterState<'a> {
block_number: BlockNumber,
@@ -532,7 +561,7 @@ impl<Platform: pal::Platform + Serialize + DeserializeOwned> Phactory<Platform>
let file = File::create(&checkpoint_file).context("Failed to create checkpoint file")?;
self.take_checkpoint_to_writer(&key, file)
.context("Take checkpoint to writer failed")?;
info!("Checkpoint saved to {}", checkpoint_file);
info!("Checkpoint saved to {checkpoint_file}");
self.last_checkpoint = Instant::now();
remove_outdated_checkpoints(
&self.args.storage_path,
@@ -589,40 +618,33 @@ impl<Platform: pal::Platform + Serialize + DeserializeOwned> Phactory<Platform>
Ok(file) => file,
Err(err) if matches!(err.kind(), ErrorKind::NotFound) => {
// This should never happen unless it was removed just after the glob.
anyhow::bail!("Checkpoint file {:?} is not found", ckpt_filename);
anyhow::bail!("Checkpoint file {ckpt_filename:?} is not found");
}
Err(err) => {
error!(
"Failed to open checkpoint file {:?}: {:?}",
ckpt_filename, err
);
error!("Failed to open checkpoint file {ckpt_filename:?}: {err:?}",);
if args.remove_corrupted_checkpoint {
error!("Removing {:?}", ckpt_filename);
error!("Removing {ckpt_filename:?}");
std::fs::remove_file(ckpt_filename)
.context("Failed to remove corrupted checkpoint file")?;
}
anyhow::bail!(
"Failed to open checkpoint file {:?}: {:?}",
ckpt_filename,
err
);
anyhow::bail!("Failed to open checkpoint file {ckpt_filename:?}: {err:?}");
}
};

info!("Loading checkpoint from file {:?}", ckpt_filename);
info!("Loading checkpoint from file {ckpt_filename:?}");
match Self::restore_from_checkpoint_reader(&runtime_data.sk, file, args) {
Ok(state) => {
info!("Succeeded to load checkpoint file {:?}", ckpt_filename);
info!("Succeeded to load checkpoint file {ckpt_filename:?}");
Ok(Some(state))
}
Err(_err /*Don't leak it into the log*/) => {
error!("Failed to load checkpoint file {:?}", ckpt_filename);
error!("Failed to load checkpoint file {ckpt_filename:?}");
if args.remove_corrupted_checkpoint {
error!("Removing {:?}", ckpt_filename);
std::fs::remove_file(ckpt_filename)
.context("Failed to remove corrupted checkpoint file")?;
}
anyhow::bail!("Failed to load checkpoint file {:?}", ckpt_filename);
anyhow::bail!("Failed to load checkpoint file {ckpt_filename:?}");
}
}
}
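For reference, here is a minimal, self-contained sketch of the detection technique the new `show_type_changes_that_affect_the_checkpoint` test relies on: derive `scale_info::TypeInfo` on the checkpointed types, dump their portable type registry as JSON, and snapshot that dump with `insta`, so any field or type change that could break checkpoint compatibility shows up as a snapshot diff. The `CheckpointedState` type and the test name below are illustrative stand-ins, not part of this PR; the sketch assumes `scale-info` (with the `derive` and `serde` features), `serde_json`, and `insta` as dependencies.

```rust
use scale_info::{IntoPortable, PortableRegistry, Registry, TypeInfo};

// Illustrative stand-in for a checkpointed state struct; not from the PR.
#[derive(TypeInfo)]
struct CheckpointedState {
    block_number: u32,
    payload: Vec<u8>,
}

// Register T's type description (and everything it references) into a portable
// registry, then dump it as pretty JSON so it can be snapshotted and diffed.
fn registry_json<T: TypeInfo>() -> String {
    let mut registry = Registry::default();
    let _ = T::type_info().into_portable(&mut registry);
    serde_json::to_string_pretty(&PortableRegistry::from(registry).types).unwrap()
}

#[test]
fn checkpoint_layout_is_stable() {
    // If a field is added, removed, or retyped, this snapshot changes and the
    // test fails until the change is reviewed (e.g. with `cargo insta review`).
    insta::assert_snapshot!(registry_json::<CheckpointedState>());
}
```

In the PR itself, the same pattern is applied to `Phactory<()>` via `insta::assert_display_snapshot!`, with fields that are not part of the persisted checkpoint excluded from the registry by `#[codec(skip)]`.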