diff --git a/CHANGELOG.md b/CHANGELOG.md index 97a3f82d467..20588ac599a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,13 @@ ## [unreleased] ### Protocol Changes +* Congestion Control [NEP-0539](https://github.com/near/NEPs/pull/539) +* Stateless Validation [NEP-0509](https://github.com/near/NEPs/pull/509) ### Non-protocol Changes +* Enforce rate limits on received network messages [#11617](https://github.com/near/nearcore/issues/11617). Rate limits are configured by default, but they can be overridden through the experimental configuration option `received_messages_rate_limits`. + ## 1.40.0 ### Protocol Changes diff --git a/Cargo.lock b/Cargo.lock index 36cd02372ec..aeef7cc92d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2652,7 +2652,6 @@ dependencies = [ "borsh 1.0.0", "clap", "indicatif", - "near-async", "near-chain", "near-chain-configs", "near-crypto", @@ -2660,6 +2659,7 @@ dependencies = [ "near-primitives", "near-store", "near-test-contracts", + "near-time", "near-vm-runner", "nearcore", "node-runtime", @@ -3188,6 +3188,7 @@ dependencies = [ "primitive-types 0.10.1", "rand", "reed-solomon-erasure", + "regex", "rlp", "serde", "serde_json", @@ -3727,7 +3728,6 @@ dependencies = [ "clap", "futures", "near-actix-test-utils", - "near-async", "near-chain", "near-chain-configs", "near-chunks", @@ -3742,6 +3742,7 @@ dependencies = [ "near-primitives", "near-store", "near-telemetry", + "near-time", "nearcore", "pin-project", "rand", @@ -3803,11 +3804,11 @@ dependencies = [ "anyhow", "borsh 1.0.0", "clap", - "near-async", "near-chain-configs", "near-crypto", "near-primitives", "near-primitives-core", + "near-time", "num-rational 0.3.2", "serde", "serde_json", @@ -3911,12 +3912,12 @@ dependencies = [ "bytesize", "chrono", "derive_more", - "near-async", "near-config-utils", "near-crypto", "near-o11y", "near-parameters", "near-primitives", + "near-time", "num-rational 0.3.2", "once_cell", "serde", @@ -3931,9 +3932,9 @@ dependencies = [ name = "near-chain-primitives" version = "0.0.0" dependencies = [ - "near-async", "near-crypto", "near-primitives", + "near-time", "thiserror", "time", "tracing", @@ -4046,12 +4047,12 @@ version = "0.0.0" dependencies = [ "actix", "chrono", - "near-async", "near-chain-configs", "near-chain-primitives", "near-chunks-primitives", "near-crypto", "near-primitives", + "near-time", "serde", "serde_json", "strum", @@ -4127,11 +4128,11 @@ name = "near-dyn-configs" version = "0.0.0" dependencies = [ "anyhow", - "near-async", "near-chain-configs", "near-crypto", "near-o11y", "near-primitives", + "near-time", "once_cell", "prometheus", "serde", @@ -4328,11 +4329,11 @@ dependencies = [ "arbitrary", "awc", "libfuzzer-sys", - "near-async", "near-jsonrpc", "near-jsonrpc-primitives", "near-jsonrpc-tests", "near-primitives", + "near-time", "once_cell", "serde", "serde_json", @@ -4374,6 +4375,7 @@ dependencies = [ "near-o11y", "near-primitives", "near-store", + "near-time", "once_cell", "serde", "serde_json", @@ -4449,6 +4451,7 @@ dependencies = [ "criterion", "crossbeam-channel", "derive_more", + "enum-map", "futures", "futures-util", "im", @@ -4476,6 +4479,7 @@ dependencies = [ "reed-solomon-erasure", "rlimit", "serde", + "serde_json", "sha2 0.10.6", "smart-default", "strum", @@ -4572,11 +4576,11 @@ dependencies = [ "anyhow", "chrono", "clap", - "near-async", "near-jsonrpc", "near-network", "near-o11y", "near-primitives", + "near-time", "once_cell", "prometheus", "tokio", @@ -4681,7 +4685,6 @@ dependencies = [ "insta", "near-account-id", "near-actix-test-utils", -
"near-async", "near-chain-configs", "near-client", "near-client-primitives", @@ -4690,6 +4693,7 @@ dependencies = [ "near-o11y", "near-parameters", "near-primitives", + "near-time", "node-runtime", "paperclip", "serde", @@ -4727,12 +4731,12 @@ dependencies = [ "anyhow", "chrono", "clap", - "near-async", "near-jsonrpc", "near-network", "near-o11y", "near-ping", "near-primitives", + "near-time", "once_cell", "sha2 0.10.6", "time", @@ -4787,7 +4791,6 @@ dependencies = [ "itertools", "itoa", "lru 0.12.3", - "near-async", "near-chain", "near-chain-configs", "near-chunks", @@ -4797,6 +4800,7 @@ dependencies = [ "near-parameters", "near-primitives", "near-stdx", + "near-time", "near-vm-runner", "num_cpus", "once_cell", @@ -7300,13 +7304,13 @@ version = "0.0.0" dependencies = [ "borsh 1.0.0", "clap", - "near-async", "near-chain", "near-chain-configs", "near-chain-primitives", "near-epoch-manager", "near-primitives", "near-store", + "near-time", "nearcore", "serde", "serde_json", @@ -7349,7 +7353,6 @@ dependencies = [ "cloud-storage", "insta", "itertools", - "near-async", "near-chain", "near-chain-configs", "near-client", @@ -7361,6 +7364,7 @@ dependencies = [ "near-primitives-core", "near-store", "near-test-contracts", + "near-time", "nearcore", "node-runtime", "once_cell", diff --git a/Cargo.toml b/Cargo.toml index b810771b09f..906d4ebaa6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -150,6 +150,7 @@ cargo_metadata = "0.14.1" cc = "1.0" cfg-if = "1.0" chrono = { version = "0.4", default-features = false, features = [ + "clock", "alloc", "serde", ] } diff --git a/chain/chain-primitives/Cargo.toml b/chain/chain-primitives/Cargo.toml index 14f92ce6309..d37b76b74de 100644 --- a/chain/chain-primitives/Cargo.toml +++ b/chain/chain-primitives/Cargo.toml @@ -16,7 +16,7 @@ thiserror.workspace = true time.workspace = true tracing.workspace = true -near-async.workspace = true +near-time.workspace = true near-primitives.workspace = true near-crypto.workspace = true diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index a201710170e..6458b1b1b58 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -1,10 +1,10 @@ -use near_async::time::Utc; use near_primitives::block::BlockValidityError; use near_primitives::challenge::{ChunkProofs, ChunkState}; use near_primitives::errors::{EpochError, StorageError}; use near_primitives::shard_layout::ShardLayoutError; use near_primitives::sharding::{ChunkHash, ShardChunkHeader}; use near_primitives::types::{BlockHeight, EpochId, ShardId}; +use near_time::Utc; use std::io; #[derive(thiserror::Error, Debug)] diff --git a/chain/chain/Cargo.toml b/chain/chain/Cargo.toml index 82887bc6923..4c61c39e19b 100644 --- a/chain/chain/Cargo.toml +++ b/chain/chain/Cargo.toml @@ -118,3 +118,4 @@ statelessnet_protocol = [ "near-primitives/statelessnet_protocol", ] sandbox = ["near-o11y/sandbox", "near-primitives/sandbox"] +testloop = [] diff --git a/chain/chain/src/block_processing_utils.rs b/chain/chain/src/block_processing_utils.rs index 988486beac9..cc96b3d171b 100644 --- a/chain/chain/src/block_processing_utils.rs +++ b/chain/chain/src/block_processing_utils.rs @@ -8,9 +8,9 @@ use near_primitives::challenge::{ChallengeBody, ChallengesResult}; use near_primitives::hash::CryptoHash; use near_primitives::sharding::{ReceiptProof, ShardChunkHeader, StateSyncInfo}; use near_primitives::types::ShardId; -use once_cell::sync::OnceCell; use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, Condvar, 
Mutex}; +use std::time::Duration; /// Max number of blocks that can be in the pool at once. /// This number will likely never be hit unless there are many forks in the chain. @@ -24,10 +24,8 @@ pub(crate) struct BlockPreprocessInfo { pub(crate) challenges_result: ChallengesResult, pub(crate) challenged_blocks: Vec<CryptoHash>, pub(crate) provenance: Provenance, - /// This field will be set when the apply_chunks has finished. - /// This is used to provide a way for caller to wait for the finishing of applying chunks of - /// a block - pub(crate) apply_chunks_done: Arc<OnceCell<()>>, + /// Used to get notified when applying the chunks of a block finishes. + pub(crate) apply_chunks_done_tracker: ApplyChunksDoneTracker, /// This is used to calculate block processing time metric pub(crate) block_start_processing_time: Instant, } @@ -129,7 +127,7 @@ impl BlocksInProcessing { /// Returns true if new blocks are done applying chunks pub(crate) fn wait_for_all_blocks(&self) -> bool { for (_, (_, block_preprocess_info)) in self.preprocessed_blocks.iter() { - let _ = block_preprocess_info.apply_chunks_done.wait(); + let _ = block_preprocess_info.apply_chunks_done_tracker.wait_until_done(); } !self.preprocessed_blocks.is_empty() } @@ -144,8 +142,125 @@ impl BlocksInProcessing { .get(block_hash) .ok_or(BlockNotInPoolError)? .1 - .apply_chunks_done - .wait(); + .apply_chunks_done_tracker + .wait_until_done(); Ok(()) } } + +/// This is used by the thread that applies chunks to notify other waiter threads. +/// The thread applying the chunks should call `set_done` to send the notification. +/// The waiter threads should call `wait_until_done` to block until the notification arrives. +#[derive(Clone)] +pub struct ApplyChunksDoneTracker(Arc<(Mutex<bool>, Condvar)>); + +impl ApplyChunksDoneTracker { + pub fn new() -> Self { + Self(Arc::new((Mutex::new(false), Condvar::new()))) + } + + /// Notifies all threads waiting on `wait_until_done` that applying chunks is done. + /// This should be called only once. + /// Returns an error if it is called more than once or the mutex used internally is poisoned. + pub fn set_done(&mut self) -> Result<(), &'static str> { + let (lock, cvar) = &*self.0; + match lock.lock() { + Ok(mut guard) => { + if *guard { + Err("Apply chunks done marker is already set to true.") + } else { + *guard = true; + cvar.notify_all(); + Ok(()) + } + } + Err(_poisoned) => Err("Mutex is poisoned."), + } + } + + /// Blocks the current thread until `set_done` is called to indicate that applying chunks is done. + pub fn wait_until_done(&self) { + #[cfg(feature = "testloop")] + let mut testloop_total_wait_time = Duration::from_millis(0); + + let (lock, cvar) = &*self.0; + match lock.lock() { + Ok(mut guard) => loop { + let done = *guard; + if done { + break; + } + const WAIT_TIMEOUT: Duration = Duration::from_millis(100); + match cvar.wait_timeout(guard, WAIT_TIMEOUT) { + Ok(result) => { + guard = result.0; + + // Panics during testing (e.g. due to assertion failures) cause the waiter + // threads to miss the notification (see issue #11447). Thus, for testing only, + // we limit the total wait time for waiting for the notification.
+ #[cfg(feature = "testloop")] + if result.1.timed_out() { + const TESTLOOP_MAX_WAIT_TIME: Duration = Duration::from_millis(5000); + testloop_total_wait_time += WAIT_TIMEOUT; + if testloop_total_wait_time >= TESTLOOP_MAX_WAIT_TIME { + break; + } + } + } + Err(_poisoned) => { + tracing::error!("Mutex is poisoned."); + break; + } + } + }, + Err(_poisoned) => { + tracing::error!("Mutex is poisoned."); + () + } + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + use std::time::Duration; + + use super::ApplyChunksDoneTracker; + + #[test] + fn test_apply_chunks_with_multiple_waiters() { + let shared_value: Arc<AtomicBool> = Arc::new(AtomicBool::new(false)); + + let mut tracker = ApplyChunksDoneTracker::new(); + let waiter1 = tracker.clone(); + let waiter2 = tracker.clone(); + let waiter3 = tracker.clone(); + + let (results_sender, results_receiver) = std::sync::mpsc::channel(); + + // Spawn waiter threads + for waiter in [waiter1, waiter2, waiter3] { + let current_sender = results_sender.clone(); + let current_shared_value = shared_value.clone(); + std::thread::spawn(move || { + waiter.wait_until_done(); + let read_value = current_shared_value.load(Ordering::Relaxed); + current_sender.send(read_value).unwrap(); + }); + } + + // Wait 300ms then set the shared_value to true, and notify the waiters. + std::thread::sleep(Duration::from_millis(300)); + shared_value.store(true, Ordering::Relaxed); + tracker.set_done().unwrap(); + + // Check values that waiters read + for _ in 0..3 { + let waiter_value = results_receiver.recv().unwrap(); + assert_eq!(waiter_value, true); + } + } +} diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index d55ff462ac9..f1a405e3d27 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1,5 +1,5 @@ use crate::block_processing_utils::{ - BlockPreprocessInfo, BlockProcessingArtifact, BlocksInProcessing, + ApplyChunksDoneTracker, BlockPreprocessInfo, BlockProcessingArtifact, BlocksInProcessing, }; use crate::blocks_delay_tracker::BlocksDelayTracker; use crate::chain_update::ChainUpdate; @@ -100,7 +100,6 @@ use near_store::flat::{store_helper, FlatStorageReadyStatus, FlatStorageStatus}; use near_store::get_genesis_state_roots; use near_store::DBCol; use node_runtime::bootstrap_congestion_info; -use once_cell::sync::OnceCell; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use std::collections::{BTreeMap, HashMap, HashSet}; use std::fmt::{Debug, Formatter}; @@ -1796,7 +1795,7 @@ impl Chain { let block = block.into_inner(); let block_hash = *block.hash(); let block_height = block.header().height(); - let apply_chunks_done_marker = block_preprocess_info.apply_chunks_done.clone(); + let apply_chunks_done_tracker = block_preprocess_info.apply_chunks_done_tracker.clone(); self.blocks_in_processing.add(block, block_preprocess_info)?; // 3) schedule apply chunks, which will be executed in the rayon thread pool.
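[Review note] The `ApplyChunksDoneTracker` introduced above swaps the old `Arc<OnceCell<()>>` marker for a `(Mutex<bool>, Condvar)` pair, so that waiters can use timed waits and, under the `testloop` feature, give up after a bounded delay. A minimal, self-contained sketch of the same notify/wait pattern (illustrative names only, not part of this patch):

use std::sync::{Arc, Condvar, Mutex};
use std::time::Duration;

fn condvar_done_pattern() {
    let tracker = Arc::new((Mutex::new(false), Condvar::new()));
    let waiter = Arc::clone(&tracker);
    let handle = std::thread::spawn(move || {
        let (lock, cvar) = &*waiter;
        let mut done = lock.lock().unwrap();
        while !*done {
            // A timed wait returns periodically even without a notification,
            // which is what enables the bounded-wait escape hatch in tests.
            let (guard, _timed_out) =
                cvar.wait_timeout(done, Duration::from_millis(100)).unwrap();
            done = guard;
        }
    });
    // The thread that applied the chunks: set the flag under the lock, then wake all waiters.
    let (lock, cvar) = &*tracker;
    *lock.lock().unwrap() = true;
    cvar.notify_all();
    handle.join().unwrap();
}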
@@ -1804,7 +1803,7 @@ block_hash, block_height, apply_chunk_work, - apply_chunks_done_marker, + apply_chunks_done_tracker, apply_chunks_done_sender, ); @@ -1812,14 +1811,14 @@ } /// Applying chunks async by starting the work at the rayon thread pool - /// `apply_chunks_done_marker`: a marker that will be set to true once applying chunks is finished + /// `apply_chunks_done_tracker`: notifies the threads that wait until applying chunks is finished /// `apply_chunks_done_sender`: a sender to send an ApplyChunksDoneMessage message once applying chunks is finished fn schedule_apply_chunks( &self, block_hash: CryptoHash, block_height: BlockHeight, work: Vec, - apply_chunks_done_marker: Arc<OnceCell<()>>, + mut apply_chunks_done_tracker: ApplyChunksDoneTracker, apply_chunks_done_sender: Option>, ) { let sc = self.apply_chunks_sender.clone(); @@ -1829,7 +1828,7 @@ // If we encounter error here, that means the receiver is deallocated and the client // thread is already shut down. The node is already crashed, so we can unwrap here sc.send((block_hash, res)).unwrap(); - if let Err(_) = apply_chunks_done_marker.set(()) { + if let Err(_) = apply_chunks_done_tracker.set_done() { // This should never happen, if it does, it means there is a bug in our code. log_assert!(false, "apply chunks are called twice for block {block_hash:?}"); } @@ -1853,7 +1852,6 @@ self.should_produce_state_witness_for_this_or_next_epoch(me, block.header())?; let mut chain_update = self.chain_update(); let new_head = chain_update.postprocess_block( - me, &block, block_preprocess_info, apply_results, @@ -2249,7 +2247,7 @@ challenges_result, challenged_blocks, provenance: provenance.clone(), - apply_chunks_done: Arc::new(OnceCell::new()), + apply_chunks_done_tracker: ApplyChunksDoneTracker::new(), block_start_processing_time: block_received_time, }, )) @@ -4033,7 +4031,6 @@ ChainUpdate::new( &mut self.chain_store, self.epoch_manager.clone(), - self.shard_tracker.clone(), self.runtime_adapter.clone(), self.doomslug_threshold_mode, self.transaction_validity_period, @@ -4326,12 +4323,6 @@ self.chain_store.get_chunk_extra(block_hash, shard_uid) } - /// Get destination shard id for a given receipt id. - #[inline] - pub fn get_shard_id_for_receipt_id(&self, receipt_id: &CryptoHash) -> Result<ShardId, Error> { - self.chain_store.get_shard_id_for_receipt_id(receipt_id) - } - /// Get next block hash for which there is a new chunk for the shard.
/// If sharding changes before we can find a block with a new chunk for the shard, /// find the first block that contains a new chunk for any of the shards that split from the diff --git a/chain/chain/src/chain_update.rs b/chain/chain/src/chain_update.rs index 0291b6b64c8..c83cda618e9 100644 --- a/chain/chain/src/chain_update.rs +++ b/chain/chain/src/chain_update.rs @@ -11,7 +11,6 @@ use crate::update_shard::{NewChunkResult, OldChunkResult, ReshardingResult, Shar use crate::{metrics, DoomslugThresholdMode}; use crate::{Chain, Doomslug}; use near_chain_primitives::error::Error; -use near_epoch_manager::shard_tracker::ShardTracker; use near_epoch_manager::types::BlockHeaderInfo; use near_epoch_manager::EpochManagerAdapter; use near_primitives::apply::ApplyChunkReason; @@ -20,13 +19,11 @@ use near_primitives::block_header::BlockHeader; #[cfg(feature = "new_epoch_sync")] use near_primitives::epoch_manager::{block_info::BlockInfo, epoch_sync::EpochSyncInfo}; use near_primitives::hash::CryptoHash; -use near_primitives::shard_layout::{account_id_to_shard_id, account_id_to_shard_uid, ShardUId}; +use near_primitives::shard_layout::{account_id_to_shard_uid, ShardUId}; use near_primitives::sharding::ShardChunk; use near_primitives::state_sync::{ReceiptProofResponse, ShardStateSyncResponseHeader}; use near_primitives::types::chunk_extra::ChunkExtra; -use near_primitives::types::{ - AccountId, BlockExtra, BlockHeight, BlockHeightDelta, NumShards, ShardId, -}; +use near_primitives::types::{BlockExtra, BlockHeight, BlockHeightDelta, NumShards, ShardId}; use near_primitives::version::ProtocolFeature; use near_primitives::views::LightClientBlockView; use std::collections::HashMap; @@ -41,7 +38,6 @@ use tracing::{debug, info, warn}; /// Safe to stop process mid way (Ctrl+C or crash). pub struct ChainUpdate<'a> { epoch_manager: Arc, - shard_tracker: ShardTracker, runtime_adapter: Arc, chain_store_update: ChainStoreUpdate<'a>, doomslug_threshold_mode: DoomslugThresholdMode, @@ -53,7 +49,6 @@ impl<'a> ChainUpdate<'a> { pub fn new( chain_store: &'a mut ChainStore, epoch_manager: Arc, - shard_tracker: ShardTracker, runtime_adapter: Arc, doomslug_threshold_mode: DoomslugThresholdMode, transaction_validity_period: BlockHeightDelta, @@ -61,7 +56,6 @@ impl<'a> ChainUpdate<'a> { let chain_store_update: ChainStoreUpdate<'_> = chain_store.store_update(); Self::new_impl( epoch_manager, - shard_tracker, runtime_adapter, doomslug_threshold_mode, transaction_validity_period, @@ -71,7 +65,6 @@ impl<'a> ChainUpdate<'a> { fn new_impl( epoch_manager: Arc, - shard_tracker: ShardTracker, runtime_adapter: Arc, doomslug_threshold_mode: DoomslugThresholdMode, transaction_validity_period: BlockHeightDelta, @@ -79,7 +72,6 @@ impl<'a> ChainUpdate<'a> { ) -> Self { ChainUpdate { epoch_manager, - shard_tracker, runtime_adapter, chain_store_update, doomslug_threshold_mode, @@ -92,60 +84,6 @@ impl<'a> ChainUpdate<'a> { self.chain_store_update.commit() } - /// For all the outgoing receipts generated in block `hash` at the shards we - /// are tracking in this epoch, save a mapping from receipt ids to the - /// destination shard ids that the receipt will be sent to in the next - /// block. - /// - /// Note that this function should be called after `save_block` is called on - /// this block because it requires that the block info is available in - /// EpochManager, otherwise it will return an error. 
- fn save_receipt_id_to_shard_id_for_block( - &mut self, - account_id: Option<&AccountId>, - hash: &CryptoHash, - prev_hash: &CryptoHash, - shard_ids: &[ShardId], - ) -> Result<(), Error> { - let mut list = vec![]; - for &shard_id in shard_ids { - if self.shard_tracker.care_about_shard(account_id, prev_hash, shard_id, true) { - list.push(self.get_receipt_id_to_shard_id(hash, shard_id)?); - } - } - for map in list { - for (receipt_id, shard_id) in map { - self.chain_store_update.save_receipt_id_to_shard_id(receipt_id, shard_id); - } - } - Ok(()) - } - - /// Returns a mapping from the receipt id to the destination shard id. - fn get_receipt_id_to_shard_id( - &mut self, - hash: &CryptoHash, - shard_id: u64, - ) -> Result, Error> { - let outgoing_receipts = self.chain_store_update.get_outgoing_receipts(hash, shard_id); - let outgoing_receipts = if let Ok(outgoing_receipts) = outgoing_receipts { - outgoing_receipts - } else { - return Ok(HashMap::new()); - }; - let shard_layout = self.epoch_manager.get_shard_layout_from_prev_block(hash)?; - let outgoing_receipts = outgoing_receipts - .iter() - .map(|receipt| { - ( - *receipt.receipt_id(), - account_id_to_shard_id(receipt.receiver_id(), &shard_layout), - ) - }) - .collect(); - Ok(outgoing_receipts) - } - pub(crate) fn apply_chunk_postprocessing( &mut self, block: &Block, @@ -456,13 +394,11 @@ impl<'a> ChainUpdate<'a> { )] pub(crate) fn postprocess_block( &mut self, - me: &Option, block: &Block, block_preprocess_info: BlockPreprocessInfo, apply_chunks_results: Vec<(ShardId, Result)>, should_save_state_transition_data: bool, ) -> Result, Error> { - let shard_ids = self.epoch_manager.shard_ids(block.header().epoch_id())?; let prev_hash = block.header().prev_hash(); let results = apply_chunks_results.into_iter().map(|(shard_id, x)| { if let Err(err) = &x { @@ -528,14 +464,6 @@ impl<'a> ChainUpdate<'a> { self.chain_store_update.save_block(block.clone()); self.chain_store_update.inc_block_refcount(prev_hash)?; - // Save receipt_id_to_shard_id for all outgoing receipts generated in this block - self.save_receipt_id_to_shard_id_for_block( - me.as_ref(), - block.hash(), - prev_hash, - &shard_ids, - )?; - // Update the chain head if it's the new tip let res = self.update_head(block.header())?; @@ -774,7 +702,7 @@ impl<'a> ChainUpdate<'a> { // TODO(nikurt): Determine the value correctly. 
let is_first_block_with_chunk_of_version = false; - let prev_block = self.chain_store_update.get_block(block_header.prev_hash())?; + let block = self.chain_store_update.get_block(block_header.hash())?; let apply_result = self.runtime_adapter.apply_chunk( RuntimeStorageConfig::new(chunk_header.prev_state_root(), true), @@ -794,7 +722,7 @@ impl<'a> ChainUpdate<'a> { gas_price, challenges_result: block_header.challenges_result().clone(), random_seed: *block_header.random_value(), - congestion_info: prev_block.block_congestion_info(), + congestion_info: block.block_congestion_info(), }, &receipts, chunk.transactions(), @@ -879,8 +807,9 @@ impl<'a> ChainUpdate<'a> { // Don't continue return Ok(false); } + let block = self.chain_store_update.get_block(block_header.hash())?; + let prev_hash = block_header.prev_hash(); - let prev_block = self.chain_store_update.get_block(prev_hash)?; let prev_block_header = self.chain_store_update.get_block_header(prev_hash)?; let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, block_header.epoch_id())?; @@ -899,7 +828,7 @@ impl<'a> ChainUpdate<'a> { ApplyChunkBlockContext::from_header( &block_header, prev_block_header.next_gas_price(), - prev_block.block_congestion_info(), + block.block_congestion_info(), ), &[], &[], diff --git a/chain/chain/src/garbage_collection.rs b/chain/chain/src/garbage_collection.rs index b03042272da..acefa3d7f52 100644 --- a/chain/chain/src/garbage_collection.rs +++ b/chain/chain/src/garbage_collection.rs @@ -876,29 +876,6 @@ impl<'a> ChainStoreUpdate<'a> { fn gc_outgoing_receipts(&mut self, block_hash: &CryptoHash, shard_id: ShardId) { let mut store_update = self.store().store_update(); - match self.get_outgoing_receipts(block_hash, shard_id).map(|receipts| { - receipts.iter().map(|receipt| *receipt.receipt_id()).collect::>() - }) { - Ok(receipt_ids) => { - for receipt_id in receipt_ids { - let key: Vec = receipt_id.into(); - store_update.decrement_refcount(DBCol::ReceiptIdToShardId, &key); - self.chain_store().receipt_id_to_shard_id.pop(&key); - } - } - Err(error) => { - match error { - Error::DBNotFoundErr(_) => { - // Sometimes we don't save outgoing receipts. See the usages of save_outgoing_receipt. - // The invariant is that DBCol::OutgoingReceipts has same receipts as DBCol::ReceiptIdToShardId. 
- } - _ => { - tracing::error!(target: "chain", "Error getting outgoing receipts for block {}, shard {}: {:?}", block_hash, shard_id, error); - } - } - } - } - let key = get_block_shard_id(block_hash, shard_id); store_update.delete(DBCol::OutgoingReceipts, &key); self.chain_store().outgoing_receipts.pop(&key); @@ -930,7 +907,7 @@ impl<'a> ChainStoreUpdate<'a> { let mut store_update = self.store().store_update(); match col { DBCol::OutgoingReceipts => { - panic!("Must use gc_outgoing_receipts"); + panic!("Outgoing receipts must be garbage collected by calling gc_outgoing_receipts"); } DBCol::IncomingReceipts => { store_update.delete(col, key); @@ -973,9 +950,6 @@ impl<'a> ChainStoreUpdate<'a> { store_update.delete(col, key); self.chain_store().block_refcounts.pop(key); } - DBCol::ReceiptIdToShardId => { - panic!("Must use gc_outgoing_receipts"); - } DBCol::Transactions => { store_update.decrement_refcount(col, key); self.chain_store().transactions.pop(key); @@ -1072,6 +1046,7 @@ impl<'a> ChainStoreUpdate<'a> { | DBCol::FlatStateDeltaMetadata | DBCol::FlatStorageStatus | DBCol::Misc + | DBCol::_ReceiptIdToShardId => unreachable!(), #[cfg(feature = "new_epoch_sync")] DBCol::EpochSyncInfo => unreachable!(), diff --git a/chain/chain/src/store/mod.rs b/chain/chain/src/store/mod.rs index 24a9d2b068f..2c7d3768304 100644 --- a/chain/chain/src/store/mod.rs +++ b/chain/chain/src/store/mod.rs @@ -303,9 +303,6 @@ pub trait ChainStoreAccess { chunk_hash: &ChunkHash, ) -> Result>, Error>; - /// Get destination shard id for receipt id. - fn get_shard_id_for_receipt_id(&self, receipt_id: &CryptoHash) -> Result; - fn get_transaction( &self, tx_hash: &CryptoHash, @@ -437,8 +434,6 @@ pub struct ChainStore { pub(crate) incoming_receipts: CellLruCache, Arc>>, /// Invalid chunks. 
pub(crate) invalid_chunks: CellLruCache, Arc>, - /// Mapping from receipt id to destination shard id - pub(crate) receipt_id_to_shard_id: CellLruCache, ShardId>, /// Transactions pub(crate) transactions: CellLruCache, Arc>, /// Receipts @@ -491,7 +486,6 @@ impl ChainStore { outgoing_receipts: CellLruCache::new(CACHE_SIZE), incoming_receipts: CellLruCache::new(CACHE_SIZE), invalid_chunks: CellLruCache::new(CACHE_SIZE), - receipt_id_to_shard_id: CellLruCache::new(CHUNK_CACHE_SIZE), transactions: CellLruCache::new(CHUNK_CACHE_SIZE), receipts: CellLruCache::new(CHUNK_CACHE_SIZE), block_merkle_tree: CellLruCache::new(CACHE_SIZE), @@ -1340,17 +1334,6 @@ impl ChainStoreAccess for ChainStore { .map_err(|err| err.into()) } - fn get_shard_id_for_receipt_id(&self, receipt_id: &CryptoHash) -> Result { - option_to_not_found( - self.read_with_cache( - DBCol::ReceiptIdToShardId, - &self.receipt_id_to_shard_id, - receipt_id.as_ref(), - ), - format_args!("RECEIPT ID: {}", receipt_id), - ) - } - fn get_transaction( &self, tx_hash: &CryptoHash, @@ -1422,7 +1405,6 @@ pub(crate) struct ChainStoreCacheUpdate { outcomes: HashMap<(CryptoHash, CryptoHash), ExecutionOutcomeWithProof>, outcome_ids: HashMap<(CryptoHash, ShardId), Vec>, invalid_chunks: HashMap>, - receipt_id_to_shard_id: HashMap, transactions: HashMap>, receipts: HashMap>, block_refcounts: HashMap, @@ -1745,15 +1727,6 @@ impl<'a> ChainStoreAccess for ChainStoreUpdate<'a> { } } - fn get_shard_id_for_receipt_id(&self, receipt_id: &CryptoHash) -> Result { - if let Some(shard_id) = self.chain_store_cache_update.receipt_id_to_shard_id.get(receipt_id) - { - Ok(*shard_id) - } else { - self.chain_store.get_shard_id_for_receipt_id(receipt_id) - } - } - fn get_transaction( &self, tx_hash: &CryptoHash, @@ -2050,10 +2023,6 @@ impl<'a> ChainStoreUpdate<'a> { .insert((*hash, shard_id), Arc::new(outgoing_receipts)); } - pub fn save_receipt_id_to_shard_id(&mut self, receipt_id: CryptoHash, shard_id: ShardId) { - self.chain_store_cache_update.receipt_id_to_shard_id.insert(receipt_id, shard_id); - } - pub fn save_incoming_receipt( &mut self, hash: &CryptoHash, @@ -2543,10 +2512,6 @@ impl<'a> ChainStoreUpdate<'a> { } } - for (receipt_id, shard_id) in self.chain_store_cache_update.receipt_id_to_shard_id.iter() { - let data = borsh::to_vec(&shard_id)?; - store_update.increment_refcount(DBCol::ReceiptIdToShardId, receipt_id.as_ref(), &data); - } for (block_hash, refcount) in self.chain_store_cache_update.block_refcounts.iter() { store_update.set_ser(DBCol::BlockRefCount, block_hash.as_ref(), refcount)?; } @@ -2717,7 +2682,6 @@ impl<'a> ChainStoreUpdate<'a> { outgoing_receipts, incoming_receipts, invalid_chunks, - receipt_id_to_shard_id, transactions, receipts, block_refcounts, @@ -2777,9 +2741,6 @@ impl<'a> ChainStoreUpdate<'a> { for (hash, invalid_chunk) in invalid_chunks { self.chain_store.invalid_chunks.put(hash.into(), invalid_chunk); } - for (receipt_id, shard_id) in receipt_id_to_shard_id { - self.chain_store.receipt_id_to_shard_id.put(receipt_id.into(), shard_id); - } for (hash, transaction) in transactions { self.chain_store.transactions.put(hash.into(), transaction); } diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index 5a7e708656b..22cdc11d06b 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -1035,6 +1035,13 @@ impl EpochManagerAdapter for MockEpochManager { #[cfg(feature = "new_epoch_sync")] fn force_update_aggregator(&self, _epoch_id: &EpochId, _hash: 
&CryptoHash) {} + + fn get_epoch_all_validators( + &self, + _epoch_id: &EpochId, + ) -> Result, EpochError> { + Ok(self.validators.iter().map(|(_, v)| v.clone()).collect()) + } } impl RuntimeAdapter for KeyValueRuntime { diff --git a/chain/chain/src/tests/simple_chain.rs b/chain/chain/src/tests/simple_chain.rs index b9fef94f01f..8fa5ffab107 100644 --- a/chain/chain/src/tests/simple_chain.rs +++ b/chain/chain/src/tests/simple_chain.rs @@ -34,7 +34,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_snapshot!(hash, @"C3zeKRZubVungxfrSdq379TSCYnuz2YzjEkcJTdm3pU4"); } else { - insta::assert_snapshot!(hash, @"2WHohfYksQnwKwSEoTKpkseu2RWthbGf9kmGetgHgfQQ"); + insta::assert_snapshot!(hash, @"EKBbsbiindwuPwbiARE9LevUffurNhprbSaUjgPKCwEq"); } for i in 1..5 { @@ -52,7 +52,7 @@ fn build_chain() { if cfg!(feature = "nightly") { insta::assert_snapshot!(hash, @"EjLaoHRiAdRp2NcDqwbMcAYYxGfcv5R7GuYUNfRpaJvB"); } else { - insta::assert_snapshot!(hash, @"HJuuENeSwwikoR9BZA7cSonxAPZgY5mKQWL2pSXwjAwZ"); + insta::assert_snapshot!(hash, @"9Ag5sa6bF9knuJKe9XECTKZi7HwtDhCSxCZ8P9AdSvWH"); } } diff --git a/chain/client-primitives/Cargo.toml b/chain/client-primitives/Cargo.toml index 803d566e950..d2ad45f7b5a 100644 --- a/chain/client-primitives/Cargo.toml +++ b/chain/client-primitives/Cargo.toml @@ -22,7 +22,7 @@ time.workspace = true tracing.workspace = true yansi.workspace = true -near-async.workspace = true +near-time.workspace = true near-chain-primitives.workspace = true near-chain-configs.workspace = true near-chunks-primitives.workspace = true @@ -31,17 +31,13 @@ near-primitives.workspace = true [features] nightly_protocol = [ - "near-async/nightly_protocol", "near-chain-configs/nightly_protocol", "near-primitives/nightly_protocol", ] nightly = [ - "near-async/nightly", "near-chain-configs/nightly", "near-primitives/nightly", "nightly_protocol", ] sandbox = [] -test_features = [ - "near-primitives/test_features", -] +test_features = ["near-primitives/test_features"] diff --git a/chain/client-primitives/src/debug.rs b/chain/client-primitives/src/debug.rs index 61c3c2647c7..aacf1128e06 100644 --- a/chain/client-primitives/src/debug.rs +++ b/chain/client-primitives/src/debug.rs @@ -1,7 +1,6 @@ //! Structs in this module are used for debug purposes, and might change at any time //! without backwards compatibility of JSON encoding. 
use crate::types::StatusError; -use near_async::time::Utc; use near_primitives::congestion_info::CongestionInfo; use near_primitives::types::EpochId; use near_primitives::views::{ @@ -15,6 +14,7 @@ use near_primitives::{ types::{AccountId, BlockHeight}, views::ValidatorInfo, }; +use near_time::Utc; use std::collections::HashMap; #[derive(serde::Serialize, serde::Deserialize, Debug)] diff --git a/chain/client-primitives/src/types.rs b/chain/client-primitives/src/types.rs index 5088e1dc5c1..8b81916423a 100644 --- a/chain/client-primitives/src/types.rs +++ b/chain/client-primitives/src/types.rs @@ -93,7 +93,7 @@ pub enum ShardSyncStatus { StateDownloadHeader, StateDownloadParts, StateApplyScheduling, - StateApplyComplete, + StateApplyInProgress, StateApplyFinalizing, ReshardingScheduling, ReshardingApplying, @@ -106,7 +106,7 @@ impl ShardSyncStatus { ShardSyncStatus::StateDownloadHeader => 0, ShardSyncStatus::StateDownloadParts => 1, ShardSyncStatus::StateApplyScheduling => 2, - ShardSyncStatus::StateApplyComplete => 3, + ShardSyncStatus::StateApplyInProgress => 3, ShardSyncStatus::StateApplyFinalizing => 4, ShardSyncStatus::ReshardingScheduling => 5, ShardSyncStatus::ReshardingApplying => 6, @@ -130,7 +130,7 @@ impl ToString for ShardSyncStatus { ShardSyncStatus::StateDownloadHeader => "header".to_string(), ShardSyncStatus::StateDownloadParts => "parts".to_string(), ShardSyncStatus::StateApplyScheduling => "apply scheduling".to_string(), - ShardSyncStatus::StateApplyComplete => "apply complete".to_string(), + ShardSyncStatus::StateApplyInProgress => "apply in progress".to_string(), ShardSyncStatus::StateApplyFinalizing => "apply finalizing".to_string(), ShardSyncStatus::ReshardingScheduling => "resharding scheduling".to_string(), ShardSyncStatus::ReshardingApplying => "resharding applying".to_string(), diff --git a/chain/client/Cargo.toml b/chain/client/Cargo.toml index 35b7227ccf7..2d0b35d7445 100644 --- a/chain/client/Cargo.toml +++ b/chain/client/Cargo.toml @@ -60,7 +60,7 @@ near-parameters.workspace = true near-performance-metrics-macros.workspace = true near-performance-metrics.workspace = true near-pool.workspace = true -near-primitives.workspace = true +near-primitives = { workspace = true, features = ["clock"] } near-store.workspace = true near-telemetry.workspace = true near-vm-runner.workspace = true diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index ae736ebd245..2447ac06b99 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -246,7 +246,7 @@ impl Client { state_sync_adapter: Arc>, runtime_adapter: Arc, network_adapter: PeerManagerAdapter, - shards_manager_adapter: Sender, + shards_manager_sender: Sender, validator_signer: MutableValidatorSigner, enable_doomslug: bool, rng_seed: RngSeed, @@ -390,7 +390,7 @@ impl Client { epoch_manager, shard_tracker, runtime_adapter, - shards_manager_adapter, + shards_manager_adapter: shards_manager_sender, sharded_tx_pool, network_adapter, validator_signer, diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs index 6ad09f31e6d..4899dca9167 100644 --- a/chain/client/src/client_actor.rs +++ b/chain/client/src/client_actor.rs @@ -1847,7 +1847,7 @@ impl ClientActorInner { break; }; - if next_header.height() < min_height_included - 1 { + if next_header.height() + 1 < min_height_included { break; } diff --git a/chain/client/src/info.rs b/chain/client/src/info.rs index 1add3835dcb..d0dbf5fa7de 100644 --- a/chain/client/src/info.rs +++ b/chain/client/src/info.rs @@ -9,7 +9,6 @@ 
use near_client_primitives::types::StateSyncStatus; use near_epoch_manager::EpochManagerAdapter; use near_network::types::NetworkInfo; use near_primitives::block::Tip; -use near_primitives::hash::CryptoHash; use near_primitives::network::PeerId; use near_primitives::telemetry::{ TelemetryAgentInfo, TelemetryChainInfo, TelemetryInfo, TelemetrySystemInfo, @@ -27,7 +26,7 @@ use near_primitives::views::{ }; use near_telemetry::TelemetryEvent; use std::cmp::min; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::fmt::Write; use std::num::NonZeroUsize; use std::sync::Arc; @@ -289,22 +288,9 @@ impl InfoHelper { &mut self, epoch_manager: &dyn EpochManagerAdapter, epoch_id: &EpochId, - last_block_hash: &CryptoHash, ) -> usize { *self.num_validators_per_epoch.get_or_insert(*epoch_id, || { - let block_producers: HashSet = epoch_manager - .get_epoch_block_producers_ordered(epoch_id, last_block_hash) - .unwrap_or(vec![]) - .into_iter() - .map(|(validator_stake, _)| validator_stake.account_id().clone()) - .collect(); - let chunk_producers: HashSet = epoch_manager - .get_epoch_chunk_producers(epoch_id) - .unwrap_or(vec![]) - .into_iter() - .map(|validator_stake| validator_stake.account_id().clone()) - .collect(); - block_producers.union(&chunk_producers).count() + epoch_manager.get_epoch_all_validators(epoch_id).unwrap_or_default().len() }) } @@ -320,11 +306,8 @@ impl InfoHelper { let is_syncing = client.sync_status.is_syncing(); let head = unwrap_or_return!(client.chain.head()); let validator_info = if !is_syncing { - let num_validators = self.get_num_validators( - client.epoch_manager.as_ref(), - &head.epoch_id, - &head.last_block_hash, - ); + let num_validators = + self.get_num_validators(client.epoch_manager.as_ref(), &head.epoch_id); let account_id = signer.as_ref().map(|x| x.validator_id()); let is_validator = if let Some(account_id) = account_id { match client.epoch_manager.get_validator_by_account_id( @@ -922,6 +905,7 @@ mod tests { use near_epoch_manager::test_utils::*; use near_epoch_manager::EpochManager; use near_network::test_utils::peer_id_from_seed; + use near_primitives::hash::CryptoHash; use near_store::genesis::initialize_genesis_state; #[test] @@ -1057,7 +1041,7 @@ mod tests { let mut info_helper = InfoHelper::new(Clock::real(), noop().into_sender(), &client_config); assert_eq!( num_validators, - info_helper.get_num_validators(&epoch_manager_adapter, &epoch_id, &last_block_hash) + info_helper.get_num_validators(&epoch_manager_adapter, &epoch_id) ); } } diff --git a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs index 517540d40cc..488cdf2cfbf 100644 --- a/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs +++ b/chain/client/src/stateless_validation/partial_witness/partial_witness_tracker.rs @@ -22,7 +22,7 @@ use super::encoding::{WitnessEncoder, WitnessEncoderCache, WitnessPart}; /// Max number of chunks to keep in the witness tracker cache. We reach here only after validation /// of the partial_witness so the LRU cache size need not be too large. /// This effectively limits memory usage to the size of the cache multiplied by -/// MAX_COMPRESSED_STATE_WITNESS_SIZE, currently 40 * 32MiB = 1280MiB +/// MAX_COMPRESSED_STATE_WITNESS_SIZE, currently 40 * 48MiB = 1920MiB. 
const WITNESS_PARTS_CACHE_SIZE: usize = 40; /// Number of entries to keep in LRU cache of the processed state witnesses diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index 7fb06ae100b..78691476d9c 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -299,8 +299,8 @@ impl StateSync { state_parts_task_scheduler, )?; } - ShardSyncStatus::StateApplyComplete => { - self.sync_shards_apply_complete_status( + ShardSyncStatus::StateApplyInProgress => { + self.sync_shards_apply_status( shard_id, shard_sync_download, sync_hash, @@ -1004,7 +1004,7 @@ impl StateSync { Ok(()) => { *shard_sync_download = ShardSyncDownload { downloads: vec![], - status: ShardSyncStatus::StateApplyComplete, + status: ShardSyncStatus::StateApplyInProgress, } } Err(err) => { @@ -1019,7 +1019,7 @@ impl StateSync { Ok(()) } - fn sync_shards_apply_complete_status( + fn sync_shards_apply_status( &mut self, shard_id: ShardId, shard_sync_download: &mut ShardSyncDownload, diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index 492f6bc1a4b..25bd92da8c8 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -190,6 +190,12 @@ pub trait EpochManagerAdapter: Send + Sync { epoch_id: &EpochId, ) -> Result<Vec<ValidatorStake>, EpochError>; + /// Returns all validators for a given epoch. + fn get_epoch_all_validators( + &self, + epoch_id: &EpochId, + ) -> Result<Vec<ValidatorStake>, EpochError>; + /// Block producers for given height for the main block. Return EpochError if outside of known boundaries. fn get_block_producer( &self, @@ -1127,4 +1133,13 @@ impl EpochManagerAdapter for EpochManagerHandle { let mut epoch_manager = self.write(); epoch_manager.epoch_info_aggregator = EpochInfoAggregator::new(*epoch_id, *hash); } + + /// Returns all validators for a given epoch. + fn get_epoch_all_validators( + &self, + epoch_id: &EpochId, + ) -> Result<Vec<ValidatorStake>, EpochError> { + let epoch_manager = self.read(); + Ok(epoch_manager.get_epoch_info(epoch_id)?.validators_iter().collect::<Vec<_>>()) + } } diff --git a/chain/jsonrpc/fuzz/Cargo.toml b/chain/jsonrpc/fuzz/Cargo.toml index 2dd5ec01ca5..c9f439a9ce0 100644 --- a/chain/jsonrpc/fuzz/Cargo.toml +++ b/chain/jsonrpc/fuzz/Cargo.toml @@ -24,7 +24,7 @@ serde.workspace = true serde_json.workspace = true tokio.workspace = true -near-async.workspace = true +near-time.workspace = true near-jsonrpc.workspace = true near-jsonrpc-primitives.workspace = true near-jsonrpc-tests.workspace = true diff --git a/chain/jsonrpc/fuzz/fuzz_targets_disabled/fuzz_target_1.rs b/chain/jsonrpc/fuzz/fuzz_targets_disabled/fuzz_target_1.rs index ba1f53d5f32..2f29dd5eedd 100644 --- a/chain/jsonrpc/fuzz/fuzz_targets_disabled/fuzz_target_1.rs +++ b/chain/jsonrpc/fuzz/fuzz_targets_disabled/fuzz_target_1.rs @@ -1,7 +1,7 @@ #![no_main] use actix::System; use libfuzzer_sys::{arbitrary, fuzz_target}; -use near_async::time::Clock; +use near_time::Clock; use serde::ser::{Serialize, Serializer}; use serde_json::json; use tokio; diff --git a/chain/jsonrpc/jsonrpc-tests/Cargo.toml b/chain/jsonrpc/jsonrpc-tests/Cargo.toml index 3f1f015c4a7..81e391b3acd 100644 --- a/chain/jsonrpc/jsonrpc-tests/Cargo.toml +++ b/chain/jsonrpc/jsonrpc-tests/Cargo.toml @@ -20,6 +20,7 @@ borsh.workspace = true serde.workspace = true serde_json.workspace = true +near-time.workspace = true near-async.workspace = true near-chain-configs.workspace = true near-crypto.workspace = true @@ -61,7 +62,5 @@ nightly_protocol = [ "near-primitives/nightly_protocol",
"near-store/nightly_protocol", ] -statelessnet_protocol = [ - "near-primitives/statelessnet_protocol", -] +statelessnet_protocol = ["near-primitives/statelessnet_protocol"] sandbox = ["near-jsonrpc/sandbox", "near-o11y/sandbox"] diff --git a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json index 35cd91c401d..9b3402fcc21 100644 --- a/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json +++ b/chain/jsonrpc/jsonrpc-tests/res/genesis_config.json @@ -1,5 +1,5 @@ { - "protocol_version": 67, + "protocol_version": 69, "genesis_time": "1970-01-01T00:00:00.000000000Z", "chain_id": "sample", "genesis_height": 0, diff --git a/chain/jsonrpc/jsonrpc-tests/src/lib.rs b/chain/jsonrpc/jsonrpc-tests/src/lib.rs index 176e73cf3e1..af940f9db64 100644 --- a/chain/jsonrpc/jsonrpc-tests/src/lib.rs +++ b/chain/jsonrpc/jsonrpc-tests/src/lib.rs @@ -2,7 +2,6 @@ use std::sync::Arc; use actix::Addr; use futures::{future, future::LocalBoxFuture, FutureExt, TryFutureExt}; -use near_async::time::Clock; use near_async::{ actix::AddrWithAutoSpanContextExt, messaging::{noop, IntoMultiSender}, @@ -17,6 +16,7 @@ use near_jsonrpc_primitives::{ }; use near_network::tcp; use near_primitives::types::NumBlocks; +use near_time::Clock; use once_cell::sync::Lazy; use serde_json::json; @@ -72,7 +72,7 @@ macro_rules! test_with_client { near_actix_test_utils::run_actix(async { let (_view_client_addr, addr) = - test_utils::start_all(near_async::time::Clock::real(), $node_type); + test_utils::start_all(near_time::Clock::real(), $node_type); let $client = new_client(&format!("http://{}", addr)); diff --git a/chain/jsonrpc/jsonrpc-tests/tests/http_query.rs b/chain/jsonrpc/jsonrpc-tests/tests/http_query.rs index d7abf885314..0d87af34e7f 100644 --- a/chain/jsonrpc/jsonrpc-tests/tests/http_query.rs +++ b/chain/jsonrpc/jsonrpc-tests/tests/http_query.rs @@ -2,9 +2,9 @@ use actix::System; use futures::{future, FutureExt}; use near_actix_test_utils::run_actix; -use near_async::time::Clock; use near_jsonrpc::client::new_http_client; use near_o11y::testonly::init_test_logger; +use near_time::Clock; use near_jsonrpc_tests as test_utils; diff --git a/chain/jsonrpc/jsonrpc-tests/tests/rpc_query.rs b/chain/jsonrpc/jsonrpc-tests/tests/rpc_query.rs index 347c7ffcb53..9ae48513c05 100644 --- a/chain/jsonrpc/jsonrpc-tests/tests/rpc_query.rs +++ b/chain/jsonrpc/jsonrpc-tests/tests/rpc_query.rs @@ -6,7 +6,6 @@ use futures::{future, FutureExt}; use serde_json::json; use near_actix_test_utils::run_actix; -use near_async::time::Clock; use near_crypto::{KeyType, PublicKey, Signature}; use near_jsonrpc::client::{new_client, ChunkId}; use near_jsonrpc_primitives::types::query::QueryResponseKind; @@ -17,6 +16,7 @@ use near_primitives::account::{AccessKey, AccessKeyPermission}; use near_primitives::hash::CryptoHash; use near_primitives::types::{BlockId, BlockReference, EpochId, SyncCheckpoint}; use near_primitives::views::QueryRequest; +use near_time::Clock; use near_jsonrpc_tests::{self as test_utils, test_with_client}; diff --git a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs index 6cb02647bb0..69b10403d84 100644 --- a/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs +++ b/chain/jsonrpc/jsonrpc-tests/tests/rpc_transactions.rs @@ -5,7 +5,6 @@ use actix::{Actor, System}; use futures::{future, FutureExt, TryFutureExt}; use near_actix_test_utils::run_actix; -use near_async::time::Clock; use near_crypto::{InMemorySigner, KeyType}; use 
near_jsonrpc::client::new_client; use near_jsonrpc_primitives::types::transactions::{RpcTransactionStatusRequest, TransactionInfo}; @@ -16,6 +15,7 @@ use near_primitives::serialize::to_base64; use near_primitives::transaction::SignedTransaction; use near_primitives::types::BlockReference; use near_primitives::views::{FinalExecutionStatus, TxExecutionStatus}; +use near_time::Clock; use near_jsonrpc_tests::{self as test_utils, test_with_client}; diff --git a/chain/jsonrpc/res/rpc_errors_schema.json b/chain/jsonrpc/res/rpc_errors_schema.json index 099032c3b05..770640c385e 100644 --- a/chain/jsonrpc/res/rpc_errors_schema.json +++ b/chain/jsonrpc/res/rpc_errors_schema.json @@ -133,10 +133,12 @@ "props": { "final_accounts_balance": "", "final_postponed_receipts_balance": "", + "forwarded_buffered_receipts_balance": "", "incoming_receipts_balance": "", "incoming_validator_rewards": "", "initial_accounts_balance": "", "initial_postponed_receipts_balance": "", + "new_buffered_receipts_balance": "", "new_delayed_receipts_balance": "", "other_burnt_amount": "", "outgoing_receipts_balance": "", diff --git a/chain/network/Cargo.toml b/chain/network/Cargo.toml index d6972fc4097..8aeabef35f3 100644 --- a/chain/network/Cargo.toml +++ b/chain/network/Cargo.toml @@ -26,6 +26,7 @@ bytesize.workspace = true chrono.workspace = true crossbeam-channel.workspace = true derive_more.workspace = true +enum-map.workspace = true futures-util.workspace = true futures.workspace = true im.workspace = true @@ -71,6 +72,7 @@ rlimit.workspace = true tempfile.workspace = true turn.workspace = true webrtc-util.workspace = true +serde_json.workspace = true [features] nightly_protocol = [ diff --git a/chain/network/src/config.rs b/chain/network/src/config.rs index 45237cc09fc..6e020c799c2 100644 --- a/chain/network/src/config.rs +++ b/chain/network/src/config.rs @@ -3,6 +3,7 @@ use crate::concurrency::rate; use crate::network_protocol::PeerAddr; use crate::network_protocol::PeerInfo; use crate::peer_manager::peer_store; +use crate::rate_limits::messages_limits; use crate::snapshot_hosts; use crate::stun; use crate::tcp; @@ -192,6 +193,9 @@ pub struct NetworkConfig { // * ignoring received deleted edges as well pub skip_tombstones: Option, + /// Configuration of rate limits for incoming messages. + pub received_messages_rate_limits: messages_limits::Config, + #[cfg(test)] pub(crate) event_sink: near_async::messaging::Sender, @@ -237,6 +241,9 @@ impl NetworkConfig { ) { self.routing_table_update_rate_limit = rate::Limit { qps, burst } } + if let Some(rate_limits) = overrides.received_messages_rate_limits { + self.received_messages_rate_limits.apply_overrides(rate_limits); + } } pub fn new( @@ -371,6 +378,8 @@ impl NetworkConfig { } else { None }, + // Use a preset to configure rate limits and override entries with user defined values later. 
+ received_messages_rate_limits: messages_limits::Config::standard_preset(), #[cfg(test)] event_sink: near_async::messaging::IntoSender::into_sender( near_async::messaging::noop(), @@ -448,6 +457,7 @@ impl NetworkConfig { enable_outbound: true, }), skip_tombstones: None, + received_messages_rate_limits: messages_limits::Config::default(), #[cfg(test)] event_sink: near_async::messaging::IntoSender::into_sender( near_async::messaging::noop(), @@ -500,6 +510,11 @@ impl NetworkConfig { self.routing_table_update_rate_limit .validate() .context("routing_table_update_rate_limit")?; + + if let Err(err) = self.received_messages_rate_limits.validate() { + anyhow::bail!("One or more invalid rate limits: {err:?}"); + } + Ok(VerifiedConfig { node_id: self.node_id(), inner: self }) } } @@ -538,6 +553,9 @@ mod test { use crate::network_protocol; use crate::network_protocol::testonly as data; use crate::network_protocol::{AccountData, VersionedAccountData}; + use crate::rate_limits::messages_limits::{ + RateLimitedPeerMessageKey::BlockHeaders, SingleMessageConfig, + }; use crate::tcp; use crate::testonly::make_rng; use near_async::time; @@ -676,4 +694,19 @@ mod test { let sad = ad.sign(&signer.into()).unwrap(); assert!(sad.payload().len() <= network_protocol::MAX_ACCOUNT_DATA_SIZE_BYTES); } + + #[test] + fn received_messages_rate_limits_error() { + let mut nc = config::NetworkConfig::from_seed("123", tcp::ListenerAddr::reserve_for_test()); + nc.received_messages_rate_limits + .rate_limits + .insert(BlockHeaders, SingleMessageConfig::new(1, -4.0, None)); + assert!(nc.verify().is_err()); + + let mut nc = config::NetworkConfig::from_seed("123", tcp::ListenerAddr::reserve_for_test()); + nc.received_messages_rate_limits + .rate_limits + .insert(BlockHeaders, SingleMessageConfig::new(1, 4.0, None)); + assert!(nc.verify().is_ok()); + } } diff --git a/chain/network/src/config_json.rs b/chain/network/src/config_json.rs index 0595eb18109..90ef57c44fd 100644 --- a/chain/network/src/config_json.rs +++ b/chain/network/src/config_json.rs @@ -1,4 +1,5 @@ use crate::network_protocol::PeerAddr; +use crate::rate_limits::messages_limits; use crate::stun; use near_async::time::Duration; @@ -285,6 +286,7 @@ pub struct NetworkConfigOverrides { pub accounts_data_broadcast_rate_limit_qps: Option, pub routing_table_update_rate_limit_burst: Option, pub routing_table_update_rate_limit_qps: Option, + pub received_messages_rate_limits: Option, } impl Default for ExperimentalConfig { diff --git a/chain/network/src/lib.rs b/chain/network/src/lib.rs index 05bf13f6e0b..506a0ff9c05 100644 --- a/chain/network/src/lib.rs +++ b/chain/network/src/lib.rs @@ -1,4 +1,5 @@ pub use crate::peer_manager::peer_manager_actor::{Event, PeerManagerActor}; +pub use crate::rate_limits::messages_limits::OverrideConfig as MessagesLimitsOverrideConfig; mod accounts_data; mod announce_accounts; @@ -6,6 +7,7 @@ mod network_protocol; mod peer; mod peer_manager; mod private_actix; +mod rate_limits; mod snapshot_hosts; mod stats; mod store; diff --git a/chain/network/src/peer/peer_actor.rs b/chain/network/src/peer/peer_actor.rs index db38f2750b0..bce87330329 100644 --- a/chain/network/src/peer/peer_actor.rs +++ b/chain/network/src/peer/peer_actor.rs @@ -22,6 +22,7 @@ use crate::peer_manager::network_state::{NetworkState, PRUNE_EDGES_AFTER}; use crate::peer_manager::peer_manager_actor::Event; use crate::peer_manager::peer_manager_actor::MAX_TIER2_PEERS; use crate::private_actix::{RegisterPeerError, SendMessage}; +use crate::rate_limits::messages_limits; use 
crate::routing::edge::verify_nonce; use crate::routing::NetworkTopologyChange; use crate::shards_manager::ShardsManagerRequestFromNetwork; @@ -190,6 +191,9 @@ pub(crate) struct PeerActor { // TODO: move it to ConnectingStatus::Outbound. // When ready, use connection.peer_info instead. peer_info: DisplayOption, + + /// Per-message rate limits for incoming messages. + received_messages_rate_limits: messages_limits::RateLimits, } impl Debug for PeerActor { @@ -311,6 +315,10 @@ impl PeerActor { // That likely requires bigger changes and account_id here is later used for debug / logging purposes only. account_id: network_state.config.validator.account_id(), }; + let received_messages_rate_limits = messages_limits::RateLimits::from_config( + &network_state.config.received_messages_rate_limits, + clock.now(), + ); // recv is the HandshakeSignal returned by this spawn_inner() call. let (send, recv): (HandshakeSignalSender, HandshakeSignal) = tokio::sync::oneshot::channel(); @@ -351,6 +359,7 @@ impl PeerActor { } .into(), network_state, + received_messages_rate_limits, } }), recv, @@ -1732,12 +1741,18 @@ impl actix::Handler for PeerActor { tracing::trace!(target: "network", "Received message: {}", peer_msg); + let now = self.clock.now(); { let labels = [peer_msg.msg_variant()]; metrics::PEER_MESSAGE_RECEIVED_BY_TYPE_TOTAL.with_label_values(&labels).inc(); metrics::PEER_MESSAGE_RECEIVED_BY_TYPE_BYTES .with_label_values(&labels) .inc_by(msg.len() as u64); + if !self.received_messages_rate_limits.is_allowed(&peer_msg, now) { + metrics::PEER_MESSAGE_RATE_LIMITED_BY_TYPE_TOTAL.with_label_values(&labels).inc(); + tracing::debug!(target: "network", "Peer {} is being rate limited for message {}", self.peer_info, peer_msg.msg_variant()); + return; + } } match &self.peer_status { PeerStatus::Connecting { .. } => self.handle_msg_connecting(ctx, peer_msg), @@ -1746,7 +1761,7 @@ impl actix::Handler for PeerActor { tracing::warn!(target: "network", "Received {} from closing connection {:?}. Ignoring", peer_msg, self.peer_type); return; } - conn.last_time_received_message.store(self.clock.now()); + conn.last_time_received_message.store(now); // Check if the message type is allowed given the TIER of the connection: // TIER1 connections are reserved exclusively for BFT consensus messages. if !conn.tier.is_allowed(&peer_msg) { @@ -1763,7 +1778,7 @@ impl actix::Handler for PeerActor { // case when our peer doesn't use that logic yet. 
if let Some(skip_tombstones) = self.network_state.config.skip_tombstones { if let PeerMessage::SyncRoutingTable(routing_table) = &mut peer_msg { - if conn.established_time + skip_tombstones > self.clock.now() { + if conn.established_time + skip_tombstones > now { routing_table .edges .retain(|edge| edge.edge_type() == EdgeState::Active); diff --git a/chain/network/src/peer/tests/mod.rs b/chain/network/src/peer/tests/mod.rs index fd35dbe7b0d..9e3cfb80df3 100644 --- a/chain/network/src/peer/tests/mod.rs +++ b/chain/network/src/peer/tests/mod.rs @@ -1,2 +1,3 @@ mod communication; +mod rate_limits; mod stream; diff --git a/chain/network/src/peer/tests/rate_limits.rs b/chain/network/src/peer/tests/rate_limits.rs new file mode 100644 index 00000000000..a507802a15b --- /dev/null +++ b/chain/network/src/peer/tests/rate_limits.rs @@ -0,0 +1,209 @@ +use crate::broadcast::Receiver; +use crate::config::NetworkConfig; +use crate::network_protocol::{testonly as data, PartialEncodedChunkRequestMsg, RoutedMessageBody}; +use crate::network_protocol::{Encoding, PeerMessage}; +use crate::peer::testonly::{Event, PeerConfig, PeerHandle}; +use crate::peer_manager::peer_manager_actor::Event as PME; +use crate::rate_limits::messages_limits; +use crate::tcp; +use crate::testonly::{make_rng, Rng}; +use near_async::time::FakeClock; +use near_o11y::testonly::init_test_logger; +use near_primitives::hash::CryptoHash; +use rand::Rng as _; +use std::sync::Arc; +use std::time::Duration; +use tokio::time::{sleep, sleep_until, Instant}; + +#[tokio::test] +// Verifies that peer traffic is rate limited per message type. Not all messages are rate limited. +// This test works by sending many messages very quickly and then checking how many of them +// were actually processed by the receiver. +async fn test_message_rate_limits() -> anyhow::Result<()> { + init_test_logger(); + tracing::info!("test_message_rate_limits"); + + let mut clock = FakeClock::default(); + let mut rng = make_rng(89028037453); + let (outbound, inbound) = setup_test_peers(&mut clock, &mut rng).await; + + const MESSAGES: u32 = 7; + // Let's gather all events received from now on. We'll check them later, after producing messages. + let mut events = inbound.events.from_now(); + let messages_samples = send_messages(&inbound, &outbound, &mut rng, MESSAGES).await; + + // Check how many messages of each type have been received. + let messages_received = + wait_for_similar_messages(&messages_samples, &mut events, Duration::from_secs(3)).await; + tracing::debug!(target:"test","received {messages_received:?} messages"); + // BlockRequest gets rate limited (7 sent vs 5 bucket_start). + assert!(messages_received[0] < MESSAGES); + // PartialEncodedChunkRequest gets rate limited (7 sent vs 5 bucket_start). + assert!(messages_received[1] < MESSAGES); + // Transaction doesn't get rate limited (7 sent vs 50 bucket_start). + assert_eq!(messages_received[2], MESSAGES); + + Ok(()) +} + +#[tokio::test] +// Verifies that peer traffic is not rate limited when messages are sent at regular intervals, +// and the total number of messages is below the limit.
+async fn test_message_rate_limits_over_time() -> anyhow::Result<()> {
+    init_test_logger();
+    tracing::info!("test_message_rate_limits_over_time");
+
+    let mut clock = FakeClock::default();
+    let mut rng = make_rng(89028037453);
+    let (outbound, inbound) = setup_test_peers(&mut clock, &mut rng).await;
+
+    const MESSAGES: u32 = 4;
+    const INTERVAL: Duration = Duration::from_secs(2);
+    // Let's gather all events received from now on. We'll check them later, after producing messages.
+    let mut events = inbound.events.from_now();
+
+    // Send 4 messages of each type every 2 seconds, three times.
+    let mut messages_samples = Vec::new();
+    let now = clock.now();
+    for i in 0..3 {
+        messages_samples = send_messages(&inbound, &outbound, &mut rng, MESSAGES).await;
+        // Advance the fake clock to refresh rate limits.
+        clock.advance_until(now + INTERVAL * (i + 1));
+        // Give some time to peer actors to process messages.
+        sleep(Duration::from_secs(1)).await;
+    }
+
+    let messages_received =
+        wait_for_similar_messages(&messages_samples, &mut events, Duration::from_secs(3)).await;
+    tracing::debug!(target:"test","received {messages_received:?} messages");
+    // BlockRequest and PartialEncodedChunkRequest don't get rate limited:
+    // 12 sent vs 15 available (bucket_start = 5, plus 2.5 tokens/s refilled over 4 s).
+    assert_eq!(messages_received[0], MESSAGES * 3);
+    assert_eq!(messages_received[1], MESSAGES * 3);
+    // Transaction doesn't get rate limited (12 sent vs bucket_start = 50).
+    assert_eq!(messages_received[2], MESSAGES * 3);
+
+    Ok(())
+}
+
+/// Waits up to `duration` and then checks how many of the received events match
+/// (by type only) each one of `samples`.
+///
+/// Returns a vector of the same size as `samples`.
+async fn wait_for_similar_messages(
+    samples: &[PeerMessage],
+    events: &mut Receiver<Event>,
+    duration: Duration,
+) -> Vec<u32> {
+    let mut messages_received = vec![0; 3];
+    sleep_until(Instant::now() + duration).await;
+    while let Some(event) = events.try_recv() {
+        match event {
+            Event::Network(PME::MessageProcessed(_, got)) => {
+                for (i, sample) in samples.iter().enumerate() {
+                    if sample.msg_variant() == got.msg_variant() {
+                        messages_received[i] += 1;
+                    }
+                }
+            }
+            _ => {}
+        }
+    }
+    messages_received
+}
+
+/// Sets up two connected peers.
+///
+/// Rate limits configuration:
+/// - `BlockRequest`, `PartialEncodedChunkRequest`: bucket_start = 5, bucket_max = 10, refill_rate = 2.5/s
+/// - `Transaction`: bucket_start = bucket_max = 50, refill_rate = 5/s
+async fn setup_test_peers(clock: &mut FakeClock, mut rng: &mut Rng) -> (PeerHandle, PeerHandle) {
+    let chain = Arc::new(data::Chain::make(clock, &mut rng, 12));
+
+    // Customize the network configuration to set some arbitrary rate limits.
+    let add_rate_limits = |mut network_config: NetworkConfig| {
+        let rate_limits = &mut network_config.received_messages_rate_limits.rate_limits;
+        use messages_limits::RateLimitedPeerMessageKey::*;
+        rate_limits
+            .insert(BlockRequest, messages_limits::SingleMessageConfig::new(10, 2.5, Some(5)));
+        rate_limits.insert(
+            PartialEncodedChunkRequest,
+            messages_limits::SingleMessageConfig::new(10, 2.5, Some(5)),
+        );
+        rate_limits.insert(Transaction, messages_limits::SingleMessageConfig::new(50, 5.0, None));
+        network_config
+    };
+
+    let inbound_cfg = PeerConfig {
+        chain: chain.clone(),
+        network: add_rate_limits(chain.make_config(&mut rng)),
+        force_encoding: Some(Encoding::Proto),
+    };
+    let outbound_cfg = PeerConfig {
+        chain: chain.clone(),
+        network: add_rate_limits(chain.make_config(&mut rng)),
+        force_encoding: Some(Encoding::Proto),
+    };
+    let (outbound_stream, inbound_stream) =
+        tcp::Stream::loopback(inbound_cfg.id(), tcp::Tier::T2).await;
+    let mut inbound = PeerHandle::start_endpoint(clock.clock(), inbound_cfg, inbound_stream).await;
+    let mut outbound =
+        PeerHandle::start_endpoint(clock.clock(), outbound_cfg, outbound_stream).await;
+
+    outbound.complete_handshake().await;
+    inbound.complete_handshake().await;
+    (outbound, inbound)
+}
+
+/// Sends samples of various messages:
+/// - `BlockRequest`
+/// - `PartialEncodedChunkRequest`
+/// - `Transaction`
+///
+/// Messages are sent `count` times each.
+///
+/// Returns a vector with one example of each message above (useful for comparisons).
+async fn send_messages(
+    inbound: &PeerHandle,
+    outbound: &PeerHandle,
+    rng: &mut Rng,
+    count: u32,
+) -> Vec<PeerMessage> {
+    let mut messages_samples = Vec::new();
+
+    tracing::info!(target:"test","send BlockRequest");
+    let message = PeerMessage::BlockRequest(CryptoHash::default());
+    for _ in 0..count {
+        outbound.send(message.clone()).await;
+    }
+    messages_samples.push(message);
+
+    tracing::info!(target:"test","send PartialEncodedChunkRequest");
+    // Duplicated routed messages are filtered out, so we must tweak each message to make it unique.
+
+    for i in 0..count {
+        let message = PeerMessage::Routed(Box::new(outbound.routed_message(
+            RoutedMessageBody::PartialEncodedChunkRequest(PartialEncodedChunkRequestMsg {
+                chunk_hash: outbound.cfg.chain.blocks[5].chunks()[2].chunk_hash(),
+                part_ords: vec![rng.gen()],
+                tracking_shards: Default::default(),
+            }),
+            inbound.cfg.id(),
+            1,
+            None,
+        )));
+        outbound.send(message.clone()).await;
+        if i == count - 1 {
+            messages_samples.push(message);
+        }
+    }
+
+    tracing::info!(target:"test","send Transaction");
+    let message = PeerMessage::Transaction(data::make_signed_transaction(rng));
+    for _ in 0..count {
+        outbound.send(message.clone()).await;
+    }
+    messages_samples.push(message);
+
+    messages_samples
+}
diff --git a/chain/network/src/rate_limits/messages_limits.rs b/chain/network/src/rate_limits/messages_limits.rs
new file mode 100644
index 00000000000..fead2da18d0
--- /dev/null
+++ b/chain/network/src/rate_limits/messages_limits.rs
@@ -0,0 +1,444 @@
+//! This module facilitates the initialization and storage
+//! of per-message rate limits.
+
+use std::collections::HashMap;
+
+use enum_map::{enum_map, EnumMap};
+use near_async::time::Instant;
+
+use crate::network_protocol::{PeerMessage, RoutedMessageBody};
+
+use super::token_bucket::{TokenBucket, TokenBucketError};
+
+/// Object responsible for managing the rate limits of all network messages
+/// for a single connection/peer.
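+///
+/// A minimal illustrative flow (`config` and `clock` here stand in for the
+/// caller's `messages_limits::Config` and clock, as in `PeerActor`):
+/// ```ignore
+/// let mut limits = RateLimits::from_config(&config, clock.now());
+/// if !limits.is_allowed(&peer_msg, clock.now()) {
+///     // The message exceeded its budget: drop it.
+/// }
+/// ```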
+#[derive(Default)]
+pub struct RateLimits {
+    buckets: EnumMap<RateLimitedPeerMessageKey, Option<TokenBucket>>,
+}
+
+impl RateLimits {
+    /// Creates all buckets as configured in `config`.
+    /// See also [TokenBucket::new].
+    pub fn from_config(config: &Config, start_time: Instant) -> Self {
+        let mut buckets = enum_map! { _ => None };
+        // Configuration is assumed to be correct. Any failure to build a bucket is ignored.
+        for (key, message_config) in &config.rate_limits {
+            let initial_size = message_config.initial_size.unwrap_or(message_config.maximum_size);
+            match TokenBucket::new(
+                initial_size,
+                message_config.maximum_size,
+                message_config.refill_rate,
+                start_time,
+            ) {
+                Ok(bucket) => buckets[*key] = Some(bucket),
+                Err(err) => {
+                    tracing::warn!(target: "network", "ignoring rate limit for {key} due to an error ({err})")
+                }
+            }
+        }
+        Self { buckets }
+    }
+
+    /// Checks if the given message is under the rate limits.
+    ///
+    /// # Arguments
+    ///
+    /// * `message` - The network message to be checked
+    /// * `now` - Current time
+    ///
+    /// Returns `true` if the message is allowed to continue, or `false`
+    /// if it should be rate limited.
+    pub fn is_allowed(&mut self, message: &PeerMessage, now: Instant) -> bool {
+        if let Some((key, cost)) = get_key_and_token_cost(message) {
+            if let Some(bucket) = &mut self.buckets[key] {
+                return bucket.acquire(cost, now);
+            }
+        }
+        true
+    }
+}
+
+/// Rate limit configuration for a single network message.
+#[derive(Clone, serde::Serialize, serde::Deserialize, Debug)]
+#[cfg_attr(test, derive(PartialEq))]
+pub struct SingleMessageConfig {
+    pub maximum_size: u32,
+    pub refill_rate: f32,
+    /// Optional initial size. Defaults to `maximum_size` if absent.
+    pub initial_size: Option<u32>,
+}
+
+impl SingleMessageConfig {
+    pub fn new(maximum_size: u32, refill_rate: f32, initial_size: Option<u32>) -> Self {
+        Self { maximum_size, refill_rate, initial_size }
+    }
+}
+
+/// Rate limit configuration for all network messages.
+#[derive(Default, Clone)]
+pub struct Config {
+    pub rate_limits: HashMap<RateLimitedPeerMessageKey, SingleMessageConfig>,
+}
+
+/// Struct to manage user-defined overrides for [Config]. The key difference from the base
+/// struct is that here values can be set to `None` to disable preset rate limits.
+#[derive(serde::Serialize, serde::Deserialize, Default, Clone, Debug)]
+pub struct OverrideConfig {
+    pub rate_limits: HashMap<RateLimitedPeerMessageKey, Option<SingleMessageConfig>>,
+}
+
+impl Config {
+    /// Validates this configuration object.
+    ///
+    /// # Errors
+    ///
+    /// If at least one error is present, returns the list of all configuration errors.
+    pub fn validate(&self) -> Result<(), Vec<(RateLimitedPeerMessageKey, TokenBucketError)>> {
+        let mut errors = Vec::new();
+        for (key, message_config) in &self.rate_limits {
+            if let Err(err) = TokenBucket::validate_refill_rate(message_config.refill_rate) {
+                errors.push((*key, err));
+            }
+        }
+        if errors.is_empty() {
+            Ok(())
+        } else {
+            Err(errors)
+        }
+    }
+
+    /// Returns a good preset of rate limit configuration valid for any type of node.
+    pub fn standard_preset() -> Self {
+        // TODO(trisfald): make preset
+        Self::default()
+    }
+
+    /// Applies rate limit configuration overrides to `self`. In practice, merges the two
+    /// configurations, giving preference to the values defined by the `overrides` parameter.
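+    ///
+    /// A sketch of the merge semantics (mirroring the `apply_overrides` test below):
+    /// ```ignore
+    /// // `Some` inserts or replaces an entry; `None` removes a preset entry.
+    /// let mut overrides = OverrideConfig::default();
+    /// overrides.rate_limits.insert(Block, Some(SingleMessageConfig::new(4, 1.0, None)));
+    /// overrides.rate_limits.insert(BlockHeaders, None);
+    /// config.apply_overrides(overrides);
+    /// ```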
+    pub fn apply_overrides(&mut self, overrides: OverrideConfig) {
+        for (key, message_config) in overrides.rate_limits {
+            match message_config {
+                Some(value) => self.rate_limits.insert(key, value),
+                None => self.rate_limits.remove(&key),
+            };
+        }
+    }
+}
+
+/// This enum represents the variants of [PeerMessage] that can be rate limited.
+/// It is meant to be used as an index for mapping peer messages to a value.
+#[derive(
+    Clone,
+    Copy,
+    enum_map::Enum,
+    strum::Display,
+    Debug,
+    PartialEq,
+    Eq,
+    Hash,
+    serde::Serialize,
+    serde::Deserialize,
+)]
+#[allow(clippy::large_enum_variant)]
+pub enum RateLimitedPeerMessageKey {
+    SyncRoutingTable,
+    DistanceVector,
+    RequestUpdateNonce,
+    SyncAccountsData,
+    PeersRequest,
+    PeersResponse,
+    BlockHeadersRequest,
+    BlockHeaders,
+    BlockRequest,
+    Block,
+    Transaction,
+    SyncSnapshotHosts,
+    StateRequestHeader,
+    StateRequestPart,
+    VersionedStateResponse,
+    BlockApproval,
+    ForwardTx,
+    TxStatusRequest,
+    TxStatusResponse,
+    StateResponse,
+    PartialEncodedChunkRequest,
+    PartialEncodedChunkResponse,
+    VersionedPartialEncodedChunk,
+    PartialEncodedChunkForward,
+    ChunkEndorsement,
+    ChunkStateWitnessAck,
+    PartialEncodedStateWitness,
+    PartialEncodedStateWitnessForward,
+}
+
+/// Given a `PeerMessage`, returns a tuple containing the `RateLimitedPeerMessageKey`
+/// corresponding to the message's type and its cost (in tokens) for rate limiting
+/// purposes.
+///
+/// Returns `Some` if the message has the potential to be rate limited (given an appropriate configuration).
+/// Returns `None` if the message is not meant to be rate limited in any scenario.
+fn get_key_and_token_cost(message: &PeerMessage) -> Option<(RateLimitedPeerMessageKey, u32)> {
+    use RateLimitedPeerMessageKey::*;
+    match message {
+        PeerMessage::SyncRoutingTable(_) => Some((SyncRoutingTable, 1)),
+        PeerMessage::DistanceVector(_) => Some((DistanceVector, 1)),
+        PeerMessage::RequestUpdateNonce(_) => Some((RequestUpdateNonce, 1)),
+        PeerMessage::SyncAccountsData(_) => Some((SyncAccountsData, 1)),
+        PeerMessage::PeersRequest(_) => Some((PeersRequest, 1)),
+        PeerMessage::PeersResponse(_) => Some((PeersResponse, 1)),
+        PeerMessage::BlockHeadersRequest(_) => Some((BlockHeadersRequest, 1)),
+        PeerMessage::BlockHeaders(_) => Some((BlockHeaders, 1)),
+        PeerMessage::BlockRequest(_) => Some((BlockRequest, 1)),
+        PeerMessage::Block(_) => Some((Block, 1)),
+        PeerMessage::Transaction(_) => Some((Transaction, 1)),
+        PeerMessage::Routed(msg) => match msg.body {
+            RoutedMessageBody::BlockApproval(_) => Some((BlockApproval, 1)),
+            RoutedMessageBody::ForwardTx(_) => Some((ForwardTx, 1)),
+            RoutedMessageBody::TxStatusRequest(_, _) => Some((TxStatusRequest, 1)),
+            RoutedMessageBody::TxStatusResponse(_) => Some((TxStatusResponse, 1)),
+            RoutedMessageBody::StateResponse(_) => Some((StateResponse, 1)),
+            RoutedMessageBody::PartialEncodedChunkRequest(_) => {
+                Some((PartialEncodedChunkRequest, 1))
+            }
+            RoutedMessageBody::PartialEncodedChunkResponse(_) => {
+                Some((PartialEncodedChunkResponse, 1))
+            }
+            RoutedMessageBody::VersionedPartialEncodedChunk(_) => {
+                Some((VersionedPartialEncodedChunk, 1))
+            }
+            RoutedMessageBody::PartialEncodedChunkForward(_) => {
+                Some((PartialEncodedChunkForward, 1))
+            }
+            RoutedMessageBody::ChunkEndorsement(_) => Some((ChunkEndorsement, 1)),
+            RoutedMessageBody::ChunkStateWitnessAck(_) => Some((ChunkStateWitnessAck, 1)),
+            RoutedMessageBody::PartialEncodedStateWitness(_) => {
+                Some((PartialEncodedStateWitness, 1))
+            }
RoutedMessageBody::PartialEncodedStateWitnessForward(_) => { + Some((PartialEncodedStateWitnessForward, 1)) + } + RoutedMessageBody::Ping(_) + | RoutedMessageBody::Pong(_) + | RoutedMessageBody::_UnusedChunkStateWitness + | RoutedMessageBody::_UnusedVersionedStateResponse + | RoutedMessageBody::_UnusedPartialEncodedChunk + | RoutedMessageBody::_UnusedQueryRequest + | RoutedMessageBody::_UnusedQueryResponse + | RoutedMessageBody::_UnusedReceiptOutcomeRequest(_) + | RoutedMessageBody::_UnusedReceiptOutcomeResponse + | RoutedMessageBody::_UnusedStateRequestHeader + | RoutedMessageBody::_UnusedStateRequestPart => None, + }, + PeerMessage::SyncSnapshotHosts(_) => Some((SyncSnapshotHosts, 1)), + PeerMessage::StateRequestHeader(_, _) => Some((StateRequestHeader, 1)), + PeerMessage::StateRequestPart(_, _, _) => Some((StateRequestPart, 1)), + PeerMessage::VersionedStateResponse(_) => Some((VersionedStateResponse, 1)), + PeerMessage::Tier1Handshake(_) + | PeerMessage::Tier2Handshake(_) + | PeerMessage::HandshakeFailure(_, _) + | PeerMessage::LastEdge(_) + | PeerMessage::Disconnect(_) + | PeerMessage::Challenge(_) => None, + } +} + +#[cfg(test)] +mod tests { + use near_async::time::Duration; + use near_primitives::hash::CryptoHash; + + use crate::network_protocol::{Disconnect, PeerMessage}; + + use super::*; + + #[test] + fn is_allowed() { + let disconnect = + PeerMessage::Disconnect(Disconnect { remove_from_connection_store: false }); + let block_request = PeerMessage::BlockRequest(CryptoHash::default()); + let now = Instant::now(); + + // Test message that can't be rate limited. + { + let mut limits = RateLimits::default(); + assert!(limits.is_allowed(&disconnect, now)); + } + + // Test message that might be rate limited, but the system is not configured to do so. + { + let mut limits = RateLimits::default(); + assert!(limits.is_allowed(&block_request, now)); + } + + // Test rate limited message with enough tokens. + { + let mut limits = RateLimits::default(); + limits.buckets[RateLimitedPeerMessageKey::BlockRequest] = + Some(TokenBucket::new(1, 1, 0.0, now).unwrap()); + assert!(limits.is_allowed(&block_request, now)); + } + + // Test rate limited message without enough tokens. + { + let mut limits = RateLimits::default(); + limits.buckets[RateLimitedPeerMessageKey::BlockRequest] = + Some(TokenBucket::new(0, 1, 0.0, now).unwrap()); + assert!(!limits.is_allowed(&block_request, now)); + } + } + + #[test] + fn configuration() { + use RateLimitedPeerMessageKey::*; + let mut config = Config::default(); + + config.rate_limits.insert(Block, SingleMessageConfig::new(5, 1.0, Some(1))); + config.rate_limits.insert(BlockApproval, SingleMessageConfig::new(5, 1.0, None)); + config.rate_limits.insert(BlockHeaders, SingleMessageConfig::new(1, -4.0, None)); + + let now = Instant::now(); + let mut limits = RateLimits::from_config(&config, now); + + // Bucket should exist with capacity = 1. + assert!(!limits.buckets[Block].as_mut().unwrap().acquire(2, now)); + // Bucket should exist with capacity = 5. + assert!(limits.buckets[BlockApproval].as_mut().unwrap().acquire(2, now)); + // Bucket should not exist due to a config error. + assert!(limits.buckets[BlockHeaders].is_none()); + // Buckets are not instantiated for message types not present in the config. 
+ assert!(limits.buckets[RequestUpdateNonce].is_none()); + } + + #[test] + fn configuration_errors() { + use RateLimitedPeerMessageKey::*; + let mut config = Config::default(); + assert!(config.validate().is_ok()); + + config.rate_limits.insert(Block, SingleMessageConfig::new(0, 1.0, None)); + assert!(config.validate().is_ok()); + + config.rate_limits.insert(BlockApproval, SingleMessageConfig::new(0, -1.0, None)); + assert_eq!( + config.validate(), + Err(vec![(BlockApproval, TokenBucketError::InvalidRefillRate(-1.0))]) + ); + + config.rate_limits.insert(BlockHeaders, SingleMessageConfig::new(0, -2.0, None)); + let result = config.validate(); + let error = result.expect_err("a configuration error is expected"); + assert!(error + .iter() + .find(|(key, err)| *key == BlockApproval + && *err == TokenBucketError::InvalidRefillRate(-1.0)) + .is_some()); + assert!(error + .iter() + .find(|(key, err)| *key == BlockHeaders + && *err == TokenBucketError::InvalidRefillRate(-2.0)) + .is_some()); + } + + #[test] + fn buckets_get_refreshed() { + use RateLimitedPeerMessageKey::*; + let mut config = Config::default(); + let now = Instant::now(); + + config.rate_limits.insert(Block, SingleMessageConfig::new(5, 1.0, Some(0))); + config.rate_limits.insert(BlockApproval, SingleMessageConfig::new(5, 1.0, Some(0))); + + let mut limits = RateLimits::from_config(&config, now); + + assert!(!limits.buckets[Block].as_mut().unwrap().acquire(1, now)); + assert!(!limits.buckets[BlockApproval].as_mut().unwrap().acquire(1, now)); + + let now = now + Duration::seconds(1); + + assert!(limits.buckets[Block].as_mut().unwrap().acquire(1, now)); + assert!(limits.buckets[BlockApproval].as_mut().unwrap().acquire(1, now)); + } + + #[test] + fn apply_overrides() { + use RateLimitedPeerMessageKey::*; + + // Create a config with three entries. + let mut config = Config::default(); + config.rate_limits.insert(Block, SingleMessageConfig::new(1, 1.0, None)); + config.rate_limits.insert(BlockApproval, SingleMessageConfig::new(2, 1.0, None)); + config.rate_limits.insert(BlockHeaders, SingleMessageConfig::new(3, 1.0, None)); + + // Override the config with the following patch: + // - one entry is modified + // - one entry is untouched + // - one entry is removed + // - one entry is added + let mut overrides = OverrideConfig::default(); + overrides.rate_limits.insert(Block, Some(SingleMessageConfig::new(4, 1.0, None))); + overrides.rate_limits.insert(BlockHeaders, None); + overrides + .rate_limits + .insert(StateRequestHeader, Some(SingleMessageConfig::new(5, 1.0, None))); + + config.apply_overrides(overrides); + assert_eq!(config.rate_limits.len(), 3); + assert_eq!(config.rate_limits.get(&Block), Some(&SingleMessageConfig::new(4, 1.0, None))); + assert_eq!(config.rate_limits.get(&BlockHeaders), None); + assert_eq!( + config.rate_limits.get(&StateRequestHeader), + Some(&SingleMessageConfig::new(5, 1.0, None)) + ); + } + + #[test] + fn override_config_deserialization() { + use RateLimitedPeerMessageKey::*; + + // Check object with no entries. + let json = serde_json::json!({"rate_limits": {}}); + let config: OverrideConfig = + serde_json::from_value(json).expect("deserializing OverrideConfig should work"); + assert_eq!(config.rate_limits.len(), 0); + + // Check object with a single entry. 
+        let json = serde_json::json!({"rate_limits": {
+            "Block": {
+                "maximum_size": 1,
+                "refill_rate": 1.0,
+                "initial_size": 1,
+            }
+        }});
+        let config: OverrideConfig =
+            serde_json::from_value(json).expect("deserializing OverrideConfig should work");
+        assert_eq!(config.rate_limits.len(), 1);
+        assert!(config.rate_limits.contains_key(&Block));
+
+        // Check object with multiple entries.
+        let json = serde_json::json!({"rate_limits": {
+            "Block": {
+                "maximum_size": 1,
+                "refill_rate": 1.0,
+                "initial_size": 1,
+            },
+            "BlockApproval": {
+                "maximum_size": 2,
+                "refill_rate": 1.0,
+            }
+        }});
+        let config: OverrideConfig =
+            serde_json::from_value(json).expect("deserializing OverrideConfig should work");
+        assert_eq!(config.rate_limits.len(), 2);
+        assert!(config.rate_limits.contains_key(&Block));
+        assert!(config.rate_limits.contains_key(&BlockApproval));
+
+        // Check object with errors.
+        let json = serde_json::json!({"rate_limits": {
+            "Block": {
+                "foo": 1,
+            }
+        }});
+        assert!(serde_json::from_value::<OverrideConfig>(json).is_err());
+    }
+}
diff --git a/chain/network/src/rate_limits/mod.rs b/chain/network/src/rate_limits/mod.rs
new file mode 100644
index 00000000000..e623412c3b2
--- /dev/null
+++ b/chain/network/src/rate_limits/mod.rs
@@ -0,0 +1,2 @@
+pub mod messages_limits;
+pub mod token_bucket;
diff --git a/chain/network/src/rate_limits/token_bucket.rs b/chain/network/src/rate_limits/token_bucket.rs
new file mode 100644
index 00000000000..5b5cd8c8eb7
--- /dev/null
+++ b/chain/network/src/rate_limits/token_bucket.rs
@@ -0,0 +1,288 @@
+//! Implementation of the token bucket algorithm, used to put limits on
+//! bandwidth and burstiness of network traffic.
+//!
+//! The algorithm depicts an imaginary bucket into which tokens are added
+//! at regular intervals of time. The bucket has a well-defined maximum size,
+//! and overflowing tokens are simply discarded.
+//! Network traffic (packets, messages, etc.) 'consumes' a given amount of tokens
+//! in order to be allowed to pass.
+//! If there aren't enough tokens in the bucket, the traffic might be stopped
+//! or delayed. However, this module's responsibility stops at telling
+//! whether or not incoming messages are allowed.
+
+use near_async::time::Instant;
+
+#[derive(thiserror::Error, Debug, PartialEq)]
+pub enum TokenBucketError {
+    #[error("invalid value for refill rate ({0})")]
+    InvalidRefillRate(f32),
+}
+
+/// Into how many parts a token can be divided.
+const TOKEN_PARTS_NUMBER: u64 = 1 << 31;
+
+/// Struct to hold the state for the token bucket algorithm.
+///
+/// The precision guarantee is, at least, such that a bucket with `refill_rate` = 0.001 tokens/s,
+/// refreshed at regular 10 ms intervals, will successfully generate a token after 1000 ± 1 s:
+/// each refresh adds floor(10^-5 * 2^31) = 21474 token parts, so a full token of 2^31 parts
+/// accumulates after about 100,004 refreshes, i.e. roughly 1000.04 s.
+pub struct TokenBucket {
+    /// Maximum number of tokens the bucket can hold.
+    maximum_size: u32,
+    /// Tokens in the bucket. They are stored as `tokens * TOKEN_PARTS_NUMBER`.
+    /// In this way we can refill the bucket at shorter intervals.
+    size: u64,
+    /// Refill rate in tokens per second.
+    refill_rate: f32,
+    /// Last time the bucket was refreshed.
+    last_refill: Instant,
+}
+
+impl TokenBucket {
+    /// Creates a new token bucket.
+    ///
+    /// # Arguments
+    ///
+    /// * `initial_size` - Initial amount of tokens in the bucket
+    /// * `maximum_size` - Maximum amount of tokens the bucket can hold
+    /// * `refill_rate` - Bucket refill rate in tokens per second
+    /// * `start_time` - Point in time used as a start to calculate the bucket refill.
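+    ///
+    /// # Example
+    ///
+    /// An illustrative sketch (`now` is assumed to come from the caller's clock):
+    /// ```ignore
+    /// // 5 tokens to start, capacity 10, refilling at 2.5 tokens/s.
+    /// let mut bucket = TokenBucket::new(5, 10, 2.5, now)?;
+    /// assert!(bucket.acquire(5, now)); // spends the initial tokens
+    /// assert!(!bucket.acquire(1, now)); // the bucket is now empty
+    /// assert!(bucket.acquire(2, now + Duration::seconds(1))); // ~2.5 tokens were refilled
+    /// ```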
+    ///
+    /// # Errors
+    ///
+    /// Returns an error if any of the arguments has an invalid value.
+    pub fn new(
+        initial_size: u32,
+        maximum_size: u32,
+        refill_rate: f32,
+        start_time: Instant,
+    ) -> Result<Self, TokenBucketError> {
+        let size = to_tokens_with_parts(maximum_size.min(initial_size));
+        TokenBucket::validate_refill_rate(refill_rate)?;
+        Ok(Self { maximum_size, size, refill_rate, last_refill: start_time })
+    }
+
+    /// Makes an attempt to acquire `tokens` tokens.
+    ///
+    /// This method takes a parameter called `now`, which should be the current time.
+    /// It is used to refill the bucket before subtracting tokens.
+    ///
+    /// If the tokens are available they are subtracted from the current `size` and
+    /// the method returns `true`. Otherwise, `size` is not changed and the method
+    /// returns `false`.
+    pub fn acquire(&mut self, tokens: u32, now: Instant) -> bool {
+        self.refill(now);
+        let tokens = to_tokens_with_parts(tokens);
+        if self.size >= tokens {
+            self.size -= tokens;
+            true
+        } else {
+            false
+        }
+    }
+
+    /// Refills the bucket with the right number of tokens according to
+    /// the `refill_rate` and the new current time `now`.
+    ///
+    /// For example: if `refill_rate` == 1 and `now - last_refill` == 1s then exactly 1 token
+    /// will be added.
+    fn refill(&mut self, now: Instant) {
+        // Sanity check: `now` should be later than the last refill time.
+        if now <= self.last_refill {
+            return;
+        }
+        // Compute how many tokens should be added to the current size.
+        let duration = now - self.last_refill;
+        let tokens_to_add = duration.as_secs_f64() * self.refill_rate as f64;
+        let tokens_to_add = (tokens_to_add * TOKEN_PARTS_NUMBER as f64) as u64;
+        // Update `last_refill` and `size` only if there's a change. This is done to prevent
+        // losing token parts to truncation when the elapsed duration is too small.
+        if tokens_to_add > 0 {
+            self.size = self
+                .size
+                .saturating_add(tokens_to_add)
+                .min(to_tokens_with_parts(self.maximum_size));
+            self.last_refill = now;
+        }
+    }
+
+    /// Returns an error if the value provided is not in the correct range for
+    /// `refill_rate`.
+    pub(crate) fn validate_refill_rate(refill_rate: f32) -> Result<(), TokenBucketError> {
+        if refill_rate < 0.0 {
+            return Err(TokenBucketError::InvalidRefillRate(refill_rate));
+        }
+        if !refill_rate.is_normal() && refill_rate != 0.0 {
+            return Err(TokenBucketError::InvalidRefillRate(refill_rate));
+        }
+        Ok(())
+    }
+}
+
+/// Transforms a value of `tokens` without a fractional part into a representation
+/// having a fractional part.
+fn to_tokens_with_parts(tokens: u32) -> u64 {
+    // Safe (check the test `token_fractional_representation_cant_overflow`).
+ tokens as u64 * TOKEN_PARTS_NUMBER +} + +#[cfg(test)] +mod tests { + use super::*; + use near_async::time::{Duration, Instant}; + + #[test] + fn token_fractional_representation_cant_overflow() { + assert!(TOKEN_PARTS_NUMBER.saturating_mul(u32::MAX as u64) < u64::MAX); + } + + #[test] + fn initial_more_than_max() { + let bucket = + TokenBucket::new(5, 2, 1.0, Instant::now()).expect("bucket should be well formed"); + assert_eq!(bucket.size, to_tokens_with_parts(2)); + assert_eq!(bucket.maximum_size, 2); + } + + #[test] + fn invalid_refill_rate() { + assert!(TokenBucket::new(2, 2, f32::NAN, Instant::now()).is_err()); + assert!(TokenBucket::new(2, 2, f32::INFINITY, Instant::now()).is_err()); + assert!(TokenBucket::new(2, 2, f32::NEG_INFINITY, Instant::now()).is_err()); + assert!(TokenBucket::new(2, 2, -1.0, Instant::now()).is_err()); + } + + #[test] + fn valid_refill_rate() { + assert!(TokenBucket::new(2, 2, 0.0, Instant::now()).is_ok()); + assert!(TokenBucket::new(2, 2, 0.3, Instant::now()).is_ok()); + } + + #[test] + fn acquire() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(5, 10, 1.0, now).expect("bucket should be well formed"); + + assert!(bucket.acquire(0, now)); + assert_eq!(bucket.size, to_tokens_with_parts(5)); + + assert!(bucket.acquire(1, now)); + assert_eq!(bucket.size, to_tokens_with_parts(4)); + + assert!(!bucket.acquire(10, now)); + assert_eq!(bucket.size, to_tokens_with_parts(4)); + + assert!(bucket.acquire(4, now)); + assert_eq!(bucket.size, to_tokens_with_parts(0)); + + assert!(!bucket.acquire(1, now)); + assert_eq!(bucket.size, to_tokens_with_parts(0)); + } + + #[test] + fn max_is_zero() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(0, 0, 0.0, now).expect("bucket should be well formed"); + assert!(bucket.acquire(0, now)); + assert!(!bucket.acquire(1, now)); + } + + #[test] + fn buckets_get_refilled() { + let now = Instant::now(); + let mut bucket = + TokenBucket::new(0, 1000, 10.0, now).expect("bucket should be well formed"); + assert!(!bucket.acquire(1, now)); + assert!(bucket.acquire(1, now + Duration::milliseconds(500))); + } + + #[test] + fn zero_refill_rate() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(10, 10, 0.0, now).expect("bucket should be well formed"); + assert!(bucket.acquire(10, now)); + assert!(!bucket.acquire(1, now)); + assert!(!bucket.acquire(1, now + Duration::seconds(100))); + } + + #[test] + fn refill_no_time_elapsed() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(10, 10, 1.0, now).expect("bucket should be well formed"); + let size = bucket.size; + bucket.refill(now); + assert_eq!(bucket.size, size); + } + + #[test] + fn check_non_monotonic_clocks_safety() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(10, 10, 1.0, now).expect("bucket should be well formed"); + let size = bucket.size; + bucket.refill(now - Duration::seconds(100)); + assert_eq!(bucket.size, size); + } + + #[test] + fn refill_partial_token() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(0, 5, 0.4, now).expect("bucket should be well formed"); + assert!(!bucket.acquire(1, now)); + assert!(!bucket.acquire(1, now + Duration::seconds(1))); + assert!(!bucket.acquire(1, now + Duration::seconds(2))); + assert!(bucket.acquire(1, now + Duration::seconds(3))); + } + + #[test] + fn refill_overflow_bucket_max_size() { + let now = Instant::now(); + let mut bucket = TokenBucket::new(2, 5, 1.0, now).expect("bucket should be well formed"); + + bucket.refill(now + Duration::seconds(2)); 
+        assert_eq!(bucket.size, to_tokens_with_parts(4));
+
+        bucket.refill(now + Duration::seconds(4));
+        assert_eq!(bucket.size, to_tokens_with_parts(5));
+
+        assert!(bucket.acquire(5, now + Duration::seconds(4)));
+        assert_eq!(bucket.size, to_tokens_with_parts(0));
+
+        assert!(bucket.acquire(5, now + Duration::seconds(10)));
+        assert_eq!(bucket.size, to_tokens_with_parts(0));
+    }
+
+    #[test]
+    fn check_with_numeric_limits() {
+        let now = Instant::now();
+        let mut bucket = TokenBucket::new(u32::MAX, u32::MAX, 1_000_000.0, now)
+            .expect("bucket should be well formed");
+
+        assert!(bucket.acquire(u32::MAX, now));
+        assert!(!bucket.acquire(1, now));
+
+        let now = now + Duration::days(100);
+        assert!(bucket.acquire(u32::MAX, now));
+        assert!(!bucket.acquire(1, now));
+    }
+
+    #[test]
+    /// Validates that `TokenBucket` meets the requirement of being able to refresh tokens
+    /// successfully when both the refill rate and the elapsed time are very low.
+    fn validate_guaranteed_resolution() {
+        let mut now = Instant::now();
+        let mut bucket = TokenBucket::new(0, 10, 0.001, now).expect("bucket should be well formed");
+        // Up to 999s: no new token added.
+        for _ in 0..99_900 {
+            now += Duration::milliseconds(10);
+            assert!(!bucket.acquire(1, now));
+        }
+        // From 999s to 1001s: the new token should get added.
+        let mut tokens_added = 0;
+        for _ in 99_900..100_100 {
+            now += Duration::milliseconds(10);
+            if bucket.acquire(1, now) {
+                tokens_added += 1;
+            }
+        }
+        assert_eq!(tokens_added, 1);
+    }
+}
diff --git a/chain/network/src/stats/metrics.rs b/chain/network/src/stats/metrics.rs
index 10f9066b2c0..e5735820546 100644
--- a/chain/network/src/stats/metrics.rs
+++ b/chain/network/src/stats/metrics.rs
@@ -192,6 +192,14 @@ pub(crate) static PEER_MESSAGE_SENT_BY_TYPE_TOTAL: Lazy = Lazy::n
     )
     .unwrap()
 });
+pub(crate) static PEER_MESSAGE_RATE_LIMITED_BY_TYPE_TOTAL: Lazy<IntCounterVec> = Lazy::new(|| {
+    try_create_int_counter_vec(
+        "near_peer_message_rate_limited_by_type_total",
+        "Number of received messages dropped due to rate limiting, by message type",
+        &["type"],
+    )
+    .unwrap()
+});
 pub(crate) static SYNC_ACCOUNTS_DATA: Lazy<IntCounterVec> = Lazy::new(|| {
     try_create_int_counter_vec(
         "near_sync_accounts_data",
diff --git a/chain/rosetta-rpc/Cargo.toml b/chain/rosetta-rpc/Cargo.toml
index be1940000be..577d09262e6 100644
--- a/chain/rosetta-rpc/Cargo.toml
+++ b/chain/rosetta-rpc/Cargo.toml
@@ -41,13 +41,12 @@ node-runtime.workspace = true
 [dev-dependencies]
 insta.workspace = true
 near-actix-test-utils.workspace = true
-near-async.workspace = true
+near-time.workspace = true
 
 [features]
 protocol_feature_nonrefundable_transfer_nep491 = []
 nightly_protocol = [
     "near-actix-test-utils/nightly_protocol",
-    "near-async/nightly_protocol",
     "near-chain-configs/nightly_protocol",
     "near-client-primitives/nightly_protocol",
     "near-client/nightly_protocol",
@@ -59,7 +58,6 @@ nightly_protocol = [
 ]
 nightly = [
     "near-actix-test-utils/nightly",
-    "near-async/nightly",
     "near-chain-configs/nightly",
     "near-client-primitives/nightly",
     "near-client/nightly",
diff --git a/chain/rosetta-rpc/src/adapters/mod.rs b/chain/rosetta-rpc/src/adapters/mod.rs
index ac92f44f9c8..5437430499c 100644
--- a/chain/rosetta-rpc/src/adapters/mod.rs
+++ b/chain/rosetta-rpc/src/adapters/mod.rs
@@ -848,12 +848,12 @@ mod tests {
     use super::*;
     use actix::System;
     use near_actix_test_utils::run_actix;
-    use near_async::time::Clock;
     use near_client::test_utils::setup_no_network;
     use near_crypto::{KeyType, SecretKey};
     use near_parameters::{RuntimeConfig, RuntimeConfigView};
     use near_primitives::action::delegate::{DelegateAction, SignedDelegateAction};
     use near_primitives::transaction::{Action, TransferAction};
+    use near_time::Clock;
 
     #[test]
     fn test_convert_block_changes_to_transactions() {
diff --git a/core/async/Cargo.toml b/core/async/Cargo.toml
index a948baa5613..b949ca17296 100644
--- a/core/async/Cargo.toml
+++ b/core/async/Cargo.toml
@@ -23,6 +23,7 @@ tokio = { workspace = true, features = ["rt", "macros"] }
 tracing.workspace = true
 
 near-async-derive.workspace = true
+# TODO(#11652): we use it only for logging; I think it's a bit too much...
 near-o11y.workspace = true
 near-performance-metrics.workspace = true
 near-time = { workspace = true, features = ["clock"] }
diff --git a/core/chain-configs/Cargo.toml b/core/chain-configs/Cargo.toml
index 60e3a241943..9a34387c69d 100644
--- a/core/chain-configs/Cargo.toml
+++ b/core/chain-configs/Cargo.toml
@@ -25,7 +25,7 @@ smart-default.workspace = true
 time.workspace = true
 tracing.workspace = true
 
-near-async.workspace = true
+near-time.workspace = true
 near-crypto.workspace = true
 near-o11y = { workspace = true, optional = true }
 near-parameters.workspace = true
@@ -35,13 +35,11 @@ near-config-utils.workspace = true
 [features]
 protocol_feature_nonrefundable_transfer_nep491 = []
 nightly_protocol = [
-    "near-async/nightly_protocol",
     "near-o11y/nightly_protocol",
     "near-parameters/nightly_protocol",
     "near-primitives/nightly_protocol",
 ]
 nightly = [
-    "near-async/nightly",
     "near-o11y/nightly",
     "near-parameters/nightly",
     "near-primitives/nightly",
diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs
index c85ca75154f..20d53cb1fdf 100644
--- a/core/chain-configs/src/client_config.rs
+++ b/core/chain-configs/src/client_config.rs
@@ -2,11 +2,11 @@
 use crate::ExternalStorageLocation::GCS;
 use crate::MutableConfigValue;
 use bytesize::ByteSize;
-use near_async::time::Duration;
 use near_primitives::types::{
     AccountId, BlockHeight, BlockHeightDelta, Gas, NumBlocks, NumSeats, ShardId,
 };
 use near_primitives::version::Version;
+use near_time::Duration;
 use std::cmp::{max, min};
 use std::path::PathBuf;
 use std::sync::atomic::AtomicBool;
@@ -48,7 +48,7 @@ pub struct GCConfig {
     pub gc_num_epochs_to_keep: u64,
 
     /// How often gc should be run
-    #[serde(with = "near_async::time::serde_duration_as_std")]
+    #[serde(with = "near_time::serde_duration_as_std")]
     pub gc_step_period: Duration,
 }
 
@@ -120,7 +120,7 @@ pub struct DumpConfig {
     /// Feel free to set to `None`, defaults are sensible.
     #[serde(skip_serializing_if = "Option::is_none")]
     #[serde(default)]
-    #[serde(with = "near_async::time::serde_opt_duration_as_std")]
+    #[serde(with = "near_time::serde_opt_duration_as_std")]
     pub iteration_delay: Option<Duration>,
     /// Location of a json file with credentials allowing write access to the bucket.
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -192,23 +192,23 @@ pub struct ReshardingConfig {
     /// The delay between writing batches to the db. The batch delay can be
     /// increased if resharding is consuming too many resources and interfering
     /// with regular node operation.
-    #[serde(with = "near_async::time::serde_duration_as_std")]
+    #[serde(with = "near_time::serde_duration_as_std")]
     pub batch_delay: Duration,
 
     /// The delay between attempts to start resharding while waiting for the
     /// state snapshot to become available.
- #[serde(with = "near_async::time::serde_duration_as_std")] + #[serde(with = "near_time::serde_duration_as_std")] pub retry_delay: Duration, /// The delay between the resharding request is received and when the actor /// actually starts working on it. This delay should only be used in tests. - #[serde(with = "near_async::time::serde_duration_as_std")] + #[serde(with = "near_time::serde_duration_as_std")] pub initial_delay: Duration, /// The maximum time that the actor will wait for the snapshot to be ready, /// before starting resharding. Do not wait indefinitely since we want to /// report error early enough for the node maintainer to have time to recover. - #[serde(with = "near_async::time::serde_duration_as_std")] + #[serde(with = "near_time::serde_duration_as_std")] pub max_poll_time: Duration, } diff --git a/core/chain-configs/src/genesis_config.rs b/core/chain-configs/src/genesis_config.rs index 214d282d35f..810de152981 100644 --- a/core/chain-configs/src/genesis_config.rs +++ b/core/chain-configs/src/genesis_config.rs @@ -101,7 +101,7 @@ fn default_max_kickout_stake_threshold() -> u8 { } fn default_genesis_time() -> DateTime { - let time = near_async::time::Utc::now_utc(); + let time = near_time::Utc::now_utc(); DateTime::from_timestamp(time.unix_timestamp(), time.nanosecond()).unwrap_or_default() } diff --git a/core/chain-configs/src/test_genesis.rs b/core/chain-configs/src/test_genesis.rs index fca3409fa3c..65856252ea5 100644 --- a/core/chain-configs/src/test_genesis.rs +++ b/core/chain-configs/src/test_genesis.rs @@ -1,6 +1,5 @@ use std::collections::{HashMap, HashSet}; -use near_async::time::Clock; use near_crypto::PublicKey; use near_primitives::account::{AccessKey, Account}; use near_primitives::hash::CryptoHash; @@ -13,6 +12,7 @@ use near_primitives::types::{ }; use near_primitives::utils::from_timestamp; use near_primitives::version::PROTOCOL_VERSION; +use near_time::Clock; use num_rational::Rational32; use crate::{Genesis, GenesisConfig, GenesisContents, GenesisRecords}; @@ -238,11 +238,23 @@ impl TestGenesisBuilder { self } - pub fn kickouts_standard_90_percent(&mut self) -> &mut Self { + /// Validators with performance below 80% are kicked out, similarly to + /// mainnet as of 28 Jun 2024. + pub fn kickouts_standard_80_percent(&mut self) -> &mut Self { self.kickouts_config = Some(KickoutsConfig { - block_producer_kickout_threshold: 90, - chunk_producer_kickout_threshold: 90, - chunk_validator_only_kickout_threshold: 90, + block_producer_kickout_threshold: 80, + chunk_producer_kickout_threshold: 80, + chunk_validator_only_kickout_threshold: 80, + }); + self + } + + /// Only chunk validator-only nodes can be kicked out. 
+ pub fn kickouts_for_chunk_validators_only(&mut self) -> &mut Self { + self.kickouts_config = Some(KickoutsConfig { + block_producer_kickout_threshold: 0, + chunk_producer_kickout_threshold: 0, + chunk_validator_only_kickout_threshold: 50, }); self } diff --git a/core/chain-configs/src/test_utils.rs b/core/chain-configs/src/test_utils.rs index 9400c5aad78..8df1fd78eb6 100644 --- a/core/chain-configs/src/test_utils.rs +++ b/core/chain-configs/src/test_utils.rs @@ -1,4 +1,3 @@ -use near_async::time::Clock; use near_crypto::{InMemorySigner, KeyType, PublicKey}; use near_primitives::account::{AccessKey, Account}; use near_primitives::hash::CryptoHash; @@ -9,6 +8,7 @@ use near_primitives::types::{ }; use near_primitives::utils::{from_timestamp, generate_random_string}; use near_primitives::version::PROTOCOL_VERSION; +use near_time::Clock; use num_rational::Ratio; use crate::{ diff --git a/core/chain-configs/src/updateable_config.rs b/core/chain-configs/src/updateable_config.rs index 283e7ddc64f..ebace65d3ec 100644 --- a/core/chain-configs/src/updateable_config.rs +++ b/core/chain-configs/src/updateable_config.rs @@ -1,7 +1,7 @@ -#[cfg(feature = "metrics")] -use near_async::time::Clock; use near_primitives::types::BlockHeight; use near_primitives::validator_signer::ValidatorSigner; +#[cfg(feature = "metrics")] +use near_time::Clock; use serde::{Deserialize, Serialize, Serializer}; use std::fmt::Debug; use std::sync::{Arc, Mutex}; @@ -106,7 +106,7 @@ pub struct UpdateableClientConfig { /// Time limit for adding transactions in produce_chunk() #[serde(default)] - #[serde(with = "near_async::time::serde_opt_duration_as_std")] + #[serde(with = "near_time::serde_opt_duration_as_std")] pub produce_chunk_add_transactions_time_limit: Option, } diff --git a/core/dyn-configs/Cargo.toml b/core/dyn-configs/Cargo.toml index 36ca12db087..ffdbff591e6 100644 --- a/core/dyn-configs/Cargo.toml +++ b/core/dyn-configs/Cargo.toml @@ -21,7 +21,7 @@ thiserror.workspace = true tokio.workspace = true tracing.workspace = true -near-async.workspace = true +near-time.workspace = true near-chain-configs.workspace = true near-crypto.workspace = true near-o11y.workspace = true @@ -29,14 +29,12 @@ near-primitives.workspace = true [features] nightly = [ - "near-async/nightly", "near-chain-configs/nightly", "near-o11y/nightly", "near-primitives/nightly", "nightly_protocol", ] nightly_protocol = [ - "near-async/nightly_protocol", "near-chain-configs/nightly_protocol", "near-o11y/nightly_protocol", "near-primitives/nightly_protocol", diff --git a/core/dyn-configs/src/lib.rs b/core/dyn-configs/src/lib.rs index f9d19ba13b7..320e44dec8c 100644 --- a/core/dyn-configs/src/lib.rs +++ b/core/dyn-configs/src/lib.rs @@ -1,9 +1,9 @@ #![doc = include_str!("../README.md")] -use near_async::time::Clock; use near_chain_configs::UpdateableClientConfig; use near_o11y::log_config::LogConfig; use near_primitives::validator_signer::ValidatorSigner; +use near_time::Clock; use std::path::PathBuf; use std::sync::Arc; use tokio::sync::broadcast::Sender; diff --git a/core/parameters/res/runtime_configs/80.yaml b/core/parameters/res/runtime_configs/68.yaml similarity index 100% rename from core/parameters/res/runtime_configs/80.yaml rename to core/parameters/res/runtime_configs/68.yaml diff --git a/core/parameters/res/runtime_configs/69.yaml b/core/parameters/res/runtime_configs/69.yaml new file mode 100644 index 00000000000..16b426fd9b4 --- /dev/null +++ b/core/parameters/res/runtime_configs/69.yaml @@ -0,0 +1,73 @@ +# State Witness size limits. 
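+# Each parameter below records the value in force before this change (`old`) and
+# the value that takes effect when protocol version 69 activates (`new`).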
+
+max_transaction_size: {old: 4_194_304, new: 1_572_864}
+
+per_receipt_storage_proof_size_limit: {old: 999_999_999_999_999, new: 4_000_000}
+main_storage_proof_size_soft_limit: {old: 999_999_999_999_999, new: 3_000_000}
+
+max_receipt_size: {old: 999_999_999_999_999, new: 4_194_304}
+new_transactions_validation_state_size_soft_limit: {old: 999_999_999_999_999, new: 572_864}
+
+# 100 kiB
+outgoing_receipts_usual_size_limit: {old: 999_999_999_999_999, new: 102_400}
+
+# 4.5 MiB
+outgoing_receipts_big_size_limit: {old: 999_999_999_999_999, new: 4_718_592}
+
+combined_transactions_size_limit: {old: 999_999_999_999_999, new: 4_194_304}
+
+
+# Change the cost of sending a receipt to another account to 50 TGas / MiB
+# (47_683_715 per byte * 1_048_576 bytes per MiB ~= 50 TGas).
+
+action_deploy_contract_per_byte: {
+  old: {
+    send_sir: 6_812_999,
+    send_not_sir: 6_812_999,
+    execution: 64_572_944,
+  },
+  new: {
+    send_sir: 6_812_999,
+    send_not_sir: 47_683_715,
+    execution: 64_572_944,
+  }
+}
+action_function_call_per_byte: {
+  old: {
+    send_sir: 2_235_934,
+    send_not_sir: 2_235_934,
+    execution: 2_235_934,
+  },
+  new: {
+    send_sir: 2_235_934,
+    send_not_sir: 47_683_715,
+    execution: 2_235_934,
+  }
+}
+action_add_function_call_key_per_byte: {
+  old: {
+    send_sir: 1_925_331,
+    send_not_sir: 1_925_331,
+    execution: 1_925_331,
+  },
+  new: {
+    send_sir: 1_925_331,
+    send_not_sir: 47_683_715,
+    execution: 1_925_331,
+  }
+}
+data_receipt_creation_per_byte: {
+  old: {
+    send_sir: 17_212_011,
+    send_not_sir: 17_212_011,
+    execution: 17_212_011,
+  },
+  new: {
+    send_sir: 17_212_011,
+    send_not_sir: 47_683_715,
+    execution: 17_212_011,
+  }
+}
+wasm_yield_resume_byte: {
+  old: 17_212_011,
+  new: 47_683_715
+}
\ No newline at end of file
diff --git a/core/parameters/res/runtime_configs/81.yaml b/core/parameters/res/runtime_configs/81.yaml
deleted file mode 100644
index 8a920bca3f1..00000000000
--- a/core/parameters/res/runtime_configs/81.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-# State Witness size limits.
-
-max_transaction_size: {old: 4_194_304, new: 1_572_864}
-
-per_receipt_storage_proof_size_limit: {old: 999_999_999_999_999, new: 4_000_000}
-main_storage_proof_size_soft_limit: {old: 999_999_999_999_999, new: 3_000_000}
-
-max_receipt_size: {old: 999_999_999_999_999, new: 4_194_304}
-new_transactions_validation_state_size_soft_limit: {old: 999_999_999_999_999, new: 572_864}
-
-# 100 kiB
-outgoing_receipts_usual_size_limit: {old: 999_999_999_999_999, new: 102_400}
-
-# 4.5 MiB
-outgoing_receipts_big_size_limit: {old: 999_999_999_999_999, new: 4_718_592}
-
-combined_transactions_size_limit: {old: 999_999_999_999_999, new: 4_194_304}
diff --git a/core/parameters/res/runtime_configs/parameters.snap b/core/parameters/res/runtime_configs/parameters.snap
index a4b8114dd1f..fb48e82260f 100644
--- a/core/parameters/res/runtime_configs/parameters.snap
+++ b/core/parameters/res/runtime_configs/parameters.snap
@@ -4,12 +4,12 @@ description: THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
--- burnt_gas_reward 3 / 10 pessimistic_gas_price_inflation 103 / 100 -main_storage_proof_size_soft_limit 999_999_999_999_999 -per_receipt_storage_proof_size_limit 999_999_999_999_999 -new_transactions_validation_state_size_soft_limit 999_999_999_999_999 -combined_transactions_size_limit 999_999_999_999_999 -outgoing_receipts_usual_size_limit 999_999_999_999_999 -outgoing_receipts_big_size_limit 999_999_999_999_999 +main_storage_proof_size_soft_limit 3_000_000 +per_receipt_storage_proof_size_limit 4_000_000 +new_transactions_validation_state_size_soft_limit 572_864 +combined_transactions_size_limit 4_194_304 +outgoing_receipts_usual_size_limit 102_400 +outgoing_receipts_big_size_limit 4_718_592 min_allowed_top_level_account_length 65 registrar_account_id registrar storage_amount_per_byte 10000000000000000000 @@ -25,7 +25,7 @@ data_receipt_creation_base - execution: 36_486_732_312 data_receipt_creation_per_byte - send_sir: 17_212_011 -- send_not_sir: 17_212_011 +- send_not_sir: 47_683_715 - execution: 17_212_011 action_create_account - send_sir: 3_850_000_000_000 @@ -41,7 +41,7 @@ action_deploy_contract - execution: 184_765_750_000 action_deploy_contract_per_byte - send_sir: 6_812_999 -- send_not_sir: 6_812_999 +- send_not_sir: 47_683_715 - execution: 64_572_944 action_function_call - send_sir: 200_000_000_000 @@ -49,7 +49,7 @@ action_function_call - execution: 780_000_000_000 action_function_call_per_byte - send_sir: 2_235_934 -- send_not_sir: 2_235_934 +- send_not_sir: 47_683_715 - execution: 2_235_934 action_transfer - send_sir: 115_123_062_500 @@ -69,7 +69,7 @@ action_add_function_call_key - execution: 102_217_625_000 action_add_function_call_key_per_byte - send_sir: 1_925_331 -- send_not_sir: 1_925_331 +- send_not_sir: 47_683_715 - execution: 1_925_331 action_delete_key - send_sir: 94_946_625_000 @@ -145,7 +145,7 @@ wasm_alt_bn128_g1_sum_element 5_000_000_000 wasm_yield_create_base 153_411_779_276 wasm_yield_create_byte 15_643_988 wasm_yield_resume_base 1_195_627_285_210 -wasm_yield_resume_byte 17_212_011 +wasm_yield_resume_byte 47_683_715 max_gas_burnt 300_000_000_000_000 max_gas_burnt_view 300_000_000_000_000 max_stack_height 262_144 @@ -164,8 +164,8 @@ max_length_method_name 256 max_arguments_length 4_194_304 max_length_returned_data 4_194_304 max_contract_size 4_194_304 -max_transaction_size 4_194_304 -max_receipt_size 999_999_999_999_999 +max_transaction_size 1_572_864 +max_receipt_size 4_194_304 max_length_storage_key 2_048 max_length_storage_value 4_194_304 max_promises_per_function_call_action 1_024 @@ -187,13 +187,13 @@ function_call_weight true vm_kind NearVm eth_implicit_accounts false yield_resume true -max_congestion_incoming_gas 9_223_372_036_854_775_807 -max_congestion_outgoing_gas 9_223_372_036_854_775_807 -max_congestion_memory_consumption 9_223_372_036_854_775_807 -max_congestion_missed_chunks 9_223_372_036_854_775_807 -max_outgoing_gas 9_223_372_036_854_775_807 -min_outgoing_gas 9_223_372_036_854_775_807 -allowed_shard_outgoing_gas 9_223_372_036_854_775_807 -max_tx_gas 9_223_372_036_854_775_807 -min_tx_gas 9_223_372_036_854_775_807 -reject_tx_congestion_threshold 1 / 1 +max_congestion_incoming_gas 20_000_000_000_000_000 +max_congestion_outgoing_gas 10_000_000_000_000_000 +max_congestion_memory_consumption 1_000_000_000 +max_congestion_missed_chunks 5 +max_outgoing_gas 300_000_000_000_000_000 +min_outgoing_gas 1_000_000_000_000_000 +allowed_shard_outgoing_gas 1_000_000_000_000_000 +max_tx_gas 500_000_000_000_000 +min_tx_gas 20_000_000_000_000 
+reject_tx_congestion_threshold 50 / 100 diff --git a/core/parameters/src/config_store.rs b/core/parameters/src/config_store.rs index b5d9b7c30b9..16a8ce33de7 100644 --- a/core/parameters/src/config_store.rs +++ b/core/parameters/src/config_store.rs @@ -40,9 +40,9 @@ static CONFIG_DIFFS: &[(ProtocolVersion, &str)] = &[ (66, include_config!("66.yaml")), (67, include_config!("67.yaml")), // Congestion Control. - (80, include_config!("80.yaml")), + (68, include_config!("68.yaml")), // Stateless Validation. - (81, include_config!("81.yaml")), + (69, include_config!("69.yaml")), (129, include_config!("129.yaml")), // Introduce ETH-implicit accounts. (138, include_config!("138.yaml")), diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap index 083fa28d6e9..55a1bade8d1 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__129.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap index 52b0b7c535e..8ea81429f74 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__138.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__80.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__68.json.snap similarity index 100% rename from core/parameters/src/snapshots/near_parameters__config_store__tests__80.json.snap rename to core/parameters/src/snapshots/near_parameters__config_store__tests__68.json.snap diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__81.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__69.json.snap similarity index 
98% rename from core/parameters/src/snapshots/near_parameters__config_store__tests__81.json.snap rename to core/parameters/src/snapshots/near_parameters__config_store__tests__69.json.snap index 8015c5ed7cd..f9986bfaa9d 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__81.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__69.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap index 083fa28d6e9..55a1bade8d1 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_129.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap index 52b0b7c535e..8ea81429f74 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_138.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git 
a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_80.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_68.json.snap similarity index 100% rename from core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_80.json.snap rename to core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_68.json.snap diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_81.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_69.json.snap similarity index 98% rename from core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_81.json.snap rename to core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_69.json.snap index 8015c5ed7cd..f9986bfaa9d 100644 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_81.json.snap +++ b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_69.json.snap @@ -18,7 +18,7 @@ expression: config_view }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: config_view }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: config_view }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap deleted file mode 100644 index 03229d0941c..00000000000 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_83.json.snap +++ /dev/null @@ -1,246 +0,0 @@ ---- -source: core/parameters/src/config_store.rs -expression: config_view ---- -{ - "storage_amount_per_byte": "10000000000000000000", - "transaction_costs": { - "action_receipt_creation_config": { - "send_sir": 108059500000, - "send_not_sir": 108059500000, - "execution": 108059500000 - }, - "data_receipt_creation_config": { - "base_cost": { - "send_sir": 36486732312, - "send_not_sir": 36486732312, - "execution": 36486732312 - }, - "cost_per_byte": { - "send_sir": 17212011, - "send_not_sir": 17212011, - "execution": 17212011 - } - }, - "action_creation_config": { - "create_account_cost": { - "send_sir": 3850000000000, - "send_not_sir": 3850000000000, - "execution": 3850000000000 - }, - "deploy_contract_cost": { - "send_sir": 184765750000, - "send_not_sir": 184765750000, - "execution": 184765750000 - }, - "deploy_contract_cost_per_byte": { - "send_sir": 6812999, - "send_not_sir": 6812999, - "execution": 64572944 - }, - "function_call_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 780000000000 - }, - "function_call_cost_per_byte": { - "send_sir": 2235934, - "send_not_sir": 2235934, - "execution": 2235934 - }, - "transfer_cost": { - "send_sir": 115123062500, - "send_not_sir": 115123062500, - "execution": 115123062500 - }, - "stake_cost": { - "send_sir": 141715687500, - "send_not_sir": 141715687500, - "execution": 
102217625000 - }, - "add_key_cost": { - "full_access_cost": { - "send_sir": 101765125000, - "send_not_sir": 101765125000, - "execution": 101765125000 - }, - "function_call_cost": { - "send_sir": 102217625000, - "send_not_sir": 102217625000, - "execution": 102217625000 - }, - "function_call_cost_per_byte": { - "send_sir": 1925331, - "send_not_sir": 1925331, - "execution": 1925331 - } - }, - "delete_key_cost": { - "send_sir": 94946625000, - "send_not_sir": 94946625000, - "execution": 94946625000 - }, - "delete_account_cost": { - "send_sir": 147489000000, - "send_not_sir": 147489000000, - "execution": 147489000000 - }, - "delegate_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 200000000000 - } - }, - "storage_usage_config": { - "num_bytes_account": 100, - "num_extra_bytes_record": 40 - }, - "burnt_gas_reward": [ - 3, - 10 - ], - "pessimistic_gas_price_inflation_ratio": [ - 103, - 100 - ] - }, - "wasm_config": { - "ext_costs": { - "base": 264768111, - "contract_loading_base": 35445963, - "contract_loading_bytes": 1089295, - "read_memory_base": 2609863200, - "read_memory_byte": 3801333, - "write_memory_base": 2803794861, - "write_memory_byte": 2723772, - "read_register_base": 2517165186, - "read_register_byte": 98562, - "write_register_base": 2865522486, - "write_register_byte": 3801564, - "utf8_decoding_base": 3111779061, - "utf8_decoding_byte": 291580479, - "utf16_decoding_base": 3543313050, - "utf16_decoding_byte": 163577493, - "sha256_base": 4540970250, - "sha256_byte": 24117351, - "keccak256_base": 5879491275, - "keccak256_byte": 21471105, - "keccak512_base": 5811388236, - "keccak512_byte": 36649701, - "ripemd160_base": 853675086, - "ripemd160_block": 680107584, - "ed25519_verify_base": 210000000000, - "ed25519_verify_byte": 9000000, - "ecrecover_base": 278821988457, - "log_base": 3543313050, - "log_byte": 13198791, - "storage_write_base": 64196736000, - "storage_write_key_byte": 70482867, - "storage_write_value_byte": 31018539, - "storage_write_evicted_byte": 32117307, - "storage_read_base": 56356845750, - "storage_read_key_byte": 30952533, - "storage_read_value_byte": 5611005, - "storage_remove_base": 53473030500, - "storage_remove_key_byte": 38220384, - "storage_remove_ret_value_byte": 11531556, - "storage_has_key_base": 54039896625, - "storage_has_key_byte": 30790845, - "storage_iter_create_prefix_base": 0, - "storage_iter_create_prefix_byte": 0, - "storage_iter_create_range_base": 0, - "storage_iter_create_from_byte": 0, - "storage_iter_create_to_byte": 0, - "storage_iter_next_base": 0, - "storage_iter_next_key_byte": 0, - "storage_iter_next_value_byte": 0, - "touching_trie_node": 16101955926, - "read_cached_trie_node": 2280000000, - "promise_and_base": 1465013400, - "promise_and_per_promise": 5452176, - "promise_return": 560152386, - "validator_stake_base": 911834726400, - "validator_total_stake_base": 911834726400, - "contract_compile_base": 0, - "contract_compile_bytes": 0, - "alt_bn128_g1_multiexp_base": 713000000000, - "alt_bn128_g1_multiexp_element": 320000000000, - "alt_bn128_g1_sum_base": 3000000000, - "alt_bn128_g1_sum_element": 5000000000, - "alt_bn128_pairing_check_base": 9686000000000, - "alt_bn128_pairing_check_element": 5102000000000, - "yield_create_base": 153411779276, - "yield_create_byte": 15643988, - "yield_resume_base": 1195627285210, - "yield_resume_byte": 1195627285210 - }, - "grow_mem_cost": 1, - "regular_op_cost": 822756, - "vm_kind": "", - "disable_9393_fix": false, - "storage_get_mode": "FlatStorage", - 
"fix_contract_loading_cost": false, - "implicit_account_creation": true, - "math_extension": true, - "ed25519_verify": true, - "alt_bn128": true, - "function_call_weight": true, - "eth_implicit_accounts": false, - "yield_resume_host_functions": true, - "limit_config": { - "max_gas_burnt": 300000000000000, - "max_stack_height": 262144, - "contract_prepare_version": 2, - "initial_memory_pages": 1024, - "max_memory_pages": 2048, - "registers_memory_limit": 1073741824, - "max_register_size": 104857600, - "max_number_registers": 100, - "max_number_logs": 100, - "max_total_log_length": 16384, - "max_total_prepaid_gas": 300000000000000, - "max_actions_per_receipt": 100, - "max_number_bytes_method_names": 2000, - "max_length_method_name": 256, - "max_arguments_length": 4194304, - "max_length_returned_data": 4194304, - "max_contract_size": 4194304, - "max_transaction_size": 4194304, - "max_receipt_size": 999999999999999, - "max_length_storage_key": 2048, - "max_length_storage_value": 4194304, - "max_promises_per_function_call_action": 1024, - "max_number_input_data_dependencies": 128, - "max_functions_number_per_contract": 10000, - "wasmer2_stack_limit": 204800, - "max_locals_per_contract": 1000000, - "account_id_validity_rules_version": 1, - "yield_timeout_length_in_blocks": 200, - "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 999999999999999 - } - }, - "account_creation_config": { - "min_allowed_top_level_account_length": 65, - "registrar_account_id": "registrar" - }, - "congestion_control_config": { - "max_congestion_incoming_gas": 20000000000000000, - "max_congestion_outgoing_gas": 2000000000000000, - "max_congestion_memory_consumption": 1000000000, - "max_congestion_missed_chunks": 2, - "max_outgoing_gas": 300000000000000000, - "min_outgoing_gas": 1000000000000000, - "allowed_shard_outgoing_gas": 1000000000000000, - "max_tx_gas": 500000000000000, - "min_tx_gas": 20000000000000, - "reject_tx_congestion_threshold": 0.25, - "outgoing_receipts_usual_size_limit": 999999999999999, - "outgoing_receipts_big_size_limit": 999999999999999 - }, - "witness_config": { - "main_storage_proof_size_soft_limit": 16000000, - "combined_transactions_size_limit": 999999999999999, - "new_transactions_validation_state_size_soft_limit": 999999999999999 - } -} diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_85.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_85.json.snap deleted file mode 100644 index 5d083862343..00000000000 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_85.json.snap +++ /dev/null @@ -1,246 +0,0 @@ ---- -source: core/parameters/src/config_store.rs -expression: config_view ---- -{ - "storage_amount_per_byte": "10000000000000000000", - "transaction_costs": { - "action_receipt_creation_config": { - "send_sir": 108059500000, - "send_not_sir": 108059500000, - "execution": 108059500000 - }, - "data_receipt_creation_config": { - "base_cost": { - "send_sir": 36486732312, - "send_not_sir": 36486732312, - "execution": 36486732312 - }, - "cost_per_byte": { - "send_sir": 17212011, - "send_not_sir": 17212011, - "execution": 17212011 - } - }, - "action_creation_config": { - "create_account_cost": { - "send_sir": 3850000000000, - "send_not_sir": 3850000000000, - "execution": 3850000000000 - }, - "deploy_contract_cost": { - "send_sir": 184765750000, - "send_not_sir": 184765750000, - "execution": 184765750000 - }, - "deploy_contract_cost_per_byte": { - "send_sir": 6812999, - 
"send_not_sir": 6812999, - "execution": 64572944 - }, - "function_call_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 780000000000 - }, - "function_call_cost_per_byte": { - "send_sir": 2235934, - "send_not_sir": 2235934, - "execution": 2235934 - }, - "transfer_cost": { - "send_sir": 115123062500, - "send_not_sir": 115123062500, - "execution": 115123062500 - }, - "stake_cost": { - "send_sir": 141715687500, - "send_not_sir": 141715687500, - "execution": 102217625000 - }, - "add_key_cost": { - "full_access_cost": { - "send_sir": 101765125000, - "send_not_sir": 101765125000, - "execution": 101765125000 - }, - "function_call_cost": { - "send_sir": 102217625000, - "send_not_sir": 102217625000, - "execution": 102217625000 - }, - "function_call_cost_per_byte": { - "send_sir": 1925331, - "send_not_sir": 1925331, - "execution": 1925331 - } - }, - "delete_key_cost": { - "send_sir": 94946625000, - "send_not_sir": 94946625000, - "execution": 94946625000 - }, - "delete_account_cost": { - "send_sir": 147489000000, - "send_not_sir": 147489000000, - "execution": 147489000000 - }, - "delegate_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 200000000000 - } - }, - "storage_usage_config": { - "num_bytes_account": 100, - "num_extra_bytes_record": 40 - }, - "burnt_gas_reward": [ - 3, - 10 - ], - "pessimistic_gas_price_inflation_ratio": [ - 103, - 100 - ] - }, - "wasm_config": { - "ext_costs": { - "base": 264768111, - "contract_loading_base": 35445963, - "contract_loading_bytes": 1089295, - "read_memory_base": 2609863200, - "read_memory_byte": 3801333, - "write_memory_base": 2803794861, - "write_memory_byte": 2723772, - "read_register_base": 2517165186, - "read_register_byte": 98562, - "write_register_base": 2865522486, - "write_register_byte": 3801564, - "utf8_decoding_base": 3111779061, - "utf8_decoding_byte": 291580479, - "utf16_decoding_base": 3543313050, - "utf16_decoding_byte": 163577493, - "sha256_base": 4540970250, - "sha256_byte": 24117351, - "keccak256_base": 5879491275, - "keccak256_byte": 21471105, - "keccak512_base": 5811388236, - "keccak512_byte": 36649701, - "ripemd160_base": 853675086, - "ripemd160_block": 680107584, - "ed25519_verify_base": 210000000000, - "ed25519_verify_byte": 9000000, - "ecrecover_base": 278821988457, - "log_base": 3543313050, - "log_byte": 13198791, - "storage_write_base": 64196736000, - "storage_write_key_byte": 70482867, - "storage_write_value_byte": 31018539, - "storage_write_evicted_byte": 32117307, - "storage_read_base": 56356845750, - "storage_read_key_byte": 30952533, - "storage_read_value_byte": 5611005, - "storage_remove_base": 53473030500, - "storage_remove_key_byte": 38220384, - "storage_remove_ret_value_byte": 11531556, - "storage_has_key_base": 54039896625, - "storage_has_key_byte": 30790845, - "storage_iter_create_prefix_base": 0, - "storage_iter_create_prefix_byte": 0, - "storage_iter_create_range_base": 0, - "storage_iter_create_from_byte": 0, - "storage_iter_create_to_byte": 0, - "storage_iter_next_base": 0, - "storage_iter_next_key_byte": 0, - "storage_iter_next_value_byte": 0, - "touching_trie_node": 16101955926, - "read_cached_trie_node": 2280000000, - "promise_and_base": 1465013400, - "promise_and_per_promise": 5452176, - "promise_return": 560152386, - "validator_stake_base": 911834726400, - "validator_total_stake_base": 911834726400, - "contract_compile_base": 0, - "contract_compile_bytes": 0, - "alt_bn128_g1_multiexp_base": 713000000000, - "alt_bn128_g1_multiexp_element": 
320000000000, - "alt_bn128_g1_sum_base": 3000000000, - "alt_bn128_g1_sum_element": 5000000000, - "alt_bn128_pairing_check_base": 9686000000000, - "alt_bn128_pairing_check_element": 5102000000000, - "yield_create_base": 153411779276, - "yield_create_byte": 15643988, - "yield_resume_base": 1195627285210, - "yield_resume_byte": 1195627285210 - }, - "grow_mem_cost": 1, - "regular_op_cost": 822756, - "vm_kind": "", - "disable_9393_fix": false, - "storage_get_mode": "FlatStorage", - "fix_contract_loading_cost": false, - "implicit_account_creation": true, - "math_extension": true, - "ed25519_verify": true, - "alt_bn128": true, - "function_call_weight": true, - "eth_implicit_accounts": false, - "yield_resume_host_functions": true, - "limit_config": { - "max_gas_burnt": 300000000000000, - "max_stack_height": 262144, - "contract_prepare_version": 2, - "initial_memory_pages": 1024, - "max_memory_pages": 2048, - "registers_memory_limit": 1073741824, - "max_register_size": 104857600, - "max_number_registers": 100, - "max_number_logs": 100, - "max_total_log_length": 16384, - "max_total_prepaid_gas": 300000000000000, - "max_actions_per_receipt": 100, - "max_number_bytes_method_names": 2000, - "max_length_method_name": 256, - "max_arguments_length": 4194304, - "max_length_returned_data": 4194304, - "max_contract_size": 4194304, - "max_transaction_size": 4194304, - "max_receipt_size": 999999999999999, - "max_length_storage_key": 2048, - "max_length_storage_value": 4194304, - "max_promises_per_function_call_action": 1024, - "max_number_input_data_dependencies": 128, - "max_functions_number_per_contract": 10000, - "wasmer2_stack_limit": 204800, - "max_locals_per_contract": 1000000, - "account_id_validity_rules_version": 1, - "yield_timeout_length_in_blocks": 200, - "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 4000000 - } - }, - "account_creation_config": { - "min_allowed_top_level_account_length": 65, - "registrar_account_id": "registrar" - }, - "congestion_control_config": { - "max_congestion_incoming_gas": 20000000000000000, - "max_congestion_outgoing_gas": 2000000000000000, - "max_congestion_memory_consumption": 1000000000, - "max_congestion_missed_chunks": 2, - "max_outgoing_gas": 300000000000000000, - "min_outgoing_gas": 1000000000000000, - "allowed_shard_outgoing_gas": 1000000000000000, - "max_tx_gas": 500000000000000, - "min_tx_gas": 20000000000000, - "reject_tx_congestion_threshold": 0.25, - "outgoing_receipts_usual_size_limit": 999999999999999, - "outgoing_receipts_big_size_limit": 999999999999999 - }, - "witness_config": { - "main_storage_proof_size_soft_limit": 3000000, - "combined_transactions_size_limit": 999999999999999, - "new_transactions_validation_state_size_soft_limit": 999999999999999 - } -} diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_87.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_87.json.snap deleted file mode 100644 index 4f06f78ad76..00000000000 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_87.json.snap +++ /dev/null @@ -1,246 +0,0 @@ ---- -source: core/parameters/src/config_store.rs -expression: config_view ---- -{ - "storage_amount_per_byte": "10000000000000000000", - "transaction_costs": { - "action_receipt_creation_config": { - "send_sir": 108059500000, - "send_not_sir": 108059500000, - "execution": 108059500000 - }, - "data_receipt_creation_config": { - "base_cost": { - "send_sir": 36486732312, - "send_not_sir": 36486732312, - 
"execution": 36486732312 - }, - "cost_per_byte": { - "send_sir": 17212011, - "send_not_sir": 17212011, - "execution": 17212011 - } - }, - "action_creation_config": { - "create_account_cost": { - "send_sir": 3850000000000, - "send_not_sir": 3850000000000, - "execution": 3850000000000 - }, - "deploy_contract_cost": { - "send_sir": 184765750000, - "send_not_sir": 184765750000, - "execution": 184765750000 - }, - "deploy_contract_cost_per_byte": { - "send_sir": 6812999, - "send_not_sir": 6812999, - "execution": 64572944 - }, - "function_call_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 780000000000 - }, - "function_call_cost_per_byte": { - "send_sir": 2235934, - "send_not_sir": 2235934, - "execution": 2235934 - }, - "transfer_cost": { - "send_sir": 115123062500, - "send_not_sir": 115123062500, - "execution": 115123062500 - }, - "stake_cost": { - "send_sir": 141715687500, - "send_not_sir": 141715687500, - "execution": 102217625000 - }, - "add_key_cost": { - "full_access_cost": { - "send_sir": 101765125000, - "send_not_sir": 101765125000, - "execution": 101765125000 - }, - "function_call_cost": { - "send_sir": 102217625000, - "send_not_sir": 102217625000, - "execution": 102217625000 - }, - "function_call_cost_per_byte": { - "send_sir": 1925331, - "send_not_sir": 1925331, - "execution": 1925331 - } - }, - "delete_key_cost": { - "send_sir": 94946625000, - "send_not_sir": 94946625000, - "execution": 94946625000 - }, - "delete_account_cost": { - "send_sir": 147489000000, - "send_not_sir": 147489000000, - "execution": 147489000000 - }, - "delegate_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 200000000000 - } - }, - "storage_usage_config": { - "num_bytes_account": 100, - "num_extra_bytes_record": 40 - }, - "burnt_gas_reward": [ - 3, - 10 - ], - "pessimistic_gas_price_inflation_ratio": [ - 103, - 100 - ] - }, - "wasm_config": { - "ext_costs": { - "base": 264768111, - "contract_loading_base": 35445963, - "contract_loading_bytes": 1089295, - "read_memory_base": 2609863200, - "read_memory_byte": 3801333, - "write_memory_base": 2803794861, - "write_memory_byte": 2723772, - "read_register_base": 2517165186, - "read_register_byte": 98562, - "write_register_base": 2865522486, - "write_register_byte": 3801564, - "utf8_decoding_base": 3111779061, - "utf8_decoding_byte": 291580479, - "utf16_decoding_base": 3543313050, - "utf16_decoding_byte": 163577493, - "sha256_base": 4540970250, - "sha256_byte": 24117351, - "keccak256_base": 5879491275, - "keccak256_byte": 21471105, - "keccak512_base": 5811388236, - "keccak512_byte": 36649701, - "ripemd160_base": 853675086, - "ripemd160_block": 680107584, - "ed25519_verify_base": 210000000000, - "ed25519_verify_byte": 9000000, - "ecrecover_base": 278821988457, - "log_base": 3543313050, - "log_byte": 13198791, - "storage_write_base": 64196736000, - "storage_write_key_byte": 70482867, - "storage_write_value_byte": 31018539, - "storage_write_evicted_byte": 32117307, - "storage_read_base": 56356845750, - "storage_read_key_byte": 30952533, - "storage_read_value_byte": 5611005, - "storage_remove_base": 53473030500, - "storage_remove_key_byte": 38220384, - "storage_remove_ret_value_byte": 11531556, - "storage_has_key_base": 54039896625, - "storage_has_key_byte": 30790845, - "storage_iter_create_prefix_base": 0, - "storage_iter_create_prefix_byte": 0, - "storage_iter_create_range_base": 0, - "storage_iter_create_from_byte": 0, - "storage_iter_create_to_byte": 0, - "storage_iter_next_base": 0, - 
"storage_iter_next_key_byte": 0, - "storage_iter_next_value_byte": 0, - "touching_trie_node": 16101955926, - "read_cached_trie_node": 2280000000, - "promise_and_base": 1465013400, - "promise_and_per_promise": 5452176, - "promise_return": 560152386, - "validator_stake_base": 911834726400, - "validator_total_stake_base": 911834726400, - "contract_compile_base": 0, - "contract_compile_bytes": 0, - "alt_bn128_g1_multiexp_base": 713000000000, - "alt_bn128_g1_multiexp_element": 320000000000, - "alt_bn128_g1_sum_base": 3000000000, - "alt_bn128_g1_sum_element": 5000000000, - "alt_bn128_pairing_check_base": 9686000000000, - "alt_bn128_pairing_check_element": 5102000000000, - "yield_create_base": 153411779276, - "yield_create_byte": 15643988, - "yield_resume_base": 1195627285210, - "yield_resume_byte": 1195627285210 - }, - "grow_mem_cost": 1, - "regular_op_cost": 822756, - "vm_kind": "", - "disable_9393_fix": false, - "storage_get_mode": "FlatStorage", - "fix_contract_loading_cost": false, - "implicit_account_creation": true, - "math_extension": true, - "ed25519_verify": true, - "alt_bn128": true, - "function_call_weight": true, - "eth_implicit_accounts": false, - "yield_resume_host_functions": true, - "limit_config": { - "max_gas_burnt": 300000000000000, - "max_stack_height": 262144, - "contract_prepare_version": 2, - "initial_memory_pages": 1024, - "max_memory_pages": 2048, - "registers_memory_limit": 1073741824, - "max_register_size": 104857600, - "max_number_registers": 100, - "max_number_logs": 100, - "max_total_log_length": 16384, - "max_total_prepaid_gas": 300000000000000, - "max_actions_per_receipt": 100, - "max_number_bytes_method_names": 2000, - "max_length_method_name": 256, - "max_arguments_length": 4194304, - "max_length_returned_data": 4194304, - "max_contract_size": 4194304, - "max_transaction_size": 1572864, - "max_receipt_size": 4194304, - "max_length_storage_key": 2048, - "max_length_storage_value": 4194304, - "max_promises_per_function_call_action": 1024, - "max_number_input_data_dependencies": 128, - "max_functions_number_per_contract": 10000, - "wasmer2_stack_limit": 204800, - "max_locals_per_contract": 1000000, - "account_id_validity_rules_version": 1, - "yield_timeout_length_in_blocks": 200, - "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 4000000 - } - }, - "account_creation_config": { - "min_allowed_top_level_account_length": 65, - "registrar_account_id": "registrar" - }, - "congestion_control_config": { - "max_congestion_incoming_gas": 20000000000000000, - "max_congestion_outgoing_gas": 2000000000000000, - "max_congestion_memory_consumption": 1000000000, - "max_congestion_missed_chunks": 2, - "max_outgoing_gas": 300000000000000000, - "min_outgoing_gas": 1000000000000000, - "allowed_shard_outgoing_gas": 1000000000000000, - "max_tx_gas": 500000000000000, - "min_tx_gas": 20000000000000, - "reject_tx_congestion_threshold": 0.25, - "outgoing_receipts_usual_size_limit": 102400, - "outgoing_receipts_big_size_limit": 4718592 - }, - "witness_config": { - "main_storage_proof_size_soft_limit": 3000000, - "combined_transactions_size_limit": 2097152, - "new_transactions_validation_state_size_soft_limit": 572864 - } -} diff --git a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_90.json.snap b/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_90.json.snap deleted file mode 100644 index 8710200da18..00000000000 --- a/core/parameters/src/snapshots/near_parameters__config_store__tests__testnet_90.json.snap +++ 
/dev/null @@ -1,246 +0,0 @@ ---- -source: core/parameters/src/config_store.rs -expression: config_view ---- -{ - "storage_amount_per_byte": "10000000000000000000", - "transaction_costs": { - "action_receipt_creation_config": { - "send_sir": 108059500000, - "send_not_sir": 108059500000, - "execution": 108059500000 - }, - "data_receipt_creation_config": { - "base_cost": { - "send_sir": 36486732312, - "send_not_sir": 36486732312, - "execution": 36486732312 - }, - "cost_per_byte": { - "send_sir": 17212011, - "send_not_sir": 17212011, - "execution": 17212011 - } - }, - "action_creation_config": { - "create_account_cost": { - "send_sir": 3850000000000, - "send_not_sir": 3850000000000, - "execution": 3850000000000 - }, - "deploy_contract_cost": { - "send_sir": 184765750000, - "send_not_sir": 184765750000, - "execution": 184765750000 - }, - "deploy_contract_cost_per_byte": { - "send_sir": 6812999, - "send_not_sir": 6812999, - "execution": 64572944 - }, - "function_call_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 780000000000 - }, - "function_call_cost_per_byte": { - "send_sir": 2235934, - "send_not_sir": 2235934, - "execution": 2235934 - }, - "transfer_cost": { - "send_sir": 115123062500, - "send_not_sir": 115123062500, - "execution": 115123062500 - }, - "stake_cost": { - "send_sir": 141715687500, - "send_not_sir": 141715687500, - "execution": 102217625000 - }, - "add_key_cost": { - "full_access_cost": { - "send_sir": 101765125000, - "send_not_sir": 101765125000, - "execution": 101765125000 - }, - "function_call_cost": { - "send_sir": 102217625000, - "send_not_sir": 102217625000, - "execution": 102217625000 - }, - "function_call_cost_per_byte": { - "send_sir": 1925331, - "send_not_sir": 1925331, - "execution": 1925331 - } - }, - "delete_key_cost": { - "send_sir": 94946625000, - "send_not_sir": 94946625000, - "execution": 94946625000 - }, - "delete_account_cost": { - "send_sir": 147489000000, - "send_not_sir": 147489000000, - "execution": 147489000000 - }, - "delegate_cost": { - "send_sir": 200000000000, - "send_not_sir": 200000000000, - "execution": 200000000000 - } - }, - "storage_usage_config": { - "num_bytes_account": 100, - "num_extra_bytes_record": 40 - }, - "burnt_gas_reward": [ - 3, - 10 - ], - "pessimistic_gas_price_inflation_ratio": [ - 103, - 100 - ] - }, - "wasm_config": { - "ext_costs": { - "base": 264768111, - "contract_loading_base": 35445963, - "contract_loading_bytes": 1089295, - "read_memory_base": 2609863200, - "read_memory_byte": 3801333, - "write_memory_base": 2803794861, - "write_memory_byte": 2723772, - "read_register_base": 2517165186, - "read_register_byte": 98562, - "write_register_base": 2865522486, - "write_register_byte": 3801564, - "utf8_decoding_base": 3111779061, - "utf8_decoding_byte": 291580479, - "utf16_decoding_base": 3543313050, - "utf16_decoding_byte": 163577493, - "sha256_base": 4540970250, - "sha256_byte": 24117351, - "keccak256_base": 5879491275, - "keccak256_byte": 21471105, - "keccak512_base": 5811388236, - "keccak512_byte": 36649701, - "ripemd160_base": 853675086, - "ripemd160_block": 680107584, - "ed25519_verify_base": 210000000000, - "ed25519_verify_byte": 9000000, - "ecrecover_base": 278821988457, - "log_base": 3543313050, - "log_byte": 13198791, - "storage_write_base": 64196736000, - "storage_write_key_byte": 70482867, - "storage_write_value_byte": 31018539, - "storage_write_evicted_byte": 32117307, - "storage_read_base": 56356845750, - "storage_read_key_byte": 30952533, - "storage_read_value_byte": 5611005, - 
"storage_remove_base": 53473030500, - "storage_remove_key_byte": 38220384, - "storage_remove_ret_value_byte": 11531556, - "storage_has_key_base": 54039896625, - "storage_has_key_byte": 30790845, - "storage_iter_create_prefix_base": 0, - "storage_iter_create_prefix_byte": 0, - "storage_iter_create_range_base": 0, - "storage_iter_create_from_byte": 0, - "storage_iter_create_to_byte": 0, - "storage_iter_next_base": 0, - "storage_iter_next_key_byte": 0, - "storage_iter_next_value_byte": 0, - "touching_trie_node": 16101955926, - "read_cached_trie_node": 2280000000, - "promise_and_base": 1465013400, - "promise_and_per_promise": 5452176, - "promise_return": 560152386, - "validator_stake_base": 911834726400, - "validator_total_stake_base": 911834726400, - "contract_compile_base": 0, - "contract_compile_bytes": 0, - "alt_bn128_g1_multiexp_base": 713000000000, - "alt_bn128_g1_multiexp_element": 320000000000, - "alt_bn128_g1_sum_base": 3000000000, - "alt_bn128_g1_sum_element": 5000000000, - "alt_bn128_pairing_check_base": 9686000000000, - "alt_bn128_pairing_check_element": 5102000000000, - "yield_create_base": 153411779276, - "yield_create_byte": 15643988, - "yield_resume_base": 1195627285210, - "yield_resume_byte": 1195627285210 - }, - "grow_mem_cost": 1, - "regular_op_cost": 822756, - "vm_kind": "", - "disable_9393_fix": false, - "storage_get_mode": "FlatStorage", - "fix_contract_loading_cost": false, - "implicit_account_creation": true, - "math_extension": true, - "ed25519_verify": true, - "alt_bn128": true, - "function_call_weight": true, - "eth_implicit_accounts": false, - "yield_resume_host_functions": true, - "limit_config": { - "max_gas_burnt": 300000000000000, - "max_stack_height": 262144, - "contract_prepare_version": 2, - "initial_memory_pages": 1024, - "max_memory_pages": 2048, - "registers_memory_limit": 1073741824, - "max_register_size": 104857600, - "max_number_registers": 100, - "max_number_logs": 100, - "max_total_log_length": 16384, - "max_total_prepaid_gas": 300000000000000, - "max_actions_per_receipt": 100, - "max_number_bytes_method_names": 2000, - "max_length_method_name": 256, - "max_arguments_length": 4194304, - "max_length_returned_data": 4194304, - "max_contract_size": 4194304, - "max_transaction_size": 1572864, - "max_receipt_size": 4194304, - "max_length_storage_key": 2048, - "max_length_storage_value": 4194304, - "max_promises_per_function_call_action": 1024, - "max_number_input_data_dependencies": 128, - "max_functions_number_per_contract": 10000, - "wasmer2_stack_limit": 204800, - "max_locals_per_contract": 1000000, - "account_id_validity_rules_version": 1, - "yield_timeout_length_in_blocks": 200, - "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 4000000 - } - }, - "account_creation_config": { - "min_allowed_top_level_account_length": 65, - "registrar_account_id": "registrar" - }, - "congestion_control_config": { - "max_congestion_incoming_gas": 20000000000000000, - "max_congestion_outgoing_gas": 2000000000000000, - "max_congestion_memory_consumption": 1000000000, - "max_congestion_missed_chunks": 5, - "max_outgoing_gas": 300000000000000000, - "min_outgoing_gas": 1000000000000000, - "allowed_shard_outgoing_gas": 1000000000000000, - "max_tx_gas": 500000000000000, - "min_tx_gas": 20000000000000, - "reject_tx_congestion_threshold": 0.25, - "outgoing_receipts_usual_size_limit": 102400, - "outgoing_receipts_big_size_limit": 4718592 - }, - "witness_config": { - "main_storage_proof_size_soft_limit": 3000000, - "combined_transactions_size_limit": 
4194304, - "new_transactions_validation_state_size_soft_limit": 572864 - } -} diff --git a/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap b/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap index 3d2d144fd69..45b8e7e432a 100644 --- a/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap +++ b/core/parameters/src/snapshots/near_parameters__view__tests__runtime_config_view.snap @@ -18,7 +18,7 @@ expression: "&view" }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: "&view" }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: "&view" }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: "&view" }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, @@ -205,8 +205,8 @@ expression: "&view" "max_arguments_length": 4194304, "max_length_returned_data": 4194304, "max_contract_size": 4194304, - "max_transaction_size": 4194304, - "max_receipt_size": 999999999999999, + "max_transaction_size": 1572864, + "max_receipt_size": 4194304, "max_length_storage_key": 2048, "max_length_storage_value": 4194304, "max_promises_per_function_call_action": 1024, @@ -217,7 +217,7 @@ expression: "&view" "account_id_validity_rules_version": 1, "yield_timeout_length_in_blocks": 200, "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 999999999999999 + "per_receipt_storage_proof_size_limit": 4000000 } }, "account_creation_config": { @@ -225,22 +225,22 @@ expression: "&view" "registrar_account_id": "registrar" }, "congestion_control_config": { - "max_congestion_incoming_gas": 9223372036854775807, - "max_congestion_outgoing_gas": 9223372036854775807, - "max_congestion_memory_consumption": 9223372036854775807, - "max_congestion_missed_chunks": 9223372036854775807, - "max_outgoing_gas": 9223372036854775807, - "min_outgoing_gas": 9223372036854775807, - "allowed_shard_outgoing_gas": 9223372036854775807, - "max_tx_gas": 9223372036854775807, - "min_tx_gas": 9223372036854775807, - "reject_tx_congestion_threshold": 1.0, - "outgoing_receipts_usual_size_limit": 999999999999999, - "outgoing_receipts_big_size_limit": 999999999999999 + "max_congestion_incoming_gas": 20000000000000000, + "max_congestion_outgoing_gas": 10000000000000000, + "max_congestion_memory_consumption": 1000000000, + "max_congestion_missed_chunks": 5, + "max_outgoing_gas": 300000000000000000, + "min_outgoing_gas": 1000000000000000, + "allowed_shard_outgoing_gas": 1000000000000000, + "max_tx_gas": 500000000000000, + "min_tx_gas": 20000000000000, + "reject_tx_congestion_threshold": 0.5, + "outgoing_receipts_usual_size_limit": 102400, + "outgoing_receipts_big_size_limit": 4718592 }, "witness_config": { - "main_storage_proof_size_soft_limit": 999999999999999, - "combined_transactions_size_limit": 999999999999999, - "new_transactions_validation_state_size_soft_limit": 999999999999999 + "main_storage_proof_size_soft_limit": 3000000, + "combined_transactions_size_limit": 4194304, + "new_transactions_validation_state_size_soft_limit": 572864 } } diff --git a/core/primitives-core/src/version.rs 
b/core/primitives-core/src/version.rs index f8d676f2612..b7cad03dc94 100644 --- a/core/primitives-core/src/version.rs +++ b/core/primitives-core/src/version.rs @@ -169,6 +169,8 @@ pub enum ProtocolFeature { ChangePartialWitnessDataPartsRequired, /// Increase the `combined_transactions_size_limit` to 4MiB to allow higher throughput. BiggerCombinedTransactionLimit, + /// Increase gas cost of sending receipt to another account to 50 TGas / MiB + HigherSendingCost, } impl ProtocolFeature { @@ -218,11 +220,10 @@ impl ProtocolFeature { ProtocolFeature::SimpleNightshadeV3 => 65, ProtocolFeature::DecreaseFunctionCallBaseCost => 66, ProtocolFeature::YieldExecution => 67, - - // Congestion control should be enabled BEFORE stateless validation, so it has a lower version. - ProtocolFeature::CongestionControl => 80, - + ProtocolFeature::CongestionControl => 68, // Stateless validation features. + // TODO All of the stateless validation features should be collapsed + // into a single protocol feature. ProtocolFeature::StatelessValidationV0 | ProtocolFeature::LowerValidatorKickoutPercentForDebugging | ProtocolFeature::SingleShardTracking @@ -233,7 +234,8 @@ impl ProtocolFeature { | ProtocolFeature::OutgoingReceiptsSizeLimit | ProtocolFeature::NoChunkOnlyProducers | ProtocolFeature::ChangePartialWitnessDataPartsRequired - | ProtocolFeature::BiggerCombinedTransactionLimit => 81, + | ProtocolFeature::BiggerCombinedTransactionLimit + | ProtocolFeature::HigherSendingCost => 69, // This protocol version is reserved for use in resharding tests. An extra resharding // is simulated on top of the latest shard layout in production. Note that later @@ -264,12 +266,13 @@ impl ProtocolFeature { /// Current protocol version used on the mainnet. /// Some features (e. g. FixStorageUsage) require that there is at least one epoch with exactly /// the corresponding version -const STABLE_PROTOCOL_VERSION: ProtocolVersion = 67; +const STABLE_PROTOCOL_VERSION: ProtocolVersion = 69; /// Largest protocol version supported by the current binary. pub const PROTOCOL_VERSION: ProtocolVersion = if cfg!(feature = "statelessnet_protocol") { - // Current StatelessNet protocol version. - 81 + // Please note that congestion control and stateless validation are now + // stabilized but statelessnet should remain at its own version. + 82 } else if cfg!(feature = "nightly_protocol") { // On nightly, pick big enough version to support all features. 
143 diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index cd1d8a35e6c..49200983d43 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -41,7 +41,7 @@ tracing.workspace = true zstd.workspace = true enum-map.workspace = true -near-time.workspace = true +near-time = { workspace = true } near-crypto.workspace = true near-fmt.workspace = true near-primitives-core.workspace = true diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs index d005dbe99ad..433d060f7f9 100644 --- a/core/primitives/src/block.rs +++ b/core/primitives/src/block.rs @@ -4,19 +4,17 @@ use crate::block::BlockValidityError::{ }; use crate::block_body::{BlockBody, BlockBodyV1, ChunkEndorsementSignatures}; pub use crate::block_header::*; -use crate::challenge::{Challenges, ChallengesResult}; +use crate::challenge::Challenges; use crate::checked_feature; -use crate::congestion_info::{BlockCongestionInfo, CongestionInfo, ExtendedCongestionInfo}; -use crate::hash::{hash, CryptoHash}; +use crate::congestion_info::{BlockCongestionInfo, ExtendedCongestionInfo}; +use crate::hash::CryptoHash; use crate::merkle::{merklize, verify_path, MerklePath}; use crate::num_rational::Rational32; use crate::sharding::{ChunkHashHeight, ShardChunkHeader, ShardChunkHeaderV1}; -use crate::types::{Balance, BlockHeight, EpochId, Gas, NumBlocks}; -use crate::validator_signer::ValidatorSigner; +use crate::types::{Balance, BlockHeight, EpochId, Gas}; use crate::version::{ProtocolVersion, SHARD_CHUNK_HEADER_UPGRADE_VERSION}; use borsh::{BorshDeserialize, BorshSerialize}; -use near_crypto::Signature; -use near_time::{Clock, Duration, Utc}; +use near_time::Utc; use primitive_types::U256; use std::collections::BTreeMap; use std::ops::Index; @@ -92,7 +90,7 @@ type ShardChunkReedSolomon = reed_solomon_erasure::galois_8::ReedSolomon; #[cfg(feature = "solomon")] pub fn genesis_chunks( state_roots: Vec<crate::types::StateRoot>, - congestion_infos: Vec<Option<CongestionInfo>>, + congestion_infos: Vec<Option<crate::congestion_info::CongestionInfo>>, shard_ids: &[crate::types::ShardId], initial_gas_limit: Gas, genesis_height: BlockHeight, @@ -143,7 +141,7 @@ fn genesis_chunk( initial_gas_limit: u64, shard_id: u64, state_root: CryptoHash, - congestion_info: Option<CongestionInfo>, + congestion_info: Option<crate::congestion_info::CongestionInfo>, ) -> crate::sharding::EncodedShardChunk { let (encoded_chunk, _) = crate::sharding::EncodedShardChunk::new( CryptoHash::default(), @@ -276,30 +274,32 @@ impl Block { } /// Produces new block from header of previous block, current state root and set of transactions. + #[cfg(feature = "clock")] pub fn produce( this_epoch_protocol_version: ProtocolVersion, next_epoch_protocol_version: ProtocolVersion, prev: &BlockHeader, height: BlockHeight, - block_ordinal: NumBlocks, + block_ordinal: crate::types::NumBlocks, chunks: Vec<ShardChunkHeader>, chunk_endorsements: Vec<ChunkEndorsementSignatures>, epoch_id: EpochId, next_epoch_id: EpochId, epoch_sync_data_hash: Option<CryptoHash>, - approvals: Vec<Option<Box<Signature>>>, + approvals: Vec<Option<Box<near_crypto::Signature>>>, gas_price_adjustment_rate: Rational32, min_gas_price: Balance, max_gas_price: Balance, minted_amount: Option<Balance>, - challenges_result: ChallengesResult, + challenges_result: crate::challenge::ChallengesResult, challenges: Challenges, - signer: &ValidatorSigner, + signer: &crate::validator_signer::ValidatorSigner, next_bp_hash: CryptoHash, block_merkle_root: CryptoHash, - clock: Clock, - sandbox_delta_time: Option<Duration>, + clock: near_time::Clock, + sandbox_delta_time: Option<near_time::Duration>, ) -> Self { + use crate::hash::hash; // Collect aggregate of validators and gas usage/limits from chunks.
let mut prev_validator_proposals = vec![]; let mut gas_used = 0; diff --git a/core/primitives/src/block_header.rs b/core/primitives/src/block_header.rs index 52962ca9b50..e6989c7dfb8 100644 --- a/core/primitives/src/block_header.rs +++ b/core/primitives/src/block_header.rs @@ -5,10 +5,10 @@ use crate::network::PeerId; use crate::types::validator_stake::{ValidatorStake, ValidatorStakeIter, ValidatorStakeV1}; use crate::types::{AccountId, Balance, BlockHeight, EpochId, MerkleHash, NumBlocks}; use crate::validator_signer::ValidatorSigner; -use crate::version::{get_protocol_version, ProtocolVersion, PROTOCOL_VERSION}; +use crate::version::ProtocolVersion; use borsh::{BorshDeserialize, BorshSerialize}; use near_crypto::{KeyType, PublicKey, Signature}; -use near_time::{Clock, Utc}; +use near_time::Utc; use std::sync::Arc; #[derive( @@ -407,6 +407,7 @@ impl BlockHeader { combine_hash(&hash_inner, &prev_hash) } + #[cfg(feature = "clock")] pub fn new( this_epoch_protocol_version: ProtocolVersion, next_epoch_protocol_version: ProtocolVersion, @@ -437,7 +438,7 @@ impl BlockHeader { next_bp_hash: CryptoHash, block_merkle_root: CryptoHash, prev_height: BlockHeight, - clock: Clock, + clock: near_time::Clock, ) -> Self { let inner_lite = BlockHeaderInnerLite { height, @@ -476,7 +477,7 @@ impl BlockHeader { last_final_block, last_ds_final_block, approvals, - latest_protocol_version: PROTOCOL_VERSION, + latest_protocol_version: crate::version::PROTOCOL_VERSION, }; let (hash, signature) = signer.sign_block_header_parts( prev_hash, @@ -508,7 +509,7 @@ impl BlockHeader { last_final_block, last_ds_final_block, approvals, - latest_protocol_version: PROTOCOL_VERSION, + latest_protocol_version: crate::version::PROTOCOL_VERSION, }; let (hash, signature) = signer.sign_block_header_parts( prev_hash, @@ -540,7 +541,10 @@ impl BlockHeader { prev_height, epoch_sync_data_hash, approvals, - latest_protocol_version: get_protocol_version(next_epoch_protocol_version, clock), + latest_protocol_version: crate::version::get_protocol_version( + next_epoch_protocol_version, + clock, + ), }; let (hash, signature) = signer.sign_block_header_parts( prev_hash, @@ -573,7 +577,10 @@ impl BlockHeader { prev_height, epoch_sync_data_hash, approvals, - latest_protocol_version: get_protocol_version(next_epoch_protocol_version, clock), + latest_protocol_version: crate::version::get_protocol_version( + next_epoch_protocol_version, + clock, + ), }; let (hash, signature) = signer.sign_block_header_parts( prev_hash, diff --git a/core/primitives/src/errors.rs b/core/primitives/src/errors.rs index 613d4e476f4..0bb2d5c1ad2 100644 --- a/core/primitives/src/errors.rs +++ b/core/primitives/src/errors.rs @@ -699,8 +699,6 @@ pub struct BalanceMismatchError { pub processed_delayed_receipts_balance: Balance, #[serde(with = "dec_format")] pub initial_postponed_receipts_balance: Balance, - // TODO(congestion_control): remove cfg on stabilization - #[cfg(feature = "nightly")] #[serde(with = "dec_format")] pub forwarded_buffered_receipts_balance: Balance, // Output balances @@ -716,8 +714,6 @@ pub struct BalanceMismatchError { pub tx_burnt_amount: Balance, #[serde(with = "dec_format")] pub slashed_burnt_amount: Balance, - // TODO(congestion_control): remove cfg on stabilization - #[cfg(feature = "nightly")] #[serde(with = "dec_format")] pub new_buffered_receipts_balance: Balance, #[serde(with = "dec_format")] @@ -732,11 +728,8 @@ impl Display for BalanceMismatchError { .saturating_add(self.initial_accounts_balance) 
.saturating_add(self.incoming_receipts_balance) .saturating_add(self.processed_delayed_receipts_balance) - .saturating_add(self.initial_postponed_receipts_balance); - // TODO(congestion_control): remove cfg on stabilization - #[cfg(feature = "nightly")] - let initial_balance = - initial_balance.saturating_add(self.forwarded_buffered_receipts_balance); + .saturating_add(self.initial_postponed_receipts_balance) + .saturating_add(self.forwarded_buffered_receipts_balance); let final_balance = self .final_accounts_balance .saturating_add(self.outgoing_receipts_balance) @@ -744,46 +737,9 @@ impl Display for BalanceMismatchError { .saturating_add(self.final_postponed_receipts_balance) .saturating_add(self.tx_burnt_amount) .saturating_add(self.slashed_burnt_amount) - .saturating_add(self.other_burnt_amount); - // TODO(congestion_control): remove cfg on stabilization - #[cfg(feature = "nightly")] - let final_balance = final_balance.saturating_add(self.new_buffered_receipts_balance); + .saturating_add(self.other_burnt_amount) + .saturating_add(self.new_buffered_receipts_balance); - // TODO(congestion_control): remove cfg on stabilization - #[cfg(not(feature = "nightly"))] - return write!( - f, - "Balance Mismatch Error. The input balance {} doesn't match output balance {}\n\ - Inputs:\n\ - \tIncoming validator rewards sum: {}\n\ - \tInitial accounts balance sum: {}\n\ - \tIncoming receipts balance sum: {}\n\ - \tProcessed delayed receipts balance sum: {}\n\ - \tInitial postponed receipts balance sum: {}\n\ - Outputs:\n\ - \tFinal accounts balance sum: {}\n\ - \tOutgoing receipts balance sum: {}\n\ - \tNew delayed receipts balance sum: {}\n\ - \tFinal postponed receipts balance sum: {}\n\ - \tTx fees burnt amount: {}\n\ - \tSlashed amount: {}\n\ - \tOther burnt amount: {}", - initial_balance, - final_balance, - self.incoming_validator_rewards, - self.initial_accounts_balance, - self.incoming_receipts_balance, - self.processed_delayed_receipts_balance, - self.initial_postponed_receipts_balance, - self.final_accounts_balance, - self.outgoing_receipts_balance, - self.new_delayed_receipts_balance, - self.final_postponed_receipts_balance, - self.tx_burnt_amount, - self.slashed_burnt_amount, - self.other_burnt_amount, - ); - #[cfg(feature = "nightly")] write!( f, "Balance Mismatch Error. 
The input balance {} doesn't match output balance {}\n\ diff --git a/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap b/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap index 1bd07dae54b..5bba9e355d6 100644 --- a/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap +++ b/core/primitives/src/snapshots/near_primitives__views__tests__runtime_config_view.snap @@ -18,7 +18,7 @@ expression: "&view" }, "cost_per_byte": { "send_sir": 17212011, - "send_not_sir": 17212011, + "send_not_sir": 47683715, "execution": 17212011 } }, @@ -35,7 +35,7 @@ expression: "&view" }, "deploy_contract_cost_per_byte": { "send_sir": 6812999, - "send_not_sir": 6812999, + "send_not_sir": 47683715, "execution": 64572944 }, "function_call_cost": { @@ -45,7 +45,7 @@ expression: "&view" }, "function_call_cost_per_byte": { "send_sir": 2235934, - "send_not_sir": 2235934, + "send_not_sir": 47683715, "execution": 2235934 }, "transfer_cost": { @@ -71,7 +71,7 @@ expression: "&view" }, "function_call_cost_per_byte": { "send_sir": 1925331, - "send_not_sir": 1925331, + "send_not_sir": 47683715, "execution": 1925331 } }, @@ -205,8 +205,8 @@ expression: "&view" "max_arguments_length": 4194304, "max_length_returned_data": 4194304, "max_contract_size": 4194304, - "max_transaction_size": 4194304, - "max_receipt_size": 999999999999999, + "max_transaction_size": 1572864, + "max_receipt_size": 4194304, "max_length_storage_key": 2048, "max_length_storage_value": 4194304, "max_promises_per_function_call_action": 1024, @@ -217,7 +217,7 @@ expression: "&view" "account_id_validity_rules_version": 1, "yield_timeout_length_in_blocks": 200, "max_yield_payload_size": 1024, - "per_receipt_storage_proof_size_limit": 999999999999999 + "per_receipt_storage_proof_size_limit": 4000000 } }, "account_creation_config": { @@ -225,22 +225,22 @@ expression: "&view" "registrar_account_id": "registrar" }, "congestion_control_config": { - "max_congestion_incoming_gas": 9223372036854775807, - "max_congestion_outgoing_gas": 9223372036854775807, - "max_congestion_memory_consumption": 9223372036854775807, - "max_congestion_missed_chunks": 9223372036854775807, - "max_outgoing_gas": 9223372036854775807, - "min_outgoing_gas": 9223372036854775807, - "allowed_shard_outgoing_gas": 9223372036854775807, - "max_tx_gas": 9223372036854775807, - "min_tx_gas": 9223372036854775807, - "reject_tx_congestion_threshold": 1.0, - "outgoing_receipts_usual_size_limit": 999999999999999, - "outgoing_receipts_big_size_limit": 999999999999999 + "max_congestion_incoming_gas": 20000000000000000, + "max_congestion_outgoing_gas": 10000000000000000, + "max_congestion_memory_consumption": 1000000000, + "max_congestion_missed_chunks": 5, + "max_outgoing_gas": 300000000000000000, + "min_outgoing_gas": 1000000000000000, + "allowed_shard_outgoing_gas": 1000000000000000, + "max_tx_gas": 500000000000000, + "min_tx_gas": 20000000000000, + "reject_tx_congestion_threshold": 0.5, + "outgoing_receipts_usual_size_limit": 102400, + "outgoing_receipts_big_size_limit": 4718592 }, "witness_config": { - "main_storage_proof_size_soft_limit": 999999999999999, - "combined_transactions_size_limit": 999999999999999, - "new_transactions_validation_state_size_soft_limit": 999999999999999 + "main_storage_proof_size_soft_limit": 3000000, + "combined_transactions_size_limit": 4194304, + "new_transactions_validation_state_size_soft_limit": 572864 } } diff --git a/core/primitives/src/stateless_validation.rs 
b/core/primitives/src/stateless_validation.rs index c5a9c119de5..a1370f5bb9b 100644 --- a/core/primitives/src/stateless_validation.rs +++ b/core/primitives/src/stateless_validation.rs @@ -18,7 +18,7 @@ use near_primitives_core::version::{ProtocolFeature, PROTOCOL_VERSION}; /// Represents max allowed size of the compressed state witness, /// corresponds to EncodedChunkStateWitness struct size. -pub const MAX_COMPRESSED_STATE_WITNESS_SIZE: ByteSize = ByteSize::mib(32); +pub const MAX_COMPRESSED_STATE_WITNESS_SIZE: ByteSize = ByteSize::mib(48); /// Represents max allowed size of the raw (not compressed) state witness, /// corresponds to the size of borsh-serialized ChunkStateWitness. diff --git a/core/primitives/src/upgrade_schedule.rs b/core/primitives/src/upgrade_schedule.rs index 842db451148..09f55c203b0 100644 --- a/core/primitives/src/upgrade_schedule.rs +++ b/core/primitives/src/upgrade_schedule.rs @@ -100,6 +100,7 @@ impl ProtocolUpgradeVotingSchedule { } /// This method returns the protocol version that the node should vote for. + #[cfg(feature = "clock")] pub(crate) fn get_protocol_version( &self, now: DateTime<Utc>, diff --git a/core/primitives/src/version.rs b/core/primitives/src/version.rs index da203fa6afc..4a366459749 100644 --- a/core/primitives/src/version.rs +++ b/core/primitives/src/version.rs @@ -1,5 +1,4 @@ use crate::types::Balance; -use near_time::Clock; use once_cell::sync::Lazy; /// Data structure for semver version and github tag or commit. @@ -88,9 +87,10 @@ pub const PROTOCOL_UPGRADE_SCHEDULE: Lazy<ProtocolUpgradeVotingSchedule> = Lazy: /// Gives new clients an option to upgrade without announcing that they support /// the new version. This gives non-validator nodes time to upgrade. See /// +#[cfg(feature = "clock")] pub fn get_protocol_version( next_epoch_protocol_version: ProtocolVersion, - clock: Clock, + clock: near_time::Clock, ) -> ProtocolVersion { let now = clock.now_utc(); let chrono = chrono::DateTime::from_timestamp(now.unix_timestamp(), now.nanosecond()); diff --git a/core/primitives/tests/crate-limit-test.rs b/core/primitives/tests/crate-limit-test.rs deleted file mode 100644 index f38242544ad..00000000000 --- a/core/primitives/tests/crate-limit-test.rs +++ /dev/null @@ -1,61 +0,0 @@ -use std::collections::HashSet; -use std::process::{Command, Output}; -use std::str; - -// when you compile you see line similar to the next one: -// Building [======= ] 50/100: near-primitives v0.1.0 -// The 100 is not actually the number of dependencies, but the number of crates that are being built. 
-// This threshhold represents the number of dependencies -const THRESHOLD_DEFAULT: usize = 150; -const THRESHOLD_NO_DEFAULT: usize = 115; - -fn process_output(output: Output, threshold: usize) { - assert!(output.status.success(), "Cargo tree failed"); - - let output_str = str::from_utf8(&output.stdout).expect("Failed to convert output to string"); - - let re = regex::Regex::new(r"([\w-]+) v([\d.]+(?:-\w+)?)").unwrap(); - - let mut unique_crates = HashSet::new(); - - for cap in re.captures_iter(output_str) { - let crate_name = &cap[1]; - let crate_version = &cap[2]; - let crate_str = format!("{}-{}", crate_name, crate_version); - unique_crates.insert(crate_str); - } - println!("{:#?}", unique_crates); - let crate_count = unique_crates.len(); - println!("Unique crate count: {}", crate_count); - - assert!(crate_count < threshold, "Crate count is too high: {} > {}", crate_count, threshold); -} - -#[test] -fn test_crate_count() { - // Run `cargo tree -p near-primitives --edges=normal` and capture the output - let output = Command::new(std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string())) - .arg("tree") - .arg("-p") - .arg("near-primitives") - .arg("--edges=normal") - .output() - .expect("Failed to execute cargo tree"); - - process_output(output, THRESHOLD_DEFAULT); -} - -#[test] -fn test_crate_count_no_default() { - // Run `cargo tree -p near-primitives --edges=normal` and capture the output - let output = Command::new(std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string())) - .arg("tree") - .arg("-p") - .arg("near-primitives") - .arg("--no-default-features") - .arg("--edges=normal") - .output() - .expect("Failed to execute cargo tree"); - - process_output(output, THRESHOLD_NO_DEFAULT); -} diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index 915b0fe040a..4ae2190d87e 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -42,7 +42,7 @@ thiserror.workspace = true tokio.workspace = true tracing.workspace = true -near-async.workspace = true +near-time.workspace = true near-chain-configs = { workspace = true, features = ["metrics"] } near-crypto.workspace = true near-fmt.workspace = true @@ -80,8 +80,8 @@ single_thread_rocksdb = [] # Deactivate RocksDB IO background threads test_features = ["near-vm-runner/test_features"] new_epoch_sync = [] +# TODO(#11639): extract metrics into separate feature nightly_protocol = [ - "near-async/nightly_protocol", "near-chain-configs/nightly_protocol", "near-chain/nightly_protocol", "near-chunks/nightly_protocol", @@ -92,7 +92,6 @@ nightly_protocol = [ "near-vm-runner/nightly_protocol", ] nightly = [ - "near-async/nightly", "near-chain-configs/nightly", "near-chain/nightly", "near-chunks/nightly", @@ -103,6 +102,4 @@ nightly = [ "near-vm-runner/nightly", "nightly_protocol", ] -statelessnet_protocol = [ - "near-primitives/statelessnet_protocol", -] +statelessnet_protocol = ["near-primitives/statelessnet_protocol"] diff --git a/core/store/src/columns.rs b/core/store/src/columns.rs index 6c497ac24a2..01a6d9b2172 100644 --- a/core/store/src/columns.rs +++ b/core/store/src/columns.rs @@ -136,10 +136,9 @@ pub enum DBCol { /// - *Rows*: EpochId (CryptoHash) /// - *Content type*: LightClientBlockView EpochLightClientBlocks, - /// Mapping from Receipt id to destination Shard Id, i.e, the shard that this receipt is sent to. - /// - *Rows*: ReceiptId (CryptoHash) - /// - *Content type*: Shard Id || ref_count (u64 || u64) - ReceiptIdToShardId, + // Deprecated. 
+ #[strum(serialize = "ReceiptIdToShardId")] + _ReceiptIdToShardId, // Deprecated. #[strum(serialize = "NextBlockWithNewChunk")] _NextBlockWithNewChunk, @@ -404,7 +403,7 @@ impl DBCol { /// ``` pub const fn is_rc(&self) -> bool { match self { - DBCol::State | DBCol::Transactions | DBCol::Receipts | DBCol::ReceiptIdToShardId => { + DBCol::State | DBCol::Transactions | DBCol::Receipts | DBCol::_ReceiptIdToShardId => { true } _ => false, @@ -439,7 +438,6 @@ impl DBCol { | DBCol::OutgoingReceipts // TODO can be changed to reconstruction on request instead of saving in cold storage. | DBCol::PartialChunks - | DBCol::ReceiptIdToShardId | DBCol::Receipts | DBCol::State | DBCol::StateChanges @@ -473,6 +471,8 @@ impl DBCol { // LatestChunkStateWitnesses stores the last N observed witnesses, used only for debugging. DBCol::LatestChunkStateWitnesses => false, DBCol::LatestWitnessesByIndex => false, + // Deprecated. + DBCol::_ReceiptIdToShardId => false, // Columns that are not GC-ed need not be copied to the cold storage. DBCol::BlockHeader @@ -543,7 +543,7 @@ impl DBCol { DBCol::AccountAnnouncements => &[DBKeyType::AccountId], DBCol::NextBlockHashes => &[DBKeyType::PreviousBlockHash], DBCol::EpochLightClientBlocks => &[DBKeyType::EpochId], - DBCol::ReceiptIdToShardId => &[DBKeyType::ReceiptHash], + DBCol::_ReceiptIdToShardId => &[DBKeyType::ReceiptHash], DBCol::_NextBlockWithNewChunk => &[DBKeyType::BlockHash, DBKeyType::ShardId], DBCol::_LastBlockWithNewChunk => &[DBKeyType::ShardId], DBCol::PeerComponent => &[DBKeyType::PeerId], diff --git a/core/store/src/db/rocksdb.rs b/core/store/src/db/rocksdb.rs index aa37575d457..3b8d2836cd6 100644 --- a/core/store/src/db/rocksdb.rs +++ b/core/store/src/db/rocksdb.rs @@ -767,7 +767,7 @@ fn col_name(col: DBCol) -> &'static str { DBCol::AccountAnnouncements => "col24", DBCol::NextBlockHashes => "col25", DBCol::EpochLightClientBlocks => "col26", - DBCol::ReceiptIdToShardId => "col27", + DBCol::_ReceiptIdToShardId => "col27", DBCol::_NextBlockWithNewChunk => "col28", DBCol::_LastBlockWithNewChunk => "col29", DBCol::PeerComponent => "col30", diff --git a/core/store/src/metadata.rs b/core/store/src/metadata.rs index dbbc0932fae..a07581e3922 100644 --- a/core/store/src/metadata.rs +++ b/core/store/src/metadata.rs @@ -2,7 +2,7 @@ pub type DbVersion = u32; /// Current version of the database. -pub const DB_VERSION: DbVersion = 39; +pub const DB_VERSION: DbVersion = 40; /// Database version at which point DbKind was introduced. 
const DB_VERSION_WITH_KIND: DbVersion = 34; diff --git a/core/store/src/metrics.rs b/core/store/src/metrics.rs index a72506d74f7..dac5cd3279b 100644 --- a/core/store/src/metrics.rs +++ b/core/store/src/metrics.rs @@ -1,12 +1,12 @@ use crate::rocksdb_metrics::export_stats_as_metrics; use crate::{NodeStorage, Store, Temperature}; use actix_rt::ArbiterHandle; -use near_async::time::Duration; use near_o11y::metrics::{ exponential_buckets, try_create_histogram, try_create_histogram_vec, try_create_histogram_with_buckets, try_create_int_counter_vec, try_create_int_gauge, try_create_int_gauge_vec, Histogram, HistogramVec, IntCounterVec, IntGauge, IntGaugeVec, }; +use near_time::Duration; use once_cell::sync::Lazy; pub(crate) static DATABASE_OP_LATENCY_HIST: Lazy<HistogramVec> = Lazy::new(|| { @@ -584,8 +584,8 @@ mod test { use crate::metadata::{DbKind, DB_VERSION}; use crate::test_utils::create_test_node_storage_with_cold; use actix; - use near_async::time::Duration; use near_o11y::testonly::init_test_logger; + use near_time::Duration; use super::spawn_db_metrics_loop; diff --git a/core/store/src/migrations.rs b/core/store/src/migrations.rs index 2950c46d449..a3de5fccfd1 100644 --- a/core/store/src/migrations.rs +++ b/core/store/src/migrations.rs @@ -345,3 +345,16 @@ pub fn migrate_38_to_39(store: &Store) -> anyhow::Result<()> { update.commit()?; Ok(()) } + +/// Migrates the database from version 39 to 40. +/// +/// This involves deleting contents of _ReceiptIdToShardId column which is now +/// deprecated and no longer used. +pub fn migrate_39_to_40(store: &Store) -> anyhow::Result<()> { + let _span = + tracing::info_span!(target: "migrations", "Deleting contents of deprecated _ReceiptIdToShardId column").entered(); + let mut update = store.store_update(); + update.delete_all(DBCol::_ReceiptIdToShardId); + update.commit()?; + Ok(()) +} diff --git a/core/time/Cargo.toml b/core/time/Cargo.toml index fc47f8ae836..f3bcb23119a 100644 --- a/core/time/Cargo.toml +++ b/core/time/Cargo.toml @@ -21,6 +21,5 @@ serde = { workspace = true, optional = true } serde_json.workspace = true [features] -default = ["clock", "serde"] clock = ["tokio", "once_cell"] serde = ["dep:serde", "time/serde"] diff --git a/docs/practices/workflows/io_trace.md b/docs/practices/workflows/io_trace.md index 5622a36c9c3..c2c8353875e 100644 --- a/docs/practices/workflows/io_trace.md +++ b/docs/practices/workflows/io_trace.md @@ -309,7 +309,6 @@ apply_transactions shard_id=3 block=AUcauGxisMqNmZu5Ln7LLu8Li31H1sYD7wgd7AP6nQZR top-level: GET 8854 Block 981 BlockHeader 16556 BlockHeight 59155 BlockInfo 2 BlockMerkleTree 330009 BlockMisc 1 BlockOrdinal 31924 BlockPerHeight 863 BlockRefCount 1609 BlocksToCatchup 1557 ChallengedBlocks 4 ChunkExtra 5135 ChunkHashesByHeight 128788 Chunks 35 EpochInfo 1 EpochStart 98361 FlatState 1150 HeaderHashesByHeight 8113 InvalidChunks 263 NextBlockHashes 22 OutgoingReceipts 131114 PartialChunks 1116 ProcessedBlockHeights 968698 State SET 865 BlockHeight 1026 BlockMerkleTree 12428 BlockMisc 1636 BlockOrdinal 865 BlockPerHeight 865 BlockRefCount 3460 ChunkExtra 3446 ChunkHashesByHeight 339142 FlatState 3460 FlatStateDeltas 3460 FlatStateMisc 865 HeaderHashesByHeight 3460 IncomingReceipts 865 NextBlockHashes 3442 OutcomeIds 3442 OutgoingReceipts 863 ProcessedBlockHeights 340093 StateChanges 3460 TrieChanges - UPDATE_RC 13517 ReceiptIdToShardId 13526 Receipts 1517322 State 6059 Transactions ``` The output contains one `apply_transactions` for each chunk, with the block hash diff --git 
a/genesis-tools/genesis-populate/Cargo.toml b/genesis-tools/genesis-populate/Cargo.toml
index 4e1a9ab8a6e..f333daf7444 100644
--- a/genesis-tools/genesis-populate/Cargo.toml
+++ b/genesis-tools/genesis-populate/Cargo.toml
@@ -18,7 +18,7 @@ indicatif.workspace = true
 tempfile.workspace = true
 
 nearcore.workspace = true
-near-async.workspace = true
+near-time.workspace = true
 near-chain-configs.workspace = true
 near-crypto.workspace = true
 near-epoch-manager.workspace = true
@@ -31,7 +31,6 @@ node-runtime.workspace = true
 
 [features]
 nightly_protocol = [
-    "near-async/nightly_protocol",
     "near-chain-configs/nightly_protocol",
     "near-chain/nightly_protocol",
    "near-epoch-manager/nightly_protocol",
@@ -42,7 +41,6 @@ nightly_protocol = [
     "node-runtime/nightly_protocol",
 ]
 nightly = [
-    "near-async/nightly",
     "near-chain-configs/nightly",
     "near-chain/nightly",
     "near-epoch-manager/nightly",
diff --git a/genesis-tools/genesis-populate/src/lib.rs b/genesis-tools/genesis-populate/src/lib.rs
index 9bddbedf0ff..62b5096eaf2 100644
--- a/genesis-tools/genesis-populate/src/lib.rs
+++ b/genesis-tools/genesis-populate/src/lib.rs
@@ -4,7 +4,6 @@ pub mod state_dump;
 
 use crate::state_dump::StateDump;
 use indicatif::{ProgressBar, ProgressStyle};
-use near_async::time::Utc;
 use near_chain::chain::get_genesis_congestion_infos;
 use near_chain::types::RuntimeAdapter;
 use near_chain::{Block, Chain, ChainStore};
@@ -26,6 +25,7 @@ use near_store::genesis::{compute_storage_usage, initialize_genesis_state};
 use near_store::{
     get_account, get_genesis_state_roots, set_access_key, set_account, set_code, Store, TrieUpdate,
 };
+use near_time::Utc;
 use near_vm_runner::logic::ProtocolVersion;
 use near_vm_runner::ContractCode;
 use nearcore::{NearConfig, NightshadeRuntime, NightshadeRuntimeExt};
diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml
index 043dff39024..928bc9c85dc 100644
--- a/integration-tests/Cargo.toml
+++ b/integration-tests/Cargo.toml
@@ -77,6 +77,7 @@ insta.workspace = true
 near-undo-block.workspace = true
 rlp.workspace = true
 sha3.workspace = true
+regex.workspace = true
 
 [features]
 performance_stats = [
@@ -88,6 +89,7 @@ test_features = [
     "nearcore/test_features",
     "near-store/test_features",
     "near-vm-runner/test_features",
+    "near-test-contracts/test_features",
 ]
 protocol_feature_fix_contract_loading_cost = [
     "nearcore/protocol_feature_fix_contract_loading_cost",
@@ -171,3 +173,4 @@ sandbox = [
 no_cache = ["nearcore/no_cache"]
 calimero_zero_storage = []
 new_epoch_sync = ["nearcore/new_epoch_sync"]
+testloop = ["near-chain/testloop"]
diff --git a/integration-tests/README.md b/integration-tests/README.md
new file mode 100644
index 00000000000..a0ebe7f3ccf
--- /dev/null
+++ b/integration-tests/README.md
@@ -0,0 +1,134 @@
+# Integration tests
+
+## TestLoopEnv
+
+`TestLoopEnv` is a framework that enables writing multi-node tests for NEAR protocol
+components. It simulates an entire blockchain environment within a single test,
+allowing for synchronous testing of complex scenarios.
+
+We recommend using `TestLoopEnv` for writing multi-node tests and putting new
+tests into the `src/test_loop/tests` folder. This framework is an attempt to
+achieve the best of all our previous frameworks, making tests powerful,
+deterministic, and easy to write and understand. The documentation on how it
+works is in `core/async/src/test_loop.rs`.
+
+Here's a step-by-step guide on how to create a test.
+
+## 1. Build the environment
+
+The most important parameters are configured through the genesis.
+The main part of building the environment involves constructing genesis data,
+including the initial state, using `TestGenesisBuilder`:
+
+```rust
+let builder = TestLoopBuilder::new();
+
+let initial_balance = 10000 * ONE_NEAR;
+let accounts = (0..NUM_ACCOUNTS)
+    .map(|i| format!("account{}", i).parse().unwrap())
+    .collect::<Vec<AccountId>>();
+
+let mut genesis_builder = TestGenesisBuilder::new();
+genesis_builder
+    .genesis_time_from_clock(&builder.clock())
+    .protocol_version_latest()
+    .genesis_height(10000)
+    .epoch_length(EPOCH_LENGTH)
+    .shard_layout_simple_v1(&["account2", "account4", "account6"])
+    // ...more configuration if needed...
+
+for account in &accounts {
+    genesis_builder.add_user_account_simple(account.clone(), initial_balance);
+}
+let genesis = genesis_builder.build();
+
+let TestLoopEnv { mut test_loop, datas: node_datas } =
+    builder.genesis(genesis).clients(client_accounts).build();
+```
+
+## 2. Trigger and execute events
+
+First, query the clients for desired chain information, such as which nodes are
+responsible for tracking specific shards. Refer to the `ClientQueries` implementation
+for more details on available queries.
+
+```rust
+let first_epoch_tracked_shards = {
+    let clients = node_datas
+        .iter()
+        .map(|data| &test_loop.data.get(&data.client_sender.actor_handle()).client)
+        .collect_vec();
+    clients.tracked_shards_for_each_client()
+};
+```
+
+Perform the actions you want to test, such as money transfers, contract
+deployment and execution, specific validator selection, etc. See the
+`execute_money_transfers` implementation for inspiration.
+
+```rust
+execute_money_transfers(&mut test_loop, &node_datas, &accounts);
+```
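+
+If the helpers don't cover your scenario, a transaction can also be injected
+directly through a node's client sender, mirroring the pattern used by the
+utilities in `src/test_loop/utils/transactions.rs`. This is only a minimal
+sketch: the helper `get_shared_block_hash` and the hardcoded nonce are
+assumptions for illustration, not part of the public API.
+
+```rust
+let sender = accounts[0].clone();
+let receiver = accounts[1].clone();
+// Any recent block hash works as the transaction anchor; this (private)
+// helper picks the lowest chain head among all clients.
+let block_hash = get_shared_block_hash(&node_datas, &mut test_loop);
+let signer = create_user_test_signer(&sender).into();
+let transaction = SignedTransaction::send_money(
+    1, // nonce; assumed to be the first transaction sent from `sender`
+    sender.clone(),
+    receiver,
+    &signer,
+    ONE_NEAR,
+    block_hash,
+);
+let process_tx_request =
+    ProcessTxRequest { transaction, is_forwarded: false, check_only: false };
+let future = node_datas[0].client_sender.clone().send_async(process_tx_request);
+drop(future);
+```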
+
+Then, use the `run_until` method to progress the blockchain until a certain
+condition is met:
+
+```rust
+let client_handle = node_datas[0].client_sender.actor_handle();
+test_loop.run_until(
+    |test_loop_data| {
+        test_loop_data.get(&client_handle).client.chain.head().unwrap().height > 10020
+    },
+    Duration::seconds(20),
+);
+```
+
+Note: The time here is not actual real-world time. `TestLoopEnv` simulates the clock
+to ensure high speed and reproducibility of test results. This allows tests to
+run quickly while still accurately modeling time-dependent blockchain behavior.
+
+## 3. Assert expected outcomes
+
+Verify that the test produced the expected results. For example, if your test
+environment is designed to have nodes change the shards they track, you can
+assert this behavior as follows:
+
+```rust
+let clients = node_datas
+    .iter()
+    .map(|data| &test_loop.data.get(&data.client_sender.actor_handle()).client)
+    .collect_vec();
+let later_epoch_tracked_shards = clients.tracked_shards_for_each_client();
+assert_ne!(first_epoch_tracked_shards, later_epoch_tracked_shards);
+```
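+
+The same accessors work for every node, so a condition can also be checked
+across all clients instead of through a single handle. A short sketch reusing
+only the calls shown above:
+
+```rust
+// Assert that every node, not just node 0, progressed past the target height.
+for data in &node_datas {
+    let client = &test_loop.data.get(&data.client_sender.actor_handle()).client;
+    assert!(client.chain.head().unwrap().height > 10020);
+}
+```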
+
+After that, properly shut down the test environment:
+
+```rust
+TestLoopEnv { test_loop, datas: node_datas }
+    .shutdown_and_drain_remaining_events(Duration::seconds(20));
+```
+
+## Migration
+
+For historical context, there are multiple existing ways to write such
+tests. The following list presents these methods in order of their development:
+* `run_actix(... setup_mock_all_validators(...))` - very powerful, spawns all
+actors required for a multi-node chain to operate and supports network
+communication among them. However, it is very hard to understand, uses a lot
+of resources, and is barely maintained.
+* pytest - also quite powerful; spawns actual nodes in Python and uses
+exposed RPC handlers to test different behaviours. It is quite obsolete as
+well and is prone to flakiness.
+* different environments spawning clients: `TestEnv`, `TestReshardingEnv`, ...
+A good middle ground for testing specific features, but they don't test actual
+network behaviour. Modifications like forcing chunks to be skipped require a
+lot of manual intervention.
+
+If a test becomes problematic, we encourage migrating it to `TestLoopEnv`.
+However, it can be _extremely_ hard to migrate the logic precisely. Instead,
+migrate tests only if they make sense to you and their current implementation
+has become a huge burden. We hope that reproducing such logic in `TestLoopEnv`
+is much easier.
+
+Enjoy!
diff --git a/integration-tests/src/test_loop/builder.rs b/integration-tests/src/test_loop/builder.rs
index c8cad859e01..cd56c0096d7 100644
--- a/integration-tests/src/test_loop/builder.rs
+++ b/integration-tests/src/test_loop/builder.rs
@@ -1,4 +1,4 @@
-use std::sync::{Arc, RwLock};
+use std::sync::{Arc, Mutex, RwLock};
 
 use near_async::futures::FutureSpawner;
 use near_async::messaging::{noop, IntoMultiSender, IntoSender, LateBoundSender};
@@ -23,7 +23,7 @@ use near_client::sync_jobs_actor::SyncJobsActor;
 use near_client::test_utils::test_loop::test_loop_sync_actor_maker;
 use near_client::{Client, PartialWitnessActor, SyncAdapter};
 use near_epoch_manager::shard_tracker::{ShardTracker, TrackedConfig};
-use near_epoch_manager::EpochManager;
+use near_epoch_manager::{EpochManager, EpochManagerAdapter};
 use near_network::test_loop::TestLoopPeerManagerActor;
 use near_primitives::network::PeerId;
 use near_primitives::test_utils::create_test_signer;
@@ -36,18 +36,30 @@ use near_vm_runner::{ContractRuntimeCache, FilesystemContractRuntimeCache};
 use nearcore::state_sync::StateSyncDumper;
 use tempfile::TempDir;
 
-use super::env::{TestData, TestLoopEnv};
+use super::env::{ClientToShardsManagerSender, TestData, TestLoopChunksStorage, TestLoopEnv};
+use super::utils::network::partial_encoded_chunks_dropper;
 
 pub struct TestLoopBuilder {
     test_loop: TestLoopV2,
     genesis: Option<Genesis>,
     clients: Vec<AccountId>,
+    /// Will store all chunks produced within the test loop.
+    chunks_storage: Arc<Mutex<TestLoopChunksStorage>>,
+    /// Whether test loop should drop all chunks validated by the given account.
+    drop_chunks_validated_by: Option<AccountId>,
     gc: bool,
 }
 
 impl TestLoopBuilder {
     pub fn new() -> Self {
-        Self { test_loop: TestLoopV2::new(), genesis: None, clients: vec![], gc: true }
+        Self {
+            test_loop: TestLoopV2::new(),
+            genesis: None,
+            clients: vec![],
+            chunks_storage: Default::default(),
+            drop_chunks_validated_by: None,
+            gc: true,
+        }
     }
 
     /// Get the clock for the test loop.
@@ -67,10 +79,8 @@ impl TestLoopBuilder {
         self
     }
 
-    /// Disable garbage collection for the nodes.
-    /// TODO(#11605): should always be enabled, if it doesn't work, it's a bug.
-    pub fn disable_gc(mut self) -> Self {
-        self.gc = false;
+    pub fn drop_chunks_validated_by(mut self, account_id: &str) -> Self {
+        self.drop_chunks_validated_by = Some(account_id.parse().unwrap());
         self
     }
 
@@ -92,15 +102,17 @@ impl TestLoopBuilder {
     fn build_impl(mut self) -> TestLoopEnv {
         let mut datas = Vec::new();
         let mut network_adapters = Vec::new();
+        let mut epoch_manager_adapters = Vec::new();
         let tempdir = tempfile::tempdir().unwrap();
         for idx in 0..self.clients.len() {
-            let (data, network_adapter) = self.setup_client(idx, &tempdir);
+            let (data, network_adapter, epoch_manager_adapter) = self.setup_client(idx, &tempdir);
             datas.push(data);
             network_adapters.push(network_adapter);
+            epoch_manager_adapters.push(epoch_manager_adapter);
         }
-        self.setup_network(&datas, &network_adapters);
+        self.setup_network(&datas, &network_adapters, &epoch_manager_adapters);
 
-        let env = TestLoopEnv { test_loop: self.test_loop, datas };
+        let env = TestLoopEnv { test_loop: self.test_loop, datas, tempdir };
         env.warmup()
     }
 
@@ -108,11 +120,14 @@ impl TestLoopBuilder {
         &mut self,
         idx: usize,
         tempdir: &TempDir,
-    ) -> (TestData, Arc<LateBoundSender<TestLoopSender<TestLoopPeerManagerActor>>>) {
+    ) -> (
+        TestData,
+        Arc<LateBoundSender<TestLoopSender<TestLoopPeerManagerActor>>>,
+        Arc<dyn EpochManagerAdapter>,
+    ) {
         let client_adapter = LateBoundSender::new();
         let network_adapter = LateBoundSender::new();
         let state_snapshot_adapter = LateBoundSender::new();
-        let shards_manager_adapter = LateBoundSender::new();
         let partial_witness_adapter = LateBoundSender::new();
         let sync_jobs_adapter = LateBoundSender::new();
 
@@ -136,7 +151,20 @@ impl TestLoopBuilder {
             num_concurrent_requests_during_catchup: 1,
             }),
         };
-        client_config.tracked_shards = Vec::new();
+
+        // Configure tracked shards.
+        // * single shard tracking for validators
+        // * all shard tracking for RPCs
+        let num_block_producer = genesis.config.num_block_producer_seats;
+        let num_chunk_producer = genesis.config.num_chunk_producer_seats;
+        let num_chunk_validator = genesis.config.num_chunk_validator_seats;
+        let validator_num =
+            num_block_producer.max(num_chunk_producer).max(num_chunk_validator) as usize;
+        if idx < validator_num {
+            client_config.tracked_shards = Vec::new();
+        } else {
+            client_config.tracked_shards = vec![666];
+        }
 
         let homedir = tempdir.path().join(format!("{}", idx));
         std::fs::create_dir_all(&homedir).expect("Unable to create homedir");
@@ -194,6 +222,13 @@ impl TestLoopBuilder {
             Some(Arc::new(create_test_signer(self.clients[idx].as_str()))),
             "validator_signer",
         );
+
+        let shards_manager_adapter = LateBoundSender::new();
+        let client_to_shards_manager_sender = Arc::new(ClientToShardsManagerSender {
+            sender: shards_manager_adapter.clone(),
+            chunks_storage: self.chunks_storage.clone(),
+        });
+
         let client = Client::new(
             self.test_loop.clock(),
             client_config.clone(),
@@ -203,7 +238,7 @@ impl TestLoopBuilder {
             state_sync_adapter,
             runtime_adapter.clone(),
             network_adapter.as_multi_sender(),
-            shards_manager_adapter.as_sender(),
+            client_to_shards_manager_sender.as_sender(),
             validator_signer.clone(),
             true,
             [0; 32],
@@ -269,7 +304,7 @@ impl TestLoopBuilder {
             clock: self.test_loop.clock(),
             client_config,
             chain_genesis,
-            epoch_manager,
+            epoch_manager: epoch_manager.clone(),
             shard_tracker,
             runtime: runtime_adapter,
             validator: validator_signer,
@@ -312,17 +347,29 @@ impl TestLoopBuilder {
             partial_witness_sender,
             state_sync_dumper_handle,
         };
-        (data, network_adapter)
+        (data, network_adapter, epoch_manager)
     }
 
+    // TODO: we assume that all `Vec`s have the same length, consider
+    // joining them into one structure.
     fn setup_network(
         &mut self,
         datas: &Vec<TestData>,
         network_adapters: &Vec<Arc<LateBoundSender<TestLoopSender<TestLoopPeerManagerActor>>>>,
+        epoch_manager_adapters: &Vec<Arc<dyn EpochManagerAdapter>>,
     ) {
         for (idx, data) in datas.iter().enumerate() {
-            let peer_manager_actor =
+            let mut peer_manager_actor =
                 TestLoopPeerManagerActor::new(self.test_loop.clock(), &data.account_id, datas);
+
+            if let Some(account_id) = &self.drop_chunks_validated_by {
+                peer_manager_actor.register_override_handler(partial_encoded_chunks_dropper(
+                    self.chunks_storage.clone(),
+                    epoch_manager_adapters[idx].clone(),
+                    account_id.clone(),
+                ));
+            }
+
             self.test_loop.register_actor_for_index(
                 idx,
                 peer_manager_actor,
diff --git a/integration-tests/src/test_loop/env.rs b/integration-tests/src/test_loop/env.rs
index 09d1d0a19f9..7f6f5c3bce7 100644
--- a/integration-tests/src/test_loop/env.rs
+++ b/integration-tests/src/test_loop/env.rs
@@ -1,22 +1,29 @@
-use near_async::messaging::{IntoMultiSender, IntoSender, Sender};
+use near_async::messaging::{CanSend, IntoMultiSender, IntoSender, LateBoundSender, Sender};
 use near_async::test_loop::data::{TestLoopData, TestLoopDataHandle};
 use near_async::test_loop::sender::TestLoopSender;
 use near_async::test_loop::TestLoopV2;
 use near_async::time::Duration;
+use near_chunks::adapter::ShardsManagerRequestFromClient;
 use near_chunks::shards_manager_actor::ShardsManagerActor;
 use near_client::client_actor::ClientActorInner;
 use near_client::PartialWitnessActor;
 use near_network::shards_manager::ShardsManagerRequestFromNetwork;
 use near_network::state_witness::PartialWitnessSenderForNetwork;
 use near_network::test_loop::ClientSenderForTestLoopNetwork;
+use near_primitives::sharding::{ChunkHash, ShardChunkHeader};
 use near_primitives::types::AccountId;
+use near_primitives_core::types::BlockHeight;
 use nearcore::state_sync::StateSyncDumper;
+use std::collections::HashMap;
+use std::sync::{Arc, Mutex};
+use tempfile::TempDir;
 
 const NETWORK_DELAY: Duration = Duration::milliseconds(10);
 
 pub struct TestLoopEnv {
     pub test_loop: TestLoopV2,
     pub datas: Vec<TestData>,
+    pub tempdir: TempDir,
 }
 
 impl TestLoopEnv {
@@ -26,7 +33,7 @@ impl TestLoopEnv {
     /// Needed because for smaller heights blocks may not get all chunks and/or
     /// approvals.
     pub fn warmup(self) -> Self {
-        let Self { mut test_loop, datas } = self;
+        let Self { mut test_loop, datas, tempdir } = self;
 
         let client_handle = datas[0].client_sender.actor_handle();
         let genesis_height = test_loop.data.get(&client_handle).client.chain.genesis().height();
@@ -50,7 +57,7 @@ impl TestLoopEnv {
         }
         test_loop.run_instant();
 
-        Self { test_loop, datas }
+        Self { test_loop, datas, tempdir }
     }
 
     /// Used to finish off remaining events that are still in the loop. This can be necessary if the
@@ -67,6 +74,64 @@ impl TestLoopEnv {
     }
 }
 
+/// Stores all chunks ever observed on chain. Determines if a chunk can be
+/// dropped within a test loop.
+///
+/// Needed because intercepted network messages carry only the chunk hash,
+/// while interception requires more detailed information such as the shard id.
+#[derive(Default)]
+pub struct TestLoopChunksStorage {
+    /// Mapping from chunk hashes to headers.
+    storage: HashMap<ChunkHash, ShardChunkHeader>,
+    /// Minimal chunk height ever observed.
+    min_chunk_height: Option<BlockHeight>,
+}
+
+impl TestLoopChunksStorage {
+    pub fn insert(&mut self, chunk_header: ShardChunkHeader) {
+        let chunk_height = chunk_header.height_created();
+        self.min_chunk_height = Some(
+            self.min_chunk_height
+                .map_or(chunk_height, |current_height| current_height.min(chunk_height)),
+        );
+        self.storage.insert(chunk_header.chunk_hash(), chunk_header);
+    }
+
+    pub fn get(&self, chunk_hash: &ChunkHash) -> Option<&ShardChunkHeader> {
+        self.storage.get(chunk_hash)
+    }
+
+    /// If the chunk height is too low, don't drop the chunk; allow the chain
+    /// to warm up.
+    pub fn can_drop_chunk(&self, chunk_header: &ShardChunkHeader) -> bool {
+        self.min_chunk_height
+            .is_some_and(|min_height| chunk_header.height_created() >= min_height + 3)
+    }
+}
+
+/// Custom implementation of `Sender` for messages from `Client` to
+/// `ShardsManagerActor` that intercepts all messages indicating chunk
+/// production and stores all produced chunks.
+pub struct ClientToShardsManagerSender {
+    pub sender: Arc<LateBoundSender<TestLoopSender<ShardsManagerActor>>>,
+    /// Storage of chunks shared between all test loop nodes.
+    pub chunks_storage: Arc<Mutex<TestLoopChunksStorage>>,
+}
+
+impl CanSend<ShardsManagerRequestFromClient> for ClientToShardsManagerSender {
+    fn send(&self, message: ShardsManagerRequestFromClient) {
+        // `DistributeEncodedChunk` indicates that a certain chunk was produced.
+        if let ShardsManagerRequestFromClient::DistributeEncodedChunk { partial_chunk, .. } =
+            &message
+        {
+            let mut chunks_storage = self.chunks_storage.lock().unwrap();
+            chunks_storage.insert(partial_chunk.cloned_header());
+        }
+        // After maybe storing the chunk, send the message as usual.
+        self.sender.send(message);
+    }
+}
+
 pub struct TestData {
     pub account_id: AccountId,
     pub client_sender: TestLoopSender<ClientActorInner>,
diff --git a/integration-tests/src/test_loop/tests/chunk_validator_kickout.rs b/integration-tests/src/test_loop/tests/chunk_validator_kickout.rs
new file mode 100644
index 00000000000..9fe1d4acfc7
--- /dev/null
+++ b/integration-tests/src/test_loop/tests/chunk_validator_kickout.rs
@@ -0,0 +1,122 @@
+use crate::test_loop::builder::TestLoopBuilder;
+use crate::test_loop::env::TestLoopEnv;
+use crate::test_loop::utils::ONE_NEAR;
+use itertools::Itertools;
+use near_async::test_loop::data::TestLoopData;
+use near_async::time::Duration;
+use near_chain_configs::test_genesis::TestGenesisBuilder;
+use near_client::Client;
+use near_o11y::testonly::init_test_logger;
+use near_primitives::types::AccountId;
+use near_primitives_core::checked_feature;
+use near_primitives_core::version::PROTOCOL_VERSION;
+use std::string::ToString;
+
+fn run_test_chunk_validator_kickout(select_chunk_validator_only: bool) {
+    if !checked_feature!("stable", StatelessValidationV0, PROTOCOL_VERSION) {
+        println!("Test not applicable without StatelessValidation enabled");
+        return;
+    }
+
+    init_test_logger();
+    let builder = TestLoopBuilder::new();
+
+    let initial_balance = 10000 * ONE_NEAR;
+    let epoch_length = 10;
+    let accounts =
+        (0..8).map(|i| format!("account{}", i).parse().unwrap()).collect::<Vec<AccountId>>();
+    let clients = accounts.iter().cloned().collect_vec();
+    let accounts_str = accounts.iter().map(|a| a.as_str()).collect_vec();
+    let (block_and_chunk_producers, chunk_validators_only) = accounts_str.split_at(6);
+
+    // Select the account to kick out.
+    // Only a chunk validator-only node can be kicked out for low endorsement
+    // stats.
+    let account_id = if select_chunk_validator_only {
+        chunk_validators_only[0]
+    } else {
+        block_and_chunk_producers[3]
+    };
+    let expect_kickout = select_chunk_validator_only;
+
+    let mut genesis_builder = TestGenesisBuilder::new();
+    genesis_builder
+        .genesis_time_from_clock(&builder.clock())
+        .shard_layout_simple_v1(&["account2", "account4", "account6"])
+        .epoch_length(epoch_length)
+        // Select 6 block&chunk producers and 2 chunk validators.
+        .validators_desired_roles(block_and_chunk_producers, chunk_validators_only)
+        // Set up config to kick out only chunk validators for low performance.
+        .kickouts_for_chunk_validators_only()
+        // Target giving one mandate to each chunk validator, which results in
+        // every chunk validator validating only one shard in most cases.
+        .target_validator_mandates_per_shard(1);
+    for account in &accounts {
+        genesis_builder.add_user_account_simple(account.clone(), initial_balance);
+    }
+    let genesis = genesis_builder.build();
+
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder
+        .genesis(genesis)
+        .clients(clients)
+        // Drop only chunks validated by `account_id`.
+        // Because of how our endorsement stats are computed, this will count
+        // as this validator validating zero chunks.
+        .drop_chunks_validated_by(account_id)
+        .build();
+
+    // Run chain until our targeted chunk validator is (not) kicked out.
+    let client_handle = node_datas[0].client_sender.actor_handle();
+    let initial_validators = get_epoch_all_validators(&test_loop.data.get(&client_handle).client);
+    assert_eq!(initial_validators.len(), 8);
+    assert!(initial_validators.contains(&account_id.to_string()));
+    let success_condition = |test_loop_data: &mut TestLoopData| -> bool {
+        let client = &test_loop_data.get(&client_handle).client;
+        let validators = get_epoch_all_validators(client);
+        let tip = client.chain.head().unwrap();
+        let epoch_height =
+            client.epoch_manager.get_epoch_height_from_prev_block(&tip.prev_block_hash).unwrap();
+
+        if expect_kickout {
+            assert!(epoch_height < 4);
+            return if validators.len() == 7 {
+                assert!(!validators.contains(&account_id.to_string()));
+                true
+            } else {
+                false
+            };
+        } else {
+            assert_eq!(validators.len(), 8, "No kickouts are expected");
+            epoch_height >= 4
+        }
+    };
+
+    test_loop.run_until(
+        success_condition,
+        // Timeout at producing 5 epochs, approximately.
+        Duration::seconds((5 * epoch_length) as i64),
+    );
+
+    TestLoopEnv { test_loop, datas: node_datas, tempdir }
+        .shutdown_and_drain_remaining_events(Duration::seconds(20));
+}
+
+/// Get all validator account names for the latest epoch.
+fn get_epoch_all_validators(client: &Client) -> Vec<String> {
+    let tip = client.chain.head().unwrap();
+    let epoch_id = tip.epoch_id;
+    let all_validators = client.epoch_manager.get_epoch_all_validators(&epoch_id).unwrap();
+    all_validators.into_iter().map(|vs| vs.account_id().to_string()).collect()
+}
+
+/// Checks that chunk validator with low endorsement stats is kicked out.
+#[test]
+fn test_chunk_validator_kicked_out() {
+    run_test_chunk_validator_kickout(true);
+}
+
+/// Checks that block producer with low chunk endorsement stats is not kicked out.
+#[test]
+fn test_block_producer_not_kicked_out() {
+    run_test_chunk_validator_kickout(false);
+}
diff --git a/integration-tests/src/test_loop/tests/congestion_control_adv_chunk_produce.rs b/integration-tests/src/test_loop/tests/congestion_control_adv_chunk_produce.rs
new file mode 100644
index 00000000000..0546ccc4e83
--- /dev/null
+++ b/integration-tests/src/test_loop/tests/congestion_control_adv_chunk_produce.rs
@@ -0,0 +1,151 @@
+use core::panic;
+
+use itertools::Itertools;
+use near_async::test_loop::data::{TestLoopData, TestLoopDataHandle};
+use near_async::test_loop::TestLoopV2;
+use near_async::time::Duration;
+use near_chain_configs::test_genesis::TestGenesisBuilder;
+use near_client::client_actor::ClientActorInner;
+use near_client::test_utils::test_loop::ClientQueries;
+use near_client::Client;
+use near_o11y::testonly::init_test_logger;
+use near_primitives::hash::CryptoHash;
+use near_primitives::types::{AccountId, BlockHeight};
+
+use crate::test_loop::builder::TestLoopBuilder;
+use crate::test_loop::env::{TestData, TestLoopEnv};
+use crate::test_loop::utils::transactions::{call_contract, deploy_contracts};
+use crate::test_loop::utils::ONE_NEAR;
+
+const NUM_PRODUCERS: usize = 2;
+const NUM_VALIDATORS: usize = 2;
+const NUM_RPC: usize = 1;
+const NUM_CLIENTS: usize = NUM_PRODUCERS + NUM_VALIDATORS + NUM_RPC;
+
+/// This test checks that a chunk with too many transactions is rejected by the
+/// chunk validators.
+#[test]
+fn test_congestion_control_adv_chunk_produce() {
+    init_test_logger();
+
+    let builder = TestLoopBuilder::new();
+
+    let initial_balance = 10000 * ONE_NEAR;
+    let accounts =
+        (0..100).map(|i| format!("account{}", i).parse().unwrap()).collect::<Vec<AccountId>>();
+    let clients = accounts.iter().take(NUM_CLIENTS).cloned().collect_vec();
+
+    // split the clients into producers, validators, and rpc nodes
+    let tmp = clients.clone();
+    let (producers, tmp) = tmp.split_at(NUM_PRODUCERS);
+    let (validators, tmp) = tmp.split_at(NUM_VALIDATORS);
+    let (rpcs, tmp) = tmp.split_at(NUM_RPC);
+    assert!(tmp.is_empty());
+
+    let producers = producers.iter().map(|account| account.as_str()).collect_vec();
+    let validators = validators.iter().map(|account| account.as_str()).collect_vec();
+    let [rpc] = rpcs else { panic!("Expected exactly one rpc node") };
+
+    let mut genesis_builder = TestGenesisBuilder::new();
+    genesis_builder
+        .genesis_time_from_clock(&builder.clock())
+        .protocol_version_latest()
+        .genesis_height(10000)
+        .gas_prices_free()
+        .gas_limit_one_petagas()
+        .shard_layout_simple_v1(&["account3", "account5", "account7"])
+        .transaction_validity_period(1000)
+        .epoch_length(10)
+        .validators_desired_roles(&producers, &validators)
+        .shuffle_shard_assignment_for_chunk_producers(true);
+    for account in &accounts {
+        genesis_builder.add_user_account_simple(account.clone(), initial_balance);
+    }
+    let genesis = genesis_builder.build();
+
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } =
+        builder.genesis(genesis).clients(clients).build();
+
+    let first_epoch_tracked_shards = get_tracked_shards(&test_loop, &node_datas);
+    tracing::info!("First epoch tracked shards: {:?}", first_epoch_tracked_shards);
+
+    // Deploy the contracts.
+    let txs = deploy_contracts(&mut test_loop, &node_datas);
+    test_loop.run_for(Duration::seconds(5));
+
+    tracing::info!(target: "test", "deployed contracts");
+    log_txs(&test_loop, &node_datas, &rpc, &txs);
+
+    // Call the contracts.
+    let mut txs = vec![];
+    for account in accounts.iter().take(NUM_PRODUCERS + NUM_VALIDATORS) {
+        let tx = call_contract(&mut test_loop, &node_datas, account);
+        txs.push(tx);
+    }
+    test_loop.run_for(Duration::seconds(20));
+
+    tracing::info!(target: "test", "called contracts");
+    log_txs(&test_loop, &node_datas, &rpc, &txs);
+
+    // Make sure the chain progresses for several epochs.
+    let client_handle = node_datas[0].client_sender.actor_handle();
+    test_loop.run_until(
+        |test_loop_data: &mut TestLoopData| height_condition(test_loop_data, &client_handle, 10050),
+        Duration::seconds(100),
+    );
+
+    let later_epoch_tracked_shards = get_tracked_shards(&test_loop, &node_datas);
+    tracing::info!("Later epoch tracked shards: {:?}", later_epoch_tracked_shards);
+    assert_ne!(first_epoch_tracked_shards, later_epoch_tracked_shards);
+
+    // Give the test a chance to finish off remaining events in the event loop, which can
+    // be important for properly shutting down the nodes.
+    TestLoopEnv { test_loop, datas: node_datas, tempdir }
+        .shutdown_and_drain_remaining_events(Duration::seconds(20));
+}
+
+fn height_condition(
+    test_loop_data: &mut TestLoopData,
+    client_handle: &TestLoopDataHandle<ClientActorInner>,
+    target_height: BlockHeight,
+) -> bool {
+    test_loop_data.get(&client_handle).client.chain.head().unwrap().height > target_height
+}
+
+fn get_tracked_shards(test_loop: &TestLoopV2, node_datas: &Vec<TestData>) -> Vec<Vec<u64>> {
+    let clients = node_datas
+        .iter()
+        .map(|data| &test_loop.data.get(&data.client_sender.actor_handle()).client)
+        .collect_vec();
+    clients.tracked_shards_for_each_client()
+}
+
+fn rpc_client<'a>(
+    test_loop: &'a TestLoopV2,
+    node_datas: &'a Vec<TestData>,
+    rpc: &AccountId,
+) -> &'a Client {
+    for node_data in node_datas {
+        if &node_data.account_id == rpc {
+            let handle = node_data.client_sender.actor_handle();
+            let client_actor = test_loop.data.get(&handle);
+            return &client_actor.client;
+        }
+    }
+    panic!("RPC client not found");
+}
+
+fn log_txs(
+    test_loop: &TestLoopV2,
+    node_datas: &Vec<TestData>,
+    rpc: &AccountId,
+    txs: &Vec<CryptoHash>,
+) {
+    let rpc = rpc_client(test_loop, node_datas, rpc);
+
+    for &tx in txs {
+        let tx_outcome = rpc.chain.get_partial_transaction_result(&tx);
+        let status = tx_outcome.as_ref().map(|o| o.status.clone());
+        tracing::info!(target: "test", ?tx, ?status, "transaction status");
+    }
+}
diff --git a/integration-tests/src/test_loop/tests/in_memory_tries.rs b/integration-tests/src/test_loop/tests/in_memory_tries.rs
index d64d54713b8..4506eabcc39 100644
--- a/integration-tests/src/test_loop/tests/in_memory_tries.rs
+++ b/integration-tests/src/test_loop/tests/in_memory_tries.rs
@@ -8,7 +8,8 @@ use near_store::ShardUId;
 
 use crate::test_loop::builder::TestLoopBuilder;
 use crate::test_loop::env::TestLoopEnv;
-use crate::test_loop::utils::{execute_money_transfers, ONE_NEAR};
+use crate::test_loop::utils::transactions::execute_money_transfers;
+use crate::test_loop::utils::ONE_NEAR;
 
 /// Runs chain with sequence of chunks with empty state changes, long enough to
 /// cover 5 epochs which is default GC period.
@@ -47,7 +48,7 @@ fn test_load_memtrie_after_empty_chunks() { } let genesis = genesis_builder.build(); - let TestLoopEnv { mut test_loop, datas: node_datas } = + let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder.genesis(genesis).clients(client_accounts).build(); execute_money_transfers(&mut test_loop, &node_datas, &accounts); @@ -92,6 +93,6 @@ fn test_load_memtrie_after_empty_chunks() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. - TestLoopEnv { test_loop, datas: node_datas } + TestLoopEnv { test_loop, datas: node_datas, tempdir } .shutdown_and_drain_remaining_events(Duration::seconds(20)); } diff --git a/integration-tests/src/test_loop/tests/mod.rs b/integration-tests/src/test_loop/tests/mod.rs index 762ad3ea757..b11c5bb4471 100644 --- a/integration-tests/src/test_loop/tests/mod.rs +++ b/integration-tests/src/test_loop/tests/mod.rs @@ -1,3 +1,5 @@ +mod chunk_validator_kickout; +pub mod congestion_control_adv_chunk_produce; pub mod in_memory_tries; pub mod multinode_stateless_validators; pub mod multinode_test_loop_example; diff --git a/integration-tests/src/test_loop/tests/multinode_stateless_validators.rs b/integration-tests/src/test_loop/tests/multinode_stateless_validators.rs index a72bad6b3f5..943bc0ed609 100644 --- a/integration-tests/src/test_loop/tests/multinode_stateless_validators.rs +++ b/integration-tests/src/test_loop/tests/multinode_stateless_validators.rs @@ -12,7 +12,8 @@ use near_primitives::views::CurrentEpochValidatorInfo; use crate::test_loop::builder::TestLoopBuilder; use crate::test_loop::env::TestLoopEnv; -use crate::test_loop::utils::{execute_money_transfers, ONE_NEAR}; +use crate::test_loop::utils::transactions::execute_money_transfers; +use crate::test_loop::utils::ONE_NEAR; const NUM_ACCOUNTS: usize = 20; const NUM_SHARDS: u64 = 4; @@ -63,7 +64,7 @@ fn test_stateless_validators_with_multi_test_loop() { } let genesis = genesis_builder.build(); - let TestLoopEnv { mut test_loop, datas: node_datas } = + let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } = builder.genesis(genesis).clients(clients).build(); // Capture the initial validator info in the first epoch. @@ -97,7 +98,7 @@ fn test_stateless_validators_with_multi_test_loop() { // Give the test a chance to finish off remaining events in the event loop, which can // be important for properly shutting down the nodes. 
-    TestLoopEnv { test_loop, datas: node_datas }
+    TestLoopEnv { test_loop, datas: node_datas, tempdir }
         .shutdown_and_drain_remaining_events(Duration::seconds(20));
 }
diff --git a/integration-tests/src/test_loop/tests/multinode_test_loop_example.rs b/integration-tests/src/test_loop/tests/multinode_test_loop_example.rs
index cb253d18660..1af69a249a0 100644
--- a/integration-tests/src/test_loop/tests/multinode_test_loop_example.rs
+++ b/integration-tests/src/test_loop/tests/multinode_test_loop_example.rs
@@ -7,7 +7,8 @@ use near_primitives::types::AccountId;
 
 use crate::test_loop::builder::TestLoopBuilder;
 use crate::test_loop::env::TestLoopEnv;
-use crate::test_loop::utils::{execute_money_transfers, ONE_NEAR};
+use crate::test_loop::utils::transactions::execute_money_transfers;
+use crate::test_loop::utils::ONE_NEAR;
 
 const NUM_CLIENTS: usize = 4;
@@ -38,8 +39,8 @@ fn test_client_with_multi_test_loop() {
     }
     let genesis = genesis_builder.build();
 
-    let TestLoopEnv { mut test_loop, datas: node_datas } =
-        builder.genesis(genesis).clients(clients).disable_gc().build();
+    let TestLoopEnv { mut test_loop, datas: node_datas, tempdir } =
+        builder.genesis(genesis).clients(clients).build();
 
     let first_epoch_tracked_shards = {
         let clients = node_datas
@@ -71,6 +72,6 @@ fn test_client_with_multi_test_loop() {
 
     // Give the test a chance to finish off remaining events in the event loop, which can
     // be important for properly shutting down the nodes.
-    TestLoopEnv { test_loop, datas: node_datas }
+    TestLoopEnv { test_loop, datas: node_datas, tempdir }
         .shutdown_and_drain_remaining_events(Duration::seconds(20));
 }
diff --git a/integration-tests/src/test_loop/utils/mod.rs b/integration-tests/src/test_loop/utils/mod.rs
new file mode 100644
index 00000000000..a1d57690e5e
--- /dev/null
+++ b/integration-tests/src/test_loop/utils/mod.rs
@@ -0,0 +1,5 @@
+pub mod network;
+pub mod transactions;
+
+pub(crate) const ONE_NEAR: u128 = 1_000_000_000_000_000_000_000_000;
+pub(crate) const TGAS: u64 = 1_000_000_000_000;
diff --git a/integration-tests/src/test_loop/utils/network.rs b/integration-tests/src/test_loop/utils/network.rs
new file mode 100644
index 00000000000..d86f5b971af
--- /dev/null
+++ b/integration-tests/src/test_loop/utils/network.rs
@@ -0,0 +1,79 @@
+use std::sync::{Arc, Mutex};
+
+use near_epoch_manager::EpochManagerAdapter;
+use near_network::types::NetworkRequests;
+use near_primitives::types::AccountId;
+
+use crate::test_loop::env::TestLoopChunksStorage;
+
+/// Handler that drops all network messages relevant to chunks validated by
+/// `validator_of_chunks_to_drop`. If the number of nodes in the chain is large
+/// enough (at least three?), this is enough to prevent the chunk from being
+/// included.
+///
+/// This logic can easily be extended to dropping chunks based on any rule.
+pub fn partial_encoded_chunks_dropper(
+    chunks_storage: Arc<Mutex<TestLoopChunksStorage>>,
+    epoch_manager_adapter: Arc<dyn EpochManagerAdapter>,
+    validator_of_chunks_to_drop: AccountId,
+) -> Arc<dyn Fn(NetworkRequests) -> Option<NetworkRequests>> {
+    Arc::new(move |request| {
+        // Filter out only messages related to distributing chunks in the
+        // network; extract `chunk_hash` from the message.
+        let chunk_hash = match &request {
+            NetworkRequests::PartialEncodedChunkRequest { request, .. } => {
+                Some(request.chunk_hash.clone())
+            }
+            NetworkRequests::PartialEncodedChunkResponse { response, .. } => {
+                Some(response.chunk_hash.clone())
+            }
+            NetworkRequests::PartialEncodedChunkMessage { partial_encoded_chunk, ..
} => { + Some(partial_encoded_chunk.header.chunk_hash()) + } + NetworkRequests::PartialEncodedChunkForward { forward, .. } => { + Some(forward.chunk_hash.clone()) + } + _ => None, + }; + + let Some(chunk_hash) = chunk_hash else { + return Some(request); + }; + + let chunk = { + let chunks_storage = chunks_storage.lock().unwrap(); + let chunk = chunks_storage.get(&chunk_hash).unwrap().clone(); + let can_drop_chunk = chunks_storage.can_drop_chunk(&chunk); + + if !can_drop_chunk { + return Some(request); + } + + chunk + }; + + let prev_block_hash = chunk.prev_block_hash(); + let shard_id = chunk.shard_id(); + let height_created = chunk.height_created(); + + // If we don't have block on top of which chunk is built, we can't + // retrieve epoch id. + // This case appears to be too rare to interfere with the goal of + // dropping chunk. + let Ok(epoch_id) = epoch_manager_adapter.get_epoch_id_from_prev_block(prev_block_hash) + else { + return Some(request); + }; + + // Finally, we drop chunk if the given account is present in the list + // of its validators. + let chunk_validators = epoch_manager_adapter + .get_chunk_validator_assignments(&epoch_id, shard_id, height_created) + .unwrap(); + if !chunk_validators.contains(&validator_of_chunks_to_drop) { + return Some(request); + } + + return None; + }) +} diff --git a/integration-tests/src/test_loop/utils.rs b/integration-tests/src/test_loop/utils/transactions.rs similarity index 50% rename from integration-tests/src/test_loop/utils.rs rename to integration-tests/src/test_loop/utils/transactions.rs index 1028cec0b32..ea4d9f5f145 100644 --- a/integration-tests/src/test_loop/utils.rs +++ b/integration-tests/src/test_loop/utils/transactions.rs @@ -4,18 +4,22 @@ use near_async::messaging::SendAsync; use near_async::test_loop::TestLoopV2; use near_async::time::Duration; use near_client::test_utils::test_loop::ClientQueries; +use near_crypto::{KeyType, PublicKey}; use near_network::client::ProcessTxRequest; +use near_primitives::hash::CryptoHash; use near_primitives::test_utils::create_user_test_signer; use near_primitives::transaction::SignedTransaction; use near_primitives::types::AccountId; use std::collections::HashMap; -pub(crate) const ONE_NEAR: u128 = 1_000_000_000_000_000_000_000_000; +use super::{ONE_NEAR, TGAS}; /// Execute money transfers within given `TestLoop` between given accounts. /// Runs chain long enough for the transfers to be optimistically executed. /// Used to generate state changes and check that chain is able to update /// balances correctly. +/// TODO: consider resending transactions which may be dropped because of +/// missing chunks. pub(crate) fn execute_money_transfers( test_loop: &mut TestLoopV2, node_data: &[TestData], @@ -88,3 +92,100 @@ pub(crate) fn execute_money_transfers( ); } } + +/// Deploy the test contracts to all of the provided accounts. 
+pub(crate) fn deploy_contracts(
+    test_loop: &mut TestLoopV2,
+    node_datas: &[TestData],
+) -> Vec<CryptoHash> {
+    let block_hash = get_shared_block_hash(node_datas, test_loop);
+
+    let mut txs = vec![];
+    for node_data in node_datas {
+        let account = node_data.account_id.clone();
+
+        let contract = near_test_contracts::rs_contract();
+        let contract_id = format!("contract.{}", account);
+        let signer = create_user_test_signer(&account).into();
+        let public_key = PublicKey::from_seed(KeyType::ED25519, &contract_id);
+        let nonce = 1;
+
+        let transaction = SignedTransaction::create_contract(
+            nonce,
+            account,
+            contract_id.parse().unwrap(),
+            contract.to_vec(),
+            10 * ONE_NEAR,
+            public_key,
+            &signer,
+            block_hash,
+        );
+
+        txs.push(transaction.get_hash());
+
+        let process_tx_request =
+            ProcessTxRequest { transaction, is_forwarded: false, check_only: false };
+        let future = node_data.client_sender.clone().send_async(process_tx_request);
+        drop(future);
+
+        tracing::info!(target: "test", ?contract_id, "deployed contract");
+    }
+    txs
+}
+
+pub fn call_contract(
+    test_loop: &mut TestLoopV2,
+    node_datas: &[TestData],
+    account: &AccountId,
+) -> CryptoHash {
+    let block_hash = get_shared_block_hash(node_datas, test_loop);
+
+    let nonce = 2;
+    let signer = create_user_test_signer(&account);
+    let contract_id = format!("contract.{}", account).parse().unwrap();
+
+    let burn_gas = 250 * TGAS;
+    let attach_gas = 300 * TGAS;
+
+    let deposit = 0;
+    let method_name = "burn_gas_raw".to_owned();
+    let args = burn_gas.to_le_bytes().to_vec();
+
+    let transaction = SignedTransaction::call(
+        nonce,
+        signer.account_id.clone(),
+        contract_id,
+        &signer.into(),
+        deposit,
+        method_name,
+        args,
+        attach_gas,
+        block_hash,
+    );
+
+    let tx_hash = transaction.get_hash();
+
+    let process_tx_request =
+        ProcessTxRequest { transaction, is_forwarded: false, check_only: false };
+    let future = node_datas[0].client_sender.clone().send_async(process_tx_request);
+    drop(future);
+
+    tx_hash
+}
+
+fn get_shared_block_hash(node_datas: &[TestData], test_loop: &mut TestLoopV2) -> CryptoHash {
+    let clients = node_datas
+        .iter()
+        .map(|data| &test_loop.data.get(&data.client_sender.actor_handle()).client)
+        .collect_vec();
+
+    let (_, block_hash) = clients
+        .iter()
+        .map(|client| {
+            let head = client.chain.head().unwrap();
+            (head.height, head.last_block_hash)
+        })
+        .min_by_key(|&(height, _)| height)
+        .unwrap();
+    block_hash
+}
diff --git a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs
index a50b3da39b5..d2f9e851f31 100644
--- a/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs
+++ b/integration-tests/src/tests/client/features/access_key_nonce_for_implicit_accounts.rs
@@ -19,7 +19,7 @@ use near_primitives::sharding::ChunkHash;
 use near_primitives::transaction::SignedTransaction;
 use near_primitives::types::{AccountId, BlockHeight};
 use near_primitives::utils::derive_near_implicit_account_id;
-use near_primitives::version::{ProtocolFeature, ProtocolVersion};
+use near_primitives::version::{ProtocolFeature, ProtocolVersion, PROTOCOL_VERSION};
 use near_primitives::views::FinalExecutionStatus;
 use nearcore::test_utils::TestEnvNightshadeSetupExt;
 use rand::seq::SliceRandom;
@@ -764,7 +764,9 @@ fn test_chunk_forwarding_optimization() {
     // Note: For nightly, which includes SingleShardTracking, this check is disabled because
     // we're so efficient with part forwarding now
that we don't seem to be forwarding more // than it is necessary. - if !cfg!(feature = "nightly") && !cfg!(feature = "statelessnet_protocol") { + // TODO - Since the stabilization of Stateless Validation which includes the + // SingleShardTracking this test doesn't make sense anymore. We should remove it. + if !ProtocolFeature::SingleShardTracking.enabled(PROTOCOL_VERSION) { assert!(PARTIAL_ENCODED_CHUNK_FORWARD_CACHED_WITHOUT_HEADER.get() > 0.0); } debug!(target: "test", diff --git a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs index a1d5ec92638..c8d40a165df 100644 --- a/integration-tests/src/tests/client/features/limit_contract_functions_number.rs +++ b/integration-tests/src/tests/client/features/limit_contract_functions_number.rs @@ -70,8 +70,8 @@ fn verify_contract_limits_upgrade( // Check that we can't call a contract exceeding functions number limit after upgrade. // Disabled in nightly due to https://github.com/near/nearcore/issues/8590 -#[cfg(all(not(feature = "nightly"), not(feature = "statelessnet_protocol")))] #[test] +#[ignore] fn test_function_limit_change() { verify_contract_limits_upgrade( ProtocolFeature::LimitContractFunctionsNumber, diff --git a/integration-tests/src/tests/client/resharding.rs b/integration-tests/src/tests/client/resharding.rs index acbd897409a..49bb65457da 100644 --- a/integration-tests/src/tests/client/resharding.rs +++ b/integration-tests/src/tests/client/resharding.rs @@ -592,53 +592,6 @@ impl TestReshardingEnv { successful_txs } - // Check the receipt_id_to_shard_id mappings are correct for all outgoing receipts in the - // latest block - fn check_receipt_id_to_shard_id(&mut self) { - let env = &mut self.env; - let head = env.clients[0].chain.head().unwrap(); - let shard_layout = env.clients[0] - .epoch_manager - .get_shard_layout_from_prev_block(&head.last_block_hash) - .unwrap(); - let block = env.clients[0].chain.get_block(&head.last_block_hash).unwrap(); - - for (shard_id, chunk_header) in block.chunks().iter().enumerate() { - if chunk_header.height_included() != block.header().height() { - continue; - } - let shard_id = shard_id as ShardId; - - for (i, me) in env.validators.iter().enumerate() { - let client = &mut env.clients[i]; - let care_about_shard = client.shard_tracker.care_about_shard( - Some(me), - &head.prev_block_hash, - shard_id, - true, - ); - if !care_about_shard { - continue; - } - - let outgoing_receipts = client - .chain - .mut_chain_store() - .get_outgoing_receipts(&head.last_block_hash, shard_id) - .unwrap() - .clone(); - for receipt in outgoing_receipts.iter() { - let target_shard_id = - client.chain.get_shard_id_for_receipt_id(receipt.receipt_id()).unwrap(); - assert_eq!( - target_shard_id, - account_id_to_shard_id(receipt.receiver_id(), &shard_layout) - ); - } - } - } - } - /// Check that after resharding is finished, the artifacts stored in storage is removed fn check_resharding_artifacts(&mut self, client_id: usize) { tracing::debug!(target: "test", "checking resharding artifacts"); @@ -1042,7 +995,6 @@ fn test_shard_layout_upgrade_simple_impl( let drop_chunk_condition = DropChunkCondition::new(); for _ in 1..4 * epoch_length { test_env.step(&drop_chunk_condition, target_protocol_version); - test_env.check_receipt_id_to_shard_id(); test_env.check_snapshot(state_snapshot_enabled); } @@ -1491,7 +1443,6 @@ fn test_shard_layout_upgrade_cross_contract_calls_impl( let drop_chunk_condition = 
DropChunkCondition::new();
     for _ in 1..5 * epoch_length {
         test_env.step(&drop_chunk_condition, target_protocol_version);
-        test_env.check_receipt_id_to_shard_id();
     }
 
     let successful_txs = test_env.check_tx_outcomes(false);
@@ -1708,7 +1659,6 @@ fn test_shard_layout_upgrade_promise_yield_impl(resharding_type: ReshardingType,
     let drop_chunk_condition = DropChunkCondition::new();
     for _ in 1..5 * epoch_length {
         test_env.step(&drop_chunk_condition, target_protocol_version);
-        test_env.check_receipt_id_to_shard_id();
     }
 
     let tx_outcomes = test_env.check_tx_outcomes(false);
@@ -1765,7 +1715,6 @@ fn test_shard_layout_upgrade_incoming_receipts_impl(
     let drop_chunk_condition = DropChunkCondition::with_by_height_shard_id(by_height_shard_id);
     for _ in 1..5 * epoch_length {
         test_env.step(&drop_chunk_condition, target_protocol_version);
-        test_env.check_receipt_id_to_shard_id();
     }
 
     let successful_txs = test_env.check_tx_outcomes(false);
@@ -1870,7 +1819,6 @@ fn test_missing_chunks(
         for height in last_height - 3..=last_height {
             test_env.check_next_block_with_new_chunk(height);
         }
-        test_env.check_receipt_id_to_shard_id();
     }
 
     // make sure all included transactions finished processing
@@ -1881,7 +1829,6 @@ fn test_missing_chunks(
         for height in last_height - 3..=last_height {
             test_env.check_next_block_with_new_chunk(height);
        }
-        test_env.check_receipt_id_to_shard_id();
     }
 
     let successful_txs = test_env.check_tx_outcomes(true);
diff --git a/integration-tests/src/tests/dependencies.rs b/integration-tests/src/tests/dependencies.rs
new file mode 100644
index 00000000000..a1ab72dd7a3
--- /dev/null
+++ b/integration-tests/src/tests/dependencies.rs
@@ -0,0 +1,106 @@
+/*
+ * This test is designed to ensure that the public libraries in the project do not exceed a specified number of unique dependencies.
+ * Please note that this test doesn't care about the total number of compilation units displayed during `cargo build`.
+ * The output of `cargo build` shows compilation units; a compilation unit is either an external dependency or a file from the source tree.
+ *
+ * The `LIBS_THRESHOLDS` constant defines a list of libraries along with their respective maximum allowed unique dependency counts.
+ * The `THRESHOLD_IS_TOO_GENEROUS` constant is used to determine if the threshold for any library is too lenient, suggesting it might need to be restricted even further.
+ *
+ * The `get_and_assert_crate_dependencies` function takes a library name and a threshold, runs the `cargo tree` command to get the dependency tree for the library,
+ * extracts unique dependencies using a regex, and checks if the count of unique dependencies is below the threshold.
+ *
+ * The purpose of this test is to maintain a lean dependency graph, promoting better performance, security, and maintainability.
+ * It also limits the total number of dependencies for public-facing libraries.
+ */
+
+use std::collections::HashSet;
+use std::process::Command;
+use std::str;
+
+const LIBS_THRESHOLDS: [(&str, usize); 9] = [
+    ("near-primitives", 115),
+    ("near-jsonrpc-primitives", 130),
+    ("near-chain-configs", 130),
+    ("near-chain-primitives", 130),
+    ("near-client-primitives", 150),
+    ("near-parameters", 65),
+    ("near-crypto", 75),
+    ("near-primitives-core", 60),
+    ("near-time", 30),
+];
+
+const THRESHOLD_IS_TOO_GENEROUS: usize = 30;
+
+fn get_and_assert_crate_dependencies(name: &str, threshold: usize) -> usize {
+    let output: std::process::Output =
+        Command::new(std::env::var("CARGO").unwrap_or_else(|_| "cargo".to_string()))
+            .arg("tree")
+            .arg("-p")
+            .arg(name)
+            .arg("--edges=normal")
+            .output()
+            .unwrap_or_else(|_| panic!("Failed to execute cargo tree for {name}"));
+
+    assert!(
+        output.status.success(),
+        "Cargo tree failed for {name} with: {}",
+        str::from_utf8(&output.stderr).unwrap()
+    );
+
+    let output_str = str::from_utf8(&output.stdout).expect("Failed to convert output to string");
+
+    // The `cargo tree` command outputs the dependency tree of a given crate. An example line of the output might look like:
+    // │   ├── actix_derive v0.6.0 (proc-macro)
+    // This line indicates that the crate `actix_derive` version `0.6.0` is a dependency.
+    //
+    // The regex `([\w-]+) v([\d.]+(?:-\w+)?)` is used to extract the crate name and version from each line of the `cargo tree` output.
+    // - `([\w-]+)` captures the crate name, which can include alphanumeric characters and hyphens.
+    // - `v` matches the literal character 'v' that precedes the version number.
+    // - `([\d.]+(?:-\w+)?)` captures the version number, which can include digits, dots, and optional pre-release identifiers.
+    let re = regex::Regex::new(r"([\w-]+) v([\d.]+(?:-\w+)?)").unwrap();
+
+    let mut unique_crates = HashSet::new();
+
+    for cap in re.captures_iter(output_str) {
+        let crate_name = &cap[1];
+        let crate_version = &cap[2];
+        let crate_str = format!("{}-{}", crate_name, crate_version);
+        unique_crates.insert(crate_str);
+    }
+    let crate_count = unique_crates.len();
+
+    assert!(
+        crate_count < threshold,
+        "Dependency count is too high for {name}: {} >= {}",
+        crate_count,
+        threshold
+    );
+    crate_count
+}
+
+#[derive(Debug, Clone, PartialEq)]
+#[allow(unused)]
+struct CrateDeps {
+    pub crate_name: String,
+    pub crate_deps: usize,
+    pub suggested_new_threshold: usize,
+}
+
+#[test]
+fn test_public_libs_are_small_enough() {
+    let results = LIBS_THRESHOLDS
+        .into_iter()
+        .map(|(name, limit)| (name, get_and_assert_crate_dependencies(name, limit), limit));
+    let mut libs_to_fix = vec![];
+    for (name, result, limit) in results {
+        if limit - result > THRESHOLD_IS_TOO_GENEROUS {
+            libs_to_fix.push(CrateDeps {
+                crate_name: name.to_owned(),
+                crate_deps: result,
+                suggested_new_threshold: result + 10,
+            });
+        }
+    }
+
+    assert_eq!(libs_to_fix, vec![], "Good job on reducing the dependency count, but it's time to review the thresholds for these components");
+}
diff --git a/integration-tests/src/tests/mod.rs b/integration-tests/src/tests/mod.rs
index cdbbc04fe11..2ad0910d6a2 100644
--- a/integration-tests/src/tests/mod.rs
+++ b/integration-tests/src/tests/mod.rs
@@ -1,4 +1,5 @@
 mod client;
+mod dependencies;
 mod genesis_helpers;
 mod nearcore;
 mod nearcore_utils;
diff --git a/integration-tests/src/tests/runtime/sanity_checks.rs b/integration-tests/src/tests/runtime/sanity_checks.rs
index 516517777a1..86c8e8c1054 100644
--- a/integration-tests/src/tests/runtime/sanity_checks.rs
+++ b/integration-tests/src/tests/runtime/sanity_checks.rs
@@ -152,6 +152,8 @@ fn test_cost_sanity() {
     insta::assert_debug_snapshot!(
         if cfg!(feature = "nightly") {
             "receipts_gas_profile_nightly"
+        } else if cfg!(feature = "statelessnet_protocol") {
+            "receipts_gas_profile_statelessnet_protocol"
         } else {
             "receipts_gas_profile"
         },
diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap
index 69980566408..57731f93f9c 100644
--- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap
+++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile.snap
@@ -17,7 +17,7 @@ expression: receipts_gas_profile
     CostGasUsed {
         cost_category: "ACTION_COST",
         cost: "ADD_FUNCTION_CALL_KEY_BYTE",
-        gas_used: 9626655,
+        gas_used: 238418575,
     },
     CostGasUsed {
         cost_category: "ACTION_COST",
@@ -42,7 +42,7 @@ expression: receipts_gas_profile
     CostGasUsed {
         cost_category: "ACTION_COST",
         cost: "DEPLOY_CONTRACT_BYTE",
-        gas_used: 231641966,
+        gas_used: 1621246310,
     },
     CostGasUsed {
         cost_category: "ACTION_COST",
@@ -52,7 +52,7 @@ expression: receipts_gas_profile
     CostGasUsed {
         cost_category: "ACTION_COST",
         cost: "FUNCTION_CALL_BYTE",
-        gas_used: 207941862,
+        gas_used: 571524110,
     },
     CostGasUsed {
         cost_category: "ACTION_COST",
diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap
b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap index 69980566408..57731f93f9c 100644 --- a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_nightly.snap @@ -17,7 +17,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "ACTION_COST", cost: "ADD_FUNCTION_CALL_KEY_BYTE", - gas_used: 9626655, + gas_used: 238418575, }, CostGasUsed { cost_category: "ACTION_COST", @@ -42,7 +42,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "ACTION_COST", cost: "DEPLOY_CONTRACT_BYTE", - gas_used: 231641966, + gas_used: 1621246310, }, CostGasUsed { cost_category: "ACTION_COST", @@ -52,7 +52,7 @@ expression: receipts_gas_profile CostGasUsed { cost_category: "ACTION_COST", cost: "FUNCTION_CALL_BYTE", - gas_used: 207941862, + gas_used: 571524110, }, CostGasUsed { cost_category: "ACTION_COST", diff --git a/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_statelessnet_protocol.snap b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_statelessnet_protocol.snap new file mode 100644 index 00000000000..57731f93f9c --- /dev/null +++ b/integration-tests/src/tests/runtime/snapshots/integration_tests__tests__runtime__sanity_checks__receipts_gas_profile_statelessnet_protocol.snap @@ -0,0 +1,500 @@ +--- +source: integration-tests/src/tests/runtime/sanity_checks.rs +expression: receipts_gas_profile +--- +[ + [ + CostGasUsed { + cost_category: "ACTION_COST", + cost: "ADD_FULL_ACCESS_KEY", + gas_used: 101765125000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "ADD_FUNCTION_CALL_KEY_BASE", + gas_used: 102217625000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "ADD_FUNCTION_CALL_KEY_BYTE", + gas_used: 238418575, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "CREATE_ACCOUNT", + gas_used: 7700000000000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "DELETE_ACCOUNT", + gas_used: 147489000000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "DELETE_KEY", + gas_used: 94946625000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "DEPLOY_CONTRACT_BASE", + gas_used: 184765750000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "DEPLOY_CONTRACT_BYTE", + gas_used: 1621246310, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "FUNCTION_CALL_BASE", + gas_used: 1800000000000, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "FUNCTION_CALL_BYTE", + gas_used: 571524110, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "NEW_ACTION_RECEIPT", + gas_used: 1480548358496, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "STAKE", + gas_used: 141715687500, + }, + CostGasUsed { + cost_category: "ACTION_COST", + cost: "TRANSFER", + gas_used: 230246125000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "ALT_BN128_G1_MULTIEXP_BASE", + gas_used: 713000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "ALT_BN128_G1_MULTIEXP_ELEMENT", + gas_used: 320000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "ALT_BN128_G1_SUM_BASE", + gas_used: 3000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + 
cost: "ALT_BN128_G1_SUM_ELEMENT", + gas_used: 5000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "ALT_BN128_PAIRING_CHECK_BASE", + gas_used: 9686000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "ALT_BN128_PAIRING_CHECK_ELEMENT", + gas_used: 5102000000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "BASE", + gas_used: 17209927215, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "LOG_BASE", + gas_used: 7086626100, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "LOG_BYTE", + gas_used: 131987910, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "PROMISE_AND_BASE", + gas_used: 1465013400, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "PROMISE_AND_PER_PROMISE", + gas_used: 87234816, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "PROMISE_RETURN", + gas_used: 560152386, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_CACHED_TRIE_NODE", + gas_used: 4560000000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_MEMORY_BASE", + gas_used: 182690424000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_MEMORY_BYTE", + gas_used: 4744063584, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_REGISTER_BASE", + gas_used: 7551495558, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_REGISTER_BYTE", + gas_used: 25034748, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "RIPEMD160_BASE", + gas_used: 853675086, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "RIPEMD160_BLOCK", + gas_used: 680107584, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "SHA256_BASE", + gas_used: 4540970250, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "SHA256_BYTE", + gas_used: 120586755, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_HAS_KEY_BASE", + gas_used: 108079793250, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_HAS_KEY_BYTE", + gas_used: 277117605, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_READ_BASE", + gas_used: 112713691500, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_READ_KEY_BYTE", + gas_used: 278572797, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_READ_VALUE_BYTE", + gas_used: 28055025, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_REMOVE_BASE", + gas_used: 106946061000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_REMOVE_KEY_BYTE", + gas_used: 343983456, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_REMOVE_RET_VALUE_BYTE", + gas_used: 57657780, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_WRITE_BASE", + gas_used: 128393472000, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_WRITE_EVICTED_BYTE", + gas_used: 160586535, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_WRITE_KEY_BYTE", + gas_used: 281931468, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "STORAGE_WRITE_VALUE_BYTE", + gas_used: 310185390, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: 
"TOUCHING_TRIE_NODE", + gas_used: 32203911852, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF16_DECODING_BASE", + gas_used: 3543313050, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF16_DECODING_BYTE", + gas_used: 1635774930, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF8_DECODING_BASE", + gas_used: 46676685915, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF8_DECODING_BYTE", + gas_used: 98262621423, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "VALIDATOR_STAKE_BASE", + gas_used: 911834726400, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "VALIDATOR_TOTAL_STAKE_BASE", + gas_used: 911834726400, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WRITE_MEMORY_BASE", + gas_used: 19626564027, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WRITE_MEMORY_BYTE", + gas_used: 866159496, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WRITE_REGISTER_BASE", + gas_used: 37251792318, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WRITE_REGISTER_BYTE", + gas_used: 1904583564, + }, + ], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "BASE", + gas_used: 264768111, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "BASE", + gas_used: 264768111, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_MEMORY_BASE", + gas_used: 2609863200, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "READ_MEMORY_BYTE", + gas_used: 11403999, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF8_DECODING_BASE", + gas_used: 3111779061, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "UTF8_DECODING_BYTE", + gas_used: 874741437, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [], 
+ [], + [], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 70891926, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [], + [], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + ], + [], + [ + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "BASE", + gas_used: 529536222, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BASE", + gas_used: 35445963, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "CONTRACT_LOADING_BYTES", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WASM_INSTRUCTION", + gas_used: 0, + }, + CostGasUsed { + cost_category: "WASM_HOST_COST", + cost: "WRITE_REGISTER_BASE", + gas_used: 2865522486, + }, + ], + [], + [], +] diff --git a/nearcore/src/config_duration_test.rs b/nearcore/src/config_duration_test.rs index 2509a83a470..d5fdebb456f 100644 --- a/nearcore/src/config_duration_test.rs +++ b/nearcore/src/config_duration_test.rs @@ -58,6 +58,9 @@ fn test_config_duration_all_std() { routed_message_ttl: Some(0), routing_table_update_rate_limit_burst: Some(0), routing_table_update_rate_limit_qps: Some(0.0), + received_messages_rate_limits: Some( + near_network::MessagesLimitsOverrideConfig::default(), + ), }, ..Default::default() }, diff --git a/nearcore/src/migrations.rs b/nearcore/src/migrations.rs index bfccff49025..d57aa0f3dff 100644 --- a/nearcore/src/migrations.rs +++ b/nearcore/src/migrations.rs @@ -88,6 +88,7 @@ impl<'a> near_store::StoreMigrator for Migrator<'a> { 36 => near_store::migrations::migrate_36_to_37(store), 37 => near_store::migrations::migrate_37_to_38(store), 38 => near_store::migrations::migrate_38_to_39(store), + 39 => near_store::migrations::migrate_39_to_40(store), DB_VERSION.. => unreachable!(), } } diff --git a/nightly/pytest-sanity.txt b/nightly/pytest-sanity.txt index 195287b1b6e..9e921487cae 100644 --- a/nightly/pytest-sanity.txt +++ b/nightly/pytest-sanity.txt @@ -192,3 +192,7 @@ pytest sanity/congestion_control.py --features nightly # Tests the correct operation of the view client without using memtries (#11312). pytest sanity/rpc_view_history.py pytest sanity/rpc_view_history.py --features nightly + +# Tests switching between memtries and disktries. +pytest sanity/memtrie_disktrie_switch.py +pytest sanity/memtrie_disktrie_switch.py --features nightly \ No newline at end of file diff --git a/pytest/lib/cluster.py b/pytest/lib/cluster.py index 675aedb4f54..1b9dddf5687 100644 --- a/pytest/lib/cluster.py +++ b/pytest/lib/cluster.py @@ -774,9 +774,14 @@ def spin_up_node(config, "127.0.0.1:%s" % (24567 + 10 + bl_ordinal) for bl_ordinal in blacklist ] - node = LocalNode(24567 + 10 + ordinal, 3030 + 10 + ordinal, - near_root, node_dir, blacklist, - config.get('binary_name'), single_node) + node = LocalNode(24567 + 10 + ordinal, + 3030 + 10 + ordinal, + near_root, + node_dir, + blacklist, + config.get('binary_name'), + single_node, + ordinal=ordinal) else: # TODO: Figure out how to know IP address beforehand for remote deployment. 
assert len( @@ -970,14 +975,16 @@ def start_cluster(num_nodes, def spin_up_node_and_push(i, boot_node: BootNode): single_node = (num_nodes == 1) and (num_observers == 0) - node = spin_up_node(config, - near_root, - node_dirs[i], - i, - boot_node=boot_node, - proxy=proxy, - skip_starting_proxy=True, - single_node=single_node) + node = spin_up_node( + config, + near_root, + node_dirs[i], + ordinal=i, + boot_node=boot_node, + proxy=proxy, + skip_starting_proxy=True, + single_node=single_node, + ) ret.append((i, node)) return node diff --git a/pytest/lib/utils.py b/pytest/lib/utils.py index 8daea3ecfbd..c44131065c3 100644 --- a/pytest/lib/utils.py +++ b/pytest/lib/utils.py @@ -17,7 +17,8 @@ import cluster from configured_logger import logger -from transaction import sign_payment_tx +import key +import transaction class TxContext: @@ -54,7 +55,7 @@ def send_moar_txs(self, last_block_hash, num, use_routing): if self.expected_balances[from_] >= amt: logger.info("Sending a tx from %s to %s for %s" % (from_, to, amt)) - tx = sign_payment_tx( + tx = transaction.sign_payment_tx( self.nodes[from_].signer_key, 'test%s' % to, amt, self.next_nonce, base58.b58decode(last_block_hash.encode('utf8'))) diff --git a/pytest/tests/loadtest/locust/common/base.py b/pytest/tests/loadtest/locust/common/base.py index 03fa83e0df0..4eeb44cc9e2 100644 --- a/pytest/tests/loadtest/locust/common/base.py +++ b/pytest/tests/loadtest/locust/common/base.py @@ -862,15 +862,12 @@ def do_on_locust_init(environment): num_funding_accounts = environment.parsed_options.max_workers funding_balance = 100000 * NearUser.INIT_BALANCE - def create_account(id): - account_id = f"funds_worker_{id}.{master_funding_account.key.account_id}" - return Account(key.Key.from_seed_testonly(account_id)) - - funding_accounts = [ - create_account(id) for id in range(num_funding_accounts) - ] - node.prepare_accounts(funding_accounts, master_funding_account, - funding_balance, "create funding account") + for index in range(num_funding_accounts): + account_id = f"funds_worker_{index}.{master_funding_account.key.account_id}" + account = Account(key.Key.from_seed_testonly(account_id)) + node.prepare_account(account, master_funding_account, + funding_balance, "create funding account") + funding_account = master_funding_account elif isinstance(environment.runner, runners.WorkerRunner): worker_id = environment.runner.worker_index diff --git a/pytest/tests/loadtest/locust/common/ft.py b/pytest/tests/loadtest/locust/common/ft.py index 424ef4e9aa4..1c68accc474 100644 --- a/pytest/tests/loadtest/locust/common/ft.py +++ b/pytest/tests/loadtest/locust/common/ft.py @@ -1,3 +1,4 @@ +import logging from concurrent import futures import random import string @@ -53,7 +54,7 @@ def register_passive_user(self, node: NearNodeProxy, account: Account): """ Passive users are only used as receiver, not as signer. 
""" - node.send_tx_retry(InitFTAccount(self.account, account), + node.send_tx_async(InitFTAccount(self.account, account), locust_name="Init FT Account") self.registered_users.append(account.key.account_id) @@ -85,20 +86,30 @@ def create_passive_users(self, assert prefix_len > 4, f"user key {parent.key.account_id} is too long" chars = string.ascii_lowercase + string.digits - def create_account(): - prefix = ''.join(random.choices(chars, k=prefix_len)) + def create_account(i): + prefix = ''.join(random.Random(i).choices(chars, k=prefix_len)) account_id = f"{prefix}.{parent.key.account_id}" return Account(key.Key.from_seed_testonly(account_id)) - accounts = [create_account() for _ in range(num)] - node.prepare_accounts(accounts, - parent, - balance=1, - msg="create passive user") - with futures.ThreadPoolExecutor() as executor: - futures.wait( - executor.submit(self.register_passive_user, node, account) - for account in accounts) + with futures.ThreadPoolExecutor(max_workers=4) as executor: + batch_size = 500 + num_batches = (num + batch_size - 1) // batch_size + for i in range(num_batches): + accounts = [ + create_account(i) + for i in range(i * batch_size, min((i + 1) * + batch_size, num)) + ] + node.prepare_accounts(accounts, + parent, + balance=1, + msg="create passive user") + futures.wait( + executor.submit(self.register_passive_user, node, account) + for account in accounts) + logging.info( + f"{parent.key.account_id}: Processed batch {i + 1}/{num_batches}, created {(i + 1) * batch_size} users" + ) class TransferFT(FunctionCall): @@ -183,7 +194,12 @@ def on_locust_init(environment, **kwargs): ft_account = Account(contract_key) ft_contract = FTContract(ft_account, ft_account, ft_contract_code) ft_contract.install(node, funding_account) + if environment.parsed_options.num_passive_users > 0: + ft_contract.create_passive_users( + environment.parsed_options.num_passive_users, node, + funding_account) environment.ft_contracts.append(ft_contract) + logging.info(f"Finished setup for account {i} on worker {parent_id}") # FT specific CLI args @@ -205,3 +221,8 @@ def _(parser): help= "Whether the names of FT contracts will deterministically based on worker id and run id." 
 )
+    parser.add_argument(
+        "--num-passive-users",
+        type=int,
+        default=0,
+        help="Number of passive users to create in each FT contract.")
diff --git a/pytest/tests/loadtest/locust/common/sweat.py b/pytest/tests/loadtest/locust/common/sweat.py
index f6cf288842d..fb3fb50b097 100644
--- a/pytest/tests/loadtest/locust/common/sweat.py
+++ b/pytest/tests/loadtest/locust/common/sweat.py
@@ -231,21 +231,15 @@ def on_locust_init(environment, **kwargs):
     # on master, register oracles for workers
     if isinstance(environment.runner, locust.runners.MasterRunner):
         num_oracles = int(environment.parsed_options.max_workers)
-        oracle_accounts = [
-            Account(
-                key.Key.from_seed_testonly(
-                    worker_oracle_id(id, run_id,
-                                     environment.master_funding_account)))
-            for id in range(num_oracles)
-        ]
-        node.prepare_accounts(oracle_accounts,
-                              environment.master_funding_account, 100000,
-                              "create contract account")
-        for oracle in oracle_accounts:
-            id = oracle.key.account_id
-            environment.sweat.top_up(node, id)
-            environment.sweat.register_oracle(node, id)
-            node.send_tx_retry(SweatAddOracle(sweat_claim_account, id),
+        for index in range(num_oracles):
+            account_id = worker_oracle_id(index, run_id,
+                                          environment.master_funding_account)
+            account = Account(key.Key.from_seed_testonly(account_id))
+            node.prepare_account(account, environment.master_funding_account,
+                                 100000, "create contract account")
+            environment.sweat.top_up(node, account_id)
+            environment.sweat.register_oracle(node, account_id)
+            node.send_tx_retry(SweatAddOracle(sweat_claim_account, account_id),
                                "add sweat.claim oracle")
diff --git a/pytest/tests/sanity/memtrie_disktrie_switch.py b/pytest/tests/sanity/memtrie_disktrie_switch.py
new file mode 100644
index 00000000000..8756a982d60
--- /dev/null
+++ b/pytest/tests/sanity/memtrie_disktrie_switch.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python3
+# Spins up 4 validating nodes and 1 non-validating node. There are five shards in this test.
+# Tests the following scenario and checks if the network can progress over a few epochs.
+# 1. Starts with memtries enabled.
+# 2. Restarts the validator nodes with memtries disabled.
+# 3. Restarts the validator nodes with memtries re-enabled (this step is currently disabled, see #11675).
+# Sends random transactions between shards at each step.
+
+import unittest
+import pathlib
+import random
+import sys
+import time
+
+sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib'))
+
+from configured_logger import logger
+import cluster
+import key
+import state_sync_lib
+import transaction
+import utils
+
+EPOCH_LENGTH = 10
+
+ONE_NEAR = 10**24
+TGAS = 10**12
+
+GOOD_FINAL_EXECUTION_STATUS = ['FINAL', 'EXECUTED', 'EXECUTED_OPTIMISTIC']
+
+# Shard layout with 5 roughly equal size shards for convenience.
+SHARD_LAYOUT = {
+    "V1": {
+        "boundary_accounts": [
+            "fff",
+            "kkk",
+            "ppp",
+            "uuu",
+        ],
+        "version": 2,
+        "shards_split_map": [],
+        "to_parent_shard_map": [],
+    }
+}
+
+NUM_SHARDS = len(SHARD_LAYOUT["V1"]["boundary_accounts"]) + 1
+
+ALL_ACCOUNTS = [
+    "aaa.test0",
+    "ggg.test0",
+    "lll.test0",
+    "rrr.test0",
+    "vvv.test0",
+]
+
+TxHash = str
+AccountId = str
+
+
+def random_u64():
+    return bytes(random.randint(0, 255) for _ in range(8))
+
+
+class MemtrieDiskTrieSwitchTest(unittest.TestCase):
+
+    def setUp(self):
+        self.nonces = {}
+        self.keys = []
+        self.txs = []
+
+    def test(self):
+
+        node_config_dump, node_config_sync = state_sync_lib.get_state_sync_configs_pair(
+        )
+
+        # Validator node configs: Enable single-shard tracking with memtries enabled.
+ node_config_sync["tracked_shards"] = [] + node_config_sync["store.load_mem_tries_for_tracked_shards"] = True + configs = {x: node_config_sync for x in range(4)} + + # Dumper node config: Enable tracking all shards with memtries enabled. + node_config_dump["tracked_shards"] = [0] + node_config_dump["store.load_mem_tries_for_tracked_shards"] = True + configs[4] = node_config_dump + + self.nodes = cluster.start_cluster( + num_nodes=4, + num_observers=1, + num_shards=NUM_SHARDS, + config=None, + genesis_config_changes=[ + ["epoch_length", EPOCH_LENGTH], ["shard_layout", SHARD_LAYOUT], + ["shuffle_shard_assignment_for_chunk_producers", True], + ["block_producer_kickout_threshold", 0], + ["chunk_producer_kickout_threshold", 0] + ], + client_config_changes=configs) + self.assertEqual(5, len(self.nodes)) + + # Use the dumper node as the RPC node for sending the transactions. + self.rpc_node = self.nodes[4] + + self.__wait_for_blocks(3) + + self.__create_accounts() + + self.__deploy_contracts() + + target_height = self.__next_target_height(num_epochs=1) + logger.info( + f"Step 1: Running with memtries enabled until height {target_height}" + ) + self.__random_workload_until(target_height) + + target_height = self.__next_target_height(num_epochs=1) + logger.info( + f"Step 2: Restarting nodes with memtries disabled until height {target_height}" + ) + self.__restart_nodes(enable_memtries=False) + self.__random_workload_until(target_height) + + # TODO(#11675): Fix MissingTrieValue error and re-enable this step of the test. + # target_height = self.__next_target_height(num_epochs=1) + # logger.info(f"Step 3: Restarting nodes with memtries enabled until height {target_height}") + # self.__restart_nodes(enable_memtries=True) + # self.__random_workload_until(target_height) + + self.__wait_for_txs(self.txs, assert_all_accepted=False) + logger.info("Test ended") + + def __next_target_height(self, num_epochs): + """Returns a next target height until which we will send the transactions.""" + current_height = self.__wait_for_blocks(1) + stop_height = random.randint(1, EPOCH_LENGTH) + return current_height + num_epochs * EPOCH_LENGTH + stop_height + + def next_nonce(self, signer_key): + """Returns the next nonce to use for sending transactions for the given signing key.""" + assert signer_key in self.nonces + nonce = self.nonces[signer_key] + self.nonces[signer_key] = nonce + 42 + return nonce + + def __restart_nodes(self, enable_memtries): + """Stops and restarts the nodes with the config that enables/disables memtries. + + It restarts only the validator nodes and does NOT restart the RPC node.""" + boot_node = self.rpc_node + for i in range(0, 4): + self.nodes[i].kill() + time.sleep(2) + self.nodes[i].change_config( + {"store.load_mem_tries_for_tracked_shards": enable_memtries}) + self.nodes[i].start(boot_node=None if i == 0 else boot_node) + + def __random_workload_until(self, target_height): + """Generates traffic to make transfers between accounts.""" + last_height = -1 + while True: + last_block = self.rpc_node.get_latest_block() + height = last_block.height + if height > target_height: + break + if height != last_height: + logger.info( + f'@{height}, epoch_height: {state_sync_lib.approximate_epoch_height(height, EPOCH_LENGTH)}' + ) + last_height = height + last_block_hash = last_block.hash_bytes + if random.random() < 0.5: + # Make a transfer between accounts. + # The goal is to generate cross-shard receipts. 
+ from_account_key = random.choice(self.account_keys) + to_account_id = random.choice([ + account_key.account_id + for account_key in self.account_keys + if account_key.account_id != from_account_key.account_id + ] + ["near"]) + payment_tx = transaction.sign_payment_tx( + from_account_key, to_account_id, 1, + self.next_nonce(from_account_key), last_block_hash) + result = self.rpc_node.send_tx(payment_tx) + assert 'result' in result and 'error' not in result, ( + 'Expected "result" and no "error" in response, got: {}'. + format(result)) + logger.debug("Transfer: {}".format(result)) + tx_hash = result['result'] + self.txs.append((from_account_key.account_id, tx_hash)) + elif len(self.keys) > 10 and random.random() < 0.5: + # Do some storage reads, but only if we have enough keys populated. + key = self.keys[random.randint(0, len(self.keys) - 1)] + for account_key in self.account_keys: + tx = transaction.sign_function_call_tx( + account_key, account_key.account_id, + 'read_value', key, 300 * TGAS, 0, + self.next_nonce(account_key), last_block_hash) + result = self.rpc_node.send_tx(tx) + assert 'result' in result and 'error' not in result, ( + 'Expected "result" and no "error" in response, got: {}'. + format(result)) + logger.debug("Read value: {}".format(result)) + tx_hash = result['result'] + self.txs.append((account_key.account_id, tx_hash)) + else: + # Generate some data for storage reads + key = random_u64() + self.keys.append(key) + for account_key in self.account_keys: + tx = transaction.sign_function_call_tx( + account_key, account_key.account_id, 'write_key_value', + key + random_u64(), 300 * TGAS, 0, + self.next_nonce(account_key), last_block_hash) + result = self.rpc_node.send_tx(tx) + assert 'result' in result and 'error' not in result, ( + 'Expected "result" and no "error" in response, got: {}'. + format(result)) + logger.debug("Wrote value: {}".format(result)) + tx_hash = result['result'] + self.txs.append((account_key.account_id, tx_hash)) + time.sleep(0.5) + + def __deploy_contracts(self): + """Deploys test contract for each test account. + + Waits for the deploy-contract transactions to complete.""" + deploy_contract_tx_list = [] + for account_key in self.account_keys: + contract = utils.load_test_contract() + last_block_hash = self.rpc_node.get_latest_block().hash_bytes + deploy_contract_tx = transaction.sign_deploy_contract_tx( + account_key, contract, self.next_nonce(account_key), + last_block_hash) + result = self.rpc_node.send_tx(deploy_contract_tx) + assert 'result' in result and 'error' not in result, ( + 'Expected "result" and no "error" in response, got: {}'.format( + result)) + tx_hash = result['result'] + deploy_contract_tx_list.append((account_key.account_id, tx_hash)) + logger.info( + f"Deploying contract for account: {account_key.account_id}, tx: {tx_hash}" + ) + self.__wait_for_txs(deploy_contract_tx_list) + + def __create_accounts(self): + """Creates the test accounts. + + Waits for the create-account transactions to complete.""" + account_keys = [] + for account_id in ALL_ACCOUNTS: + account_key = key.Key.from_random(account_id) + account_keys.append(account_key) + + # Use the first validator node to sign the transactions. + signer_key = self.nodes[0].signer_key + # Update nonce of the signer account using the access key nonce. 
+        signer_nonce = self.rpc_node.get_nonce_for_pk(signer_key.account_id,
+                                                      signer_key.pk) + 42
+
+        create_account_tx_list = []
+        for account_key in account_keys:
+            tx_hash = self.__create_account(account_key, 1000 * ONE_NEAR,
+                                            signer_key, signer_nonce)
+            signer_nonce += 1
+            create_account_tx_list.append((signer_key.account_id, tx_hash))
+            logger.info(
+                f"Creating account: {account_key.account_id}, tx: {tx_hash}")
+        self.__wait_for_txs(create_account_tx_list)
+
+        # Update nonces for the newly created accounts using the access key nonces.
+        for account_key in account_keys:
+            nonce = self.rpc_node.get_nonce_for_pk(account_key.account_id,
+                                                   account_key.pk) + 42
+            self.nonces[account_key] = nonce
+
+        self.account_keys = account_keys
+
+    def __create_account(self, account_key, balance, signer_key, signer_nonce):
+        block_hash = self.rpc_node.get_latest_block().hash_bytes
+        new_signer_key = key.Key(
+            account_key.account_id,
+            account_key.pk,
+            account_key.sk,
+        )
+        create_account_tx = transaction.sign_create_account_with_full_access_key_and_balance_tx(
+            signer_key,
+            account_key.account_id,
+            new_signer_key,
+            balance,
+            signer_nonce,
+            block_hash,
+        )
+        result = self.rpc_node.send_tx(create_account_tx)
+        self.assertIn('result', result, result)
+        tx_hash = result['result']
+        return tx_hash
+
+    def __wait_for_txs(self,
+                       tx_list: list[tuple[AccountId, TxHash]],
+                       assert_all_accepted=True):
+        """Waits for the transactions to be accepted.
+
+        If assert_all_accepted is True, it will assert that all transactions were accepted.
+        Otherwise, it asserts that at least one of the transactions was accepted."""
+        self.assertGreater(len(tx_list), 0)
+        self.__wait_for_blocks(3)
+        logger.info(f"Checking status of {len(tx_list)} transactions")
+        accepted = 0
+        rejected = 0
+        for (tx_sender, tx_hash) in tx_list:
+            if self.__get_tx_status(tx_hash, tx_sender):
+                accepted += 1
+                if not assert_all_accepted:
+                    break
+            else:
+                rejected += 1
+        if assert_all_accepted:
+            self.assertEqual(accepted, len(tx_list))
+        else:
+            self.assertGreater(accepted, 0)
+
+    def __get_tx_status(self, tx_hash, tx_sender) -> bool:
+        """Checks the status of the transaction and returns true if it is accepted."""
+        result = self.rpc_node.get_tx(tx_hash, tx_sender, timeout=10)
+        if 'result' not in result:
+            self.assertIn('error', result, result)
+            return False
+
+        status = result['result']['final_execution_status']
+        self.assertIn(status, GOOD_FINAL_EXECUTION_STATUS, result)
+
+        status = result['result']['status']
+        self.assertIn('SuccessValue', status, result)
+
+        return True
+
+    def __wait_for_blocks(self, num_blocks):
+        height, _ = utils.wait_for_blocks(self.rpc_node, count=num_blocks)
+        return height
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/pytest/tests/sanity/upgradable.py b/pytest/tests/sanity/upgradable.py
index 69d490cb033..502bfabea1f 100755
--- a/pytest/tests/sanity/upgradable.py
+++ b/pytest/tests/sanity/upgradable.py
@@ -60,8 +60,15 @@ def get_proto_version(exe: pathlib.Path) -> int:
     logger.info(f'Got protocol {test_proto} in testnet release {test_release}.')
     logger.info(f'Got protocol {head_proto} on master branch.')
-    ok = (head_proto in (test_proto, test_proto + 1) and
-          test_proto in (main_proto, main_proto + 1))
+    if head_proto == 69:
+        # In the congestion control and stateless validation release allow
+        # increasing the protocol version by 2.
+ ok = (head_proto in (test_proto, test_proto + 1, test_proto + 2) and + test_proto in (main_proto, main_proto + 1, main_proto + 2)) + else: + # Otherwise only allow increasing the protocol version by 1. + ok = (head_proto in (test_proto, test_proto + 1) and + test_proto in (main_proto, main_proto + 1)) assert ok, ('If changed, protocol version of a new release can increase by ' 'at most one.') diff --git a/runtime/near-test-contracts/build.rs b/runtime/near-test-contracts/build.rs index 8120249eb47..d5e32b6efcc 100644 --- a/runtime/near-test-contracts/build.rs +++ b/runtime/near-test-contracts/build.rs @@ -9,9 +9,10 @@ fn main() { std::process::exit(1); } } - fn try_main() -> Result<(), Error> { let mut test_contract_features = vec!["latest_protocol"]; + + println!("cargo:rerun-if-env-changed=CARGO_FEATURE_TEST_FEATURES"); if env::var("CARGO_FEATURE_TEST_FEATURES").is_ok() { test_contract_features.push("test_features"); } diff --git a/runtime/near-vm-runner/fuzz/fuzz_targets/diffrunner.rs b/runtime/near-vm-runner/fuzz/fuzz_targets/diffrunner.rs index 36eacd8465a..55e6a31e531 100644 --- a/runtime/near-vm-runner/fuzz/fuzz_targets/diffrunner.rs +++ b/runtime/near-vm-runner/fuzz/fuzz_targets/diffrunner.rs @@ -20,7 +20,8 @@ libfuzzer_sys::fuzz_target!(|module: ArbitraryModule| { fn run_fuzz(code: &ContractCode, vm_kind: VMKind) -> VMOutcome { let mut fake_external = MockedExternal::with_code(code.clone_for_tests()); - let mut context = create_context(vec![]); + let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string()); + let mut context = create_context(&method_name, vec![]); context.prepaid_gas = 10u64.pow(14); let config_store = RuntimeConfigStore::new(None); let config = config_store.get_config(PROTOCOL_VERSION); @@ -29,15 +30,11 @@ fn run_fuzz(code: &ContractCode, vm_kind: VMKind) -> VMOutcome { wasm_config.limit_config.contract_prepare_version = near_vm_runner::logic::ContractPrepareVersion::V2; - let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string()); - let res = vm_kind.runtime(wasm_config.into()).unwrap().run( - &method_name, - &mut fake_external, - &context, - fees, - [].into(), - None, - ); + let res = vm_kind + .runtime(wasm_config.into()) + .unwrap() + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, fees); // Remove the VMError message details as they can differ between runtimes // TODO: maybe there's actually things we could check for equality here too? 
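Both fuzz targets now follow the same two-phase shape: the entry-point name is resolved up front, stored in the `VMContext`, and the runtime is driven through `prepare` followed by `run`. A condensed sketch of that call shape, assembled from the two fuzz targets in this change (the `find_entry_point` and `create_context` helpers are the fuzz crate's own, and error handling is elided):

// Sketch only: condensed from the fuzz targets in this diff, not new API.
fn fuzz_invoke(code: &ContractCode, vm_kind: VMKind) -> VMOutcome {
    let mut fake_external = MockedExternal::with_code(code.clone_for_tests());
    // The method name now travels inside the context instead of being a
    // separate argument to `run`.
    let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string());
    let mut context = create_context(&method_name, vec![]);
    context.prepaid_gas = 10u64.pow(14);
    let config_store = RuntimeConfigStore::new(None);
    let config = config_store.get_config(PROTOCOL_VERSION);
    let wasm_config = near_parameters::vm::Config::clone(&config.wasm_config);
    let fees = std::sync::Arc::clone(&config.fees);
    vm_kind
        .runtime(wasm_config.into())
        .unwrap()
        // Preparation only needs a shared borrow of the externs...
        .prepare(&fake_external, &context, None)
        // ...while execution takes the exclusive borrow and the fee config.
        .run(&mut fake_external, &context, fees)
        .unwrap_or_else(|err| panic!("fatal error: {err:?}"))
}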
diff --git a/runtime/near-vm-runner/fuzz/fuzz_targets/runner.rs b/runtime/near-vm-runner/fuzz/fuzz_targets/runner.rs
index 5f58401cae6..54637c797c9 100644
--- a/runtime/near-vm-runner/fuzz/fuzz_targets/runner.rs
+++ b/runtime/near-vm-runner/fuzz/fuzz_targets/runner.rs
@@ -18,16 +18,17 @@ libfuzzer_sys::fuzz_target!(|module: ArbitraryModule| {
 fn run_fuzz(code: &ContractCode, config: Arc<RuntimeConfig>) -> VMOutcome {
     let mut fake_external = MockedExternal::with_code(code.clone_for_tests());
-    let mut context = create_context(vec![]);
+    let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string());
+    let mut context = create_context(&method_name, vec![]);
     context.prepaid_gas = 10u64.pow(14);
     let mut wasm_config = near_parameters::vm::Config::clone(&config.wasm_config);
     wasm_config.limit_config.wasmer2_stack_limit = i32::MAX; // If we can crash wasmer2 even without the secondary stack limit it's still good to know
     let vm_kind = config.wasm_config.vm_kind;
     let fees = Arc::clone(&config.fees);
-    let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string());
     vm_kind
         .runtime(wasm_config.into())
         .unwrap()
-        .run(&method_name, &mut fake_external, &context, fees, [].into(), None)
+        .prepare(&fake_external, &context, None)
+        .run(&mut fake_external, &context, fees)
         .unwrap_or_else(|err| panic!("fatal error: {err:?}"))
 }
diff --git a/runtime/near-vm-runner/fuzz/src/lib.rs b/runtime/near-vm-runner/fuzz/src/lib.rs
index a7eb0a8e92f..2121f8ede3b 100644
--- a/runtime/near-vm-runner/fuzz/src/lib.rs
+++ b/runtime/near-vm-runner/fuzz/src/lib.rs
@@ -30,13 +30,15 @@ pub fn find_entry_point(contract: &ContractCode) -> Option<String> {
     None
 }
-pub fn create_context(input: Vec<u8>) -> VMContext {
+pub fn create_context(method: &str, input: Vec<u8>) -> VMContext {
     VMContext {
         current_account_id: "alice".parse().unwrap(),
         signer_account_id: "bob".parse().unwrap(),
         signer_account_pk: vec![0, 1, 2, 3, 4],
         predecessor_account_id: "carol".parse().unwrap(),
+        method: method.into(),
         input,
+        promise_results: Vec::new().into(),
         block_height: 10,
         block_timestamp: 42,
         epoch_height: 1,
diff --git a/runtime/near-vm-runner/src/lib.rs b/runtime/near-vm-runner/src/lib.rs
index bc93160ca01..ca8d19e8b9a 100644
--- a/runtime/near-vm-runner/src/lib.rs
+++ b/runtime/near-vm-runner/src/lib.rs
@@ -36,7 +36,7 @@ pub use code::ContractCode;
 #[cfg(feature = "metrics")]
 pub use metrics::{report_metrics, reset_metrics};
 pub use profile::ProfileDataV3;
-pub use runner::{run, VM};
+pub use runner::{run, PreparedContract, VM};
 /// This is public for internal experimentation use only, and should otherwise be considered an
 /// implementation detail of `near-vm-runner`.
diff --git a/runtime/near-vm-runner/src/logic/context.rs b/runtime/near-vm-runner/src/logic/context.rs
index 869aa24554f..c2b252dbd40 100644
--- a/runtime/near-vm-runner/src/logic/context.rs
+++ b/runtime/near-vm-runner/src/logic/context.rs
@@ -1,4 +1,4 @@
-use super::types::PublicKey;
+use super::types::{PromiseResult, PublicKey};
 use near_primitives_core::config::ViewConfig;
 use near_primitives_core::types::{
     AccountId, Balance, BlockHeight, EpochHeight, Gas, StorageUsage,
@@ -20,9 +20,14 @@ pub struct VMContext {
     /// If this execution is the result of direct execution of transaction then it
     /// is equal to `signer_account_id`.
     pub predecessor_account_id: AccountId,
+    /// The name of the method to invoke.
+    pub method: String,
     /// The input to the contract call.
     /// Encoded as base64 string to be able to pass input in borsh binary format.
 pub input: Vec<u8>,
+    /// If this method execution is invoked directly as a callback by one or more contract calls,
+    /// the results of the methods that made the callback are stored in this collection.
+    pub promise_results: std::sync::Arc<[PromiseResult]>,
     /// The current block height.
     pub block_height: BlockHeight,
     /// The current block timestamp (number of non-leap-nanoseconds since January 1, 1970 0:00:00 UTC).
diff --git a/runtime/near-vm-runner/src/logic/errors.rs b/runtime/near-vm-runner/src/logic/errors.rs
index 3c9760eabf6..02e969290ad 100644
--- a/runtime/near-vm-runner/src/logic/errors.rs
+++ b/runtime/near-vm-runner/src/logic/errors.rs
@@ -58,9 +58,9 @@ pub enum FunctionCallError {
 #[derive(Debug, thiserror::Error, strum::IntoStaticStr)]
 pub enum CacheError {
-    #[error("cache read error")]
+    #[error("cache read error: {0}")]
     ReadError(#[source] io::Error),
-    #[error("cache write error")]
+    #[error("cache write error: {0}")]
     WriteError(#[source] io::Error),
     #[error("cache deserialization error")]
     DeserializationError,
diff --git a/runtime/near-vm-runner/src/logic/logic.rs b/runtime/near-vm-runner/src/logic/logic.rs
index d5d506e119e..eba5a116c68 100644
--- a/runtime/near-vm-runner/src/logic/logic.rs
+++ b/runtime/near-vm-runner/src/logic/logic.rs
@@ -35,7 +35,7 @@ fn base64(s: &[u8]) -> String {
 /// This is a subset of [`VMLogic`] that's strictly necessary to produce `VMOutcome`s.
 pub struct ExecutionResultState {
     /// All gas and economic parameters required during contract execution.
-    config: Arc<Config>,
+    pub(crate) config: Arc<Config>,
     /// Gas tracking for the current contract execution.
     gas_counter: GasCounter,
     /// Logs written by the runtime.
@@ -219,9 +219,6 @@ pub struct VMLogic<'a> {
     config: Arc<Config>,
     /// Fees charged for various operations that contract may execute.
     fees_config: Arc<RuntimeFeesConfig>,
-    /// If this method execution is invoked directly as a callback by one or more contract calls the
-    /// results of the methods that made the callback are stored in this collection.
-    promise_results: Arc<[PromiseResult]>,
     /// Pointer to the guest memory.
     memory: super::vmstate::Memory,
@@ -303,7 +300,6 @@ impl<'a> VMLogic<'a> {
         ext: &'a mut dyn External,
         context: &'a VMContext,
         fees_config: Arc<RuntimeFeesConfig>,
-        promise_results: Arc<[PromiseResult]>,
         result_state: ExecutionResultState,
         memory: impl MemoryLike + 'static,
     ) -> Self {
@@ -319,7 +315,6 @@ impl<'a> VMLogic<'a> {
             context,
             config,
             fees_config,
-            promise_results,
             memory: super::vmstate::Memory::new(memory),
             current_account_locked_balance,
             recorded_storage_counter,
@@ -2381,7 +2376,7 @@ impl<'a> VMLogic<'a> {
             }
             .into());
         }
-        Ok(self.promise_results.len() as _)
+        Ok(self.context.promise_results.len() as _)
     }
     /// If the current function is invoked by a callback we can access the execution results of the
@@ -2414,6 +2409,7 @@ impl<'a> VMLogic<'a> {
             );
         }
         match self
+            .context
             .promise_results
             .get(result_idx as usize)
             .ok_or(HostError::InvalidPromiseResultIndex { result_idx })?
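With `method` and `promise_results` moved into `VMContext`, the context now carries everything callback-related that `VMLogic` used to hold separately, which is what lets the host functions above index `self.context.promise_results` directly. A minimal sketch of that access pattern (the `count_successes` helper is hypothetical, not part of the crate):

use crate::logic::types::PromiseResult;
use crate::logic::VMContext;

// Hypothetical helper for illustration: reads callback inputs straight off the
// context, the way `promise_results_count` and `promise_result` now do.
fn count_successes(context: &VMContext) -> u64 {
    context
        .promise_results
        .iter()
        .filter(|r| matches!(r, PromiseResult::Successful(_)))
        .count() as u64
}

Moving this data into the context is what makes the `prepare`/`run` split below possible without threading extra arguments through every runtime implementation.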
diff --git a/runtime/near-vm-runner/src/logic/tests/promises.rs b/runtime/near-vm-runner/src/logic/tests/promises.rs
index 4fd3cebf982..ee8eaca5a13 100644
--- a/runtime/near-vm-runner/src/logic/tests/promises.rs
+++ b/runtime/near-vm-runner/src/logic/tests/promises.rs
@@ -19,7 +19,7 @@ fn test_promise_results() {
     ];
     let mut logic_builder = VMLogicBuilder::default();
-    logic_builder.promise_results = promise_results.into();
+    logic_builder.context.promise_results = promise_results.into();
     let mut logic = logic_builder.build();
     assert_eq!(logic.promise_results_count(), Ok(3), "Total count of registers must be 3");
diff --git a/runtime/near-vm-runner/src/logic/tests/vm_logic_builder.rs b/runtime/near-vm-runner/src/logic/tests/vm_logic_builder.rs
index 32f5bfe24c3..23dfd2055c4 100644
--- a/runtime/near-vm-runner/src/logic/tests/vm_logic_builder.rs
+++ b/runtime/near-vm-runner/src/logic/tests/vm_logic_builder.rs
@@ -1,6 +1,5 @@
 use crate::logic::mocks::mock_external::MockedExternal;
 use crate::logic::mocks::mock_memory::MockedMemory;
-use crate::logic::types::PromiseResult;
 use crate::logic::{Config, ExecutionResultState, MemSlice, VMContext, VMLogic};
 use crate::tests::test_vm_config;
 use near_parameters::RuntimeFeesConfig;
@@ -10,7 +9,6 @@ pub(super) struct VMLogicBuilder {
     pub ext: MockedExternal,
     pub config: Config,
     pub fees_config: RuntimeFeesConfig,
-    pub promise_results: Arc<[PromiseResult]>,
     pub memory: MockedMemory,
     pub context: VMContext,
 }
@@ -22,7 +20,6 @@ impl Default for VMLogicBuilder {
             fees_config: RuntimeFeesConfig::test(),
             ext: MockedExternal::default(),
             memory: MockedMemory::default(),
-            promise_results: [].into(),
             context: get_context(),
         }
     }
@@ -43,7 +40,6 @@ impl VMLogicBuilder {
             &mut self.ext,
             &self.context,
             Arc::new(self.fees_config.clone()),
-            Arc::clone(&self.promise_results),
             result_state,
             self.memory.clone(),
         ))
@@ -59,7 +55,6 @@ impl VMLogicBuilder {
             fees_config: RuntimeFeesConfig::free(),
             ext: MockedExternal::default(),
             memory: MockedMemory::default(),
-            promise_results: [].into(),
             context: get_context(),
         }
     }
@@ -71,7 +66,9 @@ fn get_context() -> VMContext {
         signer_account_id: "bob.near".parse().unwrap(),
         signer_account_pk: vec![0, 1, 2, 3, 4],
         predecessor_account_id: "carol.near".parse().unwrap(),
+        method: "VMLogicBuilder::method_not_specified".into(),
         input: vec![0, 1, 2, 3, 4],
+        promise_results: vec![].into(),
         block_height: 10,
         block_timestamp: 42,
         epoch_height: 1,
diff --git a/runtime/near-vm-runner/src/near_vm_runner/runner.rs b/runtime/near-vm-runner/src/near_vm_runner/runner.rs
index 9898b798110..aa8738ae292 100644
--- a/runtime/near-vm-runner/src/near_vm_runner/runner.rs
+++ b/runtime/near-vm-runner/src/near_vm_runner/runner.rs
@@ -5,7 +5,6 @@ use crate::logic::errors::{
     CacheError, CompilationError, FunctionCallError, MethodResolveError, VMRunnerError, WasmTrap,
 };
 use crate::logic::gas_counter::FastGasCounter;
-use crate::logic::types::PromiseResult;
 use crate::logic::{Config, ExecutionResultState, External, VMContext, VMLogic, VMOutcome};
 use crate::near_vm_runner::{NearVmCompiler, NearVmEngine};
 use crate::runner::VMResult;
@@ -210,17 +209,12 @@ impl NearVM {
         skip_all
     )]
     fn with_compiled_and_loaded(
-        &self,
+        self: Box<Self>,
         cache: &dyn ContractRuntimeCache,
-        ext: &mut dyn External,
+        ext: &dyn External,
         context: &VMContext,
-        method_name: &str,
-        closure: impl FnOnce(
-            ExecutionResultState,
-            &mut dyn External,
-            &VMArtifact,
-        ) -> Result<VMOutcome, VMRunnerError>,
-    ) -> VMResult {
+        closure: impl FnOnce(ExecutionResultState, &VMArtifact, Box<Self>) -> VMResult<PreparedContract>,
+    ) -> VMResult<PreparedContract> {
         // (wasm code size, compilation result)
         type MemoryCacheType = (u64, Result<VMArtifact, CompilationError>);
         let to_any = |v: MemoryCacheType| -> Box<dyn std::any::Any + Send> { Box::new(v) };
@@ -307,19 +301,22 @@ impl NearVM {
         crate::metrics::record_compiled_contract_cache_lookup(is_cache_hit);
         let mut result_state = ExecutionResultState::new(&context, Arc::clone(&self.config));
-        let result = result_state.before_loading_executable(method_name, wasm_bytes);
+        let result = result_state.before_loading_executable(&context.method, wasm_bytes);
         if let Err(e) = result {
-            return Ok(VMOutcome::abort(result_state, e));
+            return Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)));
         }
         match artifact_result {
             Ok(artifact) => {
                 let result = result_state.after_loading_executable(wasm_bytes);
                 if let Err(e) = result {
-                    return Ok(VMOutcome::abort(result_state, e));
+                    return Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)));
                 }
-                closure(result_state, ext, &artifact)
+                closure(result_state, &artifact, self)
             }
-            Err(e) => Ok(VMOutcome::abort(result_state, FunctionCallError::CompilationError(e))),
+            Err(e) => Ok(PreparedContract::Outcome(VMOutcome::abort(
+                result_state,
+                FunctionCallError::CompilationError(e),
+            ))),
         }
     }
@@ -575,53 +572,37 @@ impl<'a> finite_wasm::wasmparser::VisitOperator<'a> for GasCostCfg {
 }
 impl crate::runner::VM for NearVM {
-    fn run(
-        &self,
-        method_name: &str,
-        ext: &mut dyn External,
+    fn prepare(
+        self: Box<Self>,
+        ext: &dyn External,
         context: &VMContext,
-        fees_config: Arc<RuntimeFeesConfig>,
-        promise_results: Arc<[PromiseResult]>,
         cache: Option<&dyn ContractRuntimeCache>,
-    ) -> Result<VMOutcome, VMRunnerError> {
+    ) -> Box<dyn crate::PreparedContract> {
         let cache = cache.unwrap_or(&NoContractRuntimeCache);
-        self.with_compiled_and_loaded(
-            cache,
-            ext,
-            context,
-            method_name,
-            |result_state, ext, artifact| {
+        let prepd =
+            self.with_compiled_and_loaded(cache, ext, context, |result_state, artifact, vm| {
                 let memory = NearVmMemory::new(
-                    self.config.limit_config.initial_memory_pages,
-                    self.config.limit_config.max_memory_pages,
+                    vm.config.limit_config.initial_memory_pages,
+                    vm.config.limit_config.max_memory_pages,
                 )
                 .expect("Cannot create memory for a contract call");
-                // FIXME: this mostly duplicates the `run_module` method.
-                // Note that we don't clone the actual backing memory, just increase the RC.
-                let vmmemory = memory.vm();
-                let mut logic =
-                    VMLogic::new(ext, context, fees_config, promise_results, result_state, memory);
-                let import = build_imports(
-                    vmmemory,
-                    &mut logic,
-                    Arc::clone(&self.config),
-                    artifact.engine(),
-                );
-                let entrypoint = match get_entrypoint_index(&*artifact, method_name) {
+                let entrypoint = match get_entrypoint_index(&*artifact, &context.method) {
                     Ok(index) => index,
                     Err(e) => {
-                        return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol(
-                            logic.result_state,
-                            e,
+                        return Ok(PreparedContract::Outcome(
+                            VMOutcome::abort_but_nop_outcome_in_old_protocol(result_state, e),
                         ))
                     }
                 };
-                match self.run_method(&artifact, import, entrypoint)? {
-                    Ok(()) => Ok(VMOutcome::ok(logic.result_state)),
-                    Err(err) => Ok(VMOutcome::abort(logic.result_state, err)),
-                }
-            },
-        )
+                Ok(PreparedContract::Ready(ReadyContract {
+                    memory,
+                    result_state,
+                    entrypoint,
+                    artifact: Arc::clone(artifact),
+                    vm,
+                }))
+            });
+        Box::new(prepd)
     }
     fn precompile(
@@ -638,6 +619,42 @@ impl crate::runner::VM for NearVM {
     }
 }
+struct ReadyContract {
+    memory: NearVmMemory,
+    result_state: ExecutionResultState,
+    entrypoint: FunctionIndex,
+    artifact: VMArtifact,
+    vm: Box<NearVM>,
+}
+
+#[allow(clippy::large_enum_variant)]
+enum PreparedContract {
+    Outcome(VMOutcome),
+    Ready(ReadyContract),
+}
+
+impl crate::PreparedContract for VMResult<PreparedContract> {
+    fn run(
+        self: Box<Self>,
+        ext: &mut dyn External,
+        context: &VMContext,
+        fees_config: Arc<RuntimeFeesConfig>,
+    ) -> VMResult {
+        let ReadyContract { memory, result_state, entrypoint, artifact, vm } = match (*self)? {
+            PreparedContract::Outcome(outcome) => return Ok(outcome),
+            PreparedContract::Ready(r) => r,
+        };
+        let config = Arc::clone(&result_state.config);
+        let vmmemory = memory.vm();
+        let mut logic = VMLogic::new(ext, context, fees_config, result_state, memory);
+        let import = build_imports(vmmemory, &mut logic, config, artifact.engine());
+        match vm.run_method(&artifact, import, entrypoint)? {
+            Ok(()) => Ok(VMOutcome::ok(logic.result_state)),
+            Err(err) => Ok(VMOutcome::abort(logic.result_state, err)),
+        }
+    }
+}
+
 pub(crate) struct NearVmImports<'engine, 'vmlogic, 'vmlogic_refs> {
     pub(crate) memory: VMMemory,
     config: Arc<Config>,
diff --git a/runtime/near-vm-runner/src/runner.rs b/runtime/near-vm-runner/src/runner.rs
index e28298b11db..3f264658677 100644
--- a/runtime/near-vm-runner/src/runner.rs
+++ b/runtime/near-vm-runner/src/runner.rs
@@ -1,6 +1,5 @@
 use crate::errors::ContractPrecompilatonResult;
 use crate::logic::errors::{CacheError, CompilationError, VMRunnerError};
-use crate::logic::types::PromiseResult;
 use crate::logic::{External, VMContext, VMOutcome};
 use crate::{ContractCode, ContractRuntimeCache};
 use near_parameters::vm::{Config, VMKind};
@@ -24,6 +23,26 @@ use std::sync::Arc;
 /// validators, even when a guest error occurs, or else their state will diverge.
 pub(crate) type VMResult<T = VMOutcome> = Result<T, VMRunnerError>;
+#[tracing::instrument(target = "vm", level = "debug", "prepare", skip_all, fields(
+    code.hash = %ext.code_hash(),
+    method_name,
+    vm_kind = ?wasm_config.vm_kind,
+    burnt_gas = tracing::field::Empty,
+    compute_usage = tracing::field::Empty,
+))]
+pub fn prepare(
+    ext: &(dyn External + Send),
+    context: &VMContext,
+    wasm_config: Arc<Config>,
+    cache: Option<&dyn ContractRuntimeCache>,
+) -> Box<dyn PreparedContract> {
+    let vm_kind = wasm_config.vm_kind;
+    let runtime = vm_kind
+        .runtime(wasm_config)
+        .unwrap_or_else(|| panic!("the {vm_kind:?} runtime has not been enabled at compile time"));
+    runtime.prepare(ext, context, cache)
+}
+
 /// Validate and run the specified contract.
 ///
 /// This is the entry point for executing a NEAR protocol contract. Before the
@@ -48,20 +67,15 @@ pub(crate) type VMResult<T = VMOutcome> = Result<T, VMRunnerError>;
     compute_usage = tracing::field::Empty,
 ))]
 pub fn run(
-    method_name: &str,
     ext: &mut (dyn External + Send),
     context: &VMContext,
     wasm_config: Arc<Config>,
    fees_config: Arc<RuntimeFeesConfig>,
-    promise_results: std::sync::Arc<[PromiseResult]>,
     cache: Option<&dyn ContractRuntimeCache>,
 ) -> VMResult {
     let span = tracing::Span::current();
-    let vm_kind = wasm_config.vm_kind;
-    let runtime = vm_kind
-        .runtime(wasm_config)
-        .unwrap_or_else(|| panic!("the {vm_kind:?} runtime has not been enabled at compile time"));
-    let outcome = runtime.run(method_name, ext, context, fees_config, promise_results, cache);
+    let prepared = prepare(ext, context, wasm_config, cache);
+    let outcome = prepared.run(ext, context, fees_config);
     let outcome = match outcome {
         Ok(o) => o,
         e @ Err(_) => return e,
@@ -72,30 +86,38 @@ pub fn run(
     Ok(outcome)
 }
-pub trait VM {
-    /// Validate and run the specified contract.
+pub trait PreparedContract: Send {
+    /// Run the prepared contract.
     ///
-    /// This is the entry point for executing a NEAR protocol contract. Before
-    /// the entry point (as specified by the `method_name` argument) of the
-    /// contract code is executed, the contract will be validated (see
-    /// [`crate::prepare::prepare_contract`]), instrumented (e.g. for gas
-    /// accounting), and linked with the externs specified via the `ext`
-    /// argument.
+    /// This is the entry point for executing a NEAR protocol contract. The entry point (as
+    /// specified by the `VMContext::method` argument) of the contract code is executed.
     ///
-    /// [`VMContext::input`] will be passed to the contract entrypoint as an
-    /// argument.
-    ///
-    /// The gas cost for contract preparation will be subtracted by the VM
-    /// implementation.
+    /// [`VMContext::input`] will be made available to the contract.
     fn run(
-        &self,
-        method_name: &str,
+        self: Box<Self>,
         ext: &mut dyn External,
         context: &VMContext,
         fees_config: Arc<RuntimeFeesConfig>,
-        promise_results: std::sync::Arc<[PromiseResult]>,
-        cache: Option<&dyn ContractRuntimeCache>,
     ) -> VMResult;
+}
+
+pub trait VM {
+    /// Prepare a contract for execution.
+    ///
+    /// Work that goes into the preparation is runtime implementation specific, and depending on
+    /// the runtime may not do anything at all (and instead prepare everything when the contract is
+    /// `run`.)
+    ///
+    /// ## Return
+    ///
+    /// This method does not report any errors. If the contract is invalid in any way, the errors
+    /// will be reported when the returned value is `run`.
+    fn prepare(
+        self: Box<Self>,
+        ext: &dyn External,
+        context: &VMContext,
+        cache: Option<&dyn ContractRuntimeCache>,
+    ) -> Box<dyn PreparedContract>;
     /// Precompile a WASM contract to a VM specific format and store the result
     /// into the `cache`.
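The trait split above is the heart of this change: `VM::prepare` consumes the runtime and does all fallible work up front, while `PreparedContract::run` is where any error finally surfaces. A crate-internal sketch of the resulting call sequence (essentially what the new top-level `run` shim does; the function name is hypothetical):

// Sketch only, inside near-vm-runner: the two phases are now separable, and
// since the boxed value is `Send`, preparation could even happen on another
// thread before execution.
fn prepare_then_run(
    ext: &mut (dyn External + Send),
    context: &VMContext,
    wasm_config: Arc<Config>,
    fees_config: Arc<RuntimeFeesConfig>,
    cache: Option<&dyn ContractRuntimeCache>,
) -> VMResult {
    // A shared borrow of the externs is enough here; per the trait docs,
    // `prepare` reports nothing, even for an invalid contract.
    let prepared = prepare(ext, context, wasm_config, cache);
    // The exclusive borrow is only needed for actual execution, and any
    // validation or compilation error is reported from this call.
    prepared.run(ext, context, fees_config)
}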
diff --git a/runtime/near-vm-runner/src/tests.rs b/runtime/near-vm-runner/src/tests.rs
index 8747da037d2..a6f5835a718 100644
--- a/runtime/near-vm-runner/src/tests.rs
+++ b/runtime/near-vm-runner/src/tests.rs
@@ -52,13 +52,15 @@ pub(crate) fn with_vm_variants(
     }
 }
-fn create_context(input: Vec<u8>) -> VMContext {
+fn create_context(method: &str, input: Vec<u8>) -> VMContext {
     VMContext {
         current_account_id: CURRENT_ACCOUNT_ID.parse().unwrap(),
         signer_account_id: SIGNER_ACCOUNT_ID.parse().unwrap(),
         signer_account_pk: Vec::from(&SIGNER_ACCOUNT_PK[..]),
         predecessor_account_id: PREDECESSOR_ACCOUNT_ID.parse().unwrap(),
+        method: method.into(),
         input,
+        promise_results: Vec::new().into(),
         block_height: 10,
         block_timestamp: 42,
         epoch_height: 1,
diff --git a/runtime/near-vm-runner/src/tests/cache.rs b/runtime/near-vm-runner/src/tests/cache.rs
index 071ab443f52..1192ffa5315 100644
--- a/runtime/near-vm-runner/src/tests/cache.rs
+++ b/runtime/near-vm-runner/src/tests/cache.rs
@@ -122,12 +122,11 @@ fn make_cached_contract_call_vm(
         MockedExternal::new()
     };
     fake_external.code_hash = code_hash;
-    let mut context = create_context(vec![]);
+    let mut context = create_context(method_name, vec![]);
     let fees = Arc::new(RuntimeFeesConfig::test());
-    let promise_results = [].into();
     context.prepaid_gas = prepaid_gas;
     let runtime = vm_kind.runtime(config).expect("runtime has not been compiled");
-    runtime.run(method_name, &mut fake_external, &context, fees, promise_results, Some(cache))
+    runtime.prepare(&fake_external, &context, Some(cache)).run(&mut fake_external, &context, fees)
 }
 #[test]
diff --git a/runtime/near-vm-runner/src/tests/fuzzers.rs b/runtime/near-vm-runner/src/tests/fuzzers.rs
index 71a2819477b..95c43e23343 100644
--- a/runtime/near-vm-runner/src/tests/fuzzers.rs
+++ b/runtime/near-vm-runner/src/tests/fuzzers.rs
@@ -39,13 +39,15 @@ pub fn find_entry_point(contract: &ContractCode) -> Option<String> {
     None
 }
-pub fn create_context(input: Vec<u8>) -> VMContext {
+pub fn create_context(method: &str, input: Vec<u8>) -> VMContext {
     VMContext {
         current_account_id: "alice".parse().unwrap(),
         signer_account_id: "bob".parse().unwrap(),
         signer_account_pk: vec![0, 1, 2, 3, 4],
         predecessor_account_id: "carol".parse().unwrap(),
+        method: method.into(),
         input,
+        promise_results: Vec::new().into(),
         block_height: 10,
         block_timestamp: 42,
         epoch_height: 1,
@@ -108,7 +110,8 @@ impl fmt::Debug for ArbitraryModule {
 fn run_fuzz(code: &ContractCode, vm_kind: VMKind) -> VMResult {
     let mut fake_external = MockedExternal::with_code(code.clone_for_tests());
-    let mut context = create_context(vec![]);
+    let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string());
+    let mut context = create_context(&method_name, vec![]);
     context.prepaid_gas = 10u64.pow(14);
     let mut config = test_vm_config();
@@ -116,16 +119,11 @@ fn run_fuzz(code: &ContractCode, vm_kind: VMKind) -> VMResult {
     config.limit_config.contract_prepare_version = ContractPrepareVersion::V2;
     let fees = Arc::new(RuntimeFeesConfig::test());
-    let promise_results = [].into();
-    let method_name = find_entry_point(code).unwrap_or_else(|| "main".to_string());
-    let mut res = vm_kind.runtime(config.into()).unwrap().run(
-        &method_name,
-        &mut fake_external,
-        &context,
-        Arc::clone(&fees),
-        promise_results,
-        None,
-    );
+    let mut res = vm_kind
+        .runtime(config.into())
+        .unwrap()
+        .prepare(&fake_external, &context, None)
+        .run(&mut fake_external, &context, Arc::clone(&fees));
     // Remove the VMError message details as they can differ between runtimes
     // TODO: maybe there's
actually things we could check for equality here too? diff --git a/runtime/near-vm-runner/src/tests/rs_contract.rs b/runtime/near-vm-runner/src/tests/rs_contract.rs index 28becc703ce..6129c45d950 100644 --- a/runtime/near-vm-runner/src/tests/rs_contract.rs +++ b/runtime/near-vm-runner/src/tests/rs_contract.rs @@ -55,28 +55,22 @@ pub fn test_read_write() { with_vm_variants(&config, |vm_kind: VMKind| { let code = test_contract(vm_kind); let mut fake_external = MockedExternal::with_code(code); - let context = create_context(encode(&[10u64, 20u64])); + let context = create_context("write_key_value", encode(&[10u64, 20u64])); - let promise_results = [].into(); let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); - let result = runtime.run( - "write_key_value", + let result = runtime.prepare(&fake_external, &context, None).run( &mut fake_external, &context, Arc::clone(&fees), - Arc::clone(&promise_results), - None, ); assert_run_result(result, 0); - let context = create_context(encode(&[10u64])); - let result = runtime.run( - "read_value", + let context = create_context("read_value", encode(&[10u64])); + let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); + let result = runtime.prepare(&fake_external, &context, None).run( &mut fake_external, &context, Arc::clone(&fees), - promise_results, - None, ); assert_run_result(result, 20); }); @@ -125,11 +119,12 @@ fn run_test_ext( fake_external.validators = validators.into_iter().map(|(s, b)| (s.parse().unwrap(), b)).collect(); let fees = Arc::new(RuntimeFeesConfig::test()); - let context = create_context(input.to_vec()); + let context = create_context(method, input.to_vec()); let runtime = vm_kind.runtime(config).expect("runtime has not been compiled"); let outcome = runtime - .run(method, &mut fake_external, &context, Arc::clone(&fees), [].into(), None) + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, Arc::clone(&fees)) .unwrap_or_else(|err| panic!("Failed execution: {:?}", err)); assert_eq!(outcome.profile.action_gas(), 0); @@ -230,12 +225,12 @@ pub fn test_out_of_memory() { let code = test_contract(vm_kind); let mut fake_external = MockedExternal::with_code(code); - let context = create_context(Vec::new()); + let context = create_context("out_of_memory", Vec::new()); let fees = Arc::new(RuntimeFeesConfig::free()); let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); - let promise_results = [].into(); let result = runtime - .run("out_of_memory", &mut fake_external, &context, fees, promise_results, None) + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, fees) .expect("execution failed"); assert_eq!( result.aborted, @@ -254,7 +249,7 @@ fn function_call_weight_contract() -> ContractCode { #[test] fn attach_unspent_gas_but_use_all_gas() { - let mut context = create_context(vec![]); + let mut context = create_context("attach_unspent_gas_but_use_all_gas", vec![]); context.prepaid_gas = 100 * 10u64.pow(12); let mut config = test_vm_config(); @@ -268,14 +263,8 @@ fn attach_unspent_gas_but_use_all_gas() { let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); let outcome = runtime - .run( - "attach_unspent_gas_but_use_all_gas", - &mut external, - &context, - fees, - [].into(), - None, - ) + .prepare(&external, &context, None) + .run(&mut external, &context, fees) .unwrap_or_else(|err| panic!("Failed execution: {:?}", err)); let err = outcome.aborted.as_ref().unwrap(); 
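One consequence visible in the contract tests above: `prepare` takes `self: Box<Self>` and therefore consumes the runtime, which is why the tests construct a fresh runtime before each contract call instead of reusing one. In sketch form, mirroring the test code (the usual test fixtures `fake_external`, `context`, and `fees` are assumed to be in scope):

// Sketch: each prepare/run cycle consumes the boxed runtime.
let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled");
let result = runtime
    .prepare(&fake_external, &context, None) // moves `runtime`
    .run(&mut fake_external, &context, Arc::clone(&fees));
// A second call needs a second runtime:
let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled");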
diff --git a/runtime/near-vm-runner/src/tests/test_builder.rs b/runtime/near-vm-runner/src/tests/test_builder.rs index 2e099f2358e..dbd9312b818 100644 --- a/runtime/near-vm-runner/src/tests/test_builder.rs +++ b/runtime/near-vm-runner/src/tests/test_builder.rs @@ -16,6 +16,8 @@ pub(crate) fn test_builder() -> TestBuilder { signer_account_pk: vec![0, 1, 2], predecessor_account_id: "carol".parse().unwrap(), input: Vec::new(), + promise_results: Vec::new().into(), + method: "main".into(), block_height: 10, block_timestamp: 42, epoch_height: 1, @@ -37,7 +39,6 @@ pub(crate) fn test_builder() -> TestBuilder { TestBuilder { code: ContractCode::new(Vec::new(), None), context, - method: "main".to_string(), protocol_versions: vec![u32::MAX], skip, opaque_error: false, @@ -49,7 +50,6 @@ pub(crate) struct TestBuilder { code: ContractCode, context: VMContext, protocol_versions: Vec, - method: String, skip: HashSet, opaque_error: bool, opaque_outcome: bool, @@ -74,7 +74,7 @@ impl TestBuilder { } pub(crate) fn method(mut self, method: &str) -> Self { - self.method = method.to_string(); + self.context.method = method.to_string(); self } @@ -217,14 +217,13 @@ impl TestBuilder { let fees = Arc::new(RuntimeFeesConfig::test()); let context = self.context.clone(); - let promise_results = [].into(); - let Some(runtime) = vm_kind.runtime(config) else { panic!("runtime for {:?} has not been compiled", vm_kind); }; println!("Running {:?} for protocol version {}", vm_kind, protocol_version); let outcome = runtime - .run(&self.method, &mut fake_external, &context, fees, promise_results, None) + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, fees) .expect("execution failed"); let mut got = String::new(); diff --git a/runtime/near-vm-runner/src/tests/ts_contract.rs b/runtime/near-vm-runner/src/tests/ts_contract.rs index 6a317c88c20..067a74537de 100644 --- a/runtime/near-vm-runner/src/tests/ts_contract.rs +++ b/runtime/near-vm-runner/src/tests/ts_contract.rs @@ -16,19 +16,15 @@ pub fn test_ts_contract() { with_vm_variants(&config, |vm_kind: VMKind| { let code = ContractCode::new(near_test_contracts::ts_contract().to_vec(), None); let mut fake_external = MockedExternal::with_code(code); - let context = create_context(Vec::new()); + let context = create_context("try_panic", Vec::new()); let fees = Arc::new(RuntimeFeesConfig::test()); // Call method that panics. - let promise_results = [].into(); let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); - let result = runtime.run( - "try_panic", + let result = runtime.prepare(&fake_external, &context, None).run( &mut fake_external, &context, Arc::clone(&fees), - Arc::clone(&promise_results), - None, ); let outcome = result.expect("execution failed"); assert_eq!( @@ -39,16 +35,11 @@ pub fn test_ts_contract() { ); // Call method that writes something into storage. - let context = create_context(b"foo bar".to_vec()); + let context = create_context("try_storage_write", b"foo bar".to_vec()); + let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); runtime - .run( - "try_storage_write", - &mut fake_external, - &context, - Arc::clone(&fees), - Arc::clone(&promise_results), - None, - ) + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, Arc::clone(&fees)) .expect("bad failure"); // Verify by looking directly into the storage of the host. { @@ -60,16 +51,11 @@ pub fn test_ts_contract() { } // Call method that reads the value from storage using registers. 
- let context = create_context(b"foo".to_vec()); + let context = create_context("try_storage_read", b"foo".to_vec()); + let runtime = vm_kind.runtime(config.clone()).expect("runtime has not been compiled"); let outcome = runtime - .run( - "try_storage_read", - &mut fake_external, - &context, - Arc::clone(&fees), - Arc::clone(&promise_results), - None, - ) + .prepare(&fake_external, &context, None) + .run(&mut fake_external, &context, Arc::clone(&fees)) .expect("execution failed"); if let ReturnData::Value(value) = outcome.return_data { diff --git a/runtime/near-vm-runner/src/wasmer2_runner.rs b/runtime/near-vm-runner/src/wasmer2_runner.rs index 3706b9d7ce0..bfef3fb8a6d 100644 --- a/runtime/near-vm-runner/src/wasmer2_runner.rs +++ b/runtime/near-vm-runner/src/wasmer2_runner.rs @@ -4,7 +4,6 @@ use crate::logic::errors::{ CacheError, CompilationError, FunctionCallError, MethodResolveError, VMRunnerError, WasmTrap, }; use crate::logic::gas_counter::FastGasCounter; -use crate::logic::types::PromiseResult; use crate::logic::{ Config, ExecutionResultState, External, MemSlice, MemoryLike, VMContext, VMLogic, VMOutcome, }; @@ -564,43 +563,58 @@ impl wasmer_vm::Tunables for &Wasmer2VM { } impl crate::runner::VM for Wasmer2VM { - fn run( + fn precompile( &self, - method_name: &str, - ext: &mut dyn External, + code: &ContractCode, + cache: &dyn ContractRuntimeCache, + ) -> Result< + Result, + crate::logic::errors::CacheError, + > { + Ok(self + .compile_and_cache(code, Some(cache))? + .map(|_| ContractPrecompilatonResult::ContractCompiled)) + } + + fn prepare( + self: Box, + ext: &dyn External, context: &VMContext, - fees_config: Arc, - promise_results: Arc<[PromiseResult]>, cache: Option<&dyn ContractRuntimeCache>, - ) -> Result { + ) -> Box { + type Result = VMResult; let Some(code) = ext.get_contract() else { - return Err(VMRunnerError::ContractCodeNotPresent); + return Box::new(Result::Err(VMRunnerError::ContractCodeNotPresent)); }; let mut result_state = ExecutionResultState::new(&context, Arc::clone(&self.config)); - - let result = result_state.before_loading_executable(method_name, code.code().len() as u64); + let result = + result_state.before_loading_executable(&context.method, code.code().len() as u64); if let Err(e) = result { - return Ok(VMOutcome::abort(result_state, e)); + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)))); } - - let artifact = self.compile_and_load(&code, cache)?; - let artifact = match artifact { - Ok(it) => it, - Err(err) => { - return Ok(VMOutcome::abort( + let artifact = match self.compile_and_load(&code, cache) { + Ok(Ok(it)) => it, + Ok(Err(err)) => { + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort( result_state, FunctionCallError::CompilationError(err), - )); + )))); + } + Err(err) => { + return Box::new(Result::Err(err)); } }; - let result = result_state.after_loading_executable(code.code().len() as u64); if let Err(e) = result { - return Ok(VMOutcome::abort(result_state, e)); + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)))); } - let entrypoint = match get_entrypoint_index(&*artifact, method_name) { + let entrypoint = match get_entrypoint_index(&*artifact, &context.method) { Ok(index) => index, - Err(e) => return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol(result_state, e)), + Err(e) => { + return Box::new(Ok(PreparedContract::Outcome( + VMOutcome::abort_but_nop_outcome_in_old_protocol(result_state, e), + ))) + } }; let memory = Wasmer2Memory::new( @@ -608,31 +622,51 @@ impl 
crate::runner::VM for Wasmer2VM { self.config.limit_config.max_memory_pages, ) .expect("Cannot create memory for a contract call"); + Box::new(Ok(PreparedContract::Ready(ReadyContract { + vm: self, + memory, + result_state, + entrypoint, + artifact, + }))) + } +} + +struct ReadyContract { + vm: Box, + memory: Wasmer2Memory, + result_state: ExecutionResultState, + entrypoint: FunctionIndex, + artifact: VMArtifact, +} + +#[allow(clippy::large_enum_variant)] +enum PreparedContract { + Outcome(VMOutcome), + Ready(ReadyContract), +} + +impl crate::PreparedContract for VMResult { + fn run( + self: Box, + ext: &mut dyn External, + context: &VMContext, + fees_config: Arc, + ) -> VMResult { + let ReadyContract { vm, memory, result_state, entrypoint, artifact } = match (*self)? { + PreparedContract::Outcome(outcome) => return Ok(outcome), + PreparedContract::Ready(r) => r, + }; // FIXME: this mostly duplicates the `run_module` method. // Note that we don't clone the actual backing memory, just increase the RC. let vmmemory = memory.vm(); - let mut logic = - VMLogic::new(ext, context, fees_config, promise_results, result_state, memory); - let import = - build_imports(vmmemory, &mut logic, Arc::clone(&self.config), artifact.engine()); - match self.run_method(&artifact, import, entrypoint)? { + let mut logic = VMLogic::new(ext, context, fees_config, result_state, memory); + let import = build_imports(vmmemory, &mut logic, Arc::clone(&vm.config), artifact.engine()); + match vm.run_method(&artifact, import, entrypoint)? { Ok(()) => Ok(VMOutcome::ok(logic.result_state)), Err(err) => Ok(VMOutcome::abort(logic.result_state, err)), } } - - fn precompile( - &self, - code: &ContractCode, - cache: &dyn ContractRuntimeCache, - ) -> Result< - Result, - crate::logic::errors::CacheError, - > { - Ok(self - .compile_and_cache(code, Some(cache))? - .map(|_| ContractPrecompilatonResult::ContractCompiled)) - } } pub(crate) struct Wasmer2Imports<'engine, 'vmlogic, 'vmlogic_refs> { diff --git a/runtime/near-vm-runner/src/wasmer_runner.rs b/runtime/near-vm-runner/src/wasmer_runner.rs index 7ee8889e066..ef666cf6917 100644 --- a/runtime/near-vm-runner/src/wasmer_runner.rs +++ b/runtime/near-vm-runner/src/wasmer_runner.rs @@ -3,7 +3,6 @@ use crate::errors::ContractPrecompilatonResult; use crate::logic::errors::{ CacheError, CompilationError, FunctionCallError, MethodResolveError, VMRunnerError, WasmTrap, }; -use crate::logic::types::PromiseResult; use crate::logic::{ExecutionResultState, External, VMContext, VMLogic, VMLogicError, VMOutcome}; use crate::logic::{MemSlice, MemoryLike}; use crate::prepare; @@ -415,17 +414,28 @@ impl Wasmer0VM { } impl crate::runner::VM for Wasmer0VM { - fn run( + fn precompile( &self, - method_name: &str, - ext: &mut dyn External, + code: &ContractCode, + cache: &dyn ContractRuntimeCache, + ) -> Result< + Result, + crate::logic::errors::CacheError, + > { + Ok(self + .compile_and_cache(code, Some(cache))? + .map(|_| ContractPrecompilatonResult::ContractCompiled)) + } + + fn prepare( + self: Box, + ext: &dyn External, context: &VMContext, - fees_config: Arc, - promise_results: std::sync::Arc<[PromiseResult]>, cache: Option<&dyn ContractRuntimeCache>, - ) -> Result { + ) -> Box { + type Result = VMResult; let Some(code) = ext.get_contract() else { - return Err(VMRunnerError::ContractCodeNotPresent); + return Box::new(Result::Err(VMRunnerError::ContractCodeNotPresent)); }; if !cfg!(target_arch = "x86") && !cfg!(target_arch = "x86_64") { // TODO(#1940): Remove once NaN is standardized by the VM. 
@@ -439,17 +449,16 @@ impl crate::runner::VM for Wasmer0VM { panic!("AVX support is required in order to run Wasmer VM Singlepass backend."); } - let mut execution_state = ExecutionResultState::new(&context, Arc::clone(&self.config)); + let mut result_state = ExecutionResultState::new(&context, Arc::clone(&self.config)); let result = - execution_state.before_loading_executable(method_name, code.code().len() as u64); + result_state.before_loading_executable(&context.method, code.code().len() as u64); if let Err(e) = result { - return Ok(VMOutcome::abort(execution_state, e)); + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)))); } // TODO: consider using get_module() here, once we'll go via deployment path. - let module = self.compile_and_load(&code, cache)?; - let module = match module { - Ok(x) => x, + let module = match self.compile_and_load(&code, cache) { + Ok(Ok(x)) => x, // Note on backwards-compatibility: This error used to be an error // without result, later refactored to NOP outcome. Now this returns // an actual outcome, including gas costs that occurred before this @@ -457,49 +466,71 @@ impl crate::runner::VM for Wasmer0VM { // version do not have gas costs before reaching this code. (Also // see `test_old_fn_loading_behavior_preserved` for a test that // verifies future changes do not counteract this assumption.) - Err(err) => { - return Ok(VMOutcome::abort( - execution_state, + Ok(Err(err)) => { + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort( + result_state, FunctionCallError::CompilationError(err), - )) + )))) } + Err(err) => return Box::new(Result::Err(err)), }; - let result = execution_state.after_loading_executable(code.code().len() as u64); + let result = result_state.after_loading_executable(code.code().len() as u64); if let Err(e) = result { - return Ok(VMOutcome::abort(execution_state, e)); + return Box::new(Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e)))); } - if let Err(e) = check_method(&module, method_name) { - return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol(execution_state, e)); + if let Err(e) = check_method(&module, &context.method) { + return Box::new(Ok(PreparedContract::Outcome( + VMOutcome::abort_but_nop_outcome_in_old_protocol(result_state, e), + ))); } let memory = WasmerMemory::new( self.config.limit_config.initial_memory_pages, self.config.limit_config.max_memory_pages, ); + Box::new(Ok(PreparedContract::Ready(ReadyContract { + vm: self, + memory, + result_state, + module, + }))) + } +} + +struct ReadyContract { + vm: Box, + memory: WasmerMemory, + result_state: ExecutionResultState, + module: Module, +} + +#[allow(clippy::large_enum_variant)] +enum PreparedContract { + Outcome(VMOutcome), + Ready(ReadyContract), +} + +impl crate::PreparedContract for VMResult { + fn run( + self: Box, + ext: &mut dyn External, + context: &VMContext, + fees_config: Arc, + ) -> Result { + let ReadyContract { vm, memory, result_state, module } = match (*self)? { + PreparedContract::Outcome(outcome) => return Ok(outcome), + PreparedContract::Ready(r) => r, + }; // Note that we don't clone the actual backing memory, just increase the RC. let memory_copy = memory.clone(); - let mut logic = - VMLogic::new(ext, context, fees_config, promise_results, execution_state, memory); - let import_object = build_imports(memory_copy, &self.config, &mut logic); - match run_method(&module, &import_object, method_name)? 
{ + let mut logic = VMLogic::new(ext, context, fees_config, result_state, memory); + let import_object = build_imports(memory_copy, &vm.config, &mut logic); + match run_method(&module, &import_object, &context.method)? { Ok(()) => Ok(VMOutcome::ok(logic.result_state)), Err(err) => Ok(VMOutcome::abort(logic.result_state, err)), } } - - fn precompile( - &self, - code: &ContractCode, - cache: &dyn ContractRuntimeCache, - ) -> Result< - Result, - crate::logic::errors::CacheError, - > { - Ok(self - .compile_and_cache(code, Some(cache))? - .map(|_| ContractPrecompilatonResult::ContractCompiled)) - } } #[derive(Clone, Copy)] diff --git a/runtime/near-vm-runner/src/wasmtime_runner.rs b/runtime/near-vm-runner/src/wasmtime_runner.rs index 9a3ada2e0a1..ef79ed89558 100644 --- a/runtime/near-vm-runner/src/wasmtime_runner.rs +++ b/runtime/near-vm-runner/src/wasmtime_runner.rs @@ -3,7 +3,6 @@ use crate::logic::errors::{ CacheError, CompilationError, FunctionCallError, MethodResolveError, PrepareError, VMLogicError, VMRunnerError, WasmTrap, }; -use crate::logic::types::PromiseResult; use crate::logic::{Config, ExecutionResultState}; use crate::logic::{External, MemSlice, MemoryLike, VMContext, VMLogic, VMOutcome}; use crate::runner::VMResult; @@ -187,15 +186,10 @@ impl WasmtimeVM { fn with_compiled_and_loaded( &self, cache: &dyn ContractRuntimeCache, - ext: &mut dyn External, + ext: &dyn External, context: &VMContext, - method_name: &str, - closure: impl FnOnce( - ExecutionResultState, - &mut dyn External, - Module, - ) -> Result, - ) -> VMResult { + closure: impl FnOnce(ExecutionResultState, Module) -> VMResult, + ) -> VMResult { let code_hash = ext.code_hash(); type MemoryCacheType = (u64, Result); let to_any = |v: MemoryCacheType| -> Box { Box::new(v) }; @@ -253,123 +247,146 @@ impl WasmtimeVM { )?; let mut result_state = ExecutionResultState::new(&context, Arc::clone(&self.config)); - let result = result_state.before_loading_executable(method_name, wasm_bytes); + let result = result_state.before_loading_executable(&context.method, wasm_bytes); if let Err(e) = result { - return Ok(VMOutcome::abort(result_state, e)); + return Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e))); } match module_result { Ok(module) => { let result = result_state.after_loading_executable(wasm_bytes); if let Err(e) = result { - return Ok(VMOutcome::abort(result_state, e)); + return Ok(PreparedContract::Outcome(VMOutcome::abort(result_state, e))); } - closure(result_state, ext, module) + closure(result_state, module) } - Err(e) => Ok(VMOutcome::abort(result_state, FunctionCallError::CompilationError(e))), + Err(e) => Ok(PreparedContract::Outcome(VMOutcome::abort( + result_state, + FunctionCallError::CompilationError(e), + ))), } } } impl crate::runner::VM for WasmtimeVM { - fn run( + fn precompile( &self, - method_name: &str, - ext: &mut dyn External, + code: &ContractCode, + cache: &dyn ContractRuntimeCache, + ) -> Result< + Result, + crate::logic::errors::CacheError, + > { + Ok(self + .compile_and_cache(code, cache)? 
+ .map(|_| ContractPrecompilatonResult::ContractCompiled)) + } + + fn prepare( + self: Box, + ext: &dyn External, context: &VMContext, - fees_config: Arc, - promise_results: Arc<[PromiseResult]>, cache: Option<&dyn ContractRuntimeCache>, - ) -> Result { + ) -> Box { let cache = cache.unwrap_or(&NoContractRuntimeCache); - self.with_compiled_and_loaded( - cache, - ext, - context, - method_name, - |result_state, ext, module| { - match module.get_export(method_name) { - Some(export) => match export { - Func(func_type) => { - if func_type.params().len() != 0 || func_type.results().len() != 0 { - let err = FunctionCallError::MethodResolveError( - MethodResolveError::MethodInvalidSignature, - ); - return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol( - result_state, - err, - )); - } + let prepd = self.with_compiled_and_loaded(cache, ext, context, |result_state, module| { + match module.get_export(&context.method) { + Some(export) => match export { + Func(func_type) => { + if func_type.params().len() != 0 || func_type.results().len() != 0 { + let err = FunctionCallError::MethodResolveError( + MethodResolveError::MethodInvalidSignature, + ); + return Ok(PreparedContract::Outcome( + VMOutcome::abort_but_nop_outcome_in_old_protocol(result_state, err), + )); } - _ => { - return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol( + } + _ => { + return Ok(PreparedContract::Outcome( + VMOutcome::abort_but_nop_outcome_in_old_protocol( result_state, FunctionCallError::MethodResolveError( MethodResolveError::MethodNotFound, ), - )); - } - }, - None => { - return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol( + ), + )); + } + }, + None => { + return Ok(PreparedContract::Outcome( + VMOutcome::abort_but_nop_outcome_in_old_protocol( result_state, FunctionCallError::MethodResolveError( MethodResolveError::MethodNotFound, ), - )); - } + ), + )); } + } - let mut store = Store::new(&self.engine, ()); - let memory = WasmtimeMemory::new( - &mut store, - self.config.limit_config.initial_memory_pages, - self.config.limit_config.max_memory_pages, - ) - .unwrap(); - let memory_copy = memory.0; - let mut logic = - VMLogic::new(ext, context, fees_config, promise_results, result_state, memory); - let mut linker = Linker::new(&(&self.engine)); - link(&mut linker, memory_copy, &store, &self.config, &mut logic); - match linker.instantiate(&mut store, &module) { - Ok(instance) => match instance.get_func(&mut store, method_name) { - Some(func) => match func.typed::<(), ()>(&mut store) { - Ok(run) => match run.call(&mut store, ()) { - Ok(_) => Ok(VMOutcome::ok(logic.result_state)), - Err(err) => { - Ok(VMOutcome::abort(logic.result_state, err.into_vm_error()?)) - } - }, - Err(err) => { - Ok(VMOutcome::abort(logic.result_state, err.into_vm_error()?)) - } - }, - None => { - return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol( - logic.result_state, - FunctionCallError::MethodResolveError( - MethodResolveError::MethodNotFound, - ), - )); - } + let mut store = Store::new(&self.engine, ()); + let memory = WasmtimeMemory::new( + &mut store, + self.config.limit_config.initial_memory_pages, + self.config.limit_config.max_memory_pages, + ) + .unwrap(); + Ok(PreparedContract::Ready(ReadyContract { store, memory, module, result_state })) + }); + Box::new(prepd) + } +} + +struct ReadyContract { + store: Store<()>, + memory: WasmtimeMemory, + module: Module, + result_state: ExecutionResultState, +} + +#[allow(clippy::large_enum_variant)] +enum PreparedContract { + Outcome(VMOutcome), + Ready(ReadyContract), +} + +impl 
crate::PreparedContract for VMResult { + fn run( + self: Box, + ext: &mut dyn External, + context: &VMContext, + fees_config: Arc, + ) -> VMResult { + let ReadyContract { mut store, memory, module, result_state } = match (*self)? { + PreparedContract::Outcome(outcome) => return Ok(outcome), + PreparedContract::Ready(r) => r, + }; + let memory_copy = memory.0; + let config = Arc::clone(&result_state.config); + let mut logic = VMLogic::new(ext, context, fees_config, result_state, memory); + let engine = store.engine(); + let mut linker = Linker::new(engine); + // TODO: config could be accessed through `logic.result_state`, without this code having to + // figure it out... + link(&mut linker, memory_copy, &store, &config, &mut logic); + match linker.instantiate(&mut store, &module) { + Ok(instance) => match instance.get_func(&mut store, &context.method) { + Some(func) => match func.typed::<(), ()>(&mut store) { + Ok(run) => match run.call(&mut store, ()) { + Ok(_) => Ok(VMOutcome::ok(logic.result_state)), + Err(err) => Ok(VMOutcome::abort(logic.result_state, err.into_vm_error()?)), }, + Err(err) => Ok(VMOutcome::abort(logic.result_state, err.into_vm_error()?)), + }, + None => { + return Ok(VMOutcome::abort_but_nop_outcome_in_old_protocol( + logic.result_state, + FunctionCallError::MethodResolveError(MethodResolveError::MethodNotFound), + )); + } }, - ) - } - - fn precompile( - &self, - code: &ContractCode, - cache: &dyn ContractRuntimeCache, - ) -> Result< - Result, - crate::logic::errors::CacheError, - > { - Ok(self - .compile_and_cache(code, cache)? - .map(|_| ContractPrecompilatonResult::ContractCompiled)) + Err(err) => Ok(VMOutcome::abort(logic.result_state, err.into_vm_error()?)), + } } } diff --git a/runtime/runtime-params-estimator/estimator-warehouse/src/main.rs b/runtime/runtime-params-estimator/estimator-warehouse/src/main.rs index 15c534a5282..d27407d531b 100644 --- a/runtime/runtime-params-estimator/estimator-warehouse/src/main.rs +++ b/runtime/runtime-params-estimator/estimator-warehouse/src/main.rs @@ -146,7 +146,11 @@ mod tests { /// - This is an expensive test. We run it like any other test for now but /// it might make sense to put it in a separate CI job. /// - QEMU based estimation is skipped - it would be too slow. + /// TODO(#11705) - This test is disabled due to errors in the congestion + /// control stack. It's likely due to missing congestion info bootstrapping. + /// Fix the issue and re-enable the test. #[test] + #[ignore] fn test_full_estimator() -> anyhow::Result<()> { let stats_path = Path::new("tmp_db.sqlite"); let db = Db::open(stats_path)?; diff --git a/runtime/runtime-params-estimator/src/function_call.rs b/runtime/runtime-params-estimator/src/function_call.rs index a5b684ae041..79145da0932 100644 --- a/runtime/runtime-params-estimator/src/function_call.rs +++ b/runtime/runtime-params-estimator/src/function_call.rs @@ -69,38 +69,26 @@ fn compute_function_call_cost( let config_store = RuntimeConfigStore::new(None); let runtime_config = config_store.get_config(protocol_version).as_ref(); let vm_config = runtime_config.wasm_config.clone(); - let runtime = vm_kind.runtime(vm_config).expect("runtime has not been enabled"); let fees = runtime_config.fees.clone(); let mut fake_external = MockedExternal::with_code(contract.clone_for_tests()); - let fake_context = create_context(vec![]); - let promise_results = Arc::from([]); + let fake_context = create_context("hello0", vec![]); // Warmup.
for _ in 0..warmup_repeats { + let runtime = vm_kind.runtime(vm_config.clone()).expect("runtime has not been enabled"); let result = runtime - .run( - "hello0", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal error"); assert!(result.aborted.is_none()); } // Run with gas metering. let start = GasCost::measure(gas_metric); for _ in 0..repeats { + let runtime = vm_kind.runtime(vm_config.clone()).expect("runtime has not been enabled"); let result = runtime - .run( - "hello0", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal_error"); assert!(result.aborted.is_none()); } diff --git a/runtime/runtime-params-estimator/src/gas_metering.rs b/runtime/runtime-params-estimator/src/gas_metering.rs index 62670052dee..4a710379661 100644 --- a/runtime/runtime-params-estimator/src/gas_metering.rs +++ b/runtime/runtime-params-estimator/src/gas_metering.rs @@ -136,24 +136,16 @@ pub(crate) fn compute_gas_metering_cost(config: &Config, contract: &ContractCode cfg.enable_all_features(); cfg }); - let runtime = vm_kind.runtime(vm_config_gas).expect("runtime has not been enabled"); - let runtime_free_gas = vm_kind.runtime(vm_config_free).expect("runtime has not been enabled"); let fees = runtime_config.fees.clone(); let mut fake_external = MockedExternal::with_code(contract.clone_for_tests()); - let fake_context = create_context(vec![]); - let promise_results = Arc::from([]); + let fake_context = create_context("hello", vec![]); // Warmup with gas metering for _ in 0..warmup_repeats { + let runtime = vm_kind.runtime(vm_config_gas.clone()).expect("runtime has not been enabled"); let result = runtime - .run( - "hello", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal_error"); if let Some(err) = &result.aborted { eprintln!("error: {}", err); @@ -164,15 +156,10 @@ pub(crate) fn compute_gas_metering_cost(config: &Config, contract: &ContractCode // Run with gas metering. 
let start = GasCost::measure(gas_metric); for _ in 0..repeats { + let runtime = vm_kind.runtime(vm_config_gas.clone()).expect("runtime has not been enabled"); let result = runtime - .run( - "hello", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal_error"); assert!(result.aborted.is_none()); } @@ -180,15 +167,11 @@ pub(crate) fn compute_gas_metering_cost(config: &Config, contract: &ContractCode // Warmup without gas metering for _ in 0..warmup_repeats { + let runtime_free_gas = + vm_kind.runtime(vm_config_free.clone()).expect("runtime has not been enabled"); let result = runtime_free_gas - .run( - "hello", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal_error"); assert!(result.aborted.is_none()); } @@ -196,15 +179,11 @@ pub(crate) fn compute_gas_metering_cost(config: &Config, contract: &ContractCode // Run without gas metering. let start = GasCost::measure(gas_metric); for _ in 0..repeats { + let runtime_free_gas = + vm_kind.runtime(vm_config_free.clone()).expect("runtime has not been enabled"); let result = runtime_free_gas - .run( - "hello", - &mut fake_external, - &fake_context, - Arc::clone(&fees), - Arc::clone(&promise_results), - cache, - ) + .prepare(&fake_external, &fake_context, cache) + .run(&mut fake_external, &fake_context, Arc::clone(&fees)) .expect("fatal_error"); assert!(result.aborted.is_none()); } diff --git a/runtime/runtime-params-estimator/src/lib.rs b/runtime/runtime-params-estimator/src/lib.rs index 18642023c5d..85ed414cabe 100644 --- a/runtime/runtime-params-estimator/src/lib.rs +++ b/runtime/runtime-params-estimator/src/lib.rs @@ -889,22 +889,15 @@ fn wasm_instruction(ctx: &mut EstimatorContext) -> GasCost { let config_store = RuntimeConfigStore::new(None); let config = config_store.get_config(PROTOCOL_VERSION).wasm_config.clone(); let fees = Arc::new(RuntimeFeesConfig::test()); - let promise_results = [].into(); let cache = MockContractRuntimeCache::default(); let mut run = || { - let context = create_context(vec![]); + let context = create_context("cpu_ram_soak_test", vec![]); let vm_result = vm_kind .runtime(config.clone()) .unwrap() - .run( - "cpu_ram_soak_test", - &mut fake_external, - &context, - Arc::clone(&fees), - Arc::clone(&promise_results), - Some(&cache), - ) + .prepare(&fake_external, &context, Some(&cache)) + .run(&mut fake_external, &context, Arc::clone(&fees)) .expect("fatal_error"); assert!(vm_result.aborted.is_some()); vm_result diff --git a/runtime/runtime-params-estimator/src/vm_estimator.rs b/runtime/runtime-params-estimator/src/vm_estimator.rs index 72bd4415ed6..5ef380b666f 100644 --- a/runtime/runtime-params-estimator/src/vm_estimator.rs +++ b/runtime/runtime-params-estimator/src/vm_estimator.rs @@ -15,13 +15,15 @@ const SIGNER_ACCOUNT_ID: &str = "bob"; const SIGNER_ACCOUNT_PK: [u8; 3] = [0, 1, 2]; const PREDECESSOR_ACCOUNT_ID: &str = "carol"; -pub(crate) fn create_context(input: Vec) -> VMContext { +pub(crate) fn create_context(method: &str, input: Vec) -> VMContext { VMContext { current_account_id: CURRENT_ACCOUNT_ID.parse().unwrap(), signer_account_id: SIGNER_ACCOUNT_ID.parse().unwrap(), signer_account_pk: Vec::from(&SIGNER_ACCOUNT_PK[..]), predecessor_account_id: 
PREDECESSOR_ACCOUNT_ID.parse().unwrap(), + method: method.into(), input, + promise_results: vec![].into(), block_height: 10, block_timestamp: 42, epoch_height: 0, diff --git a/runtime/runtime/src/actions.rs b/runtime/runtime/src/actions.rs index 87649d2303f..aec1eeb5433 100644 --- a/runtime/runtime/src/actions.rs +++ b/runtime/runtime/src/actions.rs @@ -76,7 +76,9 @@ pub(crate) fn execute_function_call( signer_account_pk: borsh::to_vec(&action_receipt.signer_public_key) .expect("Failed to serialize"), predecessor_account_id: predecessor_id.clone(), + method: function_call.method_name.clone(), input: function_call.args.clone(), + promise_results, block_height: apply_state.block_height, block_timestamp: apply_state.block_timestamp, epoch_height: apply_state.epoch_height, @@ -102,12 +104,10 @@ pub(crate) fn execute_function_call( }; let mode_guard = runtime_ext.trie_update.with_trie_cache_mode(mode); let result = near_vm_runner::run( - &function_call.method_name, runtime_ext, &context, Arc::clone(&config.wasm_config), Arc::clone(&config.fees), - promise_results, apply_state.cache.as_deref(), ); drop(mode_guard); diff --git a/runtime/runtime/src/balance_checker.rs b/runtime/runtime/src/balance_checker.rs index 61dbe56525e..0fed196bb3e 100644 --- a/runtime/runtime/src/balance_checker.rs +++ b/runtime/runtime/src/balance_checker.rs @@ -366,7 +366,6 @@ pub(crate) fn check_balance( incoming_receipts_balance, processed_delayed_receipts_balance, initial_postponed_receipts_balance, - #[cfg(feature = "nightly")] forwarded_buffered_receipts_balance, // Outputs final_accounts_balance, @@ -375,7 +374,6 @@ pub(crate) fn check_balance( final_postponed_receipts_balance, tx_burnt_amount: stats.tx_burnt_amount, slashed_burnt_amount: stats.slashed_burnt_amount, - #[cfg(feature = "nightly")] new_buffered_receipts_balance, other_burnt_amount: stats.other_burnt_amount, } diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs index f60b0ef2690..32b8a20c2b1 100644 --- a/runtime/runtime/src/lib.rs +++ b/runtime/runtime/src/lib.rs @@ -1582,14 +1582,21 @@ impl Runtime { .recorded_storage_size_upper_bound() .saturating_sub(storage_proof_size_upper_bound_before) as f64; - metrics::RECEIPT_RECORDED_SIZE.observe(recorded_storage_diff); - metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND.observe(recorded_storage_upper_bound_diff); + let shard_id_str = processing_state.apply_state.shard_id.to_string(); + metrics::RECEIPT_RECORDED_SIZE + .with_label_values(&[shard_id_str.as_str()]) + .observe(recorded_storage_diff); + metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND + .with_label_values(&[shard_id_str.as_str()]) + .observe(recorded_storage_upper_bound_diff); let recorded_storage_proof_ratio = recorded_storage_upper_bound_diff / f64::max(1.0, recorded_storage_diff); // Record the ratio only for large receipts, small receipts can have a very high ratio, // but the ratio is not that important for them. if recorded_storage_upper_bound_diff > 100_000. { - metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO.observe(recorded_storage_proof_ratio); + metrics::RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO + .with_label_values(&[shard_id_str.as_str()]) + .observe(recorded_storage_proof_ratio); } if let Some(outcome_with_id) = result? 
{ let gas_burnt = outcome_with_id.outcome.gas_burnt; @@ -1905,7 +1912,10 @@ impl Runtime { self.apply_state_patch(&mut state_update, state_patch); let chunk_recorded_size_upper_bound = state_update.trie.recorded_storage_size_upper_bound() as f64; - metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND.observe(chunk_recorded_size_upper_bound); + let shard_id_str = apply_state.shard_id.to_string(); + metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND + .with_label_values(&[shard_id_str.as_str()]) + .observe(chunk_recorded_size_upper_bound); let (trie, trie_changes, state_changes) = state_update.finalize()?; if let Some(prefetcher) = &processing_state.prefetcher { @@ -1937,8 +1947,11 @@ impl Runtime { let state_root = trie_changes.new_root; let chunk_recorded_size = trie.recorded_storage_size() as f64; - metrics::CHUNK_RECORDED_SIZE.observe(chunk_recorded_size); + metrics::CHUNK_RECORDED_SIZE + .with_label_values(&[shard_id_str.as_str()]) + .observe(chunk_recorded_size); metrics::CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO + .with_label_values(&[shard_id_str.as_str()]) .observe(chunk_recorded_size_upper_bound / f64::max(1.0, chunk_recorded_size)); let proof = trie.recorded_storage(); let processed_delayed_receipts = process_receipts_result.processed_delayed_receipts; diff --git a/runtime/runtime/src/metrics.rs b/runtime/runtime/src/metrics.rs index 43549facba2..3ca9e6ef7e3 100644 --- a/runtime/runtime/src/metrics.rs +++ b/runtime/runtime/src/metrics.rs @@ -1,9 +1,9 @@ use crate::congestion_control::ReceiptSink; use near_o11y::metrics::{ exponential_buckets, linear_buckets, try_create_counter_vec, try_create_gauge_vec, - try_create_histogram_vec, try_create_histogram_with_buckets, try_create_int_counter, - try_create_int_counter_vec, try_create_int_gauge_vec, CounterVec, GaugeVec, Histogram, - HistogramVec, IntCounter, IntCounterVec, IntGaugeVec, + try_create_histogram_vec, try_create_int_counter, try_create_int_counter_vec, + try_create_int_gauge_vec, CounterVec, GaugeVec, HistogramVec, IntCounter, IntCounterVec, + IntGaugeVec, }; use near_parameters::config::CongestionControlConfig; use near_primitives::congestion_info::CongestionInfo; @@ -295,51 +295,57 @@ static CHUNK_TX_TGAS: Lazy = Lazy::new(|| { ) .unwrap() }); -pub static RECEIPT_RECORDED_SIZE: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static RECEIPT_RECORDED_SIZE: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_receipt_recorded_size", "Size of storage proof recorded when executing a receipt", - buckets_for_receipt_storage_proof_size(), + &["shard_id"], + Some(buckets_for_receipt_storage_proof_size()), ) .unwrap() }); -pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_receipt_recorded_size_upper_bound", "Upper bound estimation (e.g with extra size added for deletes) of storage proof size recorded when executing a receipt", - buckets_for_receipt_storage_proof_size(), + &["shard_id"], + Some(buckets_for_receipt_storage_proof_size()), ) .unwrap() }); -pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static RECEIPT_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_receipt_recorded_size_upper_bound_ratio", "Ratio of upper bound to true recorded size, calculated only for sizes larger than 100KB, equal to (near_receipt_recorded_size_upper_bound / near_receipt_recorded_size)", - 
buckets_for_storage_proof_size_ratio(), + &["shard_id"], + Some(buckets_for_storage_proof_size_ratio()), ) .unwrap() }); -pub static CHUNK_RECORDED_SIZE: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static CHUNK_RECORDED_SIZE: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_chunk_recorded_size", "Total size of storage proof (recorded trie nodes for state witness, post-finalization) for a single chunk", - buckets_for_chunk_storage_proof_size(), + &["shard_id"], + Some(buckets_for_chunk_storage_proof_size()), ) .unwrap() }); -pub static CHUNK_RECORDED_SIZE_UPPER_BOUND: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static CHUNK_RECORDED_SIZE_UPPER_BOUND: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_chunk_recorded_size_upper_bound", "Upper bound of storage proof size (recorded trie nodes size + estimated charges, pre-finalization) for a single chunk", - buckets_for_chunk_storage_proof_size(), + &["shard_id"], + Some(buckets_for_chunk_storage_proof_size()), ) .unwrap() }); -pub static CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy = Lazy::new(|| { - try_create_histogram_with_buckets( +pub static CHUNK_RECORDED_SIZE_UPPER_BOUND_RATIO: Lazy = Lazy::new(|| { + try_create_histogram_vec( "near_chunk_recorded_size_upper_bound_ratio", "Ratio of upper bound to true storage proof size, equal to (near_chunk_recorded_size_upper_bound / near_chunk_recorded_size)", - buckets_for_storage_proof_size_ratio(), + &["shard_id"], + Some(buckets_for_storage_proof_size_ratio()), ) .unwrap() }); diff --git a/scripts/ft-benchmark.sh b/scripts/ft-benchmark.sh index 5edd1b71715..90f661b935a 100755 --- a/scripts/ft-benchmark.sh +++ b/scripts/ft-benchmark.sh @@ -8,6 +8,7 @@ date # Otherwise nearup and cargo don't work even if installed properly PATH=/home/ubuntu/.local/bin/:$PATH export PATH=$PATH:$HOME/.cargo/bin +source benchmarks/continous/db/tool/dbprofile # Fetch the latest changes from the remote git fetch diff --git a/scripts/mac-release.sh b/scripts/mac-release.sh index be3544f59a5..04360caaba3 100755 --- a/scripts/mac-release.sh +++ b/scripts/mac-release.sh @@ -67,7 +67,6 @@ function upload_binary { } upload_binary neard -upload_binary store-validator if [ "$release" == "release" ] then diff --git a/tools/amend-genesis/Cargo.toml b/tools/amend-genesis/Cargo.toml index c64a4050793..cca0042923d 100644 --- a/tools/amend-genesis/Cargo.toml +++ b/tools/amend-genesis/Cargo.toml @@ -20,11 +20,11 @@ serde.workspace = true serde_json.workspace = true tracing.workspace = true -near-async.workspace = true +near-time.workspace = true near-chain-configs.workspace = true near-crypto.workspace = true near-primitives.workspace = true near-primitives-core.workspace = true [dev-dependencies] -tempfile.workspace = true \ No newline at end of file +tempfile.workspace = true diff --git a/tools/amend-genesis/src/lib.rs b/tools/amend-genesis/src/lib.rs index 47baaff9812..b4b71d9e301 100644 --- a/tools/amend-genesis/src/lib.rs +++ b/tools/amend-genesis/src/lib.rs @@ -430,7 +430,6 @@ pub fn amend_genesis( #[cfg(test)] mod test { use anyhow::Context; - use near_async::time::Clock; use near_chain_configs::{get_initial_supply, Genesis, GenesisConfig, NEAR_BASE}; use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::ShardLayout; @@ -440,6 +439,7 @@ mod test { use near_primitives::version::PROTOCOL_VERSION; use near_primitives_core::account::{AccessKey, Account}; use near_primitives_core::types::{Balance, StorageUsage}; + use near_time::Clock; use 
num_rational::Rational32; use std::collections::{HashMap, HashSet}; use std::str::FromStr; diff --git a/tools/debug-ui/package-lock.json b/tools/debug-ui/package-lock.json index eab7f4ad4f4..0e64ed46255 100644 --- a/tools/debug-ui/package-lock.json +++ b/tools/debug-ui/package-lock.json @@ -13,6 +13,7 @@ "@types/node": "^16.18.77", "@types/react": "^18.2.46", "@types/react-dom": "^18.2.18", + "date-fns": "^3.6.0", "react": "^18.2.0", "react-dom": "^18.2.0", "react-router-dom": "^6.21.1", @@ -6790,6 +6791,16 @@ "node": ">=10" } }, + "node_modules/date-fns": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", + "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/kossnocorp" + } + }, "node_modules/debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", @@ -22972,6 +22983,11 @@ "whatwg-url": "^8.0.0" } }, + "date-fns": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-3.6.0.tgz", + "integrity": "sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==" + }, "debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", diff --git a/tools/debug-ui/package.json b/tools/debug-ui/package.json index 572cac4937c..16c9e70384d 100644 --- a/tools/debug-ui/package.json +++ b/tools/debug-ui/package.json @@ -4,12 +4,13 @@ "private": true, "dependencies": { "@patternfly/react-log-viewer": "^4.87.101", + "@tanstack/react-query": "^4.0.0", "@types/node": "^16.18.77", "@types/react": "^18.2.46", "@types/react-dom": "^18.2.18", + "date-fns": "^3.6.0", "react": "^18.2.0", "react-dom": "^18.2.0", - "@tanstack/react-query": "^4.0.0", "react-router-dom": "^6.21.1", "react-scripts": "^5.0.1", "react-tooltip": "^5.22.0", @@ -48,4 +49,4 @@ "typescript": "^4.9.5", "typescript-plugin-css-modules": "^4.2.2" } -} \ No newline at end of file +} diff --git a/tools/debug-ui/src/EpochValidatorsView.scss b/tools/debug-ui/src/EpochValidatorsView.scss index 0c08bcf7b9d..e489977c7ab 100644 --- a/tools/debug-ui/src/EpochValidatorsView.scss +++ b/tools/debug-ui/src/EpochValidatorsView.scss @@ -82,37 +82,60 @@ &:nth-child(2), &:nth-child(3), - &:nth-child(4), - &:nth-child(5) { + &:nth-child(4) { opacity: 0.5; } - &:nth-child(6) { + &:nth-child(5) { border-left: $current-border; } - &:nth-child(10) { + &:nth-child(9) { border-right: $current-border; } } thead tr:nth-child(2) th { + &:nth-child(5), &:nth-child(6), &:nth-child(7), &:nth-child(8), - &:nth-child(9), - &:nth-child(10) { + &:nth-child(9) { background-color: #d6ffd0; } } tbody tr:last-child td { + &:nth-child(5), &:nth-child(6), &:nth-child(7), &:nth-child(8), - &:nth-child(9), - &:nth-child(10) { + &:nth-child(9) { border-bottom: $current-border; } } } + +.validator-role { + padding: 4px 8px; + border-radius: 8px; + color: black; + margin: 0px 2px; + font-size: 12px; + font-weight: bold; +} + +.block-producer { + @extend .validator-role; + background-color: thistle; +} + +.chunk-producer { + @extend .validator-role; + background-color: lightblue; +} + +.chunk-validator { + @extend .validator-role; + background-color: bisque; +} diff --git a/tools/debug-ui/src/EpochValidatorsView.tsx b/tools/debug-ui/src/EpochValidatorsView.tsx index ae2ffc951a3..882037a5300 100644 --- a/tools/debug-ui/src/EpochValidatorsView.tsx +++ 
b/tools/debug-ui/src/EpochValidatorsView.tsx @@ -9,13 +9,27 @@ interface ProducedAndExpected { expected: number; } -type ValidatorRole = 'BlockProducer' | 'ChunkOnlyProducer' | 'None'; +interface BlockProducer { + kind: 'BlockProducer' +} + +interface ChunkProducer { + kind: 'ChunkProducer' + shards: number[]; +} + +interface ChunkValidator { + kind: 'ChunkValidator' +} + +type ValidatorRole = BlockProducer | ChunkProducer | ChunkValidator; interface CurrentValidatorInfo { stake: number; shards: number[]; blocks: ProducedAndExpected; chunks: ProducedAndExpected; + endorsements: ProducedAndExpected; } interface NextValidatorInfo { @@ -29,7 +43,7 @@ interface ValidatorInfo { next: NextValidatorInfo | null; proposalStake: number | null; kickoutReason: ValidatorKickoutReason | null; - roles: ValidatorRole[]; + roles: ValidatorRole[][]; } class Validators { @@ -41,9 +55,9 @@ class Validators { if (this.validators.has(accountId)) { return this.validators.get(accountId)!; } - const roles = [] as ValidatorRole[]; + const roles = [] as ValidatorRole[][]; for (let i = 0; i < this.numEpochs; i++) { - roles.push('None'); + roles.push([]); } this.validators.set(accountId, { accountId, @@ -56,9 +70,12 @@ class Validators { return this.validators.get(accountId)!; } - setValidatorRole(accountId: string, epochIndex: number, role: ValidatorRole) { + addValidatorRole(accountId: string, epochIndex: number, role: ValidatorRole) { const validator = this.validator(accountId); - validator.roles[epochIndex] = role; + validator.roles[epochIndex].push(role); + validator.roles[epochIndex].sort((a, b) => { + return a.kind.localeCompare(b.kind) + }) } sorted(): ValidatorInfo[] { @@ -107,7 +124,8 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { let maxStake = 0, totalStake = 0, maxExpectedBlocks = 0, - maxExpectedChunks = 0; + maxExpectedChunks = 0, + maxExpectedEndorsements = 0; const epochs = epochData!.status_response.EpochInfo; const validators = new Validators(epochs.length); const currentValidatorInfo = epochData!.status_response.EpochInfo[1].validator_info; @@ -125,11 +143,19 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { produced: validatorInfo.num_produced_chunks, expected: validatorInfo.num_expected_chunks, }, + endorsements: { + produced: validatorInfo.num_produced_endorsements, + expected: validatorInfo.num_expected_endorsements, + }, }; maxStake = Math.max(maxStake, stake); totalStake += stake; maxExpectedBlocks = Math.max(maxExpectedBlocks, validatorInfo.num_expected_blocks); maxExpectedChunks = Math.max(maxExpectedChunks, validatorInfo.num_expected_chunks); + maxExpectedEndorsements = Math.max( + maxExpectedEndorsements, + validatorInfo.num_expected_endorsements + ); } for (const validatorInfo of currentValidatorInfo.next_validators) { const validator = validators.validator(validatorInfo.account_id); @@ -137,6 +163,9 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { stake: parseFloat(validatorInfo.stake), shards: validatorInfo.shards, }; + if (validatorInfo.shards.length > 0) { + validators.addValidatorRole(validator.accountId, 0, { kind: 'ChunkProducer', shards: validatorInfo.shards }); + } } for (const proposal of currentValidatorInfo.current_proposals) { const validator = validators.validator(proposal.account_id); @@ -147,11 +176,18 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { validator.kickoutReason = kickout.reason; } epochs.forEach((epochInfo, index) => { - for (const 
chunkOnlyProducer of epochInfo.chunk_only_producers) { - validators.setValidatorRole(chunkOnlyProducer, index, 'ChunkOnlyProducer'); - } for (const blockProducer of epochInfo.block_producers) { - validators.setValidatorRole(blockProducer.account_id, index, 'BlockProducer'); + validators.addValidatorRole(blockProducer.account_id, index, { kind: 'BlockProducer'}); + } + if (epochInfo.validator_info != null) { + for (const validator of epochInfo.validator_info.current_validators) { + if (validator.num_expected_chunks > 0) { + validators.addValidatorRole(validator.account_id, index, { kind: 'ChunkProducer', shards: validator.shards }); + } + if (validator.num_expected_endorsements > 0) { + validators.addValidatorRole(validator.account_id, index, { kind: 'ChunkValidator'}); + } + } } }); @@ -160,23 +196,22 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { - Next Epoch + Next Epoch Current Epoch Past Epochs Validator - Role - Shards + Roles (shards) Stake Proposal - Role - Shards + Roles (shards) Stake Blocks - Chunks + Produced Chunks + Endorsed Chunks Kickout {epochs.slice(2).map((epoch) => { @@ -193,15 +228,13 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { return ( {validator.accountId} - {renderRole(validator.roles[0])} - {validator.next?.shards?.join(',') ?? ''} + {renderRoles(validator.roles[0])} {drawStakeBar(validator.next?.stake ?? null, maxStake, totalStake)} {drawStakeBar(validator.proposalStake, maxStake, totalStake)} - {renderRole(validator.roles[1])} - {validator.current?.shards?.join(',') ?? ''} + {renderRoles(validator.roles[1])} {drawStakeBar( validator.current?.stake ?? null, @@ -221,12 +254,18 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { maxExpectedChunks )} + + {drawProducedAndExpectedBar( + validator.current?.endorsements ?? null, + maxExpectedEndorsements + )} + - {validator.roles.slice(2).map((role, i) => { - return {renderRole(role)}; + {validator.roles.slice(2).map((roles, i) => { + return {renderRoles(roles)}; })} ); @@ -238,7 +277,8 @@ export const EpochValidatorsView = ({ addr }: EpochValidatorViewProps) => { function drawProducedAndExpectedBar( producedAndExpected: ProducedAndExpected | null, - maxExpected: number + maxExpected: number, + scale = 1 ): JSX.Element { if (producedAndExpected === null) { return <>; @@ -263,10 +303,10 @@ function drawProducedAndExpectedBar( return (
{produced}
-
+
{produced !== expected && ( <> -
+
{expected - produced}
)} @@ -291,15 +331,22 @@ function drawStakeBar(stake: number | null, maxStake: number, totalStake: number ); } -function renderRole(role: ValidatorRole): JSX.Element { - switch (role) { - case 'BlockProducer': - return BP; - case 'ChunkOnlyProducer': - return CP; - default: - return <>; +function renderRoles(roles: ValidatorRole[]): JSX.Element { + const renderedItems = []; + for (const role of roles) { + switch (role.kind) { + case 'BlockProducer': + renderedItems.push(BP); + break; + case 'ChunkProducer': + renderedItems.push(CP({role.shards.join(",")})); + break; + case 'ChunkValidator': + renderedItems.push(CV); + break; + } } + return <>{renderedItems}; } const KickoutReason = ({ reason }: { reason: ValidatorKickoutReason | null }) => { diff --git a/tools/debug-ui/src/RecentEpochsView.tsx b/tools/debug-ui/src/RecentEpochsView.tsx index b133f48ca30..da281372f46 100644 --- a/tools/debug-ui/src/RecentEpochsView.tsx +++ b/tools/debug-ui/src/RecentEpochsView.tsx @@ -1,5 +1,6 @@ import { useQuery } from '@tanstack/react-query'; -import { fetchEpochInfo, fetchFullStatus } from './api'; +import { parse } from 'date-fns'; +import { EpochInfoView, fetchEpochInfo, fetchFullStatus } from './api'; import { formatDurationInMillis } from './utils'; import './RecentEpochsView.scss'; @@ -40,6 +41,8 @@ export const RecentEpochsView = ({ addr }: RecentEpochsViewProps) => { First Block Epoch Start Block Producers + Chunk Producers + Chunk Validators Chunk-only Producers @@ -61,9 +64,25 @@ export const RecentEpochsView = ({ addr }: RecentEpochsViewProps) => { } } else { firstBlockColumn = epochInfo.first_block[0]; + // The date object inside epochInfo.first_block is very particular. + // It looks like this: + // 2024,180,0,15,28,88423066,0,0,0 + // year,days,hours,minutes,seconds,nanoseconds,timezone offsets + // The solution below parses the first part of the date object, up the the seconds, in UTC. epochStartColumn = `${formatDurationInMillis( - Date.now() - Date.parse(epochInfo.first_block[1]) - )} ago`; + Date.now() - + parse( + epochInfo.first_block[1] + .toString() + .split(",") + .slice(0, 5) + .concat(["+00"]) + .join(","), + "yyyy,D,H,m,s,x", + new Date(), + { useAdditionalDayOfYearTokens: true } + ).getTime() + )} ago`; } let rowClassName = ''; let firstColumnText = ''; @@ -85,6 +104,8 @@ export const RecentEpochsView = ({ addr }: RecentEpochsViewProps) => { {firstBlockColumn} {epochStartColumn} {epochInfo.block_producers.length} + {getChunkProducersTotal(epochInfo)} + {getChunkValidatorsTotal(epochInfo)} {epochInfo.chunk_only_producers.length} ); @@ -93,3 +114,21 @@ export const RecentEpochsView = ({ addr }: RecentEpochsViewProps) => { ); }; + +function getChunkProducersTotal(epochInfo: EpochInfoView) { + return epochInfo.validator_info?.current_validators.reduce((acc, it) => { + if (it.num_expected_chunks > 0) { + acc = acc + 1; + } + return acc; + }, 0) ?? "N/A" +} + +function getChunkValidatorsTotal(epochInfo: EpochInfoView) { + return epochInfo.validator_info?.current_validators.reduce((acc, it) => { + if (it.num_expected_endorsements > 0) { + acc = acc + 1; + } + return acc; + }, 0) ?? "N/A"; +} \ No newline at end of file diff --git a/tools/debug-ui/src/SnapshotHostsView.tsx b/tools/debug-ui/src/SnapshotHostsView.tsx index fcb766cfc84..72f75160884 100644 --- a/tools/debug-ui/src/SnapshotHostsView.tsx +++ b/tools/debug-ui/src/SnapshotHostsView.tsx @@ -19,7 +19,7 @@ export const SnapshotHostsView = ({ addr }: SnapshotHostsViewProps) => { return
{(error as Error).stack}
; } - let snapshot_hosts = snapshotHosts!.status_response.SnapshotHosts.hosts; + const snapshot_hosts = snapshotHosts!.status_response.SnapshotHosts.hosts; snapshot_hosts.sort((a, b) => { if (a.epoch_height != b.epoch_height) { return b.epoch_height - a.epoch_height; diff --git a/tools/debug-ui/src/api.tsx b/tools/debug-ui/src/api.tsx index 61f5d28b06d..b7549a5fde8 100644 --- a/tools/debug-ui/src/api.tsx +++ b/tools/debug-ui/src/api.tsx @@ -204,6 +204,8 @@ export interface CurrentEpochValidatorInfo { num_expected_blocks: number; num_produced_chunks: number; num_expected_chunks: number; + num_produced_endorsements: number; + num_expected_endorsements: number; } export interface NextEpochValidatorInfo { diff --git a/tools/epoch-sync/src/cli.rs b/tools/epoch-sync/src/cli.rs index 24746e83f8a..5c19688219f 100644 --- a/tools/epoch-sync/src/cli.rs +++ b/tools/epoch-sync/src/cli.rs @@ -1,7 +1,6 @@ use anyhow::Context; use clap; use near_chain::{ChainStore, ChainStoreAccess, ChainUpdate, DoomslugThresholdMode}; -use near_epoch_manager::shard_tracker::{ShardTracker, TrackedConfig}; use near_epoch_manager::EpochManager; use near_primitives::block::BlockHeader; use near_primitives::borsh::BorshDeserialize; @@ -105,10 +104,6 @@ impl ValidateEpochSyncInfoCmd { let epoch_manager = EpochManager::new_arc_handle(storage.get_hot_store(), &config.genesis.config); - let shard_tracker = ShardTracker::new( - TrackedConfig::from_config(&config.client_config), - epoch_manager.clone(), - ); let runtime = NightshadeRuntime::from_config( home_dir, storage.get_hot_store(), @@ -119,7 +114,6 @@ impl ValidateEpochSyncInfoCmd { let chain_update = ChainUpdate::new( &mut chain_store, epoch_manager, - shard_tracker, runtime, DoomslugThresholdMode::TwoThirds, config.genesis.config.transaction_validity_period, diff --git a/tools/mock-node/Cargo.toml b/tools/mock-node/Cargo.toml index ef4501f6d59..31d216a96de 100644 --- a/tools/mock-node/Cargo.toml +++ b/tools/mock-node/Cargo.toml @@ -27,7 +27,7 @@ tokio.workspace = true tracing.workspace = true near-actix-test-utils.workspace = true -near-async.workspace = true +near-time.workspace = true near-chain.workspace = true near-chain-configs.workspace = true near-client.workspace = true diff --git a/tools/mock-node/src/lib.rs b/tools/mock-node/src/lib.rs index 0fbe713a66b..c6b026caf3f 100644 --- a/tools/mock-node/src/lib.rs +++ b/tools/mock-node/src/lib.rs @@ -2,7 +2,6 @@ //! components of the mock network. 
use anyhow::{anyhow, Context as AnyhowContext}; -use near_async::time; use near_chain::{Block, Chain, ChainStoreAccess, Error}; use near_client::sync::header::MAX_BLOCK_HEADERS; use near_crypto::SecretKey; @@ -305,7 +304,7 @@ impl MockPeer { network_start_height, (0..num_shards).collect(), archival, - 30 * time::Duration::SECOND, + 30 * near_time::Duration::SECOND, ) .await?; let incoming_requests = diff --git a/tools/mock-node/src/setup.rs b/tools/mock-node/src/setup.rs index 36de5dc4e38..99dba008def 100644 --- a/tools/mock-node/src/setup.rs +++ b/tools/mock-node/src/setup.rs @@ -2,7 +2,6 @@ use crate::{MockNetworkConfig, MockPeer}; use anyhow::Context; -use near_async::time::Clock; use near_chain::types::RuntimeAdapter; use near_chain::ChainStoreUpdate; use near_chain::{Chain, ChainGenesis, ChainStore, ChainStoreAccess, DoomslugThresholdMode}; @@ -17,6 +16,7 @@ use near_primitives::state_part::PartId; use near_primitives::state_sync::get_num_state_parts; use near_primitives::types::{BlockHeight, NumShards, ShardId}; use near_store::test_utils::create_test_store; +use near_time::Clock; use nearcore::{NearConfig, NightshadeRuntime, NightshadeRuntimeExt}; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use std::cmp::min; diff --git a/tools/ping/Cargo.toml b/tools/ping/Cargo.toml index b752b290368..ab1216215e5 100644 --- a/tools/ping/Cargo.toml +++ b/tools/ping/Cargo.toml @@ -21,7 +21,7 @@ prometheus.workspace = true tokio.workspace = true tracing.workspace = true -near-async.workspace = true +near-time.workspace = true near-jsonrpc.workspace = true near-network.workspace = true near-o11y.workspace = true @@ -29,7 +29,6 @@ near-primitives.workspace = true [features] nightly = [ - "near-async/nightly", "near-jsonrpc/nightly", "near-network/nightly", "near-o11y/nightly", @@ -37,7 +36,6 @@ nightly = [ "nightly_protocol", ] nightly_protocol = [ - "near-async/nightly_protocol", "near-jsonrpc/nightly_protocol", "near-network/nightly_protocol", "near-o11y/nightly_protocol", diff --git a/tools/ping/src/csv.rs b/tools/ping/src/csv.rs index d7d9115cc5b..47f0610bde9 100644 --- a/tools/ping/src/csv.rs +++ b/tools/ping/src/csv.rs @@ -1,4 +1,3 @@ -use near_async::time; use near_primitives::network::PeerId; use near_primitives::types::AccountId; use std::fs::{File, OpenOptions}; @@ -74,7 +73,7 @@ impl LatenciesCsv { &mut self, peer_id: &PeerId, account_id: Option<&AccountId>, - latency: time::Duration, + latency: near_time::Duration, ) -> io::Result<()> { let id = account_id.map_or_else(|| format!("{}", peer_id), |a| format!("{}", a)); write!( diff --git a/tools/ping/src/lib.rs b/tools/ping/src/lib.rs index 6d89d2e24ac..5701e0f4549 100644 --- a/tools/ping/src/lib.rs +++ b/tools/ping/src/lib.rs @@ -2,7 +2,6 @@ use actix_web::cookie::time::ext::InstantExt as _; use actix_web::{web, App, HttpServer}; use anyhow::Context; pub use cli::PingCommand; -use near_async::time; use near_network::raw::{ConnectError, Connection, DirectMessage, Message, RoutedMessage}; use near_network::types::HandshakeFailureReason; use near_primitives::hash::CryptoHash; @@ -26,16 +25,16 @@ struct PingStats { pongs_received: usize, // TODO: these latency stats could be separated into time to first byte // + time to last byte, etc. 
-    min_latency: time::Duration,
-    max_latency: time::Duration,
-    average_latency: time::Duration,
+    min_latency: near_time::Duration,
+    max_latency: near_time::Duration,
+    average_latency: near_time::Duration,
 }
 
 impl PingStats {
-    fn pong_received(&mut self, latency: time::Duration) {
+    fn pong_received(&mut self, latency: near_time::Duration) {
         self.pongs_received += 1;
-        if self.min_latency == time::Duration::ZERO || self.min_latency > latency {
+        if self.min_latency == near_time::Duration::ZERO || self.min_latency > latency {
             self.min_latency = latency;
         }
         if self.max_latency < latency {
@@ -51,7 +50,7 @@ type Nonce = u64;
 #[derive(Debug, Eq, PartialEq)]
 struct PingTarget {
     peer_id: PeerId,
-    last_pinged: Option<time::Instant>,
+    last_pinged: Option<near_time::Instant>,
 }
 
 impl PartialOrd for PingTarget {
@@ -81,7 +80,7 @@ struct PingTimeout {
     peer_id: PeerId,
     nonce: u64,
-    timeout: time::Instant,
+    timeout: near_time::Instant,
 }
 
 impl PartialOrd for PingTimeout {
@@ -103,18 +102,18 @@ fn peer_str(peer_id: &PeerId, account_id: Option<&AccountId>) -> String {
 }
 
 const MAX_PINGS_IN_FLIGHT: usize = 10;
-const PING_TIMEOUT: time::Duration = time::Duration::seconds(100);
+const PING_TIMEOUT: near_time::Duration = near_time::Duration::seconds(100);
 
 #[derive(Debug)]
 struct PingState {
     stats: PingStats,
-    last_pinged: Option<time::Instant>,
+    last_pinged: Option<near_time::Instant>,
     account_id: Option<AccountId>,
 }
 
 struct PingTimes {
-    sent_at: time::Instant,
-    timeout: time::Instant,
+    sent_at: near_time::Instant,
+    timeout: near_time::Instant,
 }
 
 struct AppInfo {
@@ -148,7 +147,7 @@ impl AppInfo {
     }
 
     fn ping_sent(&mut self, peer_id: &PeerId, nonce: u64, chain_id: &str) {
-        let timestamp = time::Instant::now();
+        let timestamp = near_time::Instant::now();
         let timeout = timestamp + PING_TIMEOUT;
 
         let account_id = self.peer_id_to_account_id(&peer_id);
@@ -202,8 +201,8 @@ impl AppInfo {
         &mut self,
         peer_id: &PeerId,
         nonce: u64,
-        received_at: time::Instant,
-    ) -> Option<(time::Duration, Option<&AccountId>)> {
+        received_at: near_time::Instant,
+    ) -> Option<(near_time::Duration, Option<&AccountId>)> {
         match self.stats.get_mut(peer_id) {
             Some(state) => {
                 let pending_pings = self
@@ -316,7 +315,7 @@ impl AppInfo {
 fn handle_message(
     app_info: &mut AppInfo,
     msg: Message,
-    received_at: time::Instant,
+    received_at: near_time::Instant,
     latencies_csv: Option<&mut crate::csv::LatenciesCsv>,
 ) -> anyhow::Result<()> {
     match msg {
@@ -391,7 +390,7 @@ async fn ping_via_node(
     app_info.add_peer(peer_id.clone(), None);
 
-    let clock = time::Clock::real();
+    let clock = near_time::Clock::real();
 
     let mut peer = match Connection::connect(
         &clock,
@@ -402,7 +401,7 @@
         genesis_hash,
         head_height,
         vec![0],
-        time::Duration::seconds(recv_timeout_seconds.into())).await {
+        near_time::Duration::seconds(recv_timeout_seconds.into())).await {
         Ok(p) => p,
         Err(ConnectError::HandshakeFailure(reason)) => {
             match reason {
diff --git a/tools/speedy_sync/Cargo.toml b/tools/speedy_sync/Cargo.toml
index b74b3feacf3..864dc208844 100644
--- a/tools/speedy_sync/Cargo.toml
+++ b/tools/speedy_sync/Cargo.toml
@@ -12,7 +12,7 @@ publish = false
 workspace = true
 
 [dependencies]
-near-async.workspace = true
+near-time.workspace = true
 near-store.workspace = true
 near-chain-primitives.workspace = true
 near-primitives.workspace = true
diff --git a/tools/speedy_sync/src/main.rs b/tools/speedy_sync/src/main.rs
index d1320a5b6e9..02ad33e590e 100644
--- a/tools/speedy_sync/src/main.rs
+++ b/tools/speedy_sync/src/main.rs
@@ -1,5 +1,4 @@
 use borsh::{BorshDeserialize, BorshSerialize};
-use near_async::time::Clock;
 use near_chain::rayon_spawner::RayonAsyncComputationSpawner;
 use near_chain::types::{ChainConfig, Tip};
 use near_chain::{Chain, ChainGenesis, DoomslugThresholdMode};
@@ -18,6 +17,7 @@ use near_primitives::types::EpochId;
 use near_primitives::utils::index_to_bytes;
 use near_store::HEADER_HEAD_KEY;
 use near_store::{DBCol, Mode, NodeStorage, Store, StoreUpdate};
+use near_time::Clock;
 use nearcore::{NightshadeRuntime, NightshadeRuntimeExt};
 use std::fs;
 use std::path::Path;
diff --git a/tools/state-parts/Cargo.toml b/tools/state-parts/Cargo.toml
index 3265dd758be..48141677696 100644
--- a/tools/state-parts/Cargo.toml
+++ b/tools/state-parts/Cargo.toml
@@ -21,7 +21,7 @@ time.workspace = true
 tokio.workspace = true
 tracing.workspace = true
 
-near-async.workspace = true
+near-time.workspace = true
 near-jsonrpc.workspace = true
 near-network.workspace = true
 near-o11y.workspace = true
@@ -30,7 +29,6 @@ near-primitives.workspace = true
 
 [features]
 nightly = [
-  "near-async/nightly",
  "near-jsonrpc/nightly",
  "near-network/nightly",
  "near-o11y/nightly",
@@ -39,7 +38,6 @@
  "nightly_protocol",
 ]
 nightly_protocol = [
-  "near-async/nightly_protocol",
  "near-jsonrpc/nightly_protocol",
  "near-network/nightly_protocol",
  "near-o11y/nightly_protocol",
diff --git a/tools/state-parts/src/lib.rs b/tools/state-parts/src/lib.rs
index db9f51b591e..616612432eb 100644
--- a/tools/state-parts/src/lib.rs
+++ b/tools/state-parts/src/lib.rs
@@ -1,12 +1,12 @@
 use ::time::ext::InstantExt as _;
 use anyhow::Context;
-use near_async::time::{self, Instant};
 use near_network::raw::{ConnectError, Connection, DirectMessage, Message};
 use near_network::types::HandshakeFailureReason;
 use near_primitives::hash::CryptoHash;
 use near_primitives::network::PeerId;
 use near_primitives::types::{BlockHeight, ShardId};
 use near_primitives::version::ProtocolVersion;
+use near_time::Instant;
 use sha2::Digest;
 use sha2::Sha256;
 use std::collections::HashMap;
@@ -16,7 +16,7 @@ use std::net::SocketAddr;
 pub mod cli;
 
 struct AppInfo {
-    pub requests_sent: HashMap<u64, time::Instant>,
+    pub requests_sent: HashMap<u64, near_time::Instant>,
 }
 
 impl AppInfo {
@@ -28,7 +28,7 @@
 fn handle_message(
     app_info: &mut AppInfo,
     msg: &Message,
-    received_at: time::Instant,
+    received_at: near_time::Instant,
 ) -> anyhow::Result<()> {
     match &msg {
         Message::Direct(DirectMessage::VersionedStateResponse(response)) => {
@@ -90,7 +90,7 @@ async fn state_parts_from_node(
     assert!(start_part_id < num_parts && num_parts > 0, "{}/{}", start_part_id, num_parts);
     let mut app_info = AppInfo::new();
 
-    let clock = time::Clock::real();
+    let clock = near_time::Clock::real();
 
     let mut peer = match Connection::connect(
         &clock,
@@ -101,7 +101,7 @@
         genesis_hash,
         head_height,
         vec![0],
-        time::Duration::seconds(recv_timeout_seconds.into())).await {
+        near_time::Duration::seconds(recv_timeout_seconds.into())).await {
         Ok(p) => p,
         Err(ConnectError::HandshakeFailure(reason)) => {
             match reason {
@@ -137,7 +137,7 @@
         let msg = DirectMessage::StateRequestPart(shard_id, block_hash, part_id);
         tracing::info!(target: "state-parts", ?target, shard_id, ?block_hash, part_id, ttl, "Sending a request");
         result = peer.send_message(msg).await.with_context(|| format!("Failed sending State Part Request to {:?}", target));
-        app_info.requests_sent.insert(part_id, time::Instant::now());
+        app_info.requests_sent.insert(part_id, near_time::Instant::now());
         tracing::info!(target: "state-parts", ?result);
         if result.is_err() {
             break;
diff --git a/tools/state-viewer/Cargo.toml b/tools/state-viewer/Cargo.toml
index 6d354678d96..7de2eea99e6 100644
--- a/tools/state-viewer/Cargo.toml
+++ b/tools/state-viewer/Cargo.toml
@@ -35,7 +35,7 @@ thiserror.workspace = true
 tracing.workspace = true
 yansi.workspace = true
 
-near-async.workspace = true
+near-time.workspace = true
 near-chain-configs.workspace = true
 near-chain.workspace = true
 near-client.workspace = true
@@ -67,7 +66,6 @@ protocol_feature_nonrefundable_transfer_nep491 = [
 ]
 
 nightly = [
-  "near-async/nightly",
  "near-chain-configs/nightly",
  "near-chain/nightly",
  "near-client/nightly",
@@ -84,7 +83,6 @@
  "testlib/nightly",
 ]
 nightly_protocol = [
-  "near-async/nightly_protocol",
  "near-chain-configs/nightly_protocol",
  "near-chain/nightly_protocol",
  "near-client/nightly_protocol",
diff --git a/tools/state-viewer/src/apply_chunk.rs b/tools/state-viewer/src/apply_chunk.rs
index 75544296657..c0ffb7468ee 100644
--- a/tools/state-viewer/src/apply_chunk.rs
+++ b/tools/state-viewer/src/apply_chunk.rs
@@ -482,7 +481,6 @@ pub(crate) fn apply_receipt(
 
 #[cfg(test)]
 mod test {
-    use near_async::time::Clock;
     use near_chain::{ChainStore, ChainStoreAccess, Provenance};
     use near_chain_configs::Genesis;
     use near_client::test_utils::TestEnv;
@@ -495,6 +494,7 @@ mod test {
     use near_primitives::utils::get_num_seats_per_shard;
     use near_store::genesis::initialize_genesis_state;
     use near_store::test_utils::create_test_store;
+    use near_time::Clock;
     use nearcore::NightshadeRuntime;
     use rand::rngs::StdRng;
     use rand::SeedableRng;
diff --git a/tools/state-viewer/src/latest_witnesses.rs b/tools/state-viewer/src/latest_witnesses.rs
index 28baa58d791..634a99ac900 100644
--- a/tools/state-viewer/src/latest_witnesses.rs
+++ b/tools/state-viewer/src/latest_witnesses.rs
@@ -1,7 +1,6 @@
 use std::path::{Path, PathBuf};
 use std::rc::Rc;
 
-use near_async::time::Clock;
 use near_chain::runtime::NightshadeRuntime;
 use near_chain::stateless_validation::processing_tracker::ProcessingDoneTracker;
 use near_chain::{Chain, ChainGenesis, ChainStore, DoomslugThresholdMode};
@@ -10,6 +9,7 @@ use near_epoch_manager::EpochManager;
 use near_primitives::stateless_validation::ChunkStateWitness;
 use near_primitives::types::EpochId;
 use near_store::Store;
+use near_time::Clock;
 use nearcore::NearConfig;
 use nearcore::NightshadeRuntimeExt;
diff --git a/tools/state-viewer/src/state_parts.rs b/tools/state-viewer/src/state_parts.rs
index f950bdec1b9..742d7319219 100644
--- a/tools/state-viewer/src/state_parts.rs
+++ b/tools/state-viewer/src/state_parts.rs
@@ -1,6 +1,5 @@
 use crate::epoch_info::iterate_and_filter;
 use borsh::{BorshDeserialize, BorshSerialize};
-use near_async::time::Clock;
 use near_chain::{Chain, ChainGenesis, ChainStoreAccess, DoomslugThresholdMode};
 use near_client::sync::external::{
     create_bucket_readonly, create_bucket_readwrite, external_storage_location,
@@ -18,6 +17,7 @@ use near_primitives::types::{EpochId, StateRoot};
 use near_primitives_core::hash::CryptoHash;
 use near_primitives_core::types::{BlockHeight, EpochHeight, ShardId};
 use near_store::{PartialStorage, Store, Trie};
+use near_time::Clock;
 use nearcore::{NearConfig, NightshadeRuntime, NightshadeRuntimeExt};
 use std::ops::Range;
 use std::path::{Path, PathBuf};
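
Reviewer note: every hunk above applies the same mechanical substitution. Time primitives that were previously re-exported through near_async::time now come directly from the standalone near-time crate, with no behavioral change at the call sites. The following is a minimal sketch of the resulting usage, assuming only the API surface actually visible in these hunks (Clock::real(), Duration::seconds(), Instant::now(), and Instant + Duration); the RECV_TIMEOUT constant and deadline_from_now helper are illustrative names, not part of this change:

    use near_time::{Clock, Duration, Instant};

    // Illustrative constant mirroring PING_TIMEOUT above; Duration::seconds is
    // usable in const context, as the diff's `const PING_TIMEOUT` shows.
    const RECV_TIMEOUT: Duration = Duration::seconds(100);

    // Hypothetical helper: computes a deadline the same way ping_sent does with
    // `timestamp + PING_TIMEOUT` (Instant + Duration yields an Instant).
    fn deadline_from_now() -> Instant {
        Instant::now() + RECV_TIMEOUT
    }

    fn main() {
        // The tools above thread a Clock handle into Connection::connect so tests
        // can substitute a fake clock; production code uses the wall clock.
        let _clock = Clock::real();
        let _deadline = deadline_from_now();
    }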