From 5e9cc040d7fffa3e055b6e6eb11d1242a32513cc Mon Sep 17 00:00:00 2001 From: Perry Randall Date: Thu, 30 Nov 2023 10:44:35 -0800 Subject: [PATCH 01/44] [forge] Unstrip forge binary Unfortunately, stripping removes our ability to get stack traces. We still need to figure out a way to distribute the symbols or symbolicate separately. Test Plan: tools image on the PR is a reasonable size (below 10GB) Longer term we want to move the individual tools to separate images --- docker/builder/build-node.sh | 28 ++++++++++++++++++---------- docker/builder/build-tools.sh | 2 -- docker/builder/forge.Dockerfile | 18 +++++++++--------- 3 files changed, 27 insertions(+), 21 deletions(-) diff --git a/docker/builder/build-node.sh b/docker/builder/build-node.sh index c14069d934b9a..57e2b52b53459 100755 --- a/docker/builder/build-node.sh +++ b/docker/builder/build-node.sh @@ -11,20 +11,28 @@ echo "PROFILE: $PROFILE" echo "FEATURES: $FEATURES" echo "CARGO_TARGET_DIR: $CARGO_TARGET_DIR" -# Build and overwrite the aptos-node binary with features if specified -if [ -n "$FEATURES" ]; then - echo "Building aptos-node with features ${FEATURES}" - cargo build --profile=$PROFILE --features=$FEATURES -p aptos-node "$@" -else - # Build aptos-node separately - cargo build --locked --profile=$PROFILE \ - -p aptos-node \ - "$@" -fi +PACKAGES=( + aptos-node + aptos-forge-cli +) + +# We have to do these separately because we need to avoid feature unification +# between aptos-node and other binaries +for PACKAGE in "${PACKAGES[@]}"; do + # Build and overwrite the aptos-node binary with features if specified + if [ -n "$FEATURES" ]; then + echo "Building aptos-node with features ${FEATURES}" + cargo build --profile=$PROFILE --features=$FEATURES -p $PACKAGE "$@" + else + # Build aptos-node separately + cargo build --locked --profile=$PROFILE -p $PACKAGE "$@" + fi +done # After building, copy the binaries we need to `dist` since the `target` directory is used as docker cache mount and only available during the RUN step BINS=( aptos-node + forge ) mkdir dist diff --git a/docker/builder/build-tools.sh b/docker/builder/build-tools.sh index 6cbb295a4c863..eec19ebc3f9d7 100755 --- a/docker/builder/build-tools.sh +++ b/docker/builder/build-tools.sh @@ -14,7 +14,6 @@ cargo build --locked --profile=$PROFILE \ -p aptos \ -p aptos-backup-cli \ -p aptos-faucet-service \ - -p aptos-forge-cli \ -p aptos-fn-check-client \ -p aptos-node-checker \ -p aptos-openapi-spec-generator \ @@ -33,7 +32,6 @@ BINS=( aptos-telemetry-service aptos-fn-check-client aptos-debugger - forge aptos-transaction-emitter aptos-api-tester ) diff --git a/docker/builder/forge.Dockerfile b/docker/builder/forge.Dockerfile index 3f5fe0c13560d..202ced9e16bd2 100644 --- a/docker/builder/forge.Dockerfile +++ b/docker/builder/forge.Dockerfile @@ -5,14 +5,14 @@ FROM debian-base as forge RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get install --no-install-recommends -y \ - libssl1.1 \ - ca-certificates \ - openssh-client \ - wget \ - busybox \ - git \ - unzip \ - awscli + libssl1.1 \ + ca-certificates \ + openssh-client \ + wget \ + busybox \ + git \ + unzip \ + awscli WORKDIR /aptos RUN cd /usr/local/bin && wget "https://get.helm.sh/helm-v3.8.0-linux-amd64.tar.gz" ENV PATH "$PATH:/root/bin" WORKDIR /aptos -COPY --link --from=tools-builder /aptos/dist/forge /usr/local/bin/forge +COPY --link --from=node-builder /aptos/dist/forge /usr/local/bin/forge ### Get Aptos Framework Release for
forge framework upgrade testing COPY --link --from=tools-builder /aptos/aptos-move/framework/ /aptos/aptos-move/framework/ COPY --link --from=tools-builder /aptos/aptos-move/aptos-release-builder/ /aptos/aptos-move/aptos-release-builder/ From 7392dae45f99c13ecadb3861edfa83c395a332f0 Mon Sep 17 00:00:00 2001 From: Perry Randall Date: Wed, 24 Jan 2024 16:46:42 -0800 Subject: [PATCH 02/44] [forge] Try and make framework upgrade a little more resilient Test Plan: running on PR succeeds --- .github/workflows/docker-build-test.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index 7c46f9103a72c..b1fd8bbcf813c 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -332,7 +332,7 @@ jobs: GIT_SHA: ${{ needs.determine-docker-build-metadata.outputs.gitSha }} FORGE_TEST_SUITE: framework_upgrade IMAGE_TAG: aptos-node-v1.8.3 # This workflow will test the upgradability from the current tip of the release branch to the current main branch. - FORGE_RUNNER_DURATION_SECS: 300 + FORGE_RUNNER_DURATION_SECS: 3600 COMMENT_HEADER: forge-framework-upgrade FORGE_NAMESPACE: forge-framework-upgrade-${{ needs.determine-docker-build-metadata.outputs.targetCacheId }} SKIP_JOB: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' }} From 462c41f458ba32e8e90db86547da473dc81f3503 Mon Sep 17 00:00:00 2001 From: Perry Randall Date: Thu, 25 Jan 2024 10:25:38 -0800 Subject: [PATCH 03/44] [forge] Move framework upgrade to push-only test This test has far exceeded the runtime acceptable for PRs. Move it to a push-based test. Test Plan: pull request target... --- .github/workflows/docker-build-test.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/docker-build-test.yaml b/.github/workflows/docker-build-test.yaml index b1fd8bbcf813c..bdaa1cbf5e5dc 100644 --- a/.github/workflows/docker-build-test.yaml +++ b/.github/workflows/docker-build-test.yaml @@ -322,9 +322,7 @@ jobs: !failure() && !cancelled() && needs.permission-check.result == 'success' && ( (github.event_name == 'push' && github.ref_name != 'main') || github.event_name == 'workflow_dispatch' || - contains(github.event.pull_request.labels.*.name, 'CICD:run-e2e-tests') || - github.event.pull_request.auto_merge != null || - contains(github.event.pull_request.body, '#e2e') + contains(github.event.pull_request.labels.*.name, 'CICD:run-framework-upgrade-test') ) uses: aptos-labs/aptos-core/.github/workflows/workflow-run-forge.yaml@main secrets: inherit From 1ac0b8a861952a6d39d0806918bc53faca721b3a Mon Sep 17 00:00:00 2001 From: "Brian (Sunghoon) Cho" Date: Thu, 25 Jan 2024 18:17:20 -0800 Subject: [PATCH 04/44] [API] [PFNs] improve observability of view function and simulate usage (#11696) ### Description On PFNs, periodically (every minute) log the top-8 gas-using view functions and simulated transactions. This gives observability into PFN usage. It utilizes a small cache that is reset every minute; such high-cardinality data would not work with Prometheus, so logging is used instead. Requires minimal changes to propagate gas usage. ### Test Plan Deploy PFN locally, observe log messages.
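To make the caching approach above concrete, here is a minimal, std-only sketch of the per-function gas accounting this change introduces. It is a sketch under stated assumptions: the real patch keeps a bounded `mini_moka::sync::Cache` inside the API `Context` and emits the report through `aptos_logger`, while the `FunctionStats` shape and the `main` driver below are illustrative only, not the actual API.

```rust
use std::cmp::Reverse;
use std::collections::HashMap;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex};

/// Per-function (gas used, call count) accumulators, cleared after each report.
#[derive(Default)]
struct FunctionStats {
    entries: Mutex<HashMap<String, Arc<(AtomicU64, AtomicU64)>>>,
}

impl FunctionStats {
    /// Record one call: add its gas to the running total and bump the call count.
    fn increment(&self, key: &str, gas: u64) {
        let entry = {
            let mut map = self.entries.lock().unwrap();
            Arc::clone(map.entry(key.to_string()).or_default())
        };
        entry.0.fetch_add(gas, Ordering::Relaxed);
        entry.1.fetch_add(1, Ordering::Relaxed);
    }

    /// Report the heaviest gas users of the interval, then reset the counters.
    fn log_and_clear(&self) {
        let mut map = self.entries.lock().unwrap();
        let mut sorted: Vec<(u64, u64, String)> = map
            .iter()
            .map(|(key, v)| (v.0.load(Ordering::Relaxed), v.1.load(Ordering::Relaxed), key.clone()))
            .collect();
        sorted.sort_by_key(|(gas, ..)| Reverse(*gas));
        for (gas, count, key) in sorted.iter().take(8) {
            println!("{key}: gas_used={gas} calls={count}");
        }
        map.clear();
    }
}

fn main() {
    let stats = FunctionStats::default();
    stats.increment("0x1::coin::balance", 120);
    stats.increment("0x1::coin::balance", 80);
    stats.increment("0x1::coin::decimals", 5);
    stats.log_and_clear(); // prints 0x1::coin::balance (200 gas, 2 calls) first
}
```

In the node itself, `increment` is called once per view call or transaction simulation with the gas it consumed, and the periodic task spawned in `runtime.rs` calls `log_and_clear` every `periodic_function_stats_sec` seconds, so each report covers exactly one interval.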
--- Cargo.lock | 1 + api/Cargo.toml | 1 + api/src/context.rs | 152 +++++++++++++++++- api/src/metrics.rs | 10 ++ api/src/runtime.rs | 20 ++- api/src/transactions.rs | 30 +++- api/src/view_function.rs | 23 +-- aptos-move/aptos-vm/src/aptos_vm.rs | 71 ++++++-- .../e2e-move-tests/src/aptos_governance.rs | 1 + aptos-move/e2e-move-tests/src/harness.rs | 5 +- .../src/tests/constructor_args.rs | 8 +- .../src/tests/fungible_asset.rs | 1 + aptos-move/e2e-tests/src/executor.rs | 4 +- config/src/config/api_config.rs | 3 + types/src/transaction/mod.rs | 11 ++ 15 files changed, 308 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3444021c62be3..f882e59a89b64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -412,6 +412,7 @@ dependencies = [ "hyper", "itertools 0.10.5", "mime", + "mini-moka", "move-core-types", "move-package", "num_cpus", diff --git a/api/Cargo.toml b/api/Cargo.toml index 0071d869b0365..7a0aa061fa505 100644 --- a/api/Cargo.toml +++ b/api/Cargo.toml @@ -39,6 +39,7 @@ hex = { workspace = true } hyper = { workspace = true } itertools = { workspace = true } mime = { workspace = true } +mini-moka = { workspace = true } move-core-types = { workspace = true } num_cpus = { workspace = true } once_cell = { workspace = true } diff --git a/api/src/context.rs b/api/src/context.rs index 4213f39e877dd..3aacbdcb178da 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -4,6 +4,7 @@ use crate::{ accept_type::AcceptType, + metrics, response::{ bcs_api_disabled, block_not_found_by_height, block_not_found_by_version, block_pruned_by_height, json_api_disabled, version_not_found, version_pruned, @@ -18,7 +19,7 @@ use aptos_api_types::{ use aptos_config::config::{NodeConfig, RoleType}; use aptos_crypto::HashValue; use aptos_gas_schedule::{AptosGasParameters, FromOnChainGasSchedule}; -use aptos_logger::{error, warn}; +use aptos_logger::{error, info, warn, Schema}; use aptos_mempool::{MempoolClientRequest, MempoolClientSender, SubmissionStatus}; use aptos_storage_interface::{ state_view::{DbStateView, DbStateViewAtVersion, LatestDbStateCheckpointView}, @@ -45,14 +46,21 @@ use aptos_types::{ use aptos_utils::aptos_try; use aptos_vm::{data_cache::AsMoveResolver, move_vm_ext::AptosMoveResolver}; use futures::{channel::oneshot, SinkExt}; +use mini_moka::sync::Cache; use move_core_types::{ + identifier::Identifier, language_storage::{ModuleId, StructTag}, move_resource::MoveResource, }; +use serde::Serialize; use std::{ + cmp::Reverse, collections::{BTreeMap, HashMap}, ops::{Bound::Included, Deref}, - sync::{Arc, RwLock, RwLockWriteGuard}, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, RwLock, RwLockWriteGuard, + }, time::Instant, }; @@ -66,6 +74,8 @@ pub struct Context { gas_schedule_cache: Arc>, gas_estimation_cache: Arc>, gas_limit_cache: Arc>, + view_function_stats: Arc, + simulate_txn_stats: Arc, } impl std::fmt::Debug for Context { @@ -81,6 +91,19 @@ impl Context { mp_sender: MempoolClientSender, node_config: NodeConfig, ) -> Self { + let (view_function_stats, simulate_txn_stats) = { + let log_per_call_stats = node_config.api.periodic_function_stats_sec.is_some(); + ( + Arc::new(FunctionStats::new( + FunctionType::ViewFuntion, + log_per_call_stats, + )), + Arc::new(FunctionStats::new( + FunctionType::TxnSimulation, + log_per_call_stats, + )), + ) + }; Self { chain_id, db, @@ -101,6 +124,8 @@ impl Context { block_executor_onchain_config: OnChainExecutionConfig::default_if_missing() .block_executor_onchain_config(), })), + view_function_stats, + simulate_txn_stats, } } @@ -1297,6 
+1322,14 @@ impl Context { .min_inclusion_prices .len() } + + pub fn view_function_stats(&self) -> &FunctionStats { + &self.view_function_stats + } + + pub fn simulate_txn_stats(&self) -> &FunctionStats { + &self.simulate_txn_stats + } } pub struct GasScheduleCache { @@ -1329,3 +1362,118 @@ where .await .map_err(|err| E::internal_with_code_no_info(err, AptosErrorCode::InternalError))? } + +#[derive(Schema)] +pub struct LogSchema { + event: LogEvent, +} + +impl LogSchema { + pub fn new(event: LogEvent) -> Self { + Self { event } + } +} + +#[derive(Serialize, Copy, Clone)] +pub enum LogEvent { + ViewFunction, + TxnSimulation, +} + +pub enum FunctionType { + ViewFuntion, + TxnSimulation, +} + +impl FunctionType { + fn log_event(&self) -> LogEvent { + match self { + FunctionType::ViewFuntion => LogEvent::ViewFunction, + FunctionType::TxnSimulation => LogEvent::TxnSimulation, + } + } + + fn operation_id(&self) -> &'static str { + match self { + FunctionType::ViewFuntion => "view_function", + FunctionType::TxnSimulation => "txn_simulation", + } + } +} + +pub struct FunctionStats { + stats: Option, Arc)>>, + log_event: LogEvent, + operation_id: String, +} + +impl FunctionStats { + fn new(function_type: FunctionType, log_per_call_stats: bool) -> Self { + let stats = if log_per_call_stats { + Some(Cache::new(100)) + } else { + None + }; + FunctionStats { + stats, + log_event: function_type.log_event(), + operation_id: function_type.operation_id().to_string(), + } + } + + pub fn function_to_key(module: &ModuleId, function: &Identifier) -> String { + format!("{}::{}", module, function) + } + + pub fn increment(&self, key: String, gas: u64) { + metrics::GAS_USED + .with_label_values(&[&self.operation_id]) + .observe(gas as f64); + if let Some(stats) = &self.stats { + let (prev_gas, prev_count) = stats.get(&key).unwrap_or_else(|| { + // Note, race can occur on inserting new entry, resulting in some lost data, but it should be fine + let new_gas = Arc::new(AtomicU64::new(0)); + let new_count = Arc::new(AtomicU64::new(0)); + stats.insert(key.clone(), (new_gas.clone(), new_count.clone())); + (new_gas, new_count) + }); + prev_gas.fetch_add(gas, Ordering::Relaxed); + prev_count.fetch_add(1, Ordering::Relaxed); + } + } + + pub fn log_and_clear(&self) { + if let Some(stats) = &self.stats { + if stats.iter().next().is_none() { + return; + } + + let mut sorted: Vec<_> = stats + .iter() + .map(|entry| { + let (gas_used, count) = entry.value(); + ( + gas_used.load(Ordering::Relaxed), + count.load(Ordering::Relaxed), + entry.key().clone(), + ) + }) + .collect(); + sorted.sort_by_key(|(gas_used, ..)| Reverse(*gas_used)); + + info!( + LogSchema::new(self.log_event), + top_1 = sorted.get(0), + top_2 = sorted.get(1), + top_3 = sorted.get(2), + top_4 = sorted.get(3), + top_5 = sorted.get(4), + top_6 = sorted.get(5), + top_7 = sorted.get(6), + top_8 = sorted.get(7), + ); + + stats.invalidate_all(); + } + } +} diff --git a/api/src/metrics.rs b/api/src/metrics.rs index 213756cddf6b2..fd5e4e4a3608c 100644 --- a/api/src/metrics.rs +++ b/api/src/metrics.rs @@ -77,3 +77,13 @@ pub static GAS_ESTIMATE: Lazy = Lazy::new(|| { ) .unwrap() }); + +pub static GAS_USED: Lazy = Lazy::new(|| { + register_histogram_vec!( + "aptos_api_gas_used", + "Amount of gas used by each API operation", + &["operation_id"], + BYTE_BUCKETS.clone() + ) + .unwrap() +}); diff --git a/api/src/runtime.rs b/api/src/runtime.rs index 3e32a2e8a8d08..0fc82a98614fe 100644 --- a/api/src/runtime.rs +++ b/api/src/runtime.rs @@ -43,12 +43,13 @@ pub fn bootstrap( 
attach_poem_to_runtime(runtime.handle(), context.clone(), config, false) .context("Failed to attach poem to runtime")?; + let context_cloned = context.clone(); if let Some(period_ms) = config.api.periodic_gas_estimation_ms { runtime.spawn(async move { let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(period_ms)); loop { interval.tick().await; - let context_cloned = context.clone(); + let context_cloned = context_cloned.clone(); tokio::task::spawn_blocking(move || { if let Ok(latest_ledger_info) = context_cloned.get_latest_ledger_info::() @@ -66,6 +67,23 @@ pub fn bootstrap( }); } + let context_cloned = context.clone(); + if let Some(period_sec) = config.api.periodic_function_stats_sec { + runtime.spawn(async move { + let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(period_sec)); + loop { + interval.tick().await; + let context_cloned = context_cloned.clone(); + tokio::task::spawn_blocking(move || { + context_cloned.view_function_stats().log_and_clear(); + context_cloned.simulate_txn_stats().log_and_clear(); + }) + .await + .unwrap_or(()); + } + }); + } + Ok(runtime) } diff --git a/api/src/transactions.rs b/api/src/transactions.rs index 965a58eda55fd..2b095dd7016bc 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -6,7 +6,7 @@ use crate::{ accept_type::AcceptType, accounts::Account, bcs_payload::Bcs, - context::{api_spawn_blocking, Context}, + context::{api_spawn_blocking, Context, FunctionStats}, failpoint::fail_point_poem, generate_error_response, generate_success_response, metrics, page::Page, @@ -1235,6 +1235,34 @@ impl TransactionsApi { _ => ExecutionStatus::MiscellaneousError(None), }; + let stats_key = match txn.payload() { + TransactionPayload::Script(_) => { + format!("Script::{}", txn.clone().committed_hash()).to_string() + }, + TransactionPayload::ModuleBundle(_) => "ModuleBundle::unknown".to_string(), + TransactionPayload::EntryFunction(entry_function) => FunctionStats::function_to_key( + entry_function.module(), + &entry_function.function().into(), + ), + TransactionPayload::Multisig(multisig) => { + if let Some(payload) = &multisig.transaction_payload { + match payload { + MultisigTransactionPayload::EntryFunction(entry_function) => { + FunctionStats::function_to_key( + entry_function.module(), + &entry_function.function().into(), + ) + }, + } + } else { + "Multisig::unknown".to_string() + } + }, + }; + self.context + .simulate_txn_stats() + .increment(stats_key, output.gas_used()); + // Build up a transaction from the outputs // All state hashes are invalid, and will be filled with 0s let txn = aptos_types::transaction::Transaction::UserTransaction(txn); diff --git a/api/src/view_function.rs b/api/src/view_function.rs index ade7915215be9..26767cb5ab286 100644 --- a/api/src/view_function.rs +++ b/api/src/view_function.rs @@ -4,7 +4,7 @@ use crate::{ accept_type::AcceptType, bcs_payload::Bcs, - context::api_spawn_blocking, + context::{api_spawn_blocking, FunctionStats}, failpoint::fail_point_poem, response::{ BadRequestError, BasicErrorWith404, BasicResponse, BasicResponseStatus, BasicResultWith404, @@ -135,22 +135,22 @@ fn view_request( )); } - let return_vals = AptosVM::execute_view_function( + let output = AptosVM::execute_view_function( &state_view, view_function.module.clone(), view_function.function.clone(), view_function.ty_args.clone(), view_function.args.clone(), context.node_config.api.max_gas_view_function, - ) - .map_err(|err| { + ); + let values = output.values.map_err(|err| { 
BasicErrorWith404::bad_request_with_code_no_info(err, AptosErrorCode::InvalidInput) })?; - match accept_type { + let result = match accept_type { AcceptType::Bcs => { // The return values are already BCS encoded, but we still need to encode the outside // vector without re-encoding the inside values - let num_vals = return_vals.len(); + let num_vals = values.len(); // Push the length of the return values let mut length = vec![]; @@ -163,7 +163,7 @@ fn view_request( })?; // Combine all of the return values - let values = return_vals.into_iter().concat(); + let values = values.into_iter().concat(); let ret = [length, values].concat(); BasicResponse::try_from_encoded((ret, &ledger_info, BasicResponseStatus::Ok)) @@ -186,7 +186,7 @@ fn view_request( ) })?; - let move_vals = return_vals + let move_vals = values .into_iter() .zip(return_types.into_iter()) .map(|(v, ty)| { @@ -205,5 +205,10 @@ fn view_request( BasicResponse::try_from_json((move_vals, &ledger_info, BasicResponseStatus::Ok)) }, - } + }; + context.view_function_stats().increment( + FunctionStats::function_to_key(&view_function.module, &view_function.function), + output.gas_used, + ); + result } diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index 87f0b32b04a73..fee3fcdadde0f 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -55,7 +55,7 @@ use aptos_types::{ UserTransaction, }, TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, - WriteSetPayload, + ViewFunctionOutput, WriteSetPayload, }, vm_status::{AbortLocation, StatusCode, VMStatus}, zkid::ZkpOrOpenIdSig, @@ -1836,26 +1836,71 @@ impl AptosVM { type_args: Vec, arguments: Vec>, gas_budget: u64, - ) -> Result>> { + ) -> ViewFunctionOutput { let resolver = state_view.as_move_resolver(); let vm = AptosVM::new(&resolver); let log_context = AdapterLogSchema::new(state_view.id(), 0); - let mut gas_meter = - MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( - vm.gas_feature_version, - get_or_vm_startup_failure(&vm.gas_params, &log_context)? - .vm - .clone(), - get_or_vm_startup_failure(&vm.storage_gas_params, &log_context)?.clone(), - gas_budget, - ))); + let mut gas_meter = match Self::memory_tracked_gas_meter(&vm, &log_context, gas_budget) { + Ok(gas_meter) => gas_meter, + Err(e) => return ViewFunctionOutput::new(Err(e), 0), + }; let mut session = vm.new_session(&resolver, SessionId::Void); + match Self::execute_view_function_in_vm( + &mut session, + &vm, + module_id, + func_name, + type_args, + arguments, + &mut gas_meter, + ) { + Ok(result) => { + ViewFunctionOutput::new(Ok(result), Self::gas_used(gas_budget, &gas_meter)) + }, + Err(e) => ViewFunctionOutput::new(Err(e), Self::gas_used(gas_budget, &gas_meter)), + } + } + + fn memory_tracked_gas_meter( + vm: &AptosVM, + log_context: &AdapterLogSchema, + gas_budget: u64, + ) -> Result>> { + let gas_meter = MemoryTrackedGasMeter::new(StandardGasMeter::new(StandardGasAlgebra::new( + vm.gas_feature_version, + get_or_vm_startup_failure(&vm.gas_params, log_context)? 
+ .vm + .clone(), + get_or_vm_startup_failure(&vm.storage_gas_params, log_context)?.clone(), + gas_budget, + ))); + Ok(gas_meter) + } + + fn gas_used( + gas_budget: u64, + gas_meter: &MemoryTrackedGasMeter>, + ) -> u64 { + GasQuantity::new(gas_budget) + .checked_sub(gas_meter.balance()) + .expect("Balance should always be less than or equal to max gas amount") + .into() + } + fn execute_view_function_in_vm( + session: &mut SessionExt, + vm: &AptosVM, + module_id: ModuleId, + func_name: Identifier, + type_args: Vec, + arguments: Vec>, + gas_meter: &mut MemoryTrackedGasMeter>, + ) -> Result>> { let func_inst = session.load_function(&module_id, &func_name, &type_args)?; let metadata = vm.extract_module_metadata(&module_id); let arguments = verifier::view_function::validate_view_function( - &mut session, + session, arguments, func_name.as_ident_str(), &func_inst, @@ -1869,7 +1914,7 @@ impl AptosVM { func_name.as_ident_str(), type_args, arguments, - &mut gas_meter, + gas_meter, ) .map_err(|err| anyhow!("Failed to execute function: {:?}", err))? .return_values diff --git a/aptos-move/e2e-move-tests/src/aptos_governance.rs b/aptos-move/e2e-move-tests/src/aptos_governance.rs index a3aacfb2f4c77..a9f491cde4536 100644 --- a/aptos-move/e2e-move-tests/src/aptos_governance.rs +++ b/aptos-move/e2e-move-tests/src/aptos_governance.rs @@ -78,6 +78,7 @@ pub fn get_remaining_voting_power( bcs::to_bytes(&stake_pool).unwrap(), bcs::to_bytes(&proposal_id).unwrap(), ]) + .values .unwrap(); bcs::from_bytes::(&res[0]).unwrap() } diff --git a/aptos-move/e2e-move-tests/src/harness.rs b/aptos-move/e2e-move-tests/src/harness.rs index 46d2292053d92..b28cd97dfb634 100644 --- a/aptos-move/e2e-move-tests/src/harness.rs +++ b/aptos-move/e2e-move-tests/src/harness.rs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{assert_success, build_package, AptosPackageHooks}; -use anyhow::Error; use aptos_cached_packages::aptos_stdlib; use aptos_framework::{natives::code::PackageMetadata, BuildOptions, BuiltPackage}; use aptos_gas_profiling::TransactionGasLog; @@ -26,7 +25,7 @@ use aptos_types::{ }, transaction::{ EntryFunction, Script, SignedTransaction, TransactionArgument, TransactionOutput, - TransactionPayload, TransactionStatus, + TransactionPayload, TransactionStatus, ViewFunctionOutput, }, }; use aptos_vm::{data_cache::AsMoveResolver, AptosVM}; @@ -715,7 +714,7 @@ impl MoveHarness { fun: MemberId, type_args: Vec, arguments: Vec>, - ) -> Result>, Error> { + ) -> ViewFunctionOutput { self.executor .execute_view_function(fun.module_id, fun.member_id, type_args, arguments) } diff --git a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs index 2ed937038e709..85b6e93a54cf7 100644 --- a/aptos-move/e2e-move-tests/src/tests/constructor_args.rs +++ b/aptos-move/e2e-move-tests/src/tests/constructor_args.rs @@ -85,8 +85,12 @@ fn success_generic_view( for (entry, args, expected) in tests { let res = h.execute_view_function(str::parse(entry).unwrap(), ty_args.clone(), args); - assert!(res.is_ok(), "{}", res.err().unwrap().to_string()); - let bcs = res.unwrap().pop().unwrap(); + assert!( + res.values.is_ok(), + "{}", + res.values.err().unwrap().to_string() + ); + let bcs = res.values.unwrap().pop().unwrap(); let res = bcs::from_bytes::(&bcs).unwrap(); assert_eq!(res, expected); } diff --git a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs index e3f4bfa5047d5..4487ba9477308 100644 --- 
a/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs +++ b/aptos-move/e2e-move-tests/src/tests/fungible_asset.rs @@ -49,6 +49,7 @@ fn test_basic_fungible_token() { vec![], vec![], ) + .values .unwrap() .pop() .unwrap(); diff --git a/aptos-move/e2e-tests/src/executor.rs b/aptos-move/e2e-tests/src/executor.rs index 12ffd3aa42d41..76b94ce0e98c6 100644 --- a/aptos-move/e2e-tests/src/executor.rs +++ b/aptos-move/e2e-tests/src/executor.rs @@ -12,7 +12,6 @@ use crate::{ }, golden_outputs::GoldenOutputs, }; -use anyhow::Error; use aptos_abstract_gas_usage::CalibrationAlgebra; use aptos_bitvec::BitVec; use aptos_block_executor::txn_commit_hook::NoOpTransactionCommitHook; @@ -49,6 +48,7 @@ use aptos_types::{ }, BlockOutput, EntryFunction, ExecutionStatus, SignedTransaction, Transaction, TransactionOutput, TransactionPayload, TransactionStatus, VMValidatorResult, + ViewFunctionOutput, }, vm_status::VMStatus, write_set::WriteSet, @@ -1095,7 +1095,7 @@ impl FakeExecutor { func_name: Identifier, type_args: Vec, arguments: Vec>, - ) -> Result>, Error> { + ) -> ViewFunctionOutput { // No gas limit AptosVM::execute_view_function( self.get_state_view(), diff --git a/config/src/config/api_config.rs b/config/src/config/api_config.rs index e6f950161a9bc..38cc995f1d08d 100644 --- a/config/src/config/api_config.rs +++ b/config/src/config/api_config.rs @@ -79,6 +79,8 @@ pub struct ApiConfig { pub simulation_filter: Filter, /// Configuration to filter view function requests. pub view_filter: ViewFilter, + /// Periodically log stats for view function and simulate transaction usage + pub periodic_function_stats_sec: Option, } const DEFAULT_ADDRESS: &str = "127.0.0.1"; @@ -126,6 +128,7 @@ impl Default for ApiConfig { periodic_gas_estimation_ms: Some(30_000), simulation_filter: Filter::default(), view_filter: ViewFilter::default(), + periodic_function_stats_sec: Some(60), } } } diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs index 11c8ef83c1280..33c7105110703 100644 --- a/types/src/transaction/mod.rs +++ b/types/src/transaction/mod.rs @@ -2004,3 +2004,14 @@ pub trait BlockExecutableTransaction: Sync + Send + Clone + 'static { /// Size of the user transaction in bytes, 0 otherwise fn user_txn_bytes_len(&self) -> usize; } + +pub struct ViewFunctionOutput { + pub values: Result>>, + pub gas_used: u64, +} + +impl ViewFunctionOutput { + pub fn new(values: Result>>, gas_used: u64) -> Self { + Self { values, gas_used } + } +} From f0523ea3a0ed83853ef6bac0cda3f82eb4edb034 Mon Sep 17 00:00:00 2001 From: Perry Randall Date: Thu, 25 Jan 2024 14:02:33 -0800 Subject: [PATCH 05/44] [docker] Fix build-node for failpoints on push I noticed after adding the unstripped forge binary that failpoints is failing on main because forge doesn't define the failpoints feature. Then I realized that really only aptos-node needs these special features, so only build features with aptos-node for now. --- docker/builder/build-node.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/builder/build-node.sh b/docker/builder/build-node.sh index 57e2b52b53459..d034d6d095763 100755 --- a/docker/builder/build-node.sh +++ b/docker/builder/build-node.sh @@ -20,7 +20,7 @@ PACKAGES=( # between aptos-node and other binaries for PACKAGE in "${PACKAGES[@]}"; do # Build and overwrite the aptos-node binary with features if specified - if [ -n "$FEATURES" ]; then + if [ -n "$FEATURES" ] && [ "$PACKAGE" = "aptos-node" ]; then echo "Building aptos-node with features ${FEATURES}" cargo build --profile=$PROFILE
--features=$FEATURES -p $PACKAGE "$@" else From 6685daa19720fe2a6a72ec9ae9634042c6ab50b0 Mon Sep 17 00:00:00 2001 From: Perry Randall Date: Thu, 25 Jan 2024 15:54:13 -0800 Subject: [PATCH 06/44] [forge] Adjust land blocking success criteria This has become quite flaky recently because of a regression --- testsuite/forge-cli/src/main.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/testsuite/forge-cli/src/main.rs b/testsuite/forge-cli/src/main.rs index 5e68133b38866..24f66dbe391cf 100644 --- a/testsuite/forge-cli/src/main.rs +++ b/testsuite/forge-cli/src/main.rs @@ -992,7 +992,7 @@ fn realistic_env_load_sweep_test() -> ForgeConfig { (95, 1.5, 3., 4., 0), (950, 2., 3., 4., 0), (2750, 2.5, 3.5, 4.5, 0), - (4600, 3., 4., 5., 10), // Allow some expired transactions (high-load) + (4600, 3., 4., 6., 10), // Allow some expired transactions (high-load) ] .into_iter() .map( @@ -1782,9 +1782,11 @@ fn realistic_env_max_load_test( if ha_proxy { 4600 } else if long_running { - 7500 - } else { + // This is for forge stable 7000 + } else { + // During land time we want to be less strict, otherwise we flaky fail + 6000 }, ), })) @@ -1834,7 +1836,7 @@ fn realistic_env_max_load_test( 5, )) .add_chain_progress(StateProgressThreshold { - max_no_progress_secs: 10.0, + max_no_progress_secs: 15.0, max_round_gap: 4, }), ) From c01adcfcc313020a746c93a6ced1990ad8aa4728 Mon Sep 17 00:00:00 2001 From: Teng Zhang Date: Fri, 26 Jan 2024 14:12:41 -0500 Subject: [PATCH 07/44] [Compiler-V2] Filter out a failed test (#11783) * fix ci * use reg filter --- .github/actions/move-tests-compiler-v2/action.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/move-tests-compiler-v2/action.yaml b/.github/actions/move-tests-compiler-v2/action.yaml index 66e3f0e2a5a63..a5542ceb8de19 100644 --- a/.github/actions/move-tests-compiler-v2/action.yaml +++ b/.github/actions/move-tests-compiler-v2/action.yaml @@ -23,7 +23,7 @@ runs: # Run Aptos Move tests with compiler V2 - name: Run Aptos Move tests with compiler V2 - run: cargo nextest run --profile ci --locked -p e2e-move-tests -p aptos-framework --retries 3 --no-fail-fast + run: cargo nextest run --release -E 'not (test(test_smart_data_structures_gas))' --profile ci --locked -p e2e-move-tests -p aptos-framework --retries 3 --no-fail-fast shell: bash env: MOVE_COMPILER_EXP: no-safety From d8127d63e29a3152fdb1a6573122fcc37899bc15 Mon Sep 17 00:00:00 2001 From: "Daniel Porteous (dport)" Date: Fri, 26 Jan 2024 11:52:06 -0800 Subject: [PATCH 08/44] [API] Add tests for view function and txn simulation filters (#11796) --- ...__test_simulation_filter_allow_sender.json | 5 + ...ons_test__test_simulation_filter_deny.json | 5 + ...s__view_function__test_view_allowlist.json | 1 + ...s__view_function__test_view_blocklist.json | 1 + api/src/tests/transactions_test.rs | 64 +++++++++++ api/src/tests/view_function.rs | 103 ++++++++++++++++-- 6 files changed, 169 insertions(+), 10 deletions(-) create mode 100644 api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_allow_sender.json create mode 100644 api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_deny.json create mode 100644 api/goldens/aptos_api__tests__view_function__test_view_allowlist.json create mode 100644 api/goldens/aptos_api__tests__view_function__test_view_blocklist.json diff --git a/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_allow_sender.json 
b/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_allow_sender.json new file mode 100644 index 0000000000000..1d785b8e6a327 --- /dev/null +++ b/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_allow_sender.json @@ -0,0 +1,5 @@ +{ + "message": "Transaction not allowed by simulation filter", + "error_code": "invalid_input", + "vm_error_code": null +} diff --git a/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_deny.json b/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_deny.json new file mode 100644 index 0000000000000..1d785b8e6a327 --- /dev/null +++ b/api/goldens/aptos_api__tests__transactions_test__test_simulation_filter_deny.json @@ -0,0 +1,5 @@ +{ + "message": "Transaction not allowed by simulation filter", + "error_code": "invalid_input", + "vm_error_code": null +} diff --git a/api/goldens/aptos_api__tests__view_function__test_view_allowlist.json b/api/goldens/aptos_api__tests__view_function__test_view_allowlist.json new file mode 100644 index 0000000000000..5d647063f8bef --- /dev/null +++ b/api/goldens/aptos_api__tests__view_function__test_view_allowlist.json @@ -0,0 +1 @@ +[["100000"],{"message":"Function 0000000000000000000000000000000000000000000000000000000000000001::coin::decimals is not allowed","error_code":"invalid_input","vm_error_code":null}] \ No newline at end of file diff --git a/api/goldens/aptos_api__tests__view_function__test_view_blocklist.json b/api/goldens/aptos_api__tests__view_function__test_view_blocklist.json new file mode 100644 index 0000000000000..0e612d15f7aec --- /dev/null +++ b/api/goldens/aptos_api__tests__view_function__test_view_blocklist.json @@ -0,0 +1 @@ +[{"message":"Function 0000000000000000000000000000000000000000000000000000000000000001::coin::balance is not allowed","error_code":"invalid_input","vm_error_code":null},[8]] \ No newline at end of file diff --git a/api/src/tests/transactions_test.rs b/api/src/tests/transactions_test.rs index cb402fef3310f..7d699b3adebfc 100644 --- a/api/src/tests/transactions_test.rs +++ b/api/src/tests/transactions_test.rs @@ -14,6 +14,7 @@ use aptos_crypto::{ use aptos_sdk::types::LocalAccount; use aptos_types::{ account_address::AccountAddress, + account_config::aptos_test_root_address, transaction::{ authenticator::{AuthenticationKey, TransactionAuthenticator}, EntryFunction, Script, SignedTransaction, @@ -1407,6 +1408,69 @@ async fn test_simulation_failure_error_message() { .contains("Division by zero")); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_simulation_filter_deny() { + let mut node_config = NodeConfig::default(); + + // Blocklist the balance function. + let mut filter = node_config.api.simulation_filter.clone(); + filter = filter.add_deny_all(); + node_config.api.simulation_filter = filter; + + let mut context = new_test_context_with_config(current_function_name!(), node_config); + + let admin0 = context.root_account().await; + + let resp = context.simulate_transaction(&admin0, json!({ + "type": "script_payload", + "code": { + "bytecode": "a11ceb0b030000000105000100000000050601000000000000000600000000000000001a0102", + }, + "type_arguments": [], + "arguments": [], + }), 403).await; + + context.check_golden_output(resp); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_simulation_filter_allow_sender() { + let mut node_config = NodeConfig::default(); + + // Allow the root sender only. 
+ let mut filter = node_config.api.simulation_filter.clone(); + filter = filter.add_allow_sender(aptos_test_root_address()); + filter = filter.add_deny_all(); + node_config.api.simulation_filter = filter; + + let mut context = new_test_context_with_config(current_function_name!(), node_config); + + let admin0 = context.root_account().await; + let other_account = context.create_account().await; + + context.simulate_transaction(&admin0, json!({ + "type": "script_payload", + "code": { + "bytecode": "a11ceb0b030000000105000100000000050601000000000000000600000000000000001a0102", + }, + "type_arguments": [], + "arguments": [], + }), 200).await; + + let resp = context.simulate_transaction(&other_account, json!({ + "type": "script_payload", + "code": { + "bytecode": "a11ceb0b030000000105000100000000050601000000000000000600000000000000001a0102", + }, + "type_arguments": [], + "arguments": [], + }), 403).await; + + // It was difficult to prune when using a vec of responses so we just put the + // rejection response in the goldens. + context.check_golden_output(resp); +} + fn gen_string(len: u64) -> String { let mut rng = thread_rng(); std::iter::repeat(()) diff --git a/api/src/tests/view_function.rs b/api/src/tests/view_function.rs index 663f6fa7ad971..9d98afc3c7e38 100644 --- a/api/src/tests/view_function.rs +++ b/api/src/tests/view_function.rs @@ -1,10 +1,30 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use super::new_test_context; +use super::{new_test_context, new_test_context_with_config}; use aptos_api_test_context::current_function_name; use aptos_cached_packages::aptos_stdlib; -use serde_json::json; +use aptos_config::config::{NodeConfig, ViewFilter, ViewFunctionId}; +use aptos_types::account_address::AccountAddress; +use serde_json::{json, Value}; +use std::str::FromStr; + +fn build_coin_balance_request(address: &AccountAddress) -> Value { + json!({ + "function":"0x1::coin::balance", + "arguments": vec![address.to_string()], + "type_arguments": vec!["0x1::aptos_coin::AptosCoin"], + }) +} + +fn build_coin_decimals_request() -> Value { + let arguments: Vec = Vec::new(); + json!({ + "function":"0x1::coin::decimals", + "arguments": arguments, + "type_arguments": vec!["0x1::aptos_coin::AptosCoin"], + }) +} #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_simple_view() { @@ -17,19 +37,82 @@ async fn test_simple_view() { context.commit_block(&vec![txn1, txn2]).await; let resp = context - .post( - "/view", - json!({ - "function":"0x1::coin::balance", - "arguments": vec![owner.address().to_string()], - "type_arguments": vec!["0x1::aptos_coin::AptosCoin"], - }), - ) + .post("/view", build_coin_balance_request(&owner.address())) .await; context.check_golden_output_no_prune(resp); } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_view_allowlist() { + let mut node_config = NodeConfig::default(); + + // Allowlist only the balance function. 
+ node_config.api.view_filter = ViewFilter::Allowlist(vec![ViewFunctionId { + address: AccountAddress::from_str("0x1").unwrap(), + module: "coin".to_string(), + function_name: "balance".to_string(), + }]); + + let mut context = new_test_context_with_config(current_function_name!(), node_config); + + let creator = &mut context.gen_account(); + let owner = &mut context.gen_account(); + let txn1 = context.mint_user_account(creator).await; + let txn2 = context.account_transfer(creator, owner, 100_000); + + context.commit_block(&vec![txn1, txn2]).await; + + // See that an allowed function works. + let resp1 = context + .expect_status_code(200) + .post("/view", build_coin_balance_request(&owner.address())) + .await; + + // See that a non-allowed function is rejected. + let resp2 = context + .expect_status_code(403) + .post("/view", build_coin_decimals_request()) + .await; + + context.check_golden_output_no_prune(json!(vec![resp1, resp2])); +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn test_view_blocklist() { + let mut node_config = NodeConfig::default(); + + // Blocklist the balance function. + node_config.api.view_filter = ViewFilter::Blocklist(vec![ViewFunctionId { + address: AccountAddress::from_str("0x1").unwrap(), + module: "coin".to_string(), + function_name: "balance".to_string(), + }]); + + let mut context = new_test_context_with_config(current_function_name!(), node_config); + + let creator = &mut context.gen_account(); + let owner = &mut context.gen_account(); + let txn1 = context.mint_user_account(creator).await; + let txn2 = context.account_transfer(creator, owner, 100_000); + + context.commit_block(&vec![txn1, txn2]).await; + + // See that a blocked function is rejected. + let resp1 = context + .expect_status_code(403) + .post("/view", build_coin_balance_request(&owner.address())) + .await; + + // See that a non-blocked function is allowed. + let resp2 = context + .expect_status_code(200) + .post("/view", build_coin_decimals_request()) + .await; + + context.check_golden_output_no_prune(json!(vec![resp1, resp2])); +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_simple_view_invalid() { let mut context = new_test_context(current_function_name!()); From 5d2a3587262bcb762da3d5019d66e1b3ea24e89d Mon Sep 17 00:00:00 2001 From: aldenhu Date: Thu, 11 Jan 2024 22:02:57 +0000 Subject: [PATCH 09/44] test_gas: add more cases 1. add 2nd transfer 2. add token v2 mints and mutations 3. 
change PublishLarge to use a module other than the stdlib (which is already published in the genesis); add upgrading the large module in preparation for tuning the gas (storage fee) schedule --- .../code_publishing.data/pack_large/Move.toml | 7 + .../pack_large/sources/mod0.move | 169 ++++++++++++++++++ .../pack_large/sources/mod1.move | 169 ++++++++++++++++++ .../pack_large/sources/mod2.move | 169 ++++++++++++++++++ .../pack_large/sources/mod3.move | 169 ++++++++++++++++++ .../pack_large/sources/mod4.move | 169 ++++++++++++++++++ .../pack_large/sources/mod5.move | 169 ++++++++++++++++++ .../pack_large/sources/mod6.move | 169 ++++++++++++++++++ .../pack_large/sources/mod7.move | 169 ++++++++++++++++++ .../pack_large_upgrade/Move.toml | 7 + .../pack_large_upgrade/sources/mod0.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod1.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod2.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod3.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod4.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod5.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod6.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod7.move | 169 ++++++++++++++++++ .../pack_large_upgrade/sources/mod_new.move | 169 ++++++++++++++++++ aptos-move/e2e-move-tests/src/tests/gas.rs | 97 +++++++++- .../e2e-move-tests/src/tests/token_objects.rs | 84 ++++++--- 21 files changed, 3033 insertions(+), 35 deletions(-) create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/Move.toml create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod0.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod1.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod2.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod3.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod4.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod5.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod6.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod7.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/Move.toml create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod0.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod1.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod2.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod3.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod4.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod5.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod6.move create mode 100644 aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod7.move create mode 100644 
aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod_new.move diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/Move.toml b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/Move.toml new file mode 100644 index 0000000000000..d4e6ab60a4ed0 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/Move.toml @@ -0,0 +1,7 @@ +[package] +name = "test_large_pack" +version = "0.0.0" +upgrade_policy = "compatible" + +[dependencies] +AptosFramework = { local = "../../../../../framework/aptos-framework" } diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod0.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod0.move new file mode 100644 index 0000000000000..bd21b1cb2e3fd --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod0.move @@ -0,0 +1,169 @@ +module 0xcafe::mod0 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod1.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod1.move new file mode 100644 index 0000000000000..31eb4c365660f --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod1.move @@ -0,0 +1,169 @@ +module 0xcafe::mod1 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod2.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod2.move
new file mode 100644
index 0000000000000..a89d9f09b4098
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod2.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod2 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod3.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod3.move
new file mode 100644
index 0000000000000..1bf2955d1f6d0
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod3.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod3 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod4.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod4.move
new file mode 100644
index 0000000000000..4af51c28c29cb
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod4.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod4 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod5.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod5.move
new file mode 100644
index 0000000000000..b44a01fc3ae5b
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod5.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod5 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod6.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod6.move
new file mode 100644
index 0000000000000..cc600ae32f4e1
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod6.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod6 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod7.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod7.move
new file mode 100644
index 0000000000000..aa9a5a44c0565
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large/sources/mod7.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod7 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/Move.toml b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/Move.toml
new file mode 100644
index 0000000000000..d4e6ab60a4ed0
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/Move.toml
@@ -0,0 +1,7 @@
+[package]
+name = "test_large_pack"
+version = "0.0.0"
+upgrade_policy = "compatible"
+
+[dependencies]
+AptosFramework = { local = "../../../../../framework/aptos-framework" }
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod0.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod0.move
new file mode 100644
index 0000000000000..bd21b1cb2e3fd
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod0.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod0 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod1.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod1.move
new file mode 100644
index 0000000000000..31eb4c365660f
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod1.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod1 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod2.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod2.move
new file mode 100644
index 0000000000000..a89d9f09b4098
--- /dev/null
+++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod2.move
@@ -0,0 +1,169 @@
+module 0xcafe::mod2 {
+    fun large0() {
+        let _blob: vector<u8> = vector[
+            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+            // ... 15 more identical rows of sixteen zeros ...
+        ];
+    }
+
+    // large1() through large7() repeat the same 256-zero blob.
+}
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod3.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod3.move new file mode 100644 index 0000000000000..1bf2955d1f6d0 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod3.move @@ -0,0 +1,169 @@ +module 0xcafe::mod3 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod4.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod4.move new file mode 100644 index 0000000000000..4af51c28c29cb --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod4.move @@ -0,0 +1,169 @@ +module 0xcafe::mod4 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod5.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod5.move new file mode 100644 index 0000000000000..b44a01fc3ae5b --- /dev/null +++ 
b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod5.move @@ -0,0 +1,169 @@ +module 0xcafe::mod5 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod6.move 
b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod6.move new file mode 100644 index 0000000000000..cc600ae32f4e1 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod6.move @@ -0,0 +1,169 @@ +module 0xcafe::mod6 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod7.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod7.move new file mode 100644 index 0000000000000..aa9a5a44c0565 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod7.move @@ -0,0 +1,169 @@ +module 0xcafe::mod7 { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod_new.move b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod_new.move new file mode 100644 index 0000000000000..1a1c23f13e227 --- /dev/null +++ b/aptos-move/e2e-move-tests/src/tests/code_publishing.data/pack_large_upgrade/sources/mod_new.move @@ -0,0 +1,169 @@ +module 0xcafe::mod_new { + fun large0() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large1() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large2() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large3() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large4() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large5() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large6() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } + + fun large7() { + let _blob: vector = vector[ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ]; + } +} diff --git a/aptos-move/e2e-move-tests/src/tests/gas.rs b/aptos-move/e2e-move-tests/src/tests/gas.rs index 8d37cb5b675b8..6a0697af4152d 100644 --- a/aptos-move/e2e-move-tests/src/tests/gas.rs +++ b/aptos-move/e2e-move-tests/src/tests/gas.rs @@ -1,7 +1,16 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::{tests::common::test_dir_path, MoveHarness}; +use crate::{ + tests::{ + common::test_dir_path, + token_objects::{ + create_mint_hero_payload, create_set_hero_description_payload, + publish_object_token_example, + }, + }, + MoveHarness, +}; use aptos_cached_packages::{aptos_stdlib, aptos_token_sdk_builder}; use aptos_crypto::{bls12381, PrivateKey, Uniform}; use aptos_gas_profiling::TransactionGasLog; @@ -67,6 +76,13 @@ fn test_gas() { aptos_stdlib::aptos_coin_transfer(account_2_address, 1000), ); + run( + &mut harness, + "2ndTransfer", + account_1, + aptos_stdlib::aptos_coin_transfer(account_2_address, 1000), + ); + run( &mut harness, "CreateAccount", @@ -84,6 +100,34 @@ fn test_gas() { 1000, ), ); + + publish_object_token_example(&mut harness, account_1_address, account_1); + run( + &mut harness, + "MintTokenV2", + account_1, + create_mint_hero_payload(&account_1_address, SHORT_STR), + ); + run( + &mut harness, + "MutateTokenV2", + account_1, + create_set_hero_description_payload(&account_1_address, SHORT_STR), + ); + publish_object_token_example(&mut harness, account_2_address, account_2); + run( + &mut harness, + "MintLargeTokenV2", + account_2, + create_mint_hero_payload(&account_2_address, LONG_STR), + ); + run( + &mut harness, + "MutateLargeTokenV2", + account_2, + create_set_hero_description_payload(&account_2_address, LONG_STR), + ); + run( &mut harness, "CreateStakePool", @@ -180,7 +224,7 @@ fn test_gas() { ); run( &mut harness, - "MintToken", + "MintTokenV1", account_1, aptos_token_sdk_builder::token_mint_script( account_1_address, @@ -191,7 +235,7 @@ fn test_gas() { ); run( &mut harness, - "MutateToken", + "MutateTokenV1", account_1, aptos_token_sdk_builder::token_mutate_token_properties( account_1_address, @@ -207,7 +251,7 @@ fn test_gas() { ); run( &mut harness, - "MutateToken2ndTime", + "MutateTokenV12ndTime", account_1, aptos_token_sdk_builder::token_mutate_token_properties( account_1_address, @@ -276,12 +320,17 @@ fn test_gas() { publisher, &test_dir_path("code_publishing.data/pack_upgrade_compat"), ); - let publisher = &harness.aptos_framework_account(); publish( &mut harness, "PublishLarge", publisher, - &test_dir_path("code_publishing.data/pack_stdlib"), + &test_dir_path("code_publishing.data/pack_large"), + ); + publish( + &mut harness, + "UpgradeLarge", + publisher, + &test_dir_path("code_publishing.data/pack_large_upgrade"), ); } @@ -299,3 +348,39 @@ pub fn print_gas_cost(function: &str, gas_units: u64) { dollar_cost(gas_units, 30) ); } + +const SHORT_STR: &str = "A hero."; +const LONG_STR: &str = "\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 
0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef\ + "; diff --git a/aptos-move/e2e-move-tests/src/tests/token_objects.rs b/aptos-move/e2e-move-tests/src/tests/token_objects.rs index 95095f0927b58..f208d85f64b23 100644 --- a/aptos-move/e2e-move-tests/src/tests/token_objects.rs +++ b/aptos-move/e2e-move-tests/src/tests/token_objects.rs @@ -2,9 +2,12 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{assert_success, tests::common, MoveHarness}; +use aptos_language_e2e_tests::account::Account; use aptos_types::{ account_address::{self, AccountAddress}, event::EventHandle, + move_utils::MemberId, + transaction::{EntryFunction, TransactionPayload}, }; use move_core_types::{identifier::Identifier, language_storage::StructTag}; use serde::Deserialize; @@ -34,29 +37,11 @@ fn test_basic_token() { let addr = AccountAddress::from_hex_literal("0xcafe").unwrap(); let account = h.new_account_at(addr); - let mut build_options = aptos_framework::BuildOptions::default(); - build_options - .named_addresses - .insert("hero".to_string(), addr); + publish_object_token_example(&mut h, addr, &account); - let result = h.publish_package_with_options( + let result = h.run_transaction_payload( &account, - &common::test_dir_path("../../../move-examples/token_objects/hero"), - build_options, - ); - assert_success!(result); - - let result = h.run_entry_function( - &account, - str::parse(&format!("0x{}::hero::mint_hero", addr.to_hex())).unwrap(), - vec![], - vec![ - bcs::to_bytes("The best hero ever!").unwrap(), - bcs::to_bytes("Male").unwrap(), - bcs::to_bytes("Wukong").unwrap(), - bcs::to_bytes("Monkey God").unwrap(), - 
bcs::to_bytes("404").unwrap(), - ], + create_mint_hero_payload(&addr, "The best hero ever!"), ); assert_success!(result); @@ -97,15 +82,9 @@ fn test_basic_token() { .read_resource_raw(&token_addr, token_obj_tag.clone()) .is_none()); - let result = h.run_entry_function( + let result = h.run_transaction_payload( &account, - str::parse(&format!("0x{}::hero::set_hero_description", addr.to_hex())).unwrap(), - vec![], - vec![ - bcs::to_bytes("Hero Quest!").unwrap(), - bcs::to_bytes("Wukong").unwrap(), - bcs::to_bytes("Oh no!").unwrap(), - ], + create_set_hero_description_payload(&addr, "Oh no!"), ); assert_success!(result); @@ -123,3 +102,50 @@ fn test_basic_token() { token_1.description = "The best hero ever!".to_string(); assert_eq!(token_0.mutation_events.key(), token_1.mutation_events.key()); } + +pub fn publish_object_token_example(h: &mut MoveHarness, addr: AccountAddress, account: &Account) { + let mut build_options = aptos_framework::BuildOptions::default(); + build_options + .named_addresses + .insert("hero".to_string(), addr); + + let result = h.publish_package_with_options( + account, + &common::test_dir_path("../../../move-examples/token_objects/hero"), + build_options, + ); + assert_success!(result); +} + +pub fn create_mint_hero_payload(addr: &AccountAddress, description: &str) -> TransactionPayload { + let fun = str::parse(&format!("0x{}::hero::mint_hero", addr.to_hex())).unwrap(); + let MemberId { + module_id, + member_id: function_id, + } = fun; + + TransactionPayload::EntryFunction(EntryFunction::new(module_id, function_id, vec![], vec![ + bcs::to_bytes(description).unwrap(), + bcs::to_bytes("Male").unwrap(), + bcs::to_bytes("Wukong").unwrap(), + bcs::to_bytes("Monkey God").unwrap(), + bcs::to_bytes("404").unwrap(), + ])) +} + +pub fn create_set_hero_description_payload( + addr: &AccountAddress, + description: &str, +) -> TransactionPayload { + let fun = str::parse(&format!("0x{}::hero::set_hero_description", addr.to_hex())).unwrap(); + let MemberId { + module_id, + member_id: function_id, + } = fun; + + TransactionPayload::EntryFunction(EntryFunction::new(module_id, function_id, vec![], vec![ + bcs::to_bytes("Hero Quest!").unwrap(), + bcs::to_bytes("Wukong").unwrap(), + bcs::to_bytes(description).unwrap(), + ])) +} From 1cebf39904d6642dbf362373bb60e208943f1db8 Mon Sep 17 00:00:00 2001 From: Guoteng Rao <3603304+grao1991@users.noreply.github.com> Date: Fri, 26 Jan 2024 13:57:22 -0800 Subject: [PATCH 10/44] [Storage] Split rocksdb metrics for db shards into separate metrics. 
 (#11785)

---
 storage/aptosdb/src/metrics.rs                | 24 +++++++++++++
 .../aptosdb/src/rocksdb_property_reporter.rs  | 36 +++++++++++++++----
 2 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/storage/aptosdb/src/metrics.rs b/storage/aptosdb/src/metrics.rs
index 148724398a23d..19831d0e04831 100644
--- a/storage/aptosdb/src/metrics.rs
+++ b/storage/aptosdb/src/metrics.rs
@@ -155,6 +155,30 @@ pub static ROCKSDB_PROPERTIES: Lazy<IntGaugeVec> = Lazy::new(|| {
     .unwrap()
 });
 
+pub(crate) static STATE_KV_DB_PROPERTIES: Lazy<IntGaugeVec> = Lazy::new(|| {
+    register_int_gauge_vec!(
+        // metric name
+        "aptos_state_kv_db_properties",
+        // metric description
+        "StateKvDb rocksdb integer properties",
+        // metric labels (dimensions)
+        &["shard_id", "cf_name", "property_name",]
+    )
+    .unwrap()
+});
+
+pub(crate) static STATE_MERKLE_DB_PROPERTIES: Lazy<IntGaugeVec> = Lazy::new(|| {
+    register_int_gauge_vec!(
+        // metric name
+        "aptos_state_merkle_db_properties",
+        // metric description
+        "StateMerkleDb rocksdb integer properties",
+        // metric labels (dimensions)
+        &["shard_id", "cf_name", "property_name",]
+    )
+    .unwrap()
+});
+
 // Async committer gauges:
 pub(crate) static LATEST_SNAPSHOT_VERSION: Lazy<IntGauge> = Lazy::new(|| {
     register_int_gauge!(
diff --git a/storage/aptosdb/src/rocksdb_property_reporter.rs b/storage/aptosdb/src/rocksdb_property_reporter.rs
index 3fc303697f2bc..7b9d3873077c4 100644
--- a/storage/aptosdb/src/rocksdb_property_reporter.rs
+++ b/storage/aptosdb/src/rocksdb_property_reporter.rs
@@ -11,13 +11,17 @@ use crate::{
         transaction_info_db_column_families, write_set_db_column_families,
     },
     ledger_db::LedgerDb,
-    metrics::{OTHER_TIMERS_SECONDS, ROCKSDB_PROPERTIES},
+    metrics::{
+        OTHER_TIMERS_SECONDS, ROCKSDB_PROPERTIES, STATE_KV_DB_PROPERTIES,
+        STATE_MERKLE_DB_PROPERTIES,
+    },
     state_kv_db::StateKvDb,
     state_merkle_db::StateMerkleDb,
 };
 use anyhow::Result;
 use aptos_infallible::Mutex;
 use aptos_logger::prelude::*;
+use aptos_metrics_core::IntGaugeVec;
 use aptos_schemadb::DB;
 use once_cell::sync::Lazy;
 use std::{
@@ -77,11 +81,19 @@ fn set_property(cf_name: &str, db: &DB) -> Result<()> {
     Ok(())
 }
 
-fn set_property_sharded(cf_name: &str, db: &DB, db_shard_id: usize) -> Result<()> {
+fn set_shard_property(
+    cf_name: &str,
+    db: &DB,
+    db_shard_id: usize,
+    metrics: &Lazy<IntGaugeVec>,
+) -> Result<()> {
     for (rockdb_property_name, aptos_rocksdb_property_name) in &*ROCKSDB_PROPERTY_MAP {
-        let cf_label = format!("{}_{}", cf_name, db_shard_id);
-        ROCKSDB_PROPERTIES
-            .with_label_values(&[&cf_label, aptos_rocksdb_property_name])
+        metrics
+            .with_label_values(&[
+                &format!("{db_shard_id}"),
+                cf_name,
+                aptos_rocksdb_property_name,
+            ])
            .set(db.get_property(cf_name, rockdb_property_name)?
as i64);
    }
    Ok(())
}

@@ -127,7 +139,12 @@ fn update_rocksdb_properties(
             set_property(cf, state_kv_db.metadata_db())?;
             if state_kv_db.enabled_sharding() {
                 for shard in 0..NUM_STATE_SHARDS {
-                    set_property_sharded(cf, state_kv_db.db_shard(shard as u8), shard)?;
+                    set_shard_property(
+                        cf,
+                        state_kv_db.db_shard(shard as u8),
+                        shard,
+                        &STATE_KV_DB_PROPERTIES,
+                    )?;
                 }
             }
         }
@@ -141,7 +158,12 @@ fn update_rocksdb_properties(
             set_property(cf_name, state_merkle_db.metadata_db())?;
             if state_merkle_db.sharding_enabled() {
                 for shard in 0..NUM_STATE_SHARDS {
-                    set_property_sharded(cf_name, state_merkle_db.db_shard(shard as u8), shard)?;
+                    set_shard_property(
+                        cf_name,
+                        state_merkle_db.db_shard(shard as u8),
+                        shard,
+                        &STATE_MERKLE_DB_PROPERTIES,
+                    )?;
                 }
             }
         }

From 9d73d5da975c71bc448ab0396eca43cc51517b28 Mon Sep 17 00:00:00 2001
From: igor-aptos <110557261+igor-aptos@users.noreply.github.com>
Date: Sun, 28 Jan 2024 00:37:25 -0800
Subject: [PATCH 11/44] Fix emitter when client is a load balancer across
 multiple servers (#11679)

---
 crates/transaction-emitter-lib/src/emitter/mod.rs | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/crates/transaction-emitter-lib/src/emitter/mod.rs b/crates/transaction-emitter-lib/src/emitter/mod.rs
index 15d74e19186f0..d41517a96e157 100644
--- a/crates/transaction-emitter-lib/src/emitter/mod.rs
+++ b/crates/transaction-emitter-lib/src/emitter/mod.rs
@@ -811,12 +811,15 @@ async fn wait_for_accounts_sequence(
             let prev_sequence_number = latest_fetched_counts
                 .insert(address, sequence_number)
                 .unwrap_or(*start_seq_num);
-            assert!(prev_sequence_number <= sequence_number);
-            sum_of_completion_timestamps_millis +=
-                millis_elapsed * (sequence_number - prev_sequence_number) as u128;
-
-            if *end_seq_num == sequence_number {
-                pending_addresses.remove(&address);
+            // A fetched sequence number can be older than the one we already fetched
+            // if the client connection moved to a different server behind a load balancer.
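+            // In that case, skip the progress accounting below instead of asserting,
+            // so a single stale response cannot crash the emitter.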
+            if prev_sequence_number <= sequence_number {
+                sum_of_completion_timestamps_millis +=
+                    millis_elapsed * (sequence_number - prev_sequence_number) as u128;
+
+                if *end_seq_num == sequence_number {
+                    pending_addresses.remove(&address);
+                }
+            }
         }

From e2d8c960b29b2c7995785daf2d771b35ac641812 Mon Sep 17 00:00:00 2001
From: Justin Chang <37165464+just-in-chang@users.noreply.github.com>
Date: Sun, 28 Jan 2024 20:11:22 -0800
Subject: [PATCH 12/44] [NFT Metadata Crawler] Enable better quality from
 configs + add IPFS gateway auth (#11801)

---
 .../src/utils/constants.rs                    | 12 +++
 .../src/utils/image_optimizer.rs              | 14 ++-
 .../src/utils/uri_parser.rs                   | 80 +++++++++++----
 .../nft-metadata-crawler-parser/src/worker.rs | 99 ++++++++++++-------
 4 files changed, 149 insertions(+), 56 deletions(-)

diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/constants.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/constants.rs
index f64f61b2b3ada..7bae24d8cb476 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/constants.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/constants.rs
@@ -11,3 +11,15 @@ pub const MAX_JSON_REQUEST_RETRY_SECONDS: u64 = 30;
 
 /// Allocate 90 seconds for downloading large image files
 pub const MAX_IMAGE_REQUEST_RETRY_SECONDS: u64 = 90;
+
+/// Default 15 MB maximum file size for files to be downloaded
+pub const DEFAULT_MAX_FILE_SIZE_BYTES: u32 = 15_000_000;
+
+/// Default 100% image quality for image optimization
+pub const DEFAULT_IMAGE_QUALITY: u8 = 100;
+
+/// Default 4096 maximum image dimensions for image optimization
+pub const DEFAULT_MAX_IMAGE_DIMENSIONS: u32 = 4_096;
+
+/// Default IPFS gateway auth param key
+pub const IPFS_AUTH_KEY: &str = "pinataGatewayToken";
diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs
index c44ec75f0cea4..9d22bdf5a961a 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/image_optimizer.rs
@@ -18,7 +18,11 @@ use image::{
     DynamicImage, GenericImageView, ImageBuffer, ImageFormat, ImageOutputFormat,
 };
 use reqwest::Client;
-use std::{io::Cursor, time::Duration};
+use std::{
+    cmp::{max, min},
+    io::Cursor,
+    time::Duration,
+};
 use tracing::{info, warn};
 
 pub struct ImageOptimizer;
@@ -30,6 +34,7 @@ impl ImageOptimizer {
         uri: String,
         max_file_size_bytes: u32,
         image_quality: u8,
+        max_image_dimensions: u32,
     ) -> anyhow::Result<(Vec<u8>, ImageFormat)> {
         OPTIMIZE_IMAGE_INVOCATION_COUNT.inc();
         let (_, size) = get_uri_metadata(uri.clone()).await?;
@@ -71,8 +76,11 @@ impl ImageOptimizer {
             _ => {
                 let img = image::load_from_memory(&img_bytes)
                     .context(format!("Failed to load image from memory: {} bytes", size))?;
-                let (nwidth, nheight) =
-                    Self::calculate_dimensions_with_ration(512, img.width(), img.height());
+                let (nwidth, nheight) = Self::calculate_dimensions_with_ration(
+                    min(max(img.width(), img.height()), max_image_dimensions),
+                    img.width(),
+                    img.height(),
+                );
                 let resized_image = resize(&img.to_rgba8(), nwidth, nheight, FilterType::Gaussian);
                 Ok(Self::to_image_bytes(resized_image, image_quality)?)
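The resize bound above is `min(max(width, height), max_image_dimensions)`: the
larger source side is used as the target bounding box, capped at the configured
maximum, so images are no longer forced toward the old fixed bound of 512 and
small images are not scaled up. A minimal sketch of that clamping rule (the
`target_bounding_box` helper below is hypothetical, for illustration only):

    use std::cmp::{max, min};

    /// Side length of the square bounding box to resize into: the larger
    /// source dimension, clamped to the configured maximum.
    fn target_bounding_box(width: u32, height: u32, max_image_dimensions: u32) -> u32 {
        min(max(width, height), max_image_dimensions)
    }

    // Examples: an 800x600 source with a 4096 cap keeps an 800 bounding box
    // (no upscaling); a 10_000x2_000 source is capped at 4096, with aspect
    // ratio preserved by the caller.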
diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/uri_parser.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/uri_parser.rs
index 0bd06f71ca6fc..81d5431fd8c0a 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/uri_parser.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/uri_parser.rs
@@ -1,6 +1,9 @@
 // Copyright © Aptos Foundation
 
-use crate::utils::counters::{PARSE_URI_INVOCATION_COUNT, PARSE_URI_TYPE_COUNT};
+use crate::utils::{
+    constants::IPFS_AUTH_KEY,
+    counters::{PARSE_URI_INVOCATION_COUNT, PARSE_URI_TYPE_COUNT},
+};
 use regex::{Captures, Regex};
 use url::Url;
 
@@ -9,7 +12,11 @@ pub struct URIParser;
 impl URIParser {
     /// Attempts to parse IPFS URI to use dedicated gateway.
     /// Returns the original URI if parsing fails.
-    pub fn parse(ipfs_prefix: String, uri: String) -> anyhow::Result<String> {
+    pub fn parse(
+        ipfs_prefix: String,
+        uri: String,
+        ipfs_auth_key: Option<String>,
+    ) -> anyhow::Result<String> {
         PARSE_URI_INVOCATION_COUNT.inc();
         if uri.contains("arweave.net") {
             PARSE_URI_TYPE_COUNT.with_label_values(&["arweave"]).inc();
@@ -22,6 +29,12 @@ impl URIParser {
             uri
         };
 
+        let ipfs_auth_param = if ipfs_auth_key.is_some() {
+            Some(format!("?{}={}", IPFS_AUTH_KEY, ipfs_auth_key.unwrap()))
+        } else {
+            None
+        };
+
         // Expects the following format for provided URIs `ipfs/{CID}/{path}`
         let re = Regex::new(r"^(ipfs/)(?P<cid>[a-zA-Z0-9]+)(?P<path>/.*)?$")?;
 
@@ -36,22 +49,27 @@ impl URIParser {
             .captures(&path.unwrap_or_default())
             .or_else(|| redir_re.captures(&modified_uri))
         {
-            return Self::format_capture(captures, ipfs_prefix);
+            return Self::format_capture(captures, ipfs_prefix, ipfs_auth_param);
         }
         Err(anyhow::anyhow!("Invalid IPFS URI"))
     }
 
     /// Formats a capture group into a URI.
-    fn format_capture(captures: Captures<'_>, ipfs_prefix: String) -> anyhow::Result<String> {
+    fn format_capture(
+        captures: Captures<'_>,
+        ipfs_prefix: String,
+        ipfs_auth_param: Option<String>,
+    ) -> anyhow::Result<String> {
         let cid = captures["cid"].to_string();
         let path = captures.name("path").map(|m| m.as_str().to_string());
 
         PARSE_URI_TYPE_COUNT.with_label_values(&["ipfs"]).inc();
         Ok(format!(
-            "{}{}{}",
+            "{}{}{}{}",
             ipfs_prefix,
             cid,
-            path.unwrap_or_default()
+            path.unwrap_or_default(),
+            ipfs_auth_param.unwrap_or_default()
         ))
     }
 }
@@ -61,47 +79,73 @@ mod tests {
     use super::*;
 
     const IPFS_PREFIX: &str = "https://testipfsprefix.com/ipfs/";
+    const IPFS_AUTH: &str = "token";
     const CID: &str = "testcid";
     const PATH: &str = "testpath";
 
     #[test]
     fn test_parse_ipfs_uri() {
         let test_ipfs_uri = format!("ipfs://{}/{}", CID, PATH);
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_uri).unwrap();
-        assert_eq!(parsed_uri, format!("{IPFS_PREFIX}{CID}/{PATH}"));
+        let parsed_uri = URIParser::parse(
+            IPFS_PREFIX.to_string(),
+            test_ipfs_uri,
+            Some(IPFS_AUTH.to_string()),
+        )
+        .unwrap();
+        assert_eq!(
+            parsed_uri,
+            format!("{IPFS_PREFIX}{CID}/{PATH}?{IPFS_AUTH_KEY}={IPFS_AUTH}")
+        );
 
         // Path is optional for IPFS URIs
         let test_ipfs_uri_no_path = format!("ipfs://{}/{}", CID, "");
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_uri_no_path).unwrap();
+        let parsed_uri =
+            URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_uri_no_path, None).unwrap();
         assert_eq!(parsed_uri, format!("{}{}/{}", IPFS_PREFIX, CID, ""));
 
         // IPFS URIs must contain a CID, expect error here
         let test_ipfs_uri_no_cid = format!("ipfs://{}/{}", "", PATH);
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_uri_no_cid);
+        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_uri_no_cid,
None);
        assert!(parsed_uri.is_err());
    }

    #[test]
    fn test_parse_public_gateway_uri() {
        let test_public_gateway_uri = format!("https://ipfs.io/ipfs/{}/{}", CID, PATH);
-        let parsed_uri =
-            URIParser::parse(IPFS_PREFIX.to_string(), test_public_gateway_uri).unwrap();
-        assert_eq!(parsed_uri, format!("{IPFS_PREFIX}{CID}/{PATH}",));
+        let parsed_uri = URIParser::parse(
+            IPFS_PREFIX.to_string(),
+            test_public_gateway_uri,
+            Some(IPFS_AUTH.to_string()),
+        )
+        .unwrap();
+        assert_eq!(
+            parsed_uri,
+            format!("{IPFS_PREFIX}{CID}/{PATH}?{IPFS_AUTH_KEY}={IPFS_AUTH}")
+        );

        // Path is optional for public gateway URIs
        let test_public_gateway_uri_no_path = format!("https://ipfs.io/ipfs/{}/{}", CID, "");
-        let parsed_uri =
-            URIParser::parse(IPFS_PREFIX.to_string(), test_public_gateway_uri_no_path).unwrap();
+        let parsed_uri = URIParser::parse(
+            IPFS_PREFIX.to_string(),
+            test_public_gateway_uri_no_path,
+            None,
+        )
+        .unwrap();
        assert_eq!(parsed_uri, format!("{}{}/{}", IPFS_PREFIX, CID, ""));

        // Some submitted URIs are in the redirected format
        let test_ipfs_redirect = format!("https://{}.ipfs.re.dir.io/{}", CID, PATH);
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_redirect).unwrap();
+        let parsed_uri =
+            URIParser::parse(IPFS_PREFIX.to_string(), test_ipfs_redirect, None).unwrap();
        assert_eq!(parsed_uri, format!("{IPFS_PREFIX}{CID}/{PATH}"));

        // Public gateway URIs must contain a CID, expect error here
        let test_public_gateway_uri_no_cid = format!("https://ipfs.io/ipfs/{}/{}", "", PATH);
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_public_gateway_uri_no_cid);
+        let parsed_uri = URIParser::parse(
+            IPFS_PREFIX.to_string(),
+            test_public_gateway_uri_no_cid,
+            None,
+        );
        assert!(parsed_uri.is_err());
    }

    #[test]
    fn test_parse_non_ipfs_uri_fail() {
        // Expects an error if parsing a non-IPFS URI
        let test_non_ipfs_uri = "https://tesetnotipfsuri.com/notipfspath.json".to_string();
-        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_non_ipfs_uri);
+        let parsed_uri = URIParser::parse(IPFS_PREFIX.to_string(), test_non_ipfs_uri, None);
        assert!(parsed_uri.is_err());
    }
}
diff --git a/ecosystem/nft-metadata-crawler-parser/src/worker.rs b/ecosystem/nft-metadata-crawler-parser/src/worker.rs
index 5d0487219d27b..4e77169d83b75 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/worker.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/worker.rs
@@ -6,6 +6,9 @@ use crate::{
         nft_metadata_crawler_uris_query::NFTMetadataCrawlerURIsQuery,
     },
     utils::{
+        constants::{
+            DEFAULT_IMAGE_QUALITY, DEFAULT_MAX_FILE_SIZE_BYTES, DEFAULT_MAX_IMAGE_DIMENSIONS,
+        },
         counters::{
             DUPLICATE_ASSET_URI_COUNT, DUPLICATE_RAW_ANIMATION_URI_COUNT,
             DUPLICATE_RAW_IMAGE_URI_COUNT, GOT_CONNECTION_COUNT, OPTIMIZE_IMAGE_TYPE_COUNT,
@@ -46,8 +49,10 @@ pub struct ParserConfig {
     pub database_url: String,
     pub cdn_prefix: String,
     pub ipfs_prefix: String,
-    pub max_file_size_bytes: u32,
-    pub image_quality: u8, // Quality up to 100
+    pub ipfs_auth_key: Option<String>,
+    pub max_file_size_bytes: Option<u32>,
+    pub image_quality: Option<u8>, // Quality up to 100
+    pub max_image_dimensions: Option<u32>,
     pub ack_parsed_uris: Option<bool>,
     pub uri_blacklist: Option<Vec<String>>,
     pub server_port: u16,
@@ -328,25 +333,32 @@ impl Worker {
 
         // Parse asset_uri
         self.log_info("Parsing asset_uri");
-        let json_uri =
-            URIParser::parse(self.config.ipfs_prefix.clone(), self.model.get_asset_uri())
-                .unwrap_or_else(|_| {
-                    self.log_warn("Failed to parse asset_uri", None);
-                    PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc();
-
self.model.get_asset_uri() - }); + let json_uri = URIParser::parse( + self.config.ipfs_prefix.clone(), + self.model.get_asset_uri(), + self.config.ipfs_auth_key.clone(), + ) + .unwrap_or_else(|_| { + self.log_warn("Failed to parse asset_uri", None); + PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc(); + self.model.get_asset_uri() + }); // Parse JSON for raw_image_uri and raw_animation_uri self.log_info("Starting JSON parsing"); - let (raw_image_uri, raw_animation_uri, json) = - JSONParser::parse(json_uri, self.config.max_file_size_bytes) - .await - .unwrap_or_else(|e| { - // Increment retry count if JSON parsing fails - self.log_warn("JSON parsing failed", Some(&e)); - self.model.increment_json_parser_retry_count(); - (None, None, Value::Null) - }); + let (raw_image_uri, raw_animation_uri, json) = JSONParser::parse( + json_uri, + self.config + .max_file_size_bytes + .unwrap_or(DEFAULT_MAX_FILE_SIZE_BYTES), + ) + .await + .unwrap_or_else(|e| { + // Increment retry count if JSON parsing fails + self.log_warn("JSON parsing failed", Some(&e)); + self.model.increment_json_parser_retry_count(); + (None, None, Value::Null) + }); self.model.set_raw_image_uri(raw_image_uri); self.model.set_raw_animation_uri(raw_animation_uri); @@ -407,12 +419,16 @@ impl Worker { .model .get_raw_image_uri() .unwrap_or(self.model.get_asset_uri()); - let img_uri = URIParser::parse(self.config.ipfs_prefix.clone(), raw_image_uri.clone()) - .unwrap_or_else(|_| { - self.log_warn("Failed to parse raw_image_uri", None); - PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc(); - raw_image_uri - }); + let img_uri = URIParser::parse( + self.config.ipfs_prefix.clone(), + raw_image_uri.clone(), + self.config.ipfs_auth_key.clone(), + ) + .unwrap_or_else(|_| { + self.log_warn("Failed to parse raw_image_uri", None); + PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc(); + raw_image_uri + }); // Resize and optimize image self.log_info("Starting image optimization"); @@ -421,8 +437,13 @@ impl Worker { .inc(); let (image, format) = ImageOptimizer::optimize( img_uri, - self.config.max_file_size_bytes, - self.config.image_quality, + self.config + .max_file_size_bytes + .unwrap_or(DEFAULT_MAX_FILE_SIZE_BYTES), + self.config.image_quality.unwrap_or(DEFAULT_IMAGE_QUALITY), + self.config + .max_image_dimensions + .unwrap_or(DEFAULT_MAX_IMAGE_DIMENSIONS), ) .await .unwrap_or_else(|e| { @@ -490,13 +511,16 @@ impl Worker { // If raw_animation_uri_option is None, skip if let Some(raw_animation_uri) = raw_animation_uri_option { self.log_info("Starting animation optimization"); - let animation_uri = - URIParser::parse(self.config.ipfs_prefix.clone(), raw_animation_uri.clone()) - .unwrap_or_else(|_| { - self.log_warn("Failed to parse raw_animation_uri", None); - PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc(); - raw_animation_uri - }); + let animation_uri = URIParser::parse( + self.config.ipfs_prefix.clone(), + raw_animation_uri.clone(), + self.config.ipfs_auth_key.clone(), + ) + .unwrap_or_else(|_| { + self.log_warn("Failed to parse raw_animation_uri", None); + PARSE_URI_TYPE_COUNT.with_label_values(&["other"]).inc(); + raw_animation_uri + }); // Resize and optimize animation self.log_info("Starting animation optimization"); @@ -505,8 +529,13 @@ impl Worker { .inc(); let (animation, format) = ImageOptimizer::optimize( animation_uri, - self.config.max_file_size_bytes, - self.config.image_quality, + self.config + .max_file_size_bytes + .unwrap_or(DEFAULT_MAX_FILE_SIZE_BYTES), + 
self.config.image_quality.unwrap_or(DEFAULT_IMAGE_QUALITY),
+            self.config
+                .max_image_dimensions
+                .unwrap_or(DEFAULT_MAX_IMAGE_DIMENSIONS),
         )
         .await
         .unwrap_or_else(|e| {

From a3d1bdb0879efcdfc4dc41ace34f295d57506827 Mon Sep 17 00:00:00 2001
From: Brian Olson
Date: Mon, 29 Jan 2024 09:41:05 -0500
Subject: [PATCH 13/44] add CommitMessage::Nack (#11711)

Add CommitMessage::Nack to reply to ::Vote and ::Decision messages that don't
get an ::Ack.
---
 consensus/src/pipeline/buffer_manager.rs      | 18 ++++++++++++++++++
 .../src/pipeline/commit_reliable_broadcast.rs | 19 ++++++++++++++++++-
 .../tests/staged/consensus.yaml               |  2 ++
 3 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/consensus/src/pipeline/buffer_manager.rs b/consensus/src/pipeline/buffer_manager.rs
index 46ecd67cb86ef..5a0c2195bf03a 100644
--- a/consensus/src/pipeline/buffer_manager.rs
+++ b/consensus/src/pipeline/buffer_manager.rs
@@ -25,12 +25,14 @@ use aptos_consensus_types::{
 };
 use aptos_crypto::HashValue;
 use aptos_logger::prelude::*;
+use aptos_network::protocols::{rpc::error::RpcError, wire::handshake::v1::ProtocolId};
 use aptos_reliable_broadcast::{DropGuard, ReliableBroadcast};
 use aptos_time_service::TimeService;
 use aptos_types::{
     account_address::AccountAddress, epoch_change::EpochChangeProof, epoch_state::EpochState,
     ledger_info::LedgerInfoWithSignatures,
 };
+use bytes::Bytes;
 use futures::{
     channel::{
         mpsc::{unbounded, UnboundedReceiver, UnboundedSender},
@@ -578,13 +580,18 @@ impl BufferManager {
                         commit_info = commit_info,
                         "Failed to add commit vote",
                     );
+                    reply_nack(protocol, response_sender);
                     item
                 },
             };
             self.buffer.set(&current_cursor, new_item);
             if self.buffer.get(&current_cursor).is_aggregated() {
                 return Some(target_block_id);
+            } else {
+                return None;
             }
+        } else {
+            reply_nack(protocol, response_sender); // TODO: send_commit_vote() doesn't care about the response and this should be direct send not RPC
         }
     },
     CommitMessage::Decision(commit_proof) => {
@@ -612,11 +619,15 @@ impl BufferManager {
                 return Some(target_block_id);
             }
         }
+        reply_nack(protocol, response_sender); // TODO: send_commit_proof() doesn't care about the response and this should be direct send not RPC
     },
     CommitMessage::Ack(_) => {
         // It should be filtered out by verify, so we log errors here
         error!("Unexpected ack message");
     },
+    CommitMessage::Nack => {
+        error!("Unexpected NACK message");
+    },
 }
 None
}
@@ -782,3 +793,10 @@ impl BufferManager {
         info!("Buffer manager stops.");
     }
 }
+
+fn reply_nack(protocol: ProtocolId, response_sender: oneshot::Sender<Result<Bytes, RpcError>>) {
+    let response = ConsensusMsg::CommitMessage(Box::new(CommitMessage::Nack));
+    if let Ok(bytes) = protocol.to_bytes(&response) {
+        let _ = response_sender.send(Ok(bytes.into()));
+    }
+}
diff --git a/consensus/src/pipeline/commit_reliable_broadcast.rs b/consensus/src/pipeline/commit_reliable_broadcast.rs
index 677b2967dd23c..4cea70361d8f9 100644
--- a/consensus/src/pipeline/commit_reliable_broadcast.rs
+++ b/consensus/src/pipeline/commit_reliable_broadcast.rs
@@ -23,6 +23,8 @@ pub enum CommitMessage {
     Decision(CommitDecision),
     /// Ack on either vote or decision
     Ack(()),
+    /// Nack is a non-acknowledgement: we got your message, but it was bad (or we were in a bad state)
+    Nack,
 }
 
 impl CommitMessage {
@@ -32,6 +34,7 @@ impl CommitMessage {
             CommitMessage::Vote(vote) => vote.verify(verifier),
             CommitMessage::Decision(decision) => decision.verify(verifier),
             CommitMessage::Ack(_) => bail!("Unexpected ack in incoming commit message"),
+            CommitMessage::Nack => bail!("Unexpected NACK in incoming commit message"),
         }
     }
 }
@@ -55,7 +58,21 @@ impl
BroadcastStatus<ConsensusMsg> for Arc<AckState> {
     type Message = CommitMessage;
     type Response = CommitMessage;
 
-    fn add(&self, peer: Author, _ack: Self::Response) -> anyhow::Result<Option<Self::Aggregated>> {
+    fn add(&self, peer: Author, ack: Self::Response) -> anyhow::Result<Option<Self::Aggregated>> {
+        match ack {
+            CommitMessage::Vote(_) => {
+                bail!("unexpected Vote reply to broadcast");
+            },
+            CommitMessage::Decision(_) => {
+                bail!("unexpected Decision reply to broadcast");
+            },
+            CommitMessage::Ack(_) => {
+                // okay! continue
+            },
+            CommitMessage::Nack => {
+                bail!("unexpected Nack reply to broadcast");
+            },
+        }
         let mut validators = self.validators.lock();
         if validators.remove(&peer) {
             if validators.is_empty() {
diff --git a/testsuite/generate-format/tests/staged/consensus.yaml b/testsuite/generate-format/tests/staged/consensus.yaml
index 2b600dacee1db..eb1f1345da68a 100644
--- a/testsuite/generate-format/tests/staged/consensus.yaml
+++ b/testsuite/generate-format/tests/staged/consensus.yaml
@@ -299,6 +299,8 @@ CommitMessage:
   2:
     Ack:
       NEWTYPE: UNIT
+  3:
+    Nack: UNIT
 CommitVote:
   STRUCT:
     - author:

From 64dea16609d12a12da8b4c454f409af1aa43894e Mon Sep 17 00:00:00 2001
From: Vineeth Kashyap
Date: Mon, 29 Jan 2024 10:52:52 -0500
Subject: [PATCH 14/44] [compiler-v2] Unreachable code analysis and
 unreachable code removal (#11790)

---
 third_party/move/move-compiler-v2/src/lib.rs  |   6 +-
 .../move/move-compiler-v2/src/pipeline/mod.rs |   4 +
 .../src/pipeline/unreachable_code_analysis.rs | 161 ++++++++++++++++++
 .../src/pipeline/unreachable_code_remover.rs  |  79 +++++++++
 .../move/move-compiler-v2/tests/testsuite.rs  |  15 +-
 .../unreachable-code-remover/abort_only.exp   |  31 ++++
 .../unreachable-code-remover/abort_only.move  |   6 +
 .../abort_or_return_always.exp                |  63 +++++++
 .../abort_or_return_always.move               |  11 ++
 .../always_false_branch.exp                   |  94 ++++++++++
 .../always_false_branch.move                  |  11 ++
 .../break_unreachable.exp                     | 149 ++++++++++++++++
 .../break_unreachable.move                    |  17 ++
 .../conditional_loop_unreachable.exp          | 141 +++++++++++++++
 .../conditional_loop_unreachable.move         |  16 ++
 .../inter_procedural_abort.exp                |  93 ++++++++++
 .../inter_procedural_abort.move               |  13 ++
 .../loop_unreachable.exp                      |  37 ++++
 .../loop_unreachable.move                     |   7 +
 .../return_after_abort.exp                    |  37 ++++
 .../return_after_abort.move                   |   7 +
 .../tests/no-v1-comparison/print_bytecode.exp |   2 -
 22 files changed, 996 insertions(+), 4 deletions(-)
 create mode 100644 third_party/move/move-compiler-v2/src/pipeline/unreachable_code_analysis.rs
 create mode 100644 third_party/move/move-compiler-v2/src/pipeline/unreachable_code_remover.rs
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.move
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.move
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.move
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.move
 create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.exp
 create mode 100644
third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.move create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.exp create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.move create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.exp create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.move create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.exp create mode 100644 third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.move diff --git a/third_party/move/move-compiler-v2/src/lib.rs b/third_party/move/move-compiler-v2/src/lib.rs index e1ac871164354..031862e20c6c3 100644 --- a/third_party/move/move-compiler-v2/src/lib.rs +++ b/third_party/move/move-compiler-v2/src/lib.rs @@ -15,7 +15,9 @@ use crate::pipeline::{ copy_propagation::CopyPropagation, dead_store_elimination::DeadStoreElimination, explicit_drop::ExplicitDrop, livevar_analysis_processor::LiveVarAnalysisProcessor, reference_safety_processor::ReferenceSafetyProcessor, - uninitialized_use_checker::UninitializedUseChecker, visibility_checker::VisibilityChecker, + uninitialized_use_checker::UninitializedUseChecker, + unreachable_code_analysis::UnreachableCodeProcessor, + unreachable_code_remover::UnreachableCodeRemover, visibility_checker::VisibilityChecker, }; use anyhow::bail; use codespan_reporting::term::termcolor::{ColorChoice, StandardStream, WriteColor}; @@ -214,6 +216,8 @@ fn add_default_optimization_pipeline(pipeline: &mut FunctionTargetPipeline) { with_copy_inference: false, })); pipeline.add_processor(Box::new(DeadStoreElimination {})); + pipeline.add_processor(Box::new(UnreachableCodeProcessor {})); + pipeline.add_processor(Box::new(UnreachableCodeRemover {})); } /// Report any diags in the env to the writer and fail if there are errors. diff --git a/third_party/move/move-compiler-v2/src/pipeline/mod.rs b/third_party/move/move-compiler-v2/src/pipeline/mod.rs index 54f25dcc13e1f..319b506a65048 100644 --- a/third_party/move/move-compiler-v2/src/pipeline/mod.rs +++ b/third_party/move/move-compiler-v2/src/pipeline/mod.rs @@ -7,6 +7,7 @@ use crate::pipeline::{ livevar_analysis_processor::LiveVarAnalysisProcessor, reference_safety_processor::ReferenceSafetyProcessor, uninitialized_use_checker::UninitializedUseChecker, + unreachable_code_analysis::UnreachableCodeProcessor, }; use move_stackless_bytecode::function_target::FunctionTarget; @@ -18,6 +19,8 @@ pub mod explicit_drop; pub mod livevar_analysis_processor; pub mod reference_safety_processor; pub mod uninitialized_use_checker; +pub mod unreachable_code_analysis; +pub mod unreachable_code_remover; pub mod visibility_checker; /// Function to register all annotation formatters in the pipeline. 
Those are used
@@ -28,4 +31,5 @@ pub fn register_formatters(target: &FunctionTarget) {
     ReferenceSafetyProcessor::register_formatters(target);
     AvailCopiesAnalysisProcessor::register_formatters(target);
     UninitializedUseChecker::register_formatters(target);
+    UnreachableCodeProcessor::register_formatters(target);
 }
diff --git a/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_analysis.rs b/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_analysis.rs
new file mode 100644
index 0000000000000..8768c51e92e8a
--- /dev/null
+++ b/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_analysis.rs
@@ -0,0 +1,161 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+//! Implements a data-flow analysis to determine whether an instruction is reachable or not.
+//! This analysis does not have any prerequisites.
+//! This analysis sets an annotation of type `ReachableStateAnnotation` on each function target.
+//! This annotation is a prerequisite for the unreachable code checker and unreachable code remover.
+//!
+//! This analysis is a forward "may" analysis; it tracks whether an instruction is:
+//! - maybe reachable (there may be an execution path from the function entry to the instruction)
+//! - definitely not reachable (there is no execution path from the function entry to the instruction)
+//!
+//! This analysis is defined by the following (informal) dataflow equations, over which we obtain a fixpoint:
+//! - the after state of the entry instruction is "maybe reachable"
+//! - the before state of an instruction is the join of the after states of all its predecessors,
+//!   thus at least one of the predecessors must be "maybe reachable" for the before state of
+//!   an instruction to be "maybe reachable" (this is computed by running the forward analysis)
+//! - the after state of an instruction is the same as the before state, except when the instruction is
+//!   known to definitely stop the execution path from continuing on (such as return or abort).
+//!
+//! The forward dataflow analysis does not reach any program point that is not a transitive successor
+//! of the function entry (and thus does not attach any annotations to such program points). Such program
+//! points are considered to be "definitely not reachable".
+
+use move_binary_format::file_format::CodeOffset;
+use move_model::model::FunctionEnv;
+use move_stackless_bytecode::{
+    dataflow_analysis::{DataflowAnalysis, TransferFunctions},
+    dataflow_domains::{AbstractDomain, JoinResult},
+    function_target::{FunctionData, FunctionTarget},
+    function_target_pipeline::{FunctionTargetProcessor, FunctionTargetsHolder},
+    stackless_bytecode::Bytecode,
+    stackless_control_flow_graph::StacklessControlFlowGraph,
+};
+use std::collections::BTreeMap;
+
+/// Reachability state of an instruction.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum ReachableState {
+    Maybe, // Maybe reachable from function entry
+    No,    // Definitely not reachable from function entry
+}
+
+impl AbstractDomain for ReachableState {
+    fn join(&mut self, other: &Self) -> JoinResult {
+        use ReachableState::*;
+        match (self.clone(), other) {
+            (No, Maybe) => {
+                *self = Maybe;
+                JoinResult::Changed
+            },
+            (Maybe, _) | (No, No) => JoinResult::Unchanged,
+        }
+    }
+}
+
+/// Mapping from code offsets to their reachability state, before executing the
+/// instruction at the code offset.
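+///
+/// For example, given the bytecode `0: $t0 := 0; 1: abort($t0); 2: return ()`,
+/// offsets 0 and 1 map to `Maybe`, while offset 2 is definitely not reachable.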
+#[derive(Clone, Debug)]
+pub struct ReachableStateAnnotation(BTreeMap<CodeOffset, ReachableState>);
+
+impl ReachableStateAnnotation {
+    /// Is the instruction at the given `offset` definitely not reachable?
+    pub fn is_definitely_not_reachable(&self, offset: CodeOffset) -> bool {
+        // Note that if there is no annotation attached with the offset, it is because the forward
+        // analysis found that the offset was not a transitive successor of the function entry.
+        // Thus, such offsets are considered to be definitely not reachable.
+        self.0
+            .get(&offset)
+            .map_or(true, |state| matches!(state, ReachableState::No))
+    }
+}
+
+/// Forward intra-procedural dataflow analysis.
+/// Determines whether an instruction is reachable or not.
+pub struct UnreachableCodeAnalysis {}
+
+impl UnreachableCodeAnalysis {
+    /// Analyze the given function and return a mapping from code offsets to their reachability state.
+    fn analyze(&self, func_target: &FunctionTarget) -> ReachableStateAnnotation {
+        let code = func_target.get_bytecode();
+        let cfg = StacklessControlFlowGraph::new_forward(code);
+        // We assume the entry of a function is reachable, and run the forward analysis.
+        let block_state_map = self.analyze_function(ReachableState::Maybe, code, &cfg);
+        let per_bytecode_state =
+            self.state_per_instruction(block_state_map, code, &cfg, |before, _| before.clone());
+        ReachableStateAnnotation(per_bytecode_state)
+    }
+}
+
+impl TransferFunctions for UnreachableCodeAnalysis {
+    type State = ReachableState;
+
+    // This is forward analysis.
+    const BACKWARD: bool = false;
+
+    fn execute(&self, state: &mut Self::State, instr: &Bytecode, _offset: CodeOffset) {
+        use Bytecode::*;
+        // TODO: the precision of this analysis can be improved when constant propagation
+        // information is available.
+        // For example:
+        // - if a branch condition is a constant false, then the branch target is definitely not reachable.
+        // - if addition of two constants overflows, then code after is definitely not reachable.
+        //
+        // Cases where the instruction stops the execution path from continuing on.
+        if matches!(instr, Ret(..) | Abort(..)) {
+            *state = ReachableState::No;
+        } // else: the instruction may not stop the execution path from continuing on.
+    }
+}
+
+impl DataflowAnalysis for UnreachableCodeAnalysis {}
+
+/// A processor which performs the unreachable code analysis.
+pub struct UnreachableCodeProcessor {}
+
+impl FunctionTargetProcessor for UnreachableCodeProcessor {
+    fn process(
+        &self,
+        _targets: &mut FunctionTargetsHolder,
+        func_env: &FunctionEnv,
+        mut data: FunctionData,
+        _scc_opt: Option<&[FunctionEnv]>,
+    ) -> FunctionData {
+        if func_env.is_native() {
+            return data;
+        }
+        let target = FunctionTarget::new(func_env, &data);
+        let analysis = UnreachableCodeAnalysis {};
+        let annotation = analysis.analyze(&target);
+        data.annotations.set(annotation, true);
+        data
+    }
+
+    fn name(&self) -> String {
+        "UnreachableCodeProcessor".to_string()
+    }
+}
+
+impl UnreachableCodeProcessor {
+    /// Registers annotation formatter at the given function target.
+    /// Helps with testing and debugging.
+    pub fn register_formatters(target: &FunctionTarget) {
+        target.register_annotation_formatter(Box::new(format_reachable_state_annotation));
+    }
+}
+
+// ====================================================================
+// Formatting functionality for reachability state annotation.
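+//
+// In annotated dumps, each instruction is prefixed with `maybe` (may be
+// reachable from the function entry) or `no` (definitely not reachable).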
+
+pub fn format_reachable_state_annotation(
+    target: &FunctionTarget,
+    code_offset: CodeOffset,
+) -> Option<String> {
+    let annotation = target.get_annotations().get::<ReachableStateAnnotation>()?;
+    if annotation.is_definitely_not_reachable(code_offset) {
+        Some("no".to_string())
+    } else {
+        Some("maybe".to_string())
+    }
+}
diff --git a/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_remover.rs b/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_remover.rs
new file mode 100644
index 0000000000000..a13553850077c
--- /dev/null
+++ b/third_party/move/move-compiler-v2/src/pipeline/unreachable_code_remover.rs
@@ -0,0 +1,79 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+//! Implements the "unreachable code remover" transformation.
+//!
+//! prerequisite: the `ReachableStateAnnotation` should already be computed by running the
+//! `UnreachableCodeProcessor`.
+//! side effect: all annotations will be removed from the function target annotations.
+//!
+//! Given reachable states information at each program point, this transformation removes
+//! any definitely unreachable code.
+//!
+//! Note that any warnings about user's unreachable code should be emitted before running
+//! this transformation.
+
+use crate::pipeline::unreachable_code_analysis::ReachableStateAnnotation;
+use move_binary_format::file_format::CodeOffset;
+use move_model::model::FunctionEnv;
+use move_stackless_bytecode::{
+    function_target::{FunctionData, FunctionTarget},
+    function_target_pipeline::{FunctionTargetProcessor, FunctionTargetsHolder},
+    stackless_bytecode::Bytecode,
+};
+
+/// A processor which performs unreachable code removal transformation.
+pub struct UnreachableCodeRemover {}
+
+impl UnreachableCodeRemover {
+    /// Transforms the `code` of a function using the `reachable_state_annotation`,
+    /// by removing any definitely unreachable code.
+    ///
+    /// Returns the transformed code.
+    fn transform(
+        code: Vec<Bytecode>,
+        reachable_state_annotation: &ReachableStateAnnotation,
+    ) -> Vec<Bytecode> {
+        let mut new_code = vec![];
+        for (offset, instr) in code.into_iter().enumerate() {
+            // If a program point is definitely not reachable, it is safe to remove that instruction
+            // because no execution path starting at the beginning of the function can reach it
+            // (and we cannot start execution from an arbitrary point in the function).
+            if reachable_state_annotation.is_definitely_not_reachable(offset as CodeOffset) {
+                continue; // skip emitting definitely unreachable code
+            }
+            new_code.push(instr);
+        }
+        new_code
+    }
+}
+
+impl FunctionTargetProcessor for UnreachableCodeRemover {
+    fn process(
+        &self,
+        _targets: &mut FunctionTargetsHolder,
+        func_env: &FunctionEnv,
+        mut data: FunctionData,
+        _scc_opt: Option<&[FunctionEnv]>,
+    ) -> FunctionData {
+        if func_env.is_native() {
+            return data;
+        }
+        let code = std::mem::take(&mut data.code);
+        let target = FunctionTarget::new(func_env, &data);
+        let reachable_state_annotation = target
+            .get_annotations()
+            .get::<ReachableStateAnnotation>()
+            .expect("unreachable code annotation is a prerequisite");
+        let new_code = Self::transform(code, reachable_state_annotation);
+        data.code = new_code;
+        // Annotations may no longer be valid after this transformation, because code offsets have changed.
+        // So remove them.
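+        // (Offset-keyed annotations, such as `ReachableStateAnnotation` itself,
+        // would otherwise point at the wrong instructions.)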
+ data.annotations.clear(); + data + } + + fn name(&self) -> String { + "UnreachableCodeRemover".to_string() + } +} diff --git a/third_party/move/move-compiler-v2/tests/testsuite.rs b/third_party/move/move-compiler-v2/tests/testsuite.rs index cd3273db5b5f8..34e87bb59cdca 100644 --- a/third_party/move/move-compiler-v2/tests/testsuite.rs +++ b/third_party/move/move-compiler-v2/tests/testsuite.rs @@ -13,7 +13,9 @@ use move_compiler_v2::{ copy_propagation::CopyPropagation, dead_store_elimination::DeadStoreElimination, explicit_drop::ExplicitDrop, livevar_analysis_processor::LiveVarAnalysisProcessor, reference_safety_processor::ReferenceSafetyProcessor, - uninitialized_use_checker::UninitializedUseChecker, visibility_checker::VisibilityChecker, + uninitialized_use_checker::UninitializedUseChecker, + unreachable_code_analysis::UnreachableCodeProcessor, + unreachable_code_remover::UnreachableCodeRemover, visibility_checker::VisibilityChecker, }, run_file_format_gen, Options, }; @@ -271,6 +273,17 @@ impl TestConfig { dump_annotated_targets: true, dump_for_only_some_stages: None, } + } else if path.contains("/unreachable-code-remover/") { + pipeline.add_processor(Box::new(UnreachableCodeProcessor {})); + pipeline.add_processor(Box::new(UnreachableCodeRemover {})); + Self { + type_check_only: false, + dump_ast: false, + pipeline, + generate_file_format: false, + dump_annotated_targets: true, + dump_for_only_some_stages: None, + } } else { panic!( "unexpected test path `{}`, cannot derive configuration", diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.exp new file mode 100644 index 0000000000000..b5225b93f6efc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.exp @@ -0,0 +1,31 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + 0: $t0 := 0 + 1: abort($t0) + 2: return () +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + # maybe + 0: $t0 := 0 + # maybe + 1: abort($t0) + # no + 2: return () +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + 0: $t0 := 0 + 1: abort($t0) +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.move new file mode 100644 index 0000000000000..f6cbe8877c2d4 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_only.move @@ -0,0 +1,6 @@ +module 0xc0ffee::m { + fun test() { + abort 0 + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.exp new file mode 100644 index 0000000000000..49422c69d03cb --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.exp @@ -0,0 +1,63 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test($t0: bool): u64 { + var $t1: u64 + var $t2: u64 + 0: if ($t0) goto 1 else goto 5 + 1: label L0 + 2: $t2 := 0 + 3: abort($t2) + 4: goto 8 + 5: label L1 + 6: $t1 := 1 + 7: return $t1 + 8: label L2 + 9: $t1 := 42 + 10: return $t1 +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] 
+fun m::test($t0: bool): u64 { + var $t1: u64 + var $t2: u64 + # maybe + 0: if ($t0) goto 1 else goto 5 + # maybe + 1: label L0 + # maybe + 2: $t2 := 0 + # maybe + 3: abort($t2) + # no + 4: goto 8 + # maybe + 5: label L1 + # maybe + 6: $t1 := 1 + # maybe + 7: return $t1 + # no + 8: label L2 + # no + 9: $t1 := 42 + # no + 10: return $t1 +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test($t0: bool): u64 { + var $t1: u64 + var $t2: u64 + 0: if ($t0) goto 1 else goto 4 + 1: label L0 + 2: $t2 := 0 + 3: abort($t2) + 4: label L1 + 5: $t1 := 1 + 6: return $t1 +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.move new file mode 100644 index 0000000000000..69002f15ccd6f --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/abort_or_return_always.move @@ -0,0 +1,11 @@ +module 0xc0ffee::m { + fun test(p: bool): u64 { + if (p) { + abort 0 + } else { + return 1 + }; + 42 + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.exp new file mode 100644 index 0000000000000..f7f159b9729e7 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.exp @@ -0,0 +1,94 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + var $t1: bool + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + 0: $t1 := false + 1: if ($t1) goto 2 else goto 11 + 2: label L0 + 3: $t3 := 0 + 4: $t2 := infer($t3) + 5: $t5 := 1 + 6: $t4 := +($t2, $t5) + 7: $t2 := infer($t4) + 8: $t0 := infer($t2) + 9: return $t0 + 10: goto 12 + 11: label L1 + 12: label L2 + 13: $t0 := 0 + 14: return $t0 +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + var $t1: bool + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + # maybe + 0: $t1 := false + # maybe + 1: if ($t1) goto 2 else goto 11 + # maybe + 2: label L0 + # maybe + 3: $t3 := 0 + # maybe + 4: $t2 := infer($t3) + # maybe + 5: $t5 := 1 + # maybe + 6: $t4 := +($t2, $t5) + # maybe + 7: $t2 := infer($t4) + # maybe + 8: $t0 := infer($t2) + # maybe + 9: return $t0 + # no + 10: goto 12 + # maybe + 11: label L1 + # maybe + 12: label L2 + # maybe + 13: $t0 := 0 + # maybe + 14: return $t0 +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + var $t1: bool + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + 0: $t1 := false + 1: if ($t1) goto 2 else goto 10 + 2: label L0 + 3: $t3 := 0 + 4: $t2 := infer($t3) + 5: $t5 := 1 + 6: $t4 := +($t2, $t5) + 7: $t2 := infer($t4) + 8: $t0 := infer($t2) + 9: return $t0 + 10: label L1 + 11: label L2 + 12: $t0 := 0 + 13: return $t0 +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.move new file mode 100644 index 0000000000000..0212bf2dcc896 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/always_false_branch.move @@ -0,0 +1,11 @@ +module 0xc0ffee::m { + fun test(): u64 { + if (false) { + let i = 0; + i = i + 1; + return i + }; + 0 + } + +} diff --git 
a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.exp new file mode 100644 index 0000000000000..314237cf7ba67 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.exp @@ -0,0 +1,149 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: bool + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + var $t10: u64 + var $t11: u64 + 0: $t1 := 0 + 1: $t0 := infer($t1) + 2: label L0 + 3: $t3 := 1 + 4: $t2 := +($t0, $t3) + 5: $t0 := infer($t2) + 6: $t5 := 10 + 7: $t4 := ==($t0, $t5) + 8: if ($t4) goto 9 else goto 15 + 9: label L2 + 10: goto 25 + 11: $t7 := 1 + 12: $t6 := +($t0, $t7) + 13: $t0 := infer($t6) + 14: goto 20 + 15: label L3 + 16: goto 2 + 17: $t9 := 1 + 18: $t8 := +($t0, $t9) + 19: $t0 := infer($t8) + 20: label L4 + 21: $t11 := 1 + 22: $t10 := +($t0, $t11) + 23: $t0 := infer($t10) + 24: goto 2 + 25: label L1 + 26: return () +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: bool + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + var $t10: u64 + var $t11: u64 + # maybe + 0: $t1 := 0 + # maybe + 1: $t0 := infer($t1) + # maybe + 2: label L0 + # maybe + 3: $t3 := 1 + # maybe + 4: $t2 := +($t0, $t3) + # maybe + 5: $t0 := infer($t2) + # maybe + 6: $t5 := 10 + # maybe + 7: $t4 := ==($t0, $t5) + # maybe + 8: if ($t4) goto 9 else goto 15 + # maybe + 9: label L2 + # maybe + 10: goto 25 + # no + 11: $t7 := 1 + # no + 12: $t6 := +($t0, $t7) + # no + 13: $t0 := infer($t6) + # no + 14: goto 20 + # maybe + 15: label L3 + # maybe + 16: goto 2 + # no + 17: $t9 := 1 + # no + 18: $t8 := +($t0, $t9) + # no + 19: $t0 := infer($t8) + # no + 20: label L4 + # no + 21: $t11 := 1 + # no + 22: $t10 := +($t0, $t11) + # no + 23: $t0 := infer($t10) + # no + 24: goto 2 + # maybe + 25: label L1 + # maybe + 26: return () +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test() { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: bool + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + var $t10: u64 + var $t11: u64 + 0: $t1 := 0 + 1: $t0 := infer($t1) + 2: label L0 + 3: $t3 := 1 + 4: $t2 := +($t0, $t3) + 5: $t0 := infer($t2) + 6: $t5 := 10 + 7: $t4 := ==($t0, $t5) + 8: if ($t4) goto 9 else goto 11 + 9: label L2 + 10: goto 13 + 11: label L3 + 12: goto 2 + 13: label L1 + 14: return () +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.move new file mode 100644 index 0000000000000..0ef3251964b4f --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/break_unreachable.move @@ -0,0 +1,17 @@ +module 0xc0ffee::m { + fun test() { + let i = 0; + loop { + i = i + 1; + if (i == 10) { + break; + i = i + 1; // unreachable + } else { + continue; + i = i + 1; // unreachable + }; + i = i + 1; // unreachable + } + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.exp new file 
mode 100644 index 0000000000000..e2e7c7c81d593 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.exp @@ -0,0 +1,141 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test($t0: bool, $t1: bool) { + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + 0: label L0 + 1: if ($t0) goto 2 else goto 23 + 2: label L2 + 3: if ($t1) goto 4 else goto 14 + 4: label L5 + 5: label L8 + 6: goto 5 + 7: label L9 + 8: $t3 := 0 + 9: $t2 := infer($t3) + 10: $t5 := 1 + 11: $t4 := +($t2, $t5) + 12: $t2 := infer($t4) + 13: goto 16 + 14: label L6 + 15: goto 27 + 16: label L7 + 17: $t7 := 0 + 18: $t6 := infer($t7) + 19: $t9 := 1 + 20: $t8 := +($t6, $t9) + 21: $t6 := infer($t8) + 22: goto 25 + 23: label L3 + 24: goto 27 + 25: label L4 + 26: goto 0 + 27: label L1 + 28: return () +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test($t0: bool, $t1: bool) { + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + # maybe + 0: label L0 + # maybe + 1: if ($t0) goto 2 else goto 23 + # maybe + 2: label L2 + # maybe + 3: if ($t1) goto 4 else goto 14 + # maybe + 4: label L5 + # maybe + 5: label L8 + # maybe + 6: goto 5 + # no + 7: label L9 + # no + 8: $t3 := 0 + # no + 9: $t2 := infer($t3) + # no + 10: $t5 := 1 + # no + 11: $t4 := +($t2, $t5) + # no + 12: $t2 := infer($t4) + # no + 13: goto 16 + # maybe + 14: label L6 + # maybe + 15: goto 27 + # no + 16: label L7 + # no + 17: $t7 := 0 + # no + 18: $t6 := infer($t7) + # no + 19: $t9 := 1 + # no + 20: $t8 := +($t6, $t9) + # no + 21: $t6 := infer($t8) + # no + 22: goto 25 + # maybe + 23: label L3 + # maybe + 24: goto 27 + # no + 25: label L4 + # no + 26: goto 0 + # maybe + 27: label L1 + # maybe + 28: return () +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test($t0: bool, $t1: bool) { + var $t2: u64 + var $t3: u64 + var $t4: u64 + var $t5: u64 + var $t6: u64 + var $t7: u64 + var $t8: u64 + var $t9: u64 + 0: label L0 + 1: if ($t0) goto 2 else goto 9 + 2: label L2 + 3: if ($t1) goto 4 else goto 7 + 4: label L5 + 5: label L8 + 6: goto 5 + 7: label L6 + 8: goto 11 + 9: label L3 + 10: goto 11 + 11: label L1 + 12: return () +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.move new file mode 100644 index 0000000000000..48807af6d0a52 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/conditional_loop_unreachable.move @@ -0,0 +1,16 @@ +module 0xc0ffee::m { + fun test(p: bool, q: bool) { + while (p) { + if (q) { + loop {}; + let i = 0; + i = i + 1; + } else { + break; + }; + let i = 0; + i = i + 1; + } + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.exp new file mode 100644 index 0000000000000..12224dcfcb6bd --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.exp @@ -0,0 +1,93 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::always_abort() { + var $t0: u64 + 0: $t0 := 0 + 1: abort($t0) + 2: return () +} + + +[variant baseline] +fun 
m::test(): u64 { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: u64 + 0: m::always_abort() + 1: $t2 := 0 + 2: $t1 := infer($t2) + 3: $t4 := 1 + 4: $t3 := +($t1, $t4) + 5: $t1 := infer($t3) + 6: $t0 := infer($t1) + 7: return $t0 +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::always_abort() { + var $t0: u64 + # maybe + 0: $t0 := 0 + # maybe + 1: abort($t0) + # no + 2: return () +} + + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: u64 + # maybe + 0: m::always_abort() + # maybe + 1: $t2 := 0 + # maybe + 2: $t1 := infer($t2) + # maybe + 3: $t4 := 1 + # maybe + 4: $t3 := +($t1, $t4) + # maybe + 5: $t1 := infer($t3) + # maybe + 6: $t0 := infer($t1) + # maybe + 7: return $t0 +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::always_abort() { + var $t0: u64 + 0: $t0 := 0 + 1: abort($t0) +} + + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + var $t1: u64 + var $t2: u64 + var $t3: u64 + var $t4: u64 + 0: m::always_abort() + 1: $t2 := 0 + 2: $t1 := infer($t2) + 3: $t4 := 1 + 4: $t3 := +($t1, $t4) + 5: $t1 := infer($t3) + 6: $t0 := infer($t1) + 7: return $t0 +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.move new file mode 100644 index 0000000000000..a18bff29529a8 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/inter_procedural_abort.move @@ -0,0 +1,13 @@ +module 0xc0ffee::m { + fun always_abort() { + abort 0 + } + + fun test(): u64 { + always_abort(); + let i = 0; // intraprocedural analysis cannot prove this is unreachable + i = i + 1; + i + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.exp new file mode 100644 index 0000000000000..1375b69e06439 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.exp @@ -0,0 +1,37 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + 0: label L0 + 1: goto 0 + 2: label L1 + 3: $t0 := 42 + 4: return $t0 +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + # maybe + 0: label L0 + # maybe + 1: goto 0 + # no + 2: label L1 + # no + 3: $t0 := 42 + # no + 4: return $t0 +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test(): u64 { + var $t0: u64 + 0: label L0 + 1: goto 0 +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.move new file mode 100644 index 0000000000000..fd5c798442e51 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/loop_unreachable.move @@ -0,0 +1,7 @@ +module 0xc0ffee::m { + fun test(): u64 { + loop {}; + 42 + } + +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.exp b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.exp new file mode 100644 index 0000000000000..c0dda807ed3ab --- /dev/null +++ 
b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.exp @@ -0,0 +1,37 @@ +============ initial bytecode ================ + +[variant baseline] +fun m::test(): u32 { + var $t0: u32 + var $t1: u64 + 0: $t1 := 0 + 1: abort($t1) + 2: $t0 := 0 + 3: return $t0 +} + +============ after UnreachableCodeProcessor: ================ + +[variant baseline] +fun m::test(): u32 { + var $t0: u32 + var $t1: u64 + # maybe + 0: $t1 := 0 + # maybe + 1: abort($t1) + # no + 2: $t0 := 0 + # no + 3: return $t0 +} + +============ after UnreachableCodeRemover: ================ + +[variant baseline] +fun m::test(): u32 { + var $t0: u32 + var $t1: u64 + 0: $t1 := 0 + 1: abort($t1) +} diff --git a/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.move b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.move new file mode 100644 index 0000000000000..44cf6d7f6f3c6 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/unreachable-code-remover/return_after_abort.move @@ -0,0 +1,7 @@ +module 0xc0ffee::m { + fun test(): u32 { + abort 0; + 0 + } + +} diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/print_bytecode.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/print_bytecode.exp index e2ba9a32d342a..d9856e46d5951 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/print_bytecode.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-v1-comparison/print_bytecode.exp @@ -20,7 +20,5 @@ entry public ex(Arg0: signer, Arg1: u64) /* def_idx: 0 */ { B0: 0: LdU64(0) 1: Abort -B1: - 2: Ret } } From f28494bfc7c4658e1799e80bca1c181592ad9e15 Mon Sep 17 00:00:00 2001 From: Vineeth Kashyap Date: Mon, 29 Jan 2024 10:54:01 -0500 Subject: [PATCH 15/44] Updating the status of v1-v2 test porting (#11764) --- .../friend_decl_out_of_account_addr.exp | 2 +- .../friend_decl_out_of_account_addr.move | 0 .../{ => v1-tests}/friend_decl_self.exp | 4 ++-- .../{ => v1-tests}/friend_decl_self.move | 0 .../friend_decl_unbound_module.exp | 2 +- .../friend_decl_unbound_module.move | 0 .../{ => v1-borrow-tests}/borrow_if.exp | 2 +- .../{ => v1-borrow-tests}/borrow_if.move | 0 .../{ => v1-borrows}/eq_unassigned_local.exp | 2 +- .../{ => v1-borrows}/eq_unassigned_local.move | 0 .../assign_in_one_if_branch.exp | 4 ++-- .../assign_in_one_if_branch.move | 0 .../assign_wrong_if_branch.exp | 2 +- .../assign_wrong_if_branch.move | 0 .../assign_wrong_if_branch_no_else.exp | 2 +- .../assign_wrong_if_branch_no_else.move | 0 .../else_assigns_if_doesnt.exp | 2 +- .../else_assigns_if_doesnt.move | 0 .../if_assigns_else_doesnt.exp | 2 +- .../if_assigns_else_doesnt.move | 0 .../{ => v1-commands}/if_assigns_no_else.exp | 2 +- .../{ => v1-commands}/if_assigns_no_else.move | 0 .../{ => v1-commands}/move_before_assign.exp | 2 +- .../{ => v1-commands}/move_before_assign.move | 0 .../{ => v1-commands}/use_before_assign.exp | 2 +- .../{ => v1-commands}/use_before_assign.move | 0 .../{ => v1-locals}/use_before_assign_if.exp | 6 +++--- .../{ => v1-locals}/use_before_assign_if.move | 0 .../use_before_assign_if_else.exp | 6 +++--- .../use_before_assign_if_else.move | 0 .../use_before_assign_loop.exp | 10 ++++----- .../use_before_assign_loop.move | 0 .../use_before_assign_simple.exp | 12 +++++------ .../use_before_assign_simple.move | 0 .../use_before_assign_while.exp | 8 +++---- .../use_before_assign_while.move | 0 
.../move/move-compiler-v2/tests/v1.matched | 21 +++++++++++++++++++ .../move/move-compiler-v2/tests/v1.unmatched | 20 ------------------ .../tools/testdiff/src/main.rs | 17 +++++++++++++++ 39 files changed, 74 insertions(+), 56 deletions(-) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_out_of_account_addr.exp (70%) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_out_of_account_addr.move (100%) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_self.exp (64%) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_self.move (100%) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_unbound_module.exp (66%) rename third_party/move/move-compiler-v2/tests/checking/friends/{ => v1-tests}/friend_decl_unbound_module.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-borrow-tests}/borrow_if.exp (98%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-borrow-tests}/borrow_if.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-borrows}/eq_unassigned_local.exp (93%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-borrows}/eq_unassigned_local.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_in_one_if_branch.exp (95%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_in_one_if_branch.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_wrong_if_branch.exp (97%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_wrong_if_branch.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_wrong_if_branch_no_else.exp (96%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/assign_wrong_if_branch_no_else.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/else_assigns_if_doesnt.exp (97%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/else_assigns_if_doesnt.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/if_assigns_else_doesnt.exp (97%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/if_assigns_else_doesnt.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/if_assigns_no_else.exp (97%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/if_assigns_no_else.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/move_before_assign.exp (90%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/move_before_assign.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/use_before_assign.exp (88%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-commands}/use_before_assign.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_if.exp (95%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_if.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ 
=> v1-locals}/use_before_assign_if_else.exp (95%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_if_else.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_loop.exp (94%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_loop.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_simple.exp (86%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_simple.move (100%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_while.exp (97%) rename third_party/move/move-compiler-v2/tests/uninit-use-checker/{ => v1-locals}/use_before_assign_while.move (100%) diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_out_of_account_addr.exp b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.exp similarity index 70% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_out_of_account_addr.exp rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.exp index 831f3471b144e..2278b5670a966 100644 --- a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_out_of_account_addr.exp +++ b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.exp @@ -1,7 +1,7 @@ Diagnostics: error: friend modules of `0x3::M` must have the same address, but the declared friend module `0x2::M` has a different address - ┌─ tests/checking/friends/friend_decl_out_of_account_addr.move:7:5 + ┌─ tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.move:7:5 │ 7 │ friend 0x2::M; │ ^^^^^^^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_out_of_account_addr.move b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_out_of_account_addr.move rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.move diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_self.exp b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.exp similarity index 64% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_self.exp rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.exp index 77cfbea9e111a..a6043f94b569a 100644 --- a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_self.exp +++ b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.exp @@ -1,13 +1,13 @@ Diagnostics: error: cannot declare module `0x42::M` as a friend of itself - ┌─ tests/checking/friends/friend_decl_self.move:3:5 + ┌─ tests/checking/friends/v1-tests/friend_decl_self.move:3:5 │ 3 │ friend Self; │ ^^^^^^^^^^^^ error: cannot declare module `0x43::M` as a friend of itself - ┌─ tests/checking/friends/friend_decl_self.move:9:5 + ┌─ tests/checking/friends/v1-tests/friend_decl_self.move:9:5 │ 9 │ friend 0x43::M; │ ^^^^^^^^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_self.move 
b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_self.move rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.move diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_unbound_module.exp b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.exp similarity index 66% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_unbound_module.exp rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.exp index 0af57a71debbd..54f3cb5d9d5ad 100644 --- a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_unbound_module.exp +++ b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.exp @@ -1,7 +1,7 @@ Diagnostics: error: unbound module `0x42::Nonexistent` in friend declaration - ┌─ tests/checking/friends/friend_decl_unbound_module.move:3:5 + ┌─ tests/checking/friends/v1-tests/friend_decl_unbound_module.move:3:5 │ 3 │ friend 0x42::Nonexistent; │ ^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_unbound_module.move b/third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/checking/friends/friend_decl_unbound_module.move rename to third_party/move/move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/borrow_if.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.exp similarity index 98% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/borrow_if.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.exp index 212f7cf4d3a8c..85b55b6d7c532 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/borrow_if.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.exp @@ -39,7 +39,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `ref` - ┌─ tests/uninit-use-checker/borrow_if.move:8:14 + ┌─ tests/uninit-use-checker/v1-borrow-tests/borrow_if.move:8:14 │ 8 │ assert!(*move ref == 5, 42); │ ^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/borrow_if.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/borrow_if.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/eq_unassigned_local.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.exp similarity index 93% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/eq_unassigned_local.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.exp index 29e7a6a8994e8..c74844dceea23 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/eq_unassigned_local.exp +++ 
b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.exp @@ -17,7 +17,7 @@ fun _0::main() { Diagnostics: error: use of unassigned local `ref` - ┌─ tests/uninit-use-checker/eq_unassigned_local.move:5:9 + ┌─ tests/uninit-use-checker/v1-borrows/eq_unassigned_local.move:5:9 │ 5 │ ref == &x; │ ^^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/eq_unassigned_local.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/eq_unassigned_local.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_in_one_if_branch.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.exp similarity index 95% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_in_one_if_branch.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.exp index e90261b57460c..a31e27277d3fd 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_in_one_if_branch.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.exp @@ -32,13 +32,13 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/assign_in_one_if_branch.move:7:5 + ┌─ tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.move:7:5 │ 7 │ x == y; │ ^^^^^^ error: use of possibly unassigned local `y` - ┌─ tests/uninit-use-checker/assign_in_one_if_branch.move:7:5 + ┌─ tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.move:7:5 │ 7 │ x == y; │ ^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_in_one_if_branch.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_in_one_if_branch.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.exp similarity index 97% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.exp index dbfa258d75ce5..7598b364f73bd 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.exp @@ -31,7 +31,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/assign_wrong_if_branch.move:5:13 + ┌─ tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.move:5:13 │ 5 │ assert!(x == 100, 42); │ ^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.move similarity index 100% rename from 
third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch_no_else.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.exp similarity index 96% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch_no_else.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.exp index 721613aa100d2..9cf7420d46e5e 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch_no_else.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.exp @@ -31,7 +31,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/assign_wrong_if_branch_no_else.move:5:13 + ┌─ tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.move:5:13 │ 5 │ assert!(x == 100, 42); │ ^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch_no_else.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/assign_wrong_if_branch_no_else.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/else_assigns_if_doesnt.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.exp similarity index 97% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/else_assigns_if_doesnt.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.exp index 785585e301986..b375052acabc4 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/else_assigns_if_doesnt.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.exp @@ -37,7 +37,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `y` - ┌─ tests/uninit-use-checker/else_assigns_if_doesnt.move:11:13 + ┌─ tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.move:11:13 │ 11 │ assert!(y == 0, 42); │ ^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/else_assigns_if_doesnt.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/else_assigns_if_doesnt.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_else_doesnt.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.exp similarity index 97% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_else_doesnt.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.exp index 64d79c71a3193..2d71e8e6d40b9 100644 --- 
a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_else_doesnt.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.exp @@ -37,7 +37,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/if_assigns_else_doesnt.move:11:13 + ┌─ tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.move:11:13 │ 11 │ assert!(x == 42, 42); │ ^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_else_doesnt.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_else_doesnt.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_no_else.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.exp similarity index 97% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_no_else.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.exp index 693ad135e5ecd..c5764f1e874d2 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_no_else.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.exp @@ -31,7 +31,7 @@ fun _0::main() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/if_assigns_no_else.move:5:13 + ┌─ tests/uninit-use-checker/v1-commands/if_assigns_no_else.move:5:13 │ 5 │ assert!(x == 42, 42); │ ^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_no_else.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/if_assigns_no_else.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/move_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp similarity index 90% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/move_before_assign.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp index 8edad599324bf..cfe205ea1b841 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/move_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp @@ -13,7 +13,7 @@ fun _0::main() { Diagnostics: error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/move_before_assign.move:4:13 + ┌─ tests/uninit-use-checker/v1-commands/move_before_assign.move:4:13 │ 4 │ let y = move x; │ ^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/move_before_assign.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/move_before_assign.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.move diff --git 
a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp similarity index 88% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp index 9ff8f4af89666..e2eb97f86e206 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp @@ -11,7 +11,7 @@ fun _0::main() { Diagnostics: error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign.move:4:9 + ┌─ tests/uninit-use-checker/v1-commands/use_before_assign.move:4:9 │ 4 │ let y = x; │ ^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.exp similarity index 95% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.exp index 0e05f2d0422d5..61c02aec1e09f 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.exp @@ -59,19 +59,19 @@ fun M::tmove($t0: bool) { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if.move:5:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if.move:5:17 │ 5 │ let _ = move x + 1; │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if.move:11:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if.move:11:17 │ 11 │ let _ = x + 1; │ ^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if.move:17:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if.move:17:17 │ 17 │ let _ = &x; │ ^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if_else.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.exp similarity index 95% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if_else.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.exp index 3e562b05030df..741a8edc91ea5 100644 --- 
a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if_else.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.exp @@ -65,19 +65,19 @@ fun M::tmove($t0: bool) { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if_else.move:5:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if_else.move:5:17 │ 5 │ let _ = move x + 1; │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if_else.move:11:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if_else.move:11:17 │ 11 │ let _ = move x + 1; │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_if_else.move:17:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_if_else.move:17:17 │ 17 │ let _ = move x + 1; │ ^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if_else.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_if_else.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_loop.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.exp similarity index 94% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_loop.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.exp index ba1a6af6e9dbf..d0f4e2a6389ca 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_loop.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.exp @@ -98,31 +98,31 @@ fun M::tmove() { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_loop.move:4:24 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_loop.move:4:24 │ 4 │ loop { let y = move x + 1; x = 0; y; } │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_loop.move:9:24 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_loop.move:9:24 │ 9 │ loop { let y = x + 1; if (cond) { continue }; x = 0; y; } │ ^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_loop.move:14:24 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_loop.move:14:24 │ 14 │ loop { let y = &x; _ = move y; x = 0 } │ ^^ error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_loop.move:19:24 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_loop.move:19:24 │ 19 │ loop { let y = &x; _ = move y; if (cond) { x = 0 }; break }; │ ^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_loop.move:20:9 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_loop.move:20:9 │ 20 │ x; │ ^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_loop.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_loop.move 
rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_simple.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.exp similarity index 86% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_simple.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.exp index 4f90fe2be9a35..6960a1c3b3c7b 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_simple.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.exp @@ -48,37 +48,37 @@ fun M::tmove() { Diagnostics: error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:6:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:6:17 │ 6 │ let _ = move x + 1; │ ^^^^^^ error: use of unassigned local `s` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:9:13 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:9:13 │ 9 │ let _s2 = s; │ ^^^ error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:14:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:14:17 │ 14 │ let _ = x + 1; │ ^^^^^ error: use of unassigned local `s` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:17:19 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:17:19 │ 17 │ let _s3 = copy s; │ ^^^^^^ error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:22:17 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:22:17 │ 22 │ let _ = &x; │ ^^ error: use of unassigned local `s` - ┌─ tests/uninit-use-checker/use_before_assign_simple.move:25:19 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_simple.move:25:19 │ 25 │ let _s2 = &s; │ ^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_simple.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_simple.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.move diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_while.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.exp similarity index 97% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_while.exp rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.exp index b9c7981ab9d14..dbc8d88ef2b37 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_while.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.exp @@ -122,25 +122,25 @@ fun M::tmove($t0: bool) { Diagnostics: error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_while.move:4:32 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_while.move:4:32 │ 4 │ while (cond) { let y = move x + 1; x = 0; y; } │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ 
tests/uninit-use-checker/use_before_assign_while.move:9:32 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_while.move:9:32 │ 9 │ while (cond) { let y = move x + 1; if (cond) { continue }; x = 0; y; } │ ^^^^^^ error: use of possibly unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_while.move:14:32 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_while.move:14:32 │ 14 │ while (cond) { let y = &x; _ = move y; x = 0 } │ ^^ error: use of unassigned local `x` - ┌─ tests/uninit-use-checker/use_before_assign_while.move:19:32 + ┌─ tests/uninit-use-checker/v1-locals/use_before_assign_while.move:19:32 │ 19 │ while (cond) { let y = &x; _ = move y; if (cond) { x = 0 }; break } │ ^^ diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_while.move b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.move similarity index 100% rename from third_party/move/move-compiler-v2/tests/uninit-use-checker/use_before_assign_while.move rename to third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.move diff --git a/third_party/move/move-compiler-v2/tests/v1.matched b/third_party/move/move-compiler-v2/tests/v1.matched index 278cc95cbc29f..a6d28de9ea794 100644 --- a/third_party/move/move-compiler-v2/tests/v1.matched +++ b/third_party/move/move-compiler-v2/tests/v1.matched @@ -1,3 +1,4 @@ +move-compiler/tests/move_check/translated_ir_tests/move/borrow_tests/borrow_if.exp move-compiler-v2/tests/uninit-use-checker/v1-borrow-tests/borrow_if.exp move-compiler/tests/move_check/borrows/assign_local_combo.exp move-compiler-v2/tests/reference-safety/v1-tests/assign_local_combo.exp move-compiler/tests/move_check/borrows/assign_local_combo_invalid.exp move-compiler-v2/tests/reference-safety/v1-tests/assign_local_combo_invalid.exp move-compiler/tests/move_check/borrows/assign_local_field.exp move-compiler-v2/tests/reference-safety/v1-tests/assign_local_field.exp @@ -38,6 +39,7 @@ move-compiler/tests/move_check/borrows/dereference_field.exp move-compiler-v2/ move-compiler/tests/move_check/borrows/dereference_field_invalid.exp move-compiler-v2/tests/reference-safety/v1-tests/dereference_field_invalid.exp move-compiler/tests/move_check/borrows/dereference_full.exp move-compiler-v2/tests/reference-safety/v1-tests/dereference_full.exp move-compiler/tests/move_check/borrows/dereference_full_invalid.exp move-compiler-v2/tests/reference-safety/v1-tests/dereference_full_invalid.exp +move-compiler/tests/move_check/borrows/eq_unassigned_local.exp move-compiler-v2/tests/uninit-use-checker/v1-borrows/eq_unassigned_local.exp move-compiler/tests/move_check/borrows/freeze_combo.exp move-compiler-v2/tests/reference-safety/v1-tests/freeze_combo.exp move-compiler/tests/move_check/borrows/freeze_combo_invalid.exp move-compiler-v2/tests/reference-safety/v1-tests/freeze_combo_invalid.exp move-compiler/tests/move_check/borrows/freeze_field.exp move-compiler-v2/tests/reference-safety/v1-tests/freeze_field.exp @@ -64,6 +66,17 @@ move-compiler/tests/move_check/borrows/return_borrowed_local_invalid.exp move- move-compiler/tests/move_check/borrows/return_mutual_borrows.exp move-compiler-v2/tests/reference-safety/v1-tests/return_mutual_borrows.exp move-compiler/tests/move_check/borrows/return_mutual_borrows_invalid.exp move-compiler-v2/tests/reference-safety/v1-tests/return_mutual_borrows_invalid.exp move-compiler/tests/move_check/borrows/unused_ref.exp 
move-compiler-v2/tests/reference-safety/v1-tests/unused_ref.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/assign_in_one_if_branch.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_in_one_if_branch.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/assign_wrong_if_branch.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/assign_wrong_if_branch_no_else.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/assign_wrong_if_branch_no_else.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/else_assigns_if_doesnt.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/else_assigns_if_doesnt.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/if_assigns_else_doesnt.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_else_doesnt.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/if_assigns_no_else.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/if_assigns_no_else.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/move_before_assign.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp +move-compiler/tests/move_check/translated_ir_tests/move/commands/use_before_assign.exp move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp +move-compiler/tests/move_check/control_flow/for_loop_empty_novar.exp move-compiler-v2/tests/checking/control_flow/for_loop_empty_novar.exp +move-compiler/tests/move_check/control_flow/for_type_mismatch.exp move-compiler-v2/tests/checking/control_flow/for_type_mismatch.exp +move-compiler/tests/move_check/control_flow/loop_after_loop.exp move-compiler-v2/tests/checking/control_flow/loop_after_loop.exp move-compiler/tests/move_check/folding/non_constant_empty_vec.exp move-compiler-v2/tests/folding/non_constant_empty_vec.exp move-compiler/tests/move_check/folding/unfoldable_constants.exp move-compiler-v2/tests/folding/unfoldable_constants.exp move-compiler/tests/move_check/folding/unfoldable_constants_blocks.exp move-compiler-v2/tests/folding/unfoldable_constants_blocks.exp @@ -93,11 +106,19 @@ move-compiler/tests/move_check/inlining/shadowing_unused.exp move-compiler-v2/ move-compiler/tests/move_check/inlining/shadowing_unused_nodecl.exp move-compiler-v2/tests/checking/inlining/shadowing_unused_nodecl.exp move-compiler/tests/move_check/inlining/spec_inlining.exp move-compiler-v2/tests/checking/inlining/spec_inlining.exp move-compiler/tests/move_check/inlining/unused_inline.exp move-compiler-v2/tests/checking/inlining/unused_inline.exp +move-compiler/tests/move_check/locals/use_before_assign_if.exp move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if.exp +move-compiler/tests/move_check/locals/use_before_assign_if_else.exp move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_if_else.exp +move-compiler/tests/move_check/locals/use_before_assign_loop.exp move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_loop.exp +move-compiler/tests/move_check/locals/use_before_assign_simple.exp move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_simple.exp +move-compiler/tests/move_check/locals/use_before_assign_while.exp move-compiler-v2/tests/uninit-use-checker/v1-locals/use_before_assign_while.exp move-compiler/tests/move_check/naming/assert_one_arg.exp move-compiler-v2/tests/checking/naming/assert_one_arg.exp 
move-compiler/tests/move_check/naming/double_fun_decl.exp move-compiler-v2/tests/checking/naming/double_fun_decl.exp move-compiler/tests/move_check/naming/duplicate_acquires_list_item.exp move-compiler-v2/tests/checking/naming/duplicate_acquires_list_item.exp move-compiler/tests/move_check/naming/duplicate_type_parameter_function.exp move-compiler-v2/tests/checking/naming/duplicate_type_parameter_function.exp move-compiler/tests/move_check/naming/duplicate_type_parameter_struct.exp move-compiler-v2/tests/checking/naming/duplicate_type_parameter_struct.exp +move-compiler/tests/move_check/naming/friend_decl_out_of_account_addr.exp move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_out_of_account_addr.exp +move-compiler/tests/move_check/naming/friend_decl_self.exp move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_self.exp +move-compiler/tests/move_check/naming/friend_decl_unbound_module.exp move-compiler-v2/tests/checking/friends/v1-tests/friend_decl_unbound_module.exp move-compiler/tests/move_check/naming/generics_shadowing.exp move-compiler-v2/tests/checking/naming/generics_shadowing.exp move-compiler/tests/move_check/naming/generics_shadowing_invalid.exp move-compiler-v2/tests/checking/naming/generics_shadowing_invalid.exp move-compiler/tests/move_check/naming/generics_with_type_parameters.exp move-compiler-v2/tests/checking/naming/generics_with_type_parameters.exp diff --git a/third_party/move/move-compiler-v2/tests/v1.unmatched b/third_party/move/move-compiler-v2/tests/v1.unmatched index e9a27f6bb6079..b4a5ad16a4a5a 100644 --- a/third_party/move/move-compiler-v2/tests/v1.unmatched +++ b/third_party/move/move-compiler-v2/tests/v1.unmatched @@ -16,7 +16,6 @@ move-compiler/tests/move_check/borrow_tests/{ borrow_global_bad2.move, borrow_global_bad5.move, borrow_global_good.move, - borrow_if.move, borrow_return_mutable_borrow_bad.move, copy_loc_borrowed.move, copy_loc_borrowed_field.move, @@ -61,14 +60,10 @@ move-compiler/tests/move_check/borrow_tests/{ move-compiler/tests/move_check/borrows/{ call_acquires.move, call_acquires_invalid.move, - eq_unassigned_local.move, } move-compiler/tests/move_check/commands/{ abort_negative_stack_size.move, - assign_in_one_if_branch.move, assign_resource.move, - assign_wrong_if_branch.move, - assign_wrong_if_branch_no_else.move, assign_wrong_type.move, branch_assigns_then_moves.move, break_outside_loop.move, @@ -80,17 +75,13 @@ move-compiler/tests/move_check/commands/{ continue_outside_loop_in_if.move, dead_return.move, dead_return_local.move, - else_assigns_if_doesnt.move, else_moves_if_doesnt.move, - if_assigns_else_doesnt.move, - if_assigns_no_else.move, if_moves_else_doesnt.move, if_moves_no_else.move, invalid_fallthrough2.move, invalid_fallthrough3.move, join_failure.move, mixed_lvalue.move, - move_before_assign.move, no_let_outside_if.move, pop_negative.move, pop_positive.move, @@ -99,14 +90,11 @@ move-compiler/tests/move_check/commands/{ unpack_extra_binding.move, unpack_missing_binding.move, unpack_wrong_type.move, - use_before_assign.move, while_move_local.move, while_move_local_2.move, } move-compiler/tests/move_check/control_flow/{ - for_user.move, infinite_loop_with_dead_exits.move, - loop_after_loop.move, } move-compiler/tests/move_check/deprecated/{ assert_function.move, @@ -181,16 +169,8 @@ move-compiler/tests/move_check/locals/{ use_after_move_loop.move, use_after_move_simple.move, use_after_move_while.move, - use_before_assign_if.move, - use_before_assign_if_else.move, - use_before_assign_loop.move, - 
use_before_assign_simple.move, - use_before_assign_while.move, } move-compiler/tests/move_check/naming/{ - friend_decl_out_of_account_addr.move, - friend_decl_self.move, - friend_decl_unbound_module.move, named_address_distinct_from_each_others_value.move, named_address_not_distinct_from_value.move, vector_literal_type_arity.move, diff --git a/third_party/move/move-compiler-v2/tools/testdiff/src/main.rs b/third_party/move/move-compiler-v2/tools/testdiff/src/main.rs index c30030bcf603f..640f1c56d9079 100644 --- a/third_party/move/move-compiler-v2/tools/testdiff/src/main.rs +++ b/third_party/move/move-compiler-v2/tools/testdiff/src/main.rs @@ -71,6 +71,23 @@ static UNIT_PATH_REMAP: Lazy> = Lazy::new(|| { "reference-safety/v1-tests".to_string(), "borrows".to_string(), ); + map.insert("friends/v1-tests".to_string(), "naming".to_string()); + map.insert( + "uninit-use-checker/v1-commands".to_string(), + "commands".to_string(), + ); + map.insert( + "uninit-use-checker/v1-locals".to_string(), + "locals".to_string(), + ); + map.insert( + "uninit-use-checker/v1-borrows".to_string(), + "borrows".to_string(), + ); + map.insert( + "uninit-use-checker/v1-borrow-tests".to_string(), + "borrow_tests".to_string(), + ); map }); From 91a56c64b56a55b59302741fd8af57545b86b18a Mon Sep 17 00:00:00 2001 From: Alin Tomescu Date: Mon, 29 Jan 2024 11:23:09 -0800 Subject: [PATCH 16/44] update PVSS aggregate_verify benchmark to use average aggregation count (#11800) --- CODEOWNERS | 13 +- crates/aptos-dkg/benches/pvss.rs | 62 ++++- crates/aptos-dkg/benches/weighted_vuf.rs | 248 +++++++++++++----- crates/aptos-dkg/src/pvss/test_utils.rs | 85 ++++-- .../src/pvss/weighted/weighted_config.rs | 30 ++- crates/aptos-dkg/src/weighted_vuf/bls/mod.rs | 12 +- .../aptos-dkg/src/weighted_vuf/pinkas/mod.rs | 2 +- crates/aptos-dkg/tests/dkg.rs | 19 +- crates/aptos-dkg/tests/pvss.rs | 56 ++-- crates/aptos-dkg/tests/weighted_vuf.rs | 38 +-- 10 files changed, 385 insertions(+), 180 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index 996f685988c58..c4ab421d0a392 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -11,16 +11,15 @@ /aptos-move/aptos-gas/ @vgao1996 /aptos-move/aptos-vm/ @davidiw @wrwg @zekun000 @vgao1996 @georgemitenkov /aptos-move/aptos-vm-types/ @georgemitenkov @gelash @vgao1996 -/aptos-move/e2e-tests/src/account.rs @alinush /aptos-move/framework/ @davidiw @movekevin @wrwg /aptos-move/framework/aptos-framework/sources/account.move @alinush -/aptos-move/framework/aptos-stdlib/sources/cryptography/ @alinush +/aptos-move/framework/aptos-stdlib/sources/cryptography/ @alinush @zjma @mstraka100 /aptos-move/framework/**/*.spec.move @junkil-park /aptos-move/framework/aptos-stdlib/sources/hash.move @alinush # Owner for aptos-token, cryptography natives, parallel-executor and vm-genesis. /aptos-move/framework/aptos-token @areshand -/aptos-move/framework/src/natives/cryptography/ @alinush +/aptos-move/framework/src/natives/cryptography/ @alinush @zjma @mstraka100 /aptos-move/framework/src/natives/aggregator_natives/ @georgemitenkov @gelash @zekun000 /aptos-move/block-executor/ @gelash @zekun000 @sasha8 @danielxiangzl /aptos-move/sharded_block-executor/ @sitalkedia @@ -43,8 +42,8 @@ /crates/aptos @gregnazario @0xjinn @banool # Owners for the `/crates/aptos-crypto*` directories. 
-/crates/aptos-crypto-derive/ @alinush -/crates/aptos-crypto/ @alinush +/crates/aptos-crypto-derive/ @alinush @zjma @mstraka100 @rex1fernando +/crates/aptos-crypto/ @alinush @zjma @mstraka100 @rex1fernando # Owners for the `/crates/aptos-faucet` directory and all its subdirectories. And other faucet, genesis, and OpenAPI-related crates. /crates/aptos-faucet @banool @gregnazario @@ -109,6 +108,6 @@ /terraform/ @aptos-labs/prod-eng # Owners for the `aptos-dkg` crate. -/crates/aptos-dkg @alinush +/crates/aptos-dkg @alinush @rex1fernando -/types/src/transaction/authenticator.rs @alinush +/types/src/transaction/authenticator.rs @alinush @mstraka100 diff --git a/crates/aptos-dkg/benches/pvss.rs b/crates/aptos-dkg/benches/pvss.rs index 5d263b3122241..0e4104f1f5f95 100644 --- a/crates/aptos-dkg/benches/pvss.rs +++ b/crates/aptos-dkg/benches/pvss.rs @@ -9,13 +9,14 @@ use aptos_dkg::{ pvss::{ test_utils, test_utils::{ - get_threshold_configs_for_benchmarking, get_weighted_configs_for_benchmarking, NoAux, + get_threshold_configs_for_benchmarking, get_weighted_configs_for_benchmarking, + DealingArgs, NoAux, }, traits::{ transcript::{MalleableTranscript, Transcript}, SecretSharingConfig, }, - GenericWeighting, + WeightedConfig, }, }; use criterion::{ @@ -34,26 +35,57 @@ pub fn all_groups(c: &mut Criterion) { // weighted PVSS for wc in get_weighted_configs_for_benchmarking() { - pvss_group::(&wc, c); - pvss_group::>(&wc, c); + let d = pvss_group::(&wc, c); + weighted_pvss_group(&wc, d, c); + + // Note: Insecure, so not interested in benchmarks. + // let d = pvss_group::>(&wc, c); + // weighted_pvss_group(&wc, d, c); } } -pub fn pvss_group(sc: &T::SecretSharingConfig, c: &mut Criterion) { +pub fn pvss_group( + sc: &T::SecretSharingConfig, + c: &mut Criterion, +) -> DealingArgs { let name = T::scheme_name(); let mut group = c.benchmark_group(format!("pvss/{}", name)); let mut rng = thread_rng(); // TODO: use a lazy pattern to avoid this expensive step when no benchmarks are run - let (pp, ssks, spks, dks, eks, iss, s, _) = - test_utils::setup_dealing::(sc, &mut rng); + let d = test_utils::setup_dealing::(sc, &mut rng); // pvss_transcript_random::(sc, &mut group); - pvss_deal::(sc, &pp, &ssks, &eks, &mut group); + pvss_deal::(sc, &d.pp, &d.ssks, &d.eks, &mut group); pvss_aggregate::(sc, &mut group); - pvss_verify::(sc, &pp, &ssks, &spks, &eks, &mut group); - pvss_aggregate_verify::(sc, &pp, &ssks, &spks, &eks, &iss[0], 100, &mut group); - pvss_decrypt_own_share::(sc, &pp, &ssks, &dks, &eks, &s, &mut group); + pvss_verify::(sc, &d.pp, &d.ssks, &d.spks, &d.eks, &mut group); + pvss_decrypt_own_share::(sc, &d.pp, &d.ssks, &d.dks, &d.eks, &d.s, &mut group); + + group.finish(); + + d +} + +pub fn weighted_pvss_group>( + sc: &T::SecretSharingConfig, + d: DealingArgs, + c: &mut Criterion, +) { + let name = T::scheme_name(); + let mut group = c.benchmark_group(format!("wpvss/{}", name)); + let mut rng = thread_rng(); + + let average_aggregation_size = sc.get_average_size_of_eligible_subset(250, &mut rng); + pvss_aggregate_verify::( + sc, + &d.pp, + &d.ssks, + &d.spks, + &d.eks, + &d.iss[0], + average_aggregation_size, + &mut group, + ); group.finish(); } @@ -160,6 +192,7 @@ fn pvss_aggregate_verify( // players obtaining shares. (In other settings, there could be 1 million dealers, dealing a // secret to only 100 players such that, say, any 50 can reconstruct them.) 
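// Editorial clarification (comment only, assuming the setup closure shown below): each
// transcript aggregated in this benchmark is a re-signed ("mauled") copy of the first
// dealt transcript, so `num_aggr` counts the distinct dealers represented in the
// aggregate -- which is why it is capped by the player count in the assert below.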
assert_le!(num_aggr, sc.get_total_num_players()); + assert_eq!(ssks.len(), spks.len()); g.throughput(Throughput::Elements(sc.get_total_num_shares() as u64)); @@ -184,13 +217,16 @@ fn pvss_aggregate_verify( &mut rng, )); - for (i, ssk) in ssks.iter().enumerate().skip(1).take(num_aggr) { + for (i, ssk) in ssks.iter().enumerate().skip(1).take(num_aggr - 1) { let mut trx = trxs[0].clone(); trx.maul_signature(ssk, &NoAux, &sc.get_player(i)); trxs.push(trx); } + assert_eq!(spks.len(), trxs.len()); - T::aggregate(sc, trxs).unwrap() + let trx = T::aggregate(sc, trxs).unwrap(); + assert_eq!(trx.get_dealers().len(), num_aggr); + trx }, |trx| { trx.verify(&sc, &pp, &spks, &eks, &vec![NoAux; num_aggr]) diff --git a/crates/aptos-dkg/benches/weighted_vuf.rs b/crates/aptos-dkg/benches/weighted_vuf.rs index 429f84eab1df0..e6b538b65c4ef 100644 --- a/crates/aptos-dkg/benches/weighted_vuf.rs +++ b/crates/aptos-dkg/benches/weighted_vuf.rs @@ -38,6 +38,7 @@ pub fn wvuf_benches< WT: Transcript, WVUF: WeightedVUF< SecretKey = WT::DealtSecretKey, + PubKey = WT::DealtPubKey, PubKeyShare = WT::DealtPubKeyShare, SecretKeyShare = WT::DealtSecretKeyShare, >, @@ -52,8 +53,7 @@ pub fn wvuf_benches< let mut bench_cases = vec![]; for wc in get_weighted_configs_for_benchmarking() { // TODO: use a lazy pattern to avoid this expensive dealing when no benchmarks are run - let (pvss_pp, ssks, _spks, dks, eks, iss, _s, dsk) = - setup_dealing::(&wc, &mut rng); + let d = setup_dealing::(&wc, &mut rng); println!( "Best-case subset size: {}", @@ -67,30 +67,30 @@ pub fn wvuf_benches< println!("Dealing a {} PVSS transcript", WT::scheme_name()); let trx = WT::deal( &wc, - &pvss_pp, - &ssks[0], - &eks, - &iss[0], + &d.pp, + &d.ssks[0], + &d.eks, + &d.iss[0], &NoAux, &wc.get_player(0), &mut rng, ); - let vuf_pp = WVUF::PublicParameters::from(&pvss_pp); + let vuf_pp = WVUF::PublicParameters::from(&d.pp); let mut sks = vec![]; let mut pks = vec![]; + let mut deltas = vec![]; let mut asks = vec![]; let mut apks = vec![]; - let mut deltas = vec![]; println!( "Decrypting shares from {} PVSS transcript", WT::scheme_name() ); for i in 0..wc.get_total_num_players() { - let (sk, pk) = trx.decrypt_own_share(&wc, &wc.get_player(i), &dks[i]); - + let (sk, pk) = trx.decrypt_own_share(&wc, &wc.get_player(i), &d.dks[i]); let (ask, apk) = WVUF::augment_key_pair(&vuf_pp, sk.clone(), pk.clone(), &mut rng); + sks.push(sk); pks.push(pk); deltas.push(WVUF::get_public_delta(&apk).clone()); @@ -99,10 +99,10 @@ pub fn wvuf_benches< } println!(); - bench_cases.push((wc, vuf_pp, dsk, sks, pks, asks, apks, deltas)); + bench_cases.push((wc, vuf_pp, d.dsk, d.dpk, sks, pks, asks, apks, deltas)); } - for (wc, vuf_pp, sk, sks, pks, asks, apks, deltas) in bench_cases { + for (wc, vuf_pp, sk, pk, sks, pks, asks, apks, deltas) in bench_cases { wvuf_augment_random_keypair::( &wc, &vuf_pp, &sks, &pks, group, &mut rng, ); @@ -117,44 +117,58 @@ pub fn wvuf_benches< wvuf_verify_share::(&wc, &vuf_pp, &asks, &apks, group, &mut rng); - // best-case aggregation times (pick players with largest weights) - wvuf_aggregate_shares::( - &wc, - &asks, - &apks, - group, - &mut rng, - WeightedConfig::get_worst_case_eligible_subset_of_players, - "best_case".to_string(), - ); - - // average/random case aggregation time - wvuf_aggregate_shares::( - &wc, - &asks, - &apks, - group, - &mut rng, - WeightedConfig::get_random_eligible_subset_of_players, - "random".to_string(), - ); - - // worst-case aggregation times (pick players with smallest weights) - wvuf_aggregate_shares::( - &wc, - &asks, - 
&apks, - group, - &mut rng, - WeightedConfig::get_worst_case_eligible_subset_of_players, - "worst_case".to_string(), - ); + let bc: Vec<(fn(&WeightedConfig, &mut ThreadRng) -> Vec<Player>, String)> = vec![ + ( + WeightedConfig::get_random_eligible_subset_of_players, + "random".to_string(), + ), + ( + WeightedConfig::get_best_case_eligible_subset_of_players, + "best_case".to_string(), + ), + ( + WeightedConfig::get_worst_case_eligible_subset_of_players, + "worst_case".to_string(), + ), + ]; + + for (pick_subset_fn, subset_type) in bc { + // Benchmark aggregation, proof verification and eval derivation for the chosen subset-selection strategy (random, best case, or worst case) + wvuf_aggregate_shares::<WT, WVUF, ThreadRng, M>( + &wc, + &asks, + &apks, + group, + &mut rng, + pick_subset_fn, + &subset_type, + ); + + wvuf_verify_proof::<WT, WVUF, ThreadRng, M>( + &wc, + &vuf_pp, + &pk, + &asks, + &apks, + group, + &mut rng, + pick_subset_fn, + &subset_type, + ); + + wvuf_derive_eval::<WT, WVUF, ThreadRng, M>( + &wc, + &vuf_pp, + &asks, + &apks, + group, + &mut rng, + pick_subset_fn, + &subset_type, + ); + } wvuf_eval::<WT, WVUF, M>(&wc, &sk, group); - - // TODO: verify_proof (but needs efficient create_proof) - - // TODO: derive_eval (but needs efficient create_proof) } } @@ -320,6 +334,7 @@ fn wvuf_aggregate_shares< WT: Transcript, WVUF: WeightedVUF< SecretKey = WT::DealtSecretKey, + PubKey = WT::DealtPubKey, PubKeyShare = WT::DealtPubKeyShare, SecretKeyShare = WT::DealtSecretKeyShare, >, @@ -333,36 +348,143 @@ fn wvuf_aggregate_shares< group: &mut BenchmarkGroup<M>, rng: &mut R, pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec<Player>, - subset_type: String, + subset_type: &String, ) where WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, { group.bench_function( - format!("aggregate_{}_shares/{}", subset_type, wc), + format!("aggregate_shares/{}-subset/{}", subset_type, wc), move |b| { b.iter_with_setup( - || { - let players = pick_subset_fn(wc, rng); - - players + || get_apks_and_proofs::<WT, WVUF, R>(&wc, &asks, apks, rng, pick_subset_fn), + |apks_and_proofs| { + WVUF::aggregate_shares(&wc, apks_and_proofs.as_slice()); + }, + ) + }, + ); +} - .iter() - .map(|p| { - ( - *p, - apks[p.id].clone(), - WVUF::create_share(&asks[p.id], BENCH_MSG), - ) - }) - .collect::<Vec<_>>() +fn wvuf_verify_proof< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKey = WT::DealtPubKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, + M: Measurement, +>( + // For efficiency, we re-use the PVSS transcript + wc: &WeightedConfig, + pp: &WVUF::PublicParameters, + pk: &WVUF::PubKey, + asks: &Vec<WVUF::AugmentedSecretKeyShare>, + apks: &Vec<WVUF::AugmentedPubKeyShare>, + group: &mut BenchmarkGroup<M>, + rng: &mut R, + pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec<Player>, + subset_type: &String, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function( + format!("verify_proof/{}-subset/{}", subset_type, wc), + move |b| { + b.iter_with_setup( + || { + let apks_and_proofs = + get_apks_and_proofs::<WT, WVUF, R>(&wc, &asks, apks, rng, pick_subset_fn); + WVUF::aggregate_shares(&wc, apks_and_proofs.as_slice()) + }, + |proof| { + let apks = apks + .iter() + .map(|apk| Some(apk.clone())) + .collect::<Vec<Option<WVUF::AugmentedPubKeyShare>>>(); + assert!(WVUF::verify_proof(pp, pk, apks.as_slice(), BENCH_MSG, &proof).is_ok()) + }, + ) + }, + ); +} + +fn wvuf_derive_eval< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKey = WT::DealtPubKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore +
rand_core::CryptoRng, + M: Measurement, +>( + // For efficiency, we re-use the PVSS transcript + wc: &WeightedConfig, + pp: &WVUF::PublicParameters, + asks: &Vec, + apks: &Vec, + group: &mut BenchmarkGroup, + rng: &mut R, + pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, + subset_type: &String, +) where + WVUF::PublicParameters: for<'a> From<&'a WT::PublicParameters>, +{ + group.bench_function( + format!("derive_eval/{}-subset/{}", subset_type, wc), + move |b| { + b.iter_with_setup( + || { + let apks_and_proofs = + get_apks_and_proofs::(&wc, &asks, apks, rng, pick_subset_fn); + WVUF::aggregate_shares(&wc, apks_and_proofs.as_slice()) + }, + |proof| { + let apks = apks + .iter() + .map(|apk| Some(apk.clone())) + .collect::>>(); + assert!(WVUF::derive_eval(wc, pp, BENCH_MSG, apks.as_slice(), &proof).is_ok()) }, ) }, ); } +fn get_apks_and_proofs< + WT: Transcript, + WVUF: WeightedVUF< + SecretKey = WT::DealtSecretKey, + PubKey = WT::DealtPubKey, + PubKeyShare = WT::DealtPubKeyShare, + SecretKeyShare = WT::DealtSecretKeyShare, + >, + R: rand_core::RngCore + rand_core::CryptoRng, +>( + // For efficiency, we re-use the PVSS transcript + wc: &WeightedConfig, + asks: &Vec, + apks: &Vec, + rng: &mut R, + pick_subset_fn: fn(&WeightedConfig, &mut R) -> Vec, +) -> Vec<(Player, WVUF::AugmentedPubKeyShare, WVUF::ProofShare)> { + let players = pick_subset_fn(wc, rng); + + players + .iter() + .map(|p| { + ( + *p, + apks[p.id].clone(), + WVUF::create_share(&asks[p.id], BENCH_MSG), + ) + }) + .collect::>() +} + fn wvuf_eval< WT: Transcript, WVUF: WeightedVUF< diff --git a/crates/aptos-dkg/src/pvss/test_utils.rs b/crates/aptos-dkg/src/pvss/test_utils.rs index 5072a76482fb0..479f8081f2eed 100644 --- a/crates/aptos-dkg/src/pvss/test_utils.rs +++ b/crates/aptos-dkg/src/pvss/test_utils.rs @@ -15,6 +15,19 @@ use std::ops::AddAssign; #[derive(Clone, Serialize)] pub struct NoAux; +/// Useful for gathering all the necessary args to deal inside tests & benchmarks. +pub struct DealingArgs { + pub pp: T::PublicParameters, + pub ssks: Vec, + pub spks: Vec, + pub dks: Vec, + pub eks: Vec, + pub iss: Vec, + pub s: T::InputSecret, + pub dsk: T::DealtSecretKey, + pub dpk: T::DealtPubKey, +} + /// Helper function that, given a sharing configuration for `n` players, returns an a tuple of: /// - public parameters /// - a vector of `n` signing SKs @@ -27,16 +40,7 @@ pub struct NoAux; pub fn setup_dealing( sc: &T::SecretSharingConfig, mut rng: &mut R, -) -> ( - T::PublicParameters, - Vec, - Vec, - Vec, - Vec, - Vec, - T::InputSecret, - T::DealtSecretKey, -) { +) -> DealingArgs { println!( "Setting up dealing for {} PVSS, with {}", T::scheme_name(), @@ -73,10 +77,24 @@ pub fn setup_dealing::DealtSecretKey = s.to(&pp); + + let dpk: ::DealtPubKey = s.to(&pp); + let dsk: ::DealtSecretKey = s.to(&pp); // println!("Dealt SK: {:?}", sk); - (pp, ssks, spks, dks, eks, iss, s, sk) + assert_eq!(ssks.len(), spks.len()); + + DealingArgs { + pp, + ssks, + spks, + dks, + eks, + iss, + s, + dsk, + dpk, + } } /// Useful for printing types of variables without too much hassle. 
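The `test_utils.rs` hunk above replaces the eight-element tuple returned by `setup_dealing` with the named `DealingArgs` struct, which is why the benchmark call sites in this patch switch from positional destructuring to field access (`d.pp`, `d.ssks`, `d.dsk`, ...). A minimal, self-contained sketch of the pattern with placeholder types (not the actual aptos-dkg definitions):

```rust
// Sketch only: `u64` stands in for the PVSS associated types.
struct DealingArgs {
    pp: u64,        // public parameters
    ssks: Vec<u64>, // signing secret keys
    dsk: u64,       // dealt secret key
}

fn setup_dealing() -> DealingArgs {
    DealingArgs { pp: 7, ssks: vec![1, 2, 3], dsk: 42 }
}

fn main() {
    // Before: `let (pp, ssks, dsk) = setup_dealing();` breaks every caller whenever
    // a new component (like `dpk` in this patch) is added to the tuple.
    // After: callers name only the fields they use, so additions are non-breaking.
    let d = setup_dealing();
    println!("pp={} ssk0={} dsk={}", d.pp, d.ssks[0], d.dsk);
}
```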
@@ -161,27 +179,38 @@ pub fn get_threshold_configs_for_benchmarking() -> Vec { pub fn get_weighted_configs_for_benchmarking() -> Vec { let mut wcs = vec![]; - // Total weight is 9230 let weights = vec![ - 17, 17, 11, 11, 11, 74, 40, 11, 11, 11, 11, 11, 218, 218, 218, 218, 218, 218, 218, 170, 11, - 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 18, 11, 11, 11, 192, 218, 11, 11, 52, 11, - 161, 24, 11, 11, 11, 11, 218, 218, 161, 175, 80, 13, 103, 11, 11, 11, 11, 40, 40, 40, 14, - 218, 218, 11, 218, 11, 11, 218, 11, 218, 71, 55, 218, 184, 170, 11, 218, 218, 164, 177, - 171, 18, 209, 11, 20, 12, 147, 18, 169, 13, 35, 208, 13, 218, 218, 218, 218, 218, 218, 163, - 73, 26, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 4, 4, 4, + 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 9, 10, 14, 14, 15, 15, 15, 15, 15, 15, 16, 16, 16, 17, + 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 20, + 20, 20, 20, + ]; + let total_weight: usize = weights.iter().sum(); + let threshold = total_weight * 2 / 3 + 1; + wcs.push(WeightedConfig::new(threshold, weights).unwrap()); + + let weights = vec![ + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 5, 5, 5, 5, 5, 5, 6, 7, 7, 10, 11, 11, + 11, 11, 11, 13, 14, 14, 15, 18, 18, 20, 20, 20, 22, 28, 31, 42, 44, 44, 44, 45, 46, 46, 46, + 47, 47, 48, 50, 51, 51, 51, 51, 52, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, + 54, 57, 57, 60, 60, 60, 60, ]; - wcs.push(WeightedConfig::new(3087, weights.clone()).unwrap()); - wcs.push(WeightedConfig::new(6162, weights).unwrap()); + let total_weight: usize = weights.iter().sum(); + let threshold = total_weight * 2 / 3 + 1; + wcs.push(WeightedConfig::new(threshold, weights).unwrap()); - // Total weight is 850 let weights = vec![ - 2, 2, 1, 1, 1, 7, 4, 1, 1, 1, 1, 1, 20, 20, 20, 20, 20, 20, 20, 16, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 2, 1, 1, 1, 18, 20, 1, 1, 5, 1, 15, 2, 1, 1, 1, 1, 20, 20, 15, 16, 7, 1, 9, - 1, 1, 1, 1, 4, 4, 4, 1, 20, 20, 1, 20, 1, 1, 20, 1, 20, 7, 5, 20, 17, 16, 1, 20, 20, 15, - 16, 16, 2, 19, 1, 2, 1, 13, 2, 16, 1, 3, 19, 1, 20, 20, 20, 20, 20, 20, 15, 7, 2, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 8, 8, 8, 8, 8, 9, 11, 11, 12, 16, 18, + 18, 18, 18, 19, 22, 23, 23, 25, 29, 30, 32, 33, 34, 36, 46, 51, 69, 72, 72, 73, 73, 76, 76, + 76, 77, 78, 79, 82, 84, 84, 84, 84, 86, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, + 89, 89, 93, 94, 98, 98, 98, 98, ]; - wcs.push(WeightedConfig::new(290, weights.clone()).unwrap()); - wcs.push(WeightedConfig::new(573, weights).unwrap()); + let total_weight: usize = weights.iter().sum(); + let threshold = total_weight * 2 / 3 + 1; + wcs.push(WeightedConfig::new(threshold, weights).unwrap()); wcs } diff --git a/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs b/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs index 845bf65dbe6cd..ed35cfea7c0ce 100644 --- a/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs +++ b/crates/aptos-dkg/src/pvss/weighted/weighted_config.rs @@ -146,6 +146,9 @@ impl WeightedConfig { &self.tc.get_evaluation_domain() } + /// NOTE: RNG is passed in to maintain function signature compatibility with + /// `SecretSharingConfig::get_random_eligible_subset_of_players`, so as to easily 
benchmark + /// with different methods of sampling subsets. pub fn get_best_case_eligible_subset_of_players( &self, _rng: &mut R, @@ -155,6 +158,9 @@ impl WeightedConfig { self.pop_eligible_subset(&mut player_and_weights) } + /// NOTE: RNG is passed in to maintain function signature compatibility with + /// `SecretSharingConfig::get_random_eligible_subset_of_players`, so as to easily benchmark + /// with different methods of sampling subsets. pub fn get_worst_case_eligible_subset_of_players( &self, _rng: &mut R, @@ -166,27 +172,39 @@ impl WeightedConfig { self.pop_eligible_subset(&mut player_and_weights) } - fn sort_players_by_weight(&self) -> Vec<(usize, usize)> { + pub fn get_average_size_of_eligible_subset( + &self, + sample_size: usize, + rng: &mut R, + ) -> usize { + let mut average = 0; + for _ in 0..sample_size { + average += self.get_random_eligible_subset_of_players(rng).len(); + } + average / sample_size + } + + fn sort_players_by_weight(&self) -> Vec<(Player, usize)> { // the set of remaining players that we are picking a "capable" subset from let mut player_and_weights = self .weight .iter() .enumerate() - .map(|(i, w)| (i, *w)) - .collect::>(); + .map(|(i, w)| (self.get_player(i), *w)) + .collect::>(); player_and_weights.sort_by(|a, b| a.1.cmp(&b.1)); player_and_weights } - fn pop_eligible_subset(&self, player_and_weights: &mut Vec<(usize, usize)>) -> Vec { + fn pop_eligible_subset(&self, player_and_weights: &mut Vec<(Player, usize)>) -> Vec { let mut picked_players = vec![]; let mut current_weight = 0; while current_weight < self.tc.t { - let (player_idx, weight) = player_and_weights.pop().unwrap(); + let (player, weight) = player_and_weights.pop().unwrap(); - picked_players.push(self.get_player(player_idx)); + picked_players.push(player); // rinse and repeat until the picked players jointly have enough weight current_weight += weight; diff --git a/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs b/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs index 72c7f19a1b89e..e853a4d6c8be1 100644 --- a/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs +++ b/crates/aptos-dkg/src/weighted_vuf/bls/mod.rs @@ -4,13 +4,14 @@ use crate::{ algebra::lagrange::lagrange_coefficients, pvss, pvss::{Player, WeightedConfig}, - utils::{g1_multi_exp, multi_pairing}, + utils::{g1_multi_exp, multi_pairing, random::random_scalars, HasMultiExp}, weighted_vuf::traits::WeightedVUF, }; use anyhow::bail; use blstrs::{G1Projective, G2Projective, Gt, Scalar}; use ff::Field; use group::Group; +use rand::thread_rng; use serde::{Deserialize, Serialize}; use std::ops::{Mul, Neg}; @@ -79,12 +80,15 @@ impl WeightedVUF for BlsWUF { proof: &Self::ProofShare, ) -> anyhow::Result<()> { let hash = Self::hash_to_curve(msg); + // TODO: Use Fiat-Shamir + let coeffs = random_scalars(apk.len(), &mut thread_rng()); - let agg_pk = apk + let pks = apk .iter() .map(|pk| *pk.as_group_element()) - .sum::(); - let agg_sig = proof.iter().copied().sum::(); + .collect::>(); + let agg_pk = G2Projective::multi_exp_slice(pks.as_slice(), coeffs.as_slice()); + let agg_sig = G1Projective::multi_exp_slice(proof.to_vec().as_slice(), coeffs.as_slice()); if multi_pairing( [&hash, &agg_sig].into_iter(), diff --git a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs index a8c9dcf85678b..514c4d1aa867c 100644 --- a/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs +++ b/crates/aptos-dkg/src/weighted_vuf/pinkas/mod.rs @@ -220,7 +220,7 @@ impl WeightedVUF for PinkasWUF { Ok(multi_pairing(lhs.iter().map(|r| r), 
rhs.into_iter())) } - /// Verifies the proof shares one by one + /// Verifies the proof shares (using batch verification) fn verify_proof( pp: &Self::PublicParameters, _pk: &Self::PubKey, diff --git a/crates/aptos-dkg/tests/dkg.rs b/crates/aptos-dkg/tests/dkg.rs index bd99c40d47b69..77c79bad81b62 100644 --- a/crates/aptos-dkg/tests/dkg.rs +++ b/crates/aptos-dkg/tests/dkg.rs @@ -44,8 +44,7 @@ fn test_dkg_all_weighted() { fn aggregatable_dkg(sc: &T::SecretSharingConfig, seed_bytes: [u8; 32]) { let mut rng = StdRng::from_seed(seed_bytes); - let (pp, ssks, spks, dks, eks, iss, _, sk) = - test_utils::setup_dealing::(sc, &mut rng); + let d = test_utils::setup_dealing::(sc, &mut rng); let mut trxs = vec![]; @@ -53,10 +52,10 @@ fn aggregatable_dkg(sc: &T::SecretSharingConfig, see for i in 0..sc.get_total_num_players() { trxs.push(T::deal( sc, - &pp, - &ssks[i], - &eks, - &iss[i], + &d.pp, + &d.ssks[i], + &d.eks, + &d.iss[i], &NoAux, &sc.get_player(i), &mut rng, @@ -69,16 +68,16 @@ fn aggregatable_dkg(sc: &T::SecretSharingConfig, see // Verify the aggregated transcript trx.verify( sc, - &pp, - &spks, - &eks, + &d.pp, + &d.spks, + &d.eks, &(0..sc.get_total_num_players()) .map(|_| NoAux) .collect::>(), ) .expect("aggregated PVSS transcript failed verification"); - if sk != reconstruct_dealt_secret_key_randomly::(sc, &mut rng, &dks, trx) { + if d.dsk != reconstruct_dealt_secret_key_randomly::(sc, &mut rng, &d.dks, trx) { panic!("Reconstructed SK did not match"); } } diff --git a/crates/aptos-dkg/tests/pvss.rs b/crates/aptos-dkg/tests/pvss.rs index 5bc80c38a2988..61f9d89aa1dbf 100644 --- a/crates/aptos-dkg/tests/pvss.rs +++ b/crates/aptos-dkg/tests/pvss.rs @@ -12,7 +12,10 @@ use aptos_dkg::{ das, das::unweighted_protocol, insecure_field, test_utils, - test_utils::{reconstruct_dealt_secret_key_randomly, NoAux}, + test_utils::{ + get_threshold_configs_for_benchmarking, get_weighted_configs_for_benchmarking, + reconstruct_dealt_secret_key_randomly, NoAux, + }, traits::{transcript::Transcript, SecretSharingConfig}, GenericWeighting, ThresholdConfig, }, @@ -76,19 +79,27 @@ fn test_pvss_all_weighted() { #[test] fn test_pvss_transcript_size() { - for (t, n) in [(333, 1_000), (666, 1_000), (3_333, 10_000), (6_666, 10_000)] { + for sc in get_threshold_configs_for_benchmarking() { println!(); - print_transcript_size::(t, n); + let expected_size = expected_transcript_size::(&sc); + let actual_size = actual_transcript_size::(&sc); + + print_transcript_size::("Expected", &sc, expected_size); + print_transcript_size::("Actual", &sc, actual_size); + } + + for wc in get_weighted_configs_for_benchmarking() { + let actual_size = actual_transcript_size::(wc.get_threshold_config()); + print_transcript_size::("Actual", wc.get_threshold_config(), actual_size); + + let actual_size = actual_transcript_size::(&wc); + print_transcript_size::("Actual", &wc, actual_size); } } -fn print_transcript_size>(t: usize, n: usize) { +fn print_transcript_size(size_type: &str, sc: &T::SecretSharingConfig, size: usize) { let name = T::scheme_name(); - let expected_size = expected_transcript_size::(t, n); - let actual_size = actual_transcript_size::(t, n); - - println!("Expected transcript size for {t}-out-of-{n} {name}: {expected_size} bytes"); - println!("Actual transcript size for {t}-out-of-{n} {name}: {actual_size} bytes"); + println!("{size_type:8} transcript size for {sc} {name}: {size} bytes"); } // @@ -107,20 +118,20 @@ fn pvss_deal_verify_and_reconstruct( // println!("Seed: {}", hex::encode(seed_bytes.as_slice())); let mut rng = 
StdRng::from_seed(seed_bytes); - let (pp, ssks, spks, dks, eks, _, s, sk) = test_utils::setup_dealing::(sc, &mut rng); + let d = test_utils::setup_dealing::(sc, &mut rng); // Test dealing let trx = T::deal( &sc, - &pp, - &ssks[0], - &eks, - &s, + &d.pp, + &d.ssks[0], + &d.eks, + &d.s, &NoAux, &sc.get_player(0), &mut rng, ); - trx.verify(&sc, &pp, &vec![spks[0].clone()], &eks, &vec![NoAux]) + trx.verify(&sc, &d.pp, &vec![d.spks[0].clone()], &d.eks, &vec![NoAux]) .expect("PVSS transcript failed verification"); // Test transcript (de)serialization @@ -128,16 +139,13 @@ fn pvss_deal_verify_and_reconstruct( .expect("serialized transcript should deserialize correctly"); assert_eq!(trx, trx_deserialized); - if sk != reconstruct_dealt_secret_key_randomly::(sc, &mut rng, &dks, trx) { + if d.dsk != reconstruct_dealt_secret_key_randomly::(sc, &mut rng, &d.dks, trx) { panic!("Reconstructed SK did not match"); } } -fn actual_transcript_size>( - t: usize, - n: usize, -) -> usize { - let (sc, mut rng) = test_utils::get_threshold_config_and_rng(t, n); +fn actual_transcript_size(sc: &T::SecretSharingConfig) -> usize { + let mut rng = thread_rng(); let trx = T::generate(&sc, &mut rng); let actual_size = trx.to_bytes().len(); @@ -146,11 +154,11 @@ fn actual_transcript_size>( } fn expected_transcript_size>( - _t: usize, - n: usize, + sc: &ThresholdConfig, ) -> usize { if T::scheme_name() == unweighted_protocol::DAS_SK_IN_G1 { - G2_PROJ_NUM_BYTES + (n + 1) * (G2_PROJ_NUM_BYTES + G1_PROJ_NUM_BYTES) + G2_PROJ_NUM_BYTES + + (sc.get_total_num_players() + 1) * (G2_PROJ_NUM_BYTES + G1_PROJ_NUM_BYTES) } else { panic!("Did not implement support for '{}' yet", T::scheme_name()) } diff --git a/crates/aptos-dkg/tests/weighted_vuf.rs b/crates/aptos-dkg/tests/weighted_vuf.rs index 5a1504226b8ad..dd0bc0a273122 100644 --- a/crates/aptos-dkg/tests/weighted_vuf.rs +++ b/crates/aptos-dkg/tests/weighted_vuf.rs @@ -7,8 +7,8 @@ use aptos_dkg::{ pvss, pvss::{ test_utils, - test_utils::NoAux, - traits::{Convert, SecretSharingConfig, Transcript}, + test_utils::{DealingArgs, NoAux}, + traits::{SecretSharingConfig, Transcript}, Player, WeightedConfig, }, utils::random::random_scalar, @@ -41,48 +41,38 @@ where // Do a weighted PVSS let mut rng = StdRng::from_seed(seed.to_bytes_le()); - let (wc, pvss_pp, dks, sk, pk, trx) = weighted_pvss::(&mut rng); + let (wc, d, trx) = weighted_pvss::(&mut rng); // Test decrypting SK shares, creating VUF proof shares, and aggregating those shares into a VUF // proof, verifying that proof and finally deriving the VUF evaluation. 
wvuf_randomly_aggregate_verify_and_derive_eval::( - &wc, &sk, &pk, &dks, &pvss_pp, &trx, &mut rng, + &wc, &d.dsk, &d.dpk, &d.dks, &d.pp, &trx, &mut rng, ); } fn weighted_pvss>( - mut rng: &mut StdRng, -) -> ( - WeightedConfig, - T::PublicParameters, - Vec, - T::DealtSecretKey, - T::DealtPubKey, - T, -) { + rng: &mut StdRng, +) -> (WeightedConfig, DealingArgs, T) { let wc = WeightedConfig::new(10, vec![3, 5, 3, 4, 2, 1, 1, 7]).unwrap(); - let (pvss_pp, ssks, spks, dks, eks, _, s, sk) = - test_utils::setup_dealing::(&wc, rng); + let d = test_utils::setup_dealing::(&wc, rng); let trx = T::deal( &wc, - &pvss_pp, - &ssks[0], - &eks, - &s, + &d.pp, + &d.ssks[0], + &d.eks, + &d.s, &NoAux, &wc.get_player(0), - &mut rng, + rng, ); - let pk: ::DealtPubKey = s.to(&pvss_pp); - // Make sure the PVSS dealt correctly - trx.verify(&wc, &pvss_pp, &vec![spks[0].clone()], &eks, &vec![NoAux]) + trx.verify(&wc, &d.pp, &vec![d.spks[0].clone()], &d.eks, &vec![NoAux]) .expect("PVSS transcript failed verification"); - (wc, pvss_pp, dks, sk, pk, trx) + (wc, d, trx) } /// 1. Evaluates the VUF using the `sk` directly. From ffe38cf956c25a320ff812c49782f9f6f917a726 Mon Sep 17 00:00:00 2001 From: "Daniel Porteous (dport)" Date: Mon, 29 Jan 2024 11:25:45 -0800 Subject: [PATCH 17/44] [Indexer] Use spawn_blocking for converting txns in stream coordinator (#11815) --- .../indexer-grpc-fullnode/src/stream_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs index 3e0793b4271f9..a772948aba129 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs @@ -144,7 +144,7 @@ impl IndexerStreamCoordinator { let mut tasks = vec![]; for batch in task_batches { let context = self.context.clone(); - let task = tokio::spawn(async move { + let task = tokio::task::spawn_blocking(move || { let raw_txns = batch; let api_txns = Self::convert_to_api_txns(context, raw_txns); let pb_txns = Self::convert_to_pb_txns(api_txns); From 99477c34ce1aeec762c74b9a0bf14036084b737e Mon Sep 17 00:00:00 2001 From: Josh Lind Date: Thu, 25 Jan 2024 18:11:29 -0500 Subject: [PATCH 18/44] [Cargo] Update tokio to the latest version. 
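The load-bearing part of this bump is tokio-metrics 0.1.0 -> 0.3.1, which now depends on tokio-stream (visible in the lock file below); tokio itself moves from 1.32.1 to 1.35.1 and tokio-macros to 2.2.0. A minimal smoke-test sketch, assuming the TaskMonitor surface (new, instrument, cumulative) is unchanged across the bump:

    use tokio_metrics::TaskMonitor;

    #[tokio::main]
    async fn main() {
        let monitor = TaskMonitor::new();
        // instrument() wraps the future so the monitor records its polls
        monitor.instrument(async { /* workload */ }).await;
        println!("{:?}", monitor.cumulative()); // cumulative TaskMetrics
    }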
--- Cargo.lock | 13 +++++++------ Cargo.toml | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f882e59a89b64..67bd69210c8fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15486,9 +15486,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.1" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "777d57dcc6bb4cf084e3212e1858447222aa451f21b5e2452497d9100da65b91" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "bytes", @@ -15516,9 +15516,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2 1.0.75", "quote 1.0.35", @@ -15527,13 +15527,14 @@ dependencies = [ [[package]] name = "tokio-metrics" -version = "0.1.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcb585a0069b53171684e22d5255984ec30d1c7304fd0a4a9a603ffd8c765cdd" +checksum = "eace09241d62c98b7eeb1107d4c5c64ca3bd7da92e8c218c153ab3a78f9be112" dependencies = [ "futures-util", "pin-project-lite", "tokio", + "tokio-stream", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 72de4d6e24ba2..5d508b60ffe99 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -674,9 +674,9 @@ tiny-keccak = { version = "2.0.2", features = ["keccak", "sha3"] } tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } trybuild = "1.0.80" -tokio = { version = "1.21.0", features = ["full"] } +tokio = { version = "1.35.1", features = ["full"] } tokio-io-timeout = "1.2.0" -tokio-metrics = "0.1.0" +tokio-metrics = "0.3.1" tokio-retry = "0.3.0" tokio-scoped = { version = "0.2.0" } tokio-stream = { version = "0.1.14", features = ["fs"] } From ece6bb89da231a76ad9f0f12cd449842c83b535a Mon Sep 17 00:00:00 2001 From: Josh Lind Date: Thu, 25 Jan 2024 18:42:29 -0500 Subject: [PATCH 19/44] [Rust] Upgrade to rust 1.75.0 --- .cargo/config.toml | 1 + Cargo.toml | 2 +- api/src/context.rs | 2 +- api/src/tests/state_test.rs | 5 +- api/types/src/move_types.rs | 1 + aptos-move/aptos-release-builder/src/utils.rs | 2 +- aptos-move/aptos-sdk-builder/src/rust.rs | 1 + .../src/aptos_framework_sdk_builder.rs | 1 + .../src/aptos_token_objects_sdk_builder.rs | 1 + .../src/aptos_token_sdk_builder.rs | 1 + aptos-move/framework/src/module_metadata.rs | 4 +- aptos-move/framework/src/release_bundle.rs | 2 +- .../safety-rules/src/safety_rules_manager.rs | 7 +- consensus/src/dag/anchor_election/mod.rs | 1 + consensus/src/dag/mod.rs | 7 +- consensus/src/epoch_manager.rs | 2 +- .../src/quorum_store/quorum_store_builder.rs | 2 +- consensus/src/sender_aware_shuffler.rs | 82 ++++++++++++++----- consensus/src/test_utils/mod.rs | 8 +- .../src/tests/publish_module.rs | 2 +- .../secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs | 2 +- crates/aptos-openapi/src/lib.rs | 1 - crates/aptos-rosetta/src/types/misc.rs | 4 +- crates/aptos-rosetta/src/types/objects.rs | 4 +- crates/aptos-telemetry/src/sender.rs | 26 +++--- crates/aptos/CHANGELOG.md | 2 +- .../src/models/coin_models/coin_utils.rs | 6 +- .../indexer/src/processors/token_processor.rs | 4 +- .../src/workflow_delegator.rs | 2 +- docker/builder/docker-bake-rust-all.hcl | 4 
+- .../indexer-grpc-fullnode/src/tests/mod.rs | 2 - .../src/tests/proto_converter_tests.rs | 2 +- execution/block-partitioner/src/test_utils.rs | 2 +- execution/executor-benchmark/src/lib.rs | 12 +-- .../src/transaction_generator.rs | 2 +- mempool/src/core_mempool/mod.rs | 6 +- mempool/src/counters.rs | 28 +++---- mempool/src/shared_mempool/mod.rs | 1 - rust-toolchain.toml | 2 +- scripts/update_docker_images.py | 2 +- .../smoke-test/src/aptos_cli/validator.rs | 2 +- testsuite/smoke-test/src/genesis.rs | 1 + testsuite/smoke-test/src/rosetta.rs | 2 +- testsuite/smoke-test/src/test_utils.rs | 56 ++++++------- testsuite/testcases/src/lib.rs | 2 +- .../move/evm/move-to-yul/src/events.rs | 2 +- .../evm/move-to-yul/src/native_functions.rs | 2 +- .../move/evm/move-to-yul/src/tables.rs | 10 +-- .../move/evm/move-to-yul/src/vectors.rs | 12 +-- .../move/move-borrow-graph/src/graph.rs | 2 +- .../src/reference_safety/mod.rs | 2 +- .../src/signature_v2.rs | 2 +- .../move-command-line-common/src/files.rs | 8 +- third_party/move/move-compiler-v2/src/lib.rs | 2 +- .../move-compiler/src/cfgir/liveness/mod.rs | 2 +- .../move-model/src/builder/exp_builder.rs | 2 +- third_party/move/move-prover/src/lib.rs | 2 +- .../move-vm/runtime/src/loader/modules.rs | 2 +- .../move/move-vm/runtime/src/loader/script.rs | 2 +- 59 files changed, 201 insertions(+), 162 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index d41b21d2cc7f2..d7681b6c2c0a9 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -10,6 +10,7 @@ xclippy = [ "-Aclippy::enum-variant-names", "-Aclippy::result-large-err", "-Aclippy::mutable-key-type", + "-Aclippy::map_identity", # We temporarily ignore this due to: https://github.com/rust-lang/rust-clippy/issues/11764 ] x = "run --package aptos-cargo-cli --bin aptos-cargo-cli --" diff --git a/Cargo.toml b/Cargo.toml index 5d508b60ffe99..316743829824c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -269,7 +269,7 @@ homepage = "https://aptoslabs.com" license = "Apache-2.0" publish = false repository = "https://github.com/aptos-labs/aptos-core" -rust-version = "1.74.1" +rust-version = "1.75.0" [workspace.dependencies] # Internal crate dependencies. 
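Nearly all of the mechanical churn in the rest of this patch comes from lints that begin firing under the 1.75 toolchain: clippy's get_first (prefer slice.first() over slice.get(0), with wider coverage in this release) and the unused-index enumerate pattern; map_identity is allowed temporarily above because of the linked false-positive issue. An illustrative before/after, not taken from this diff:

    let xs = vec![10, 20, 30];
    let a = xs.get(0);  // flagged: clippy prefers first() here
    let b = xs.first(); // equivalent, idiomatic spelling
    assert_eq!(a, b);

    for (_i, x) in xs.iter().enumerate() { // index unused: drop enumerate()
        let _ = x;
    }
    for x in xs.iter() { // preferred form
        let _ = x;
    }
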
diff --git a/api/src/context.rs b/api/src/context.rs index 3aacbdcb178da..cf30dd40b3972 100644 --- a/api/src/context.rs +++ b/api/src/context.rs @@ -1463,7 +1463,7 @@ impl FunctionStats { info!( LogSchema::new(self.log_event), - top_1 = sorted.get(0), + top_1 = sorted.first(), top_2 = sorted.get(1), top_3 = sorted.get(2), top_4 = sorted.get(3), diff --git a/api/src/tests/state_test.rs b/api/src/tests/state_test.rs index 61013cfadf7ee..91b83706d3fe7 100644 --- a/api/src/tests/state_test.rs +++ b/api/src/tests/state_test.rs @@ -9,7 +9,7 @@ use move_core_types::account_address::AccountAddress; use move_package::BuildConfig; use serde::Serialize; use serde_json::{json, Value}; -use std::{convert::TryInto, path::PathBuf}; +use std::path::PathBuf; #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn test_get_account_resource() { @@ -320,8 +320,7 @@ fn get_table_item(handle: AccountAddress) -> String { async fn make_test_tables(ctx: &mut TestContext, account: &mut LocalAccount) { let module = build_test_module(account.address()).await; - ctx.api_publish_module(account, module.try_into().unwrap()) - .await; + ctx.api_publish_module(account, module.into()).await; ctx.api_execute_entry_function(account, "make_test_tables", json!([]), json!([])) .await } diff --git a/api/types/src/move_types.rs b/api/types/src/move_types.rs index 672019580e0c9..9c7194ce95360 100644 --- a/api/types/src/move_types.rs +++ b/api/types/src/move_types.rs @@ -1072,6 +1072,7 @@ impl MoveModuleBytecode { } } + #[allow(clippy::unnecessary_fallible_conversions)] pub fn try_parse_abi(mut self) -> anyhow::Result { if self.abi.is_none() { // Ignore error, because it is possible a transaction module payload contains diff --git a/aptos-move/aptos-release-builder/src/utils.rs b/aptos-move/aptos-release-builder/src/utils.rs index 2b49469918a0b..2d7a2c53d674a 100644 --- a/aptos-move/aptos-release-builder/src/utils.rs +++ b/aptos-move/aptos-release-builder/src/utils.rs @@ -44,7 +44,7 @@ pub(crate) fn generate_next_execution_hash_blob( emitln!(writer, "proposal_id,"); emitln!(writer, "@{},", for_address); emit!(writer, "vector["); - for (_, b) in next_execution_hash.iter().enumerate() { + for b in next_execution_hash.iter() { emit!(writer, "{}u8,", b); } emitln!(writer, "],"); diff --git a/aptos-move/aptos-sdk-builder/src/rust.rs b/aptos-move/aptos-sdk-builder/src/rust.rs index 516bd0ab2145f..f9e1d6ff6777a 100644 --- a/aptos-move/aptos-sdk-builder/src/rust.rs +++ b/aptos-move/aptos-sdk-builder/src/rust.rs @@ -41,6 +41,7 @@ pub fn output(out: &mut dyn Write, abis: &[EntryABI], local_types: bool) -> Resu writeln!(emitter.out, "#![allow(unused_imports)]")?; writeln!(emitter.out, "#![allow(clippy::too_many_arguments)]")?; writeln!(emitter.out, "#![allow(clippy::arc_with_non_send_sync)]")?; + writeln!(emitter.out, "#![allow(clippy::get_first)]")?; emitter.output_script_call_enum_with_imports(abis)?; diff --git a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs index 819c1c51e1d17..c54667d9a8b64 100644 --- a/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_framework_sdk_builder.rs @@ -14,6 +14,7 @@ #![allow(unused_imports)] #![allow(clippy::too_many_arguments)] #![allow(clippy::arc_with_non_send_sync)] +#![allow(clippy::get_first)] use aptos_types::{ account_address::AccountAddress, transaction::{EntryFunction, TransactionPayload}, diff --git 
a/aptos-move/framework/cached-packages/src/aptos_token_objects_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_token_objects_sdk_builder.rs index 69f9f19a439bd..92b35f81cebc9 100644 --- a/aptos-move/framework/cached-packages/src/aptos_token_objects_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_token_objects_sdk_builder.rs @@ -14,6 +14,7 @@ #![allow(unused_imports)] #![allow(clippy::too_many_arguments)] #![allow(clippy::arc_with_non_send_sync)] +#![allow(clippy::get_first)] use aptos_types::{ account_address::AccountAddress, transaction::{EntryFunction, TransactionPayload}, diff --git a/aptos-move/framework/cached-packages/src/aptos_token_sdk_builder.rs b/aptos-move/framework/cached-packages/src/aptos_token_sdk_builder.rs index 9d477824b5391..62b706dd415cc 100644 --- a/aptos-move/framework/cached-packages/src/aptos_token_sdk_builder.rs +++ b/aptos-move/framework/cached-packages/src/aptos_token_sdk_builder.rs @@ -14,6 +14,7 @@ #![allow(unused_imports)] #![allow(clippy::too_many_arguments)] #![allow(clippy::arc_with_non_send_sync)] +#![allow(clippy::get_first)] use aptos_types::{ account_address::AccountAddress, transaction::{EntryFunction, TransactionPayload}, diff --git a/aptos-move/framework/src/module_metadata.rs b/aptos-move/framework/src/module_metadata.rs index fddf32fb5b00d..773f2a71ac2fa 100644 --- a/aptos-move/framework/src/module_metadata.rs +++ b/aptos-move/framework/src/module_metadata.rs @@ -112,7 +112,7 @@ impl KnownAttribute { pub fn get_resource_group(&self) -> Option { if self.kind == KnownAttributeKind::ResourceGroup as u8 { - self.args.get(0).and_then(|scope| str::parse(scope).ok()) + self.args.first().and_then(|scope| str::parse(scope).ok()) } else { None } @@ -127,7 +127,7 @@ impl KnownAttribute { pub fn get_resource_group_member(&self) -> Option { if self.kind == KnownAttributeKind::ResourceGroupMember as u8 { - self.args.get(0)?.parse().ok() + self.args.first()?.parse().ok() } else { None } diff --git a/aptos-move/framework/src/release_bundle.rs b/aptos-move/framework/src/release_bundle.rs index 9610b4220ff31..67f14e94b8091 100644 --- a/aptos-move/framework/src/release_bundle.rs +++ b/aptos-move/framework/src/release_bundle.rs @@ -308,7 +308,7 @@ impl ReleasePackage { emitln!(writer, "proposal_id,"); emitln!(writer, "@{},", for_address); emit!(writer, "vector["); - for (_, b) in next_execution_hash.iter().enumerate() { + for b in next_execution_hash.iter() { emit!(writer, "{}u8,", b); } emitln!(writer, "],"); diff --git a/consensus/safety-rules/src/safety_rules_manager.rs b/consensus/safety-rules/src/safety_rules_manager.rs index aa4342f8430b6..4537cdfb2d550 100644 --- a/consensus/safety-rules/src/safety_rules_manager.rs +++ b/consensus/safety-rules/src/safety_rules_manager.rs @@ -14,11 +14,11 @@ use crate::{ use aptos_config::config::{InitialSafetyRulesConfig, SafetyRulesConfig, SafetyRulesService}; use aptos_infallible::RwLock; use aptos_secure_storage::{KVStorage, Storage}; -use std::{convert::TryInto, net::SocketAddr, sync::Arc}; +use std::{net::SocketAddr, sync::Arc}; pub fn storage(config: &SafetyRulesConfig) -> PersistentSafetyStorage { let backend = &config.backend; - let internal_storage: Storage = backend.try_into().expect("Unable to initialize storage"); + let internal_storage: Storage = backend.into(); if let Err(error) = internal_storage.available() { panic!("Storage is not available: {:?}", error); } @@ -53,8 +53,7 @@ pub fn storage(config: &SafetyRulesConfig) -> PersistentSafetyStorage { let waypoint = 
config.initial_safety_rules_config.waypoint(); let backend = &config.backend; - let internal_storage: Storage = - backend.try_into().expect("Unable to initialize storage"); + let internal_storage: Storage = backend.into(); PersistentSafetyStorage::initialize( internal_storage, identity_blob diff --git a/consensus/src/dag/anchor_election/mod.rs b/consensus/src/dag/anchor_election/mod.rs index f5404d0490850..5666de15f3a00 100644 --- a/consensus/src/dag/anchor_election/mod.rs +++ b/consensus/src/dag/anchor_election/mod.rs @@ -21,4 +21,5 @@ mod leader_reputation_adapter; mod round_robin; pub use leader_reputation_adapter::{LeaderReputationAdapter, MetadataBackendAdapter}; +#[cfg(test)] pub use round_robin::RoundRobinAnchorElection; diff --git a/consensus/src/dag/mod.rs b/consensus/src/dag/mod.rs index ce9577adfb537..930e9c9118b24 100644 --- a/consensus/src/dag/mod.rs +++ b/consensus/src/dag/mod.rs @@ -26,7 +26,6 @@ pub use adapter::{ProofNotifier, StorageAdapter}; pub use bootstrap::DagBootstrapper; pub use commit_signer::DagCommitSigner; pub use dag_network::{RpcHandler, RpcWithFallback, TDAGNetworkSender}; -pub use storage::DAGStorage; -pub use types::{ - CertifiedNode, DAGMessage, DAGNetworkMessage, DAGRpcResult, Extensions, Node, NodeId, Vote, -}; +#[cfg(test)] +pub use types::Extensions; +pub use types::{CertifiedNode, DAGMessage, DAGNetworkMessage, DAGRpcResult, Node, NodeId, Vote}; diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index b2133fc218da2..29ee8b844e830 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -1490,7 +1490,7 @@ impl EpochManager

{ #[allow(dead_code)] fn new_signer_from_storage(author: Author, backend: &SecureBackend) -> Arc { - let storage: Storage = backend.try_into().expect("Unable to initialize storage"); + let storage: Storage = backend.into(); if let Err(error) = storage.available() { panic!("Storage is not available: {:?}", error); } diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 8c0704ddb7147..b0f0d2b002d53 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -217,7 +217,7 @@ impl InnerBuilder { fn create_batch_store(&mut self) -> Arc> { let backend = &self.backend; - let storage: Storage = backend.try_into().expect("Unable to initialize storage"); + let storage: Storage = backend.into(); if let Err(error) = storage.available() { panic!("Storage is not available: {:?}", error); } diff --git a/consensus/src/sender_aware_shuffler.rs b/consensus/src/sender_aware_shuffler.rs index f61b69305e0e7..98057a8e28f74 100644 --- a/consensus/src/sender_aware_shuffler.rs +++ b/consensus/src/sender_aware_shuffler.rs @@ -286,7 +286,7 @@ mod tests { let mut senders = Vec::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(1); - senders.push(sender_txns.get(0).unwrap().sender()); + senders.push(sender_txns.first().unwrap().sender()); txns.append(&mut sender_txns); } let txn_shuffer = SenderAwareShuffler::new(10); @@ -304,7 +304,7 @@ mod tests { let mut senders = Vec::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(10); - senders.push(sender_txns.get(0).unwrap().sender()); + senders.push(sender_txns.first().unwrap().sender()); txns.append(&mut sender_txns); } @@ -325,7 +325,7 @@ mod tests { let mut senders = Vec::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(10); - senders.push(sender_txns.get(0).unwrap().sender()); + senders.push(sender_txns.first().unwrap().sender()); txns.append(&mut sender_txns); } @@ -345,7 +345,7 @@ mod tests { let mut orig_txns_by_sender = HashMap::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(rng.gen_range(1, max_txn_per_sender)); - orig_txns_by_sender.insert(sender_txns.get(0).unwrap().sender(), sender_txns.clone()); + orig_txns_by_sender.insert(sender_txns.first().unwrap().sender(), sender_txns.clone()); orig_txns.append(&mut sender_txns); } let txn_shuffler = SenderAwareShuffler::new(num_senders - 1); @@ -377,9 +377,18 @@ mod tests { orig_txns.extend(sender3_txns.clone()); let txn_shuffler = SenderAwareShuffler::new(3); let optimized_txns = txn_shuffler.shuffle(orig_txns); - assert_eq!(optimized_txns.get(0).unwrap(), sender1_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(1).unwrap(), sender2_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(2).unwrap(), sender3_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.first().unwrap(), + sender1_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(1).unwrap(), + sender2_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(2).unwrap(), + sender3_txns.first().unwrap() + ); assert_eq!(optimized_txns.get(3).unwrap(), sender3_txns.get(1).unwrap()); } @@ -402,12 +411,27 @@ mod tests { orig_txns.extend(sender5_txns.clone()); let txn_shuffler = SenderAwareShuffler::new(3); let optimized_txns = txn_shuffler.shuffle(orig_txns); - assert_eq!(optimized_txns.get(0).unwrap(), sender1_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(1).unwrap(), 
sender2_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(2).unwrap(), sender3_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(3).unwrap(), sender4_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.first().unwrap(), + sender1_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(1).unwrap(), + sender2_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(2).unwrap(), + sender3_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(3).unwrap(), + sender4_txns.first().unwrap() + ); assert_eq!(optimized_txns.get(4).unwrap(), sender1_txns.get(1).unwrap()); - assert_eq!(optimized_txns.get(5).unwrap(), sender5_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.get(5).unwrap(), + sender5_txns.first().unwrap() + ); } #[test] @@ -430,14 +454,32 @@ mod tests { orig_txns.extend(sender6_txns.clone()); let txn_shuffler = SenderAwareShuffler::new(3); let optimized_txns = txn_shuffler.shuffle(orig_txns); - assert_eq!(optimized_txns.get(0).unwrap(), sender1_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(1).unwrap(), sender2_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(2).unwrap(), sender3_txns.get(0).unwrap()); - assert_eq!(optimized_txns.get(3).unwrap(), sender4_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.first().unwrap(), + sender1_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(1).unwrap(), + sender2_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(2).unwrap(), + sender3_txns.first().unwrap() + ); + assert_eq!( + optimized_txns.get(3).unwrap(), + sender4_txns.first().unwrap() + ); assert_eq!(optimized_txns.get(4).unwrap(), sender1_txns.get(1).unwrap()); - assert_eq!(optimized_txns.get(5).unwrap(), sender5_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.get(5).unwrap(), + sender5_txns.first().unwrap() + ); assert_eq!(optimized_txns.get(6).unwrap(), sender3_txns.get(1).unwrap()); - assert_eq!(optimized_txns.get(7).unwrap(), sender6_txns.get(0).unwrap()); + assert_eq!( + optimized_txns.get(7).unwrap(), + sender6_txns.first().unwrap() + ); } #[test] @@ -451,7 +493,7 @@ mod tests { let mut orig_txn_set = HashSet::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(rng.gen_range(1, max_txn_per_sender)); - senders.push(sender_txns.get(0).unwrap().sender()); + senders.push(sender_txns.first().unwrap().sender()); orig_txns.append(&mut sender_txns); } for txn in orig_txns.clone() { @@ -483,7 +525,7 @@ mod tests { let mut senders = Vec::new(); for _ in 0..num_senders { let mut sender_txns = create_signed_transaction(rng.gen_range(1, max_txn_per_sender)); - senders.push(sender_txns.get(0).unwrap().sender()); + senders.push(sender_txns.first().unwrap().sender()); orig_txns.append(&mut sender_txns); } diff --git a/consensus/src/test_utils/mod.rs b/consensus/src/test_utils/mod.rs index 31b92a0de5e66..5b62d08e59a65 100644 --- a/consensus/src/test_utils/mod.rs +++ b/consensus/src/test_utils/mod.rs @@ -31,10 +31,10 @@ use aptos_types::{ transaction::{RawTransaction, Script, SignedTransaction, TransactionPayload}, }; pub use mock_payload_manager::MockPayloadManager; -pub use mock_state_computer::{ - EmptyStateComputer, MockStateComputer, RandomComputeResultStateComputer, -}; -pub use mock_storage::{EmptyStorage, MockSharedStorage, MockStorage}; +pub use mock_state_computer::EmptyStateComputer; +#[cfg(test)] +pub use mock_state_computer::{MockStateComputer, RandomComputeResultStateComputer}; +pub use mock_storage::{EmptyStorage, MockStorage}; use 
move_core_types::account_address::AccountAddress; pub const TEST_TIMEOUT: Duration = Duration::from_secs(60); diff --git a/crates/aptos-api-tester/src/tests/publish_module.rs b/crates/aptos-api-tester/src/tests/publish_module.rs index 620395a29bbfc..b1cf3a1cfe685 100644 --- a/crates/aptos-api-tester/src/tests/publish_module.rs +++ b/crates/aptos-api-tester/src/tests/publish_module.rs @@ -247,7 +247,7 @@ async fn publish_module( }; // get blob for later comparison - let blob = match blobs.get(0) { + let blob = match blobs.first() { Some(bytecode) => HexEncodedBytes::from(bytecode.clone()), None => { error!( diff --git a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs index 3aa17e94c648a..6bf5d80f3a415 100644 --- a/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs +++ b/crates/aptos-crypto/src/secp256r1_ecdsa/secp256r1_ecdsa_sigs.rs @@ -29,7 +29,7 @@ impl Signature { /// Serialize an Signature. Uses the SEC1 serialization format. pub fn to_bytes(&self) -> [u8; SIGNATURE_LENGTH] { // The RustCrypto P256 `to_bytes` call here should never return a byte array of the wrong length - self.0.to_bytes().try_into().unwrap() + self.0.to_bytes().into() } /// Deserialize an P256Signature, without checking for malleability diff --git a/crates/aptos-openapi/src/lib.rs b/crates/aptos-openapi/src/lib.rs index b9d4060f3ef5e..50e27b54b1bbc 100644 --- a/crates/aptos-openapi/src/lib.rs +++ b/crates/aptos-openapi/src/lib.rs @@ -3,6 +3,5 @@ mod helpers; -pub use helpers::*; // Re-export so users don't have to import this themselves. pub use percent_encoding; diff --git a/crates/aptos-rosetta/src/types/misc.rs b/crates/aptos-rosetta/src/types/misc.rs index 5422ee0e31608..f97c3ed6ceb5b 100644 --- a/crates/aptos-rosetta/src/types/misc.rs +++ b/crates/aptos-rosetta/src/types/misc.rs @@ -391,7 +391,7 @@ fn parse_requested_balance( ) -> Option { if account_identifier.is_delegator_active_stake() { return balances_result - .get(0) + .first() .and_then(|v| v.as_str().map(|s| s.to_owned())); } else if account_identifier.is_delegator_inactive_stake() { return balances_result @@ -421,7 +421,7 @@ fn parse_requested_balance( fn parse_lockup_expiration(lockup_secs_result: Vec) -> u64 { return lockup_secs_result - .get(0) + .first() .and_then(|v| v.as_str().and_then(|s| s.parse::().ok())) .unwrap_or(0); } diff --git a/crates/aptos-rosetta/src/types/objects.rs b/crates/aptos-rosetta/src/types/objects.rs index 1f0ef56937317..0679f37fbbe95 100644 --- a/crates/aptos-rosetta/src/types/objects.rs +++ b/crates/aptos-rosetta/src/types/objects.rs @@ -1015,7 +1015,7 @@ fn parse_failed_operations_from_txn_payload( (AccountAddress::ONE, ACCOUNT_MODULE, CREATE_ACCOUNT_FUNCTION) => { if let Some(Ok(address)) = inner .args() - .get(0) + .first() .map(|encoded| bcs::from_bytes::(encoded)) { operations.push(Operation::create_account( @@ -1162,7 +1162,7 @@ fn parse_transfer_from_txn_payload( let args = payload.args(); let maybe_receiver = args - .get(0) + .first() .map(|encoded| bcs::from_bytes::(encoded)); let maybe_amount = args.get(1).map(|encoded| bcs::from_bytes::(encoded)); diff --git a/crates/aptos-telemetry/src/sender.rs b/crates/aptos-telemetry/src/sender.rs index a9cb87bc93f36..2bcf89ed45e8c 100644 --- a/crates/aptos-telemetry/src/sender.rs +++ b/crates/aptos-telemetry/src/sender.rs @@ -404,6 +404,19 @@ impl TelemetrySender { } } +async fn error_for_status_with_body(response: Response) -> Result { + if response.status().is_client_error() || 
response.status().is_server_error() { + Err(anyhow!( + "HTTP status error ({}) for url ({}): {}", + response.status(), + response.url().clone(), + response.text().await?, + )) + } else { + Ok(response) + } +} + #[cfg(test)] mod tests { @@ -660,16 +673,3 @@ mod tests { mock.assert(); } } - -async fn error_for_status_with_body(response: Response) -> Result { - if response.status().is_client_error() || response.status().is_server_error() { - Err(anyhow!( - "HTTP status error ({}) for url ({}): {}", - response.status(), - response.url().clone(), - response.text().await?, - )) - } else { - Ok(response) - } -} diff --git a/crates/aptos/CHANGELOG.md b/crates/aptos/CHANGELOG.md index 0c61ebe1fe8d9..170356de5cb38 100644 --- a/crates/aptos/CHANGELOG.md +++ b/crates/aptos/CHANGELOG.md @@ -3,7 +3,7 @@ All notable changes to the Aptos CLI will be captured in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) and the format set out by [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## Unreleased -N/A +- Updated CLI source compilation to use rust toolchain version 1.75.0 (from 1.74.1). ## [2.4.0] - 2023/01/05 - Hide the V2 compiler from input options until the V2 compiler is ready for release diff --git a/crates/indexer/src/models/coin_models/coin_utils.rs b/crates/indexer/src/models/coin_models/coin_utils.rs index 27c6bd7ad8466..f4d2e7a4c518c 100644 --- a/crates/indexer/src/models/coin_models/coin_utils.rs +++ b/crates/indexer/src/models/coin_models/coin_utils.rs @@ -37,7 +37,7 @@ impl CoinInfoResource { /// Getting the table item location of the supply aggregator pub fn get_aggregator_metadata(&self) -> Option { - if let Some(inner) = self.supply.vec.get(0) { + if let Some(inner) = self.supply.vec.first() { inner.aggregator.get_aggregator_metadata() } else { None @@ -64,7 +64,7 @@ pub struct AggregatorWrapperResource { impl AggregatorWrapperResource { /// In case we do want to track supply pub fn get_aggregator_metadata(&self) -> Option { - self.vec.get(0).cloned() + self.vec.first().cloned() } } @@ -76,7 +76,7 @@ pub struct IntegerWrapperResource { impl IntegerWrapperResource { /// In case we do want to track supply pub fn get_supply(&self) -> Option { - self.vec.get(0).map(|inner| inner.value.clone()) + self.vec.first().map(|inner| inner.value.clone()) } } diff --git a/crates/indexer/src/processors/token_processor.rs b/crates/indexer/src/processors/token_processor.rs index dc09d4cdab715..f37c959c1a8f7 100644 --- a/crates/indexer/src/processors/token_processor.rs +++ b/crates/indexer/src/processors/token_processor.rs @@ -1090,7 +1090,7 @@ fn parse_v2_token( let mut tokens_burned: TokenV2Burned = HashSet::new(); // Need to do a first pass to get all the objects - for (_, wsc) in user_txn.info.changes.iter().enumerate() { + for wsc in user_txn.info.changes.iter() { if let WriteSetChange::WriteResource(wr) = wsc { if let Some(object) = ObjectWithMetadata::from_write_resource(wr, txn_version).unwrap() @@ -1115,7 +1115,7 @@ fn parse_v2_token( } // Need to do a second pass to get all the structs related to the object - for (_, wsc) in user_txn.info.changes.iter().enumerate() { + for wsc in user_txn.info.changes.iter() { if let WriteSetChange::WriteResource(wr) = wsc { let address = standardize_address(&wr.address.to_string()); if let Some(aggregated_data) = token_v2_metadata_helper.get_mut(&address) { diff --git a/crates/transaction-generator-lib/src/workflow_delegator.rs b/crates/transaction-generator-lib/src/workflow_delegator.rs index 
1ae9bcfaabed9..a24451d9e38da 100644
--- a/crates/transaction-generator-lib/src/workflow_delegator.rs
+++ b/crates/transaction-generator-lib/src/workflow_delegator.rs
@@ -113,7 +113,7 @@ impl TransactionGenerator for WorkflowTxnGenerator {
             StageTracking::WhenDone(stage_counter) => {
                 if stage == 0 {
                     if num_to_create == 0 {
-                        info!("TransactionGenerator Workflow: Stage 0 is full with {} accounts, moving to stage 1", self.pool_per_stage.get(0).unwrap().len());
+                        info!("TransactionGenerator Workflow: Stage 0 is full with {} accounts, moving to stage 1", self.pool_per_stage.first().unwrap().len());
                         let _ = stage_counter.compare_exchange(
                             0,
                             1,
diff --git a/docker/builder/docker-bake-rust-all.hcl b/docker/builder/docker-bake-rust-all.hcl
index 965c32678f731..0bd7db9079f43 100644
--- a/docker/builder/docker-bake-rust-all.hcl
+++ b/docker/builder/docker-bake-rust-all.hcl
@@ -79,8 +79,8 @@ target "builder-base" {
   target = "builder-base"
   context = "."
   contexts = {
-    # Run `docker buildx imagetools inspect rust:1.74.1-bullseye` to find the latest multi-platform hash
-    rust = "docker-image://rust:1.74.1-bullseye@sha256:698e0acd00a30c8842887c3792c5c68e1d23565cfcc11d0203dbe4eeb31213c0"
+    # Run `docker buildx imagetools inspect rust:1.75.0-bullseye` to find the latest multi-platform hash
+    rust = "docker-image://rust:1.75.0-bullseye@sha256:41e5ac5baf626dcf190cfe6adf9bf3f17c72a677641ae2de6a1f36a6db883aca"
   }
   args = {
     PROFILE = "${PROFILE}"
diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/mod.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/mod.rs
index 0199623e8ea60..1a622d57fa5a6 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/mod.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/mod.rs
@@ -2,5 +2,3 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 mod proto_converter_tests;
-
-pub use aptos_api_test_context::{new_test_context as super_new_test_context, TestContext};
diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/proto_converter_tests.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/proto_converter_tests.rs
index 5c5d22a739bf5..e53e01b2132ad 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/proto_converter_tests.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/tests/proto_converter_tests.rs
@@ -221,7 +221,7 @@ async fn test_table_item_parsing_works() {
 async fn make_test_tables(ctx: &mut TestContext, account: &mut LocalAccount) {
     let module = build_test_module(account.address()).await;

-    ctx.api_publish_module(account, module.try_into().unwrap())
+    ctx.api_publish_module(account, module.into())
         .await;
     ctx.api_execute_entry_function(
         account,
diff --git a/execution/block-partitioner/src/test_utils.rs b/execution/block-partitioner/src/test_utils.rs
index 5880cd007e1bb..24f21261014bc 100644
--- a/execution/block-partitioner/src/test_utils.rs
+++ b/execution/block-partitioner/src/test_utils.rs
@@ -80,7 +80,7 @@ pub fn create_signed_p2p_transaction(
     receivers: Vec<&TestAccount>,
 ) -> Vec {
     let mut transactions = Vec::new();
-    for (_, receiver) in receivers.iter().enumerate() {
+    for receiver in receivers.iter() {
         let transaction_payload = TransactionPayload::EntryFunction(EntryFunction::new(
             ModuleId::new(AccountAddress::ONE, Identifier::new("coin").unwrap()),
             Identifier::new("transfer").unwrap(),
diff --git a/execution/executor-benchmark/src/lib.rs b/execution/executor-benchmark/src/lib.rs
index ada5b29b24944..c72988bfaae29 100644
--- a/execution/executor-benchmark/src/lib.rs
+++ 
b/execution/executor-benchmark/src/lib.rs @@ -673,6 +673,12 @@ impl OverallMeasuring { } } +fn log_total_supply(db_reader: &Arc) { + let total_supply = + DbAccessUtil::get_total_supply(&db_reader.latest_state_checkpoint_view().unwrap()).unwrap(); + info!("total supply is {:?} octas", total_supply) +} + #[cfg(test)] mod tests { use crate::{native_executor::NativeExecutor, pipeline::PipelineConfig}; @@ -751,9 +757,3 @@ mod tests { test_generic_benchmark::(None, false); } } - -fn log_total_supply(db_reader: &Arc) { - let total_supply = - DbAccessUtil::get_total_supply(&db_reader.latest_state_checkpoint_view().unwrap()).unwrap(); - info!("total supply is {:?} octas", total_supply) -} diff --git a/execution/executor-benchmark/src/transaction_generator.rs b/execution/executor-benchmark/src/transaction_generator.rs index 5e150e32642fb..eaca89c5ca759 100644 --- a/execution/executor-benchmark/src/transaction_generator.rs +++ b/execution/executor-benchmark/src/transaction_generator.rs @@ -654,7 +654,7 @@ impl TransactionGenerator { }); let (tx, rx) = std::sync::mpsc::channel(); self.worker_pool.scope(move |scope| { - for (_, per_worker_jobs) in jobs.into_iter().enumerate() { + for per_worker_jobs in jobs.into_iter() { let tx = tx.clone(); scope.spawn(move |_| { for (index, job) in per_worker_jobs { diff --git a/mempool/src/core_mempool/mod.rs b/mempool/src/core_mempool/mod.rs index 434d991d03398..9ddf60f6ff9be 100644 --- a/mempool/src/core_mempool/mod.rs +++ b/mempool/src/core_mempool/mod.rs @@ -7,9 +7,9 @@ mod mempool; mod transaction; mod transaction_store; +#[cfg(test)] +pub use self::transaction::{MempoolTransaction, SubmittedBy}; pub use self::{ - index::TxnPointer, - mempool::Mempool as CoreMempool, - transaction::{MempoolTransaction, SubmittedBy, TimelineState}, + index::TxnPointer, mempool::Mempool as CoreMempool, transaction::TimelineState, transaction_store::TXN_INDEX_ESTIMATED_BYTES, }; diff --git a/mempool/src/counters.rs b/mempool/src/counters.rs index 5d5f14a58da55..93617b206c36b 100644 --- a/mempool/src/counters.rs +++ b/mempool/src/counters.rs @@ -119,20 +119,6 @@ static TRANSACTION_COUNT_BUCKETS: Lazy> = Lazy::new(|| { .unwrap() }); -#[cfg(test)] -mod test { - use crate::counters::RANKING_SCORE_BUCKETS; - - #[test] - fn generate_ranking_score_buckets() { - let buckets: Vec = (0..21) - .map(|n| 100.0 * (10.0_f64.powf(n as f64 / 6.0))) - .map(|f| f.round()) - .collect(); - assert_eq!(RANKING_SCORE_BUCKETS, &buckets); - } -} - /// Counter tracking size of various indices in core mempool pub static CORE_MEMPOOL_INDEX_SIZE: Lazy = Lazy::new(|| { register_int_gauge_vec!( @@ -627,3 +613,17 @@ pub static MAIN_LOOP: Lazy = Lazy::new(|| { .unwrap(), ) }); + +#[cfg(test)] +mod test { + use crate::counters::RANKING_SCORE_BUCKETS; + + #[test] + fn generate_ranking_score_buckets() { + let buckets: Vec = (0..21) + .map(|n| 100.0 * (10.0_f64.powf(n as f64 / 6.0))) + .map(|f| f.round()) + .collect(); + assert_eq!(RANKING_SCORE_BUCKETS, &buckets); + } +} diff --git a/mempool/src/shared_mempool/mod.rs b/mempool/src/shared_mempool/mod.rs index 7303ed3595239..5c4da46782b29 100644 --- a/mempool/src/shared_mempool/mod.rs +++ b/mempool/src/shared_mempool/mod.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: Apache-2.0 pub mod network; -pub use network::MempoolSyncMsg; mod runtime; pub(crate) mod types; pub use runtime::bootstrap; diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 75fd35f510581..0b13a21a22d01 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] 
-channel = "1.74.1" +channel = "1.75.0" # Note: we don't specify cargofmt in our toolchain because we rely on # the nightly version of cargofmt and verify formatting in CI/CD. diff --git a/scripts/update_docker_images.py b/scripts/update_docker_images.py index 1d4a34bb85bfc..9865aa27c7171 100755 --- a/scripts/update_docker_images.py +++ b/scripts/update_docker_images.py @@ -10,7 +10,7 @@ IMAGES = { "debian": "debian:bullseye", - "rust": "rust:1.74.1-bullseye", + "rust": "rust:1.75.0-bullseye", } diff --git a/testsuite/smoke-test/src/aptos_cli/validator.rs b/testsuite/smoke-test/src/aptos_cli/validator.rs index c1f4410086e86..31e030e99b1c0 100644 --- a/testsuite/smoke-test/src/aptos_cli/validator.rs +++ b/testsuite/smoke-test/src/aptos_cli/validator.rs @@ -170,7 +170,7 @@ async fn check_vote_to_elected(swarm: &mut LocalSwarm) -> (Option, Option) { - let key_type = fun_id.inst.get(0).expect("key type"); + let key_type = fun_id.inst.first().expect("key type"); // TODO: right now the key to storage is simply hash (table_handle, key), which works when key // is a primitive type. However, this doesn't work when key is a struct or a vector, represented @@ -63,7 +63,7 @@ fn define_contains_fun( ctx: &Context, fun_id: &QualifiedInstId, ) { - let key_type = fun_id.inst.get(0).expect("key type"); + let key_type = fun_id.inst.first().expect("key type"); emitln!(ctx.writer, "(table_ref, key_ref) -> res {"); ctx.writer.indent(); @@ -130,7 +130,7 @@ fn define_contains_fun( } fn define_insert_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &QualifiedInstId) { - let key_type = fun_id.inst.get(0).expect("key type"); + let key_type = fun_id.inst.first().expect("key type"); let value_type = fun_id.inst.get(1).expect("value type"); emitln!(ctx.writer, "(table_ref, key_ref, value) {"); @@ -233,7 +233,7 @@ fn define_insert_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &Qualif } fn define_borrow_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &QualifiedInstId) { - let key_type = fun_id.inst.get(0).expect("key type"); + let key_type = fun_id.inst.first().expect("key type"); emitln!(ctx.writer, "(table_ref, key_ref) -> value_ref {"); ctx.writer.indent(); @@ -308,7 +308,7 @@ fn define_borrow_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &Qualif } fn define_remove_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &QualifiedInstId) { - let key_type = fun_id.inst.get(0).expect("key type"); + let key_type = fun_id.inst.first().expect("key type"); let value_type = fun_id.inst.get(1).expect("value type"); emitln!(ctx.writer, "(table_ref, key_ref) -> value {"); diff --git a/third_party/move/evm/move-to-yul/src/vectors.rs b/third_party/move/evm/move-to-yul/src/vectors.rs index 551330d5b94b5..2da3f74090744 100644 --- a/third_party/move/evm/move-to-yul/src/vectors.rs +++ b/third_party/move/evm/move-to-yul/src/vectors.rs @@ -370,7 +370,7 @@ fn define_empty_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &Qualifi ); emitln!(ctx.writer, "() -> vector {"); ctx.writer.indent(); - let type_size = ctx.type_size(fun_id.inst.get(0).unwrap()); + let type_size = ctx.type_size(fun_id.inst.first().unwrap()); emitln!( ctx.writer, "vector := {}", @@ -444,7 +444,7 @@ fn define_borrow_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &Qualif 1, "vector instantiated with non-one type parameter" ); - let elem_type = fun_id.inst.get(0).unwrap(); + let elem_type = fun_id.inst.first().unwrap(); let elem_type_size = ctx.type_size(elem_type); emitln!(ctx.writer, "(v_ref, i) -> e_ptr {"); @@ 
-543,7 +543,7 @@ fn define_pop_back_fun( 1, "vector instantiated with non-one type parameter" ); - let elem_type = fun_id.inst.get(0).unwrap(); + let elem_type = fun_id.inst.first().unwrap(); let elem_type_size = ctx.type_size(elem_type); emitln!(ctx.writer, "(v_ref) -> e {"); @@ -689,7 +689,7 @@ fn define_push_back_fun( 1, "vector instantiated with non-one type parameter" ); - let elem_type = fun_id.inst.get(0).unwrap(); + let elem_type = fun_id.inst.first().unwrap(); let elem_type_size = ctx.type_size(elem_type); emitln!(ctx.writer, "(v_ref, e) {"); @@ -864,7 +864,7 @@ fn define_push_back_fun( } fn define_swap_fun(gen: &mut FunctionGenerator, ctx: &Context, fun_id: &QualifiedInstId) { - let elem_type = fun_id.inst.get(0).unwrap(); + let elem_type = fun_id.inst.first().unwrap(); let elem_type_size = ctx.type_size(elem_type); emitln!(ctx.writer, "(v_ref, i, j) {"); ctx.writer.indent(); @@ -994,7 +994,7 @@ fn define_destroy_empty_fun( ); emitln!(ctx.writer, "(v) {"); ctx.writer.indent(); - let type_size = ctx.type_size(fun_id.inst.get(0).unwrap()); + let type_size = ctx.type_size(fun_id.inst.first().unwrap()); emitln!( ctx.writer, "let size := {}", diff --git a/third_party/move/move-borrow-graph/src/graph.rs b/third_party/move/move-borrow-graph/src/graph.rs index 1c93b5df8e9fc..bb3dfe4bf0b29 100644 --- a/third_party/move/move-borrow-graph/src/graph.rs +++ b/third_party/move/move-borrow-graph/src/graph.rs @@ -64,7 +64,7 @@ impl BorrowGraph { for (borrower, edges) in &borrowed_by.0 { let borrower = *borrower; for edge in edges { - match edge.path.get(0) { + match edge.path.first() { None => full_borrows.insert(borrower, edge.loc), Some(f) => field_borrows .entry(f.clone()) diff --git a/third_party/move/move-bytecode-verifier/src/reference_safety/mod.rs b/third_party/move/move-bytecode-verifier/src/reference_safety/mod.rs index c564da09441e7..90a89194b6b2d 100644 --- a/third_party/move/move-bytecode-verifier/src/reference_safety/mod.rs +++ b/third_party/move/move-bytecode-verifier/src/reference_safety/mod.rs @@ -134,7 +134,7 @@ fn vec_element_type( verifier: &mut ReferenceSafetyAnalysis, idx: SignatureIndex, ) -> PartialVMResult { - match verifier.resolver.signature_at(idx).0.get(0) { + match verifier.resolver.signature_at(idx).0.first() { Some(ty) => Ok(ty.clone()), None => Err(PartialVMError::new( StatusCode::VERIFIER_INVARIANT_VIOLATION, diff --git a/third_party/move/move-bytecode-verifier/src/signature_v2.rs b/third_party/move/move-bytecode-verifier/src/signature_v2.rs index 7d379c97ee509..48283b993c382 100644 --- a/third_party/move/move-bytecode-verifier/src/signature_v2.rs +++ b/third_party/move/move-bytecode-verifier/src/signature_v2.rs @@ -855,7 +855,7 @@ impl<'a, const N: usize> SignatureChecker<'a, N> { .map(|idx| (idx as TypeParameterIndex, AbilitySet::ALL)) .collect::>(); - for (_field_offset, field_def) in fields.iter().enumerate() { + for field_def in fields.iter() { let field_ty = &field_def.signature.0; // Check if the field type itself is well-formed. 
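The FileHash change in the next hunk is the same 1.75 cleanup applied to conversions: clippy's unnecessary_fallible_conversions flags try_into().unwrap() where an infallible From impl exists. A sketch of the resulting pattern, assuming the sha2 0.10-style digest API used by this crate:

    use sha2::{Digest, Sha256};

    fn hash32(contents: &str) -> [u8; 32] {
        // Sha256::digest returns a GenericArray<u8, U32>, which converts
        // infallibly into [u8; 32]; no try_into()/unwrap() is needed.
        Sha256::digest(contents.as_bytes()).into()
    }
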
diff --git a/third_party/move/move-command-line-common/src/files.rs b/third_party/move/move-command-line-common/src/files.rs index 6a66e0db55167..e265c474f648f 100644 --- a/third_party/move/move-command-line-common/src/files.rs +++ b/third_party/move/move-command-line-common/src/files.rs @@ -5,7 +5,7 @@ use anyhow::{anyhow, bail, *}; use serde::{Deserialize, Serialize}; use sha2::Digest; -use std::{collections::BTreeMap, convert::TryInto, path::Path}; +use std::{collections::BTreeMap, path::Path}; /// Result of sha256 hash of a file's contents. #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] @@ -13,11 +13,7 @@ pub struct FileHash(pub [u8; 32]); impl FileHash { pub fn new(file_contents: &str) -> Self { - Self( - sha2::Sha256::digest(file_contents.as_bytes()) - .try_into() - .expect("Length of sha256 digest must always be 32 bytes"), - ) + Self(sha2::Sha256::digest(file_contents.as_bytes()).into()) } pub const fn empty() -> Self { diff --git a/third_party/move/move-compiler-v2/src/lib.rs b/third_party/move/move-compiler-v2/src/lib.rs index 031862e20c6c3..17fd58f1af75e 100644 --- a/third_party/move/move-compiler-v2/src/lib.rs +++ b/third_party/move/move-compiler-v2/src/lib.rs @@ -81,7 +81,7 @@ pub fn run_move_compiler( // from the first input file. let dump_base_name = options .sources - .get(0) + .first() .and_then(|f| { Path::new(f) .file_name() diff --git a/third_party/move/move-compiler/src/cfgir/liveness/mod.rs b/third_party/move/move-compiler/src/cfgir/liveness/mod.rs index 2e56bd3693dd5..01bb910ad3c3b 100644 --- a/third_party/move/move-compiler/src/cfgir/liveness/mod.rs +++ b/third_party/move/move-compiler/src/cfgir/liveness/mod.rs @@ -461,7 +461,7 @@ fn release_dead_refs_block( return; } - let cmd_loc = block.get(0).unwrap().loc; + let cmd_loc = block.front().unwrap().loc; let cur_state = { let mut s = liveness_pre_state.clone(); for cmd in block.iter().rev() { diff --git a/third_party/move/move-model/src/builder/exp_builder.rs b/third_party/move/move-model/src/builder/exp_builder.rs index ecde7a52d6f3b..89e307669ea4b 100644 --- a/third_party/move/move-model/src/builder/exp_builder.rs +++ b/third_party/move/move-model/src/builder/exp_builder.rs @@ -1599,7 +1599,7 @@ impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'mo // Shortcut for single element case if list.value.len() == 1 { return self.translate_lvalue( - list.value.get(0).unwrap(), + list.value.first().unwrap(), expected_type, expected_order, match_locals, diff --git a/third_party/move/move-prover/src/lib.rs b/third_party/move/move-prover/src/lib.rs index 7f1b809ae5ea2..13306e1fcbf15 100644 --- a/third_party/move/move-prover/src/lib.rs +++ b/third_party/move/move-prover/src/lib.rs @@ -227,7 +227,7 @@ pub fn create_and_process_bytecode(options: &Options, env: &GlobalEnv) -> Functi let output_dir = Path::new(&options.output_path) .parent() .expect("expect the parent directory of the output path to exist"); - let output_prefix = options.move_sources.get(0).map_or("bytecode", |s| { + let output_prefix = options.move_sources.first().map_or("bytecode", |s| { Path::new(s).file_name().unwrap().to_str().unwrap() }); diff --git a/third_party/move/move-vm/runtime/src/loader/modules.rs b/third_party/move/move-vm/runtime/src/loader/modules.rs index 67395e7baf3d7..ea84bd28f2238 100644 --- a/third_party/move/move-vm/runtime/src/loader/modules.rs +++ b/third_party/move/move-vm/runtime/src/loader/modules.rs @@ -344,7 +344,7 @@ impl Module { | Bytecode::VecUnpack(si, _) | 
Bytecode::VecSwap(si) => { if !single_signature_token_map.contains_key(si) { - let ty = match module.signature_at(*si).0.get(0) { + let ty = match module.signature_at(*si).0.first() { None => { return Err(PartialVMError::new( StatusCode::VERIFIER_INVARIANT_VIOLATION, diff --git a/third_party/move/move-vm/runtime/src/loader/script.rs b/third_party/move/move-vm/runtime/src/loader/script.rs index a9068e90fa447..b886e9f53be40 100644 --- a/third_party/move/move-vm/runtime/src/loader/script.rs +++ b/third_party/move/move-vm/runtime/src/loader/script.rs @@ -159,7 +159,7 @@ impl Script { | Bytecode::VecUnpack(si, _) | Bytecode::VecSwap(si) => { if !single_signature_token_map.contains_key(si) { - let ty = match script.signature_at(*si).0.get(0) { + let ty = match script.signature_at(*si).0.first() { None => { return Err(PartialVMError::new( StatusCode::VERIFIER_INVARIANT_VIOLATION, From 67316eeeeab23fbfaad70979f1b2a74228667ba6 Mon Sep 17 00:00:00 2001 From: aldenhu Date: Thu, 25 Jan 2024 21:47:24 +0000 Subject: [PATCH 20/44] add grao1991 as code owner for storage and execution --- CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CODEOWNERS b/CODEOWNERS index c4ab421d0a392..563eedb0f1073 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -90,7 +90,7 @@ /ecosystem/typescript/sdk @gregnazario @banool @0xmaayan @0xjinn # Owners for execution and storage. -/execution/ @msmouse @lightmark +/execution/ @msmouse @lightmark @grao1991 # Owners for mempool. /mempool/ @bchocho @@ -102,7 +102,7 @@ /state-sync/ @joshlind # Owners for execution and storage. -/storage/ @msmouse @lightmark +/storage/ @msmouse @lightmark @grao1991 # Owners for the `/terraform` directory and all its subdirectories. /terraform/ @aptos-labs/prod-eng From 10bf671ff664267f95ff4b638fb24fe77d388028 Mon Sep 17 00:00:00 2001 From: "Brian R. Murphy" <132495859+brmataptos@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:51:05 -0800 Subject: [PATCH 21/44] add MOVE_COMPILER_DUMP environment variable to initialize --dump-bytecode flag to debug tests where compiler flags are not accessible (#11804) MOVE_COMPILER_DUMP env var provides default for --dump-bytecode, which causes compiler-v2 pipeline code to be dumped for debugging. --- third_party/move/move-compiler-v2/src/options.rs | 16 +++++++++++++++- .../move/move-compiler/src/command_line/mod.rs | 7 +++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/third_party/move/move-compiler-v2/src/options.rs b/third_party/move/move-compiler-v2/src/options.rs index db0bd54f38763..3d0b429c64114 100644 --- a/third_party/move/move-compiler-v2/src/options.rs +++ b/third_party/move/move-compiler-v2/src/options.rs @@ -35,7 +35,7 @@ pub struct Options { #[clap(long = cli::DEBUG_FLAG, default_value=debug_compiler_env_var_str())] pub debug: bool, /// Whether to dump intermediate bytecode for debugging. - #[clap(long = "dump-bytecode")] + #[clap(long = "dump-bytecode", default_value=debug_compiler_dump_env_var_str())] pub dump_bytecode: bool, /// Do not complain about unknown attributes in Move code. 
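// For context: the default above comes from the "env var as clap default"
// pattern -- a Lazy<bool> static reads the variable once, and its value is
// rendered as the flag's string default. A self-contained sketch of the same
// pattern; `MY_TOOL_DUMP` and this `read_bool_env_var` body are illustrative
// stand-ins, and the once_cell crate is assumed:
use once_cell::sync::Lazy;

fn read_bool_env_var(name: &str) -> bool {
    matches!(std::env::var(name).as_deref(), Ok("1") | Ok("true"))
}

fn dump_env_var() -> bool {
    // Read and cache the environment variable on first use.
    static DUMP: Lazy<bool> = Lazy::new(|| read_bool_env_var("MY_TOOL_DUMP"));
    *DUMP
}

fn dump_env_var_str() -> &'static str {
    // clap's default_value wants a string, so render the cached bool.
    if dump_env_var() { "true" } else { "false" }
}

fn main() {
    println!("default for --dump-bytecode: {}", dump_env_var_str());
}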
#[clap(long, default_value = "false")] @@ -102,6 +102,12 @@ fn debug_compiler_env_var() -> bool { *DEBUG_COMPILER } +fn debug_compiler_dump_env_var() -> bool { + static DEBUG_COMPILER_DUMP: Lazy<bool> = + Lazy::new(|| read_bool_env_var(cli::MOVE_COMPILER_DUMP_ENV_VAR)); + *DEBUG_COMPILER_DUMP +} + fn compiler_exp_var() -> Vec<String> { static EXP_VAR: Lazy<Vec<String>> = Lazy::new(|| { let s = read_env_var("MOVE_COMPILER_EXP"); @@ -117,3 +123,11 @@ fn debug_compiler_env_var_str() -> &'static str { "false" } } + +fn debug_compiler_dump_env_var_str() -> &'static str { + if debug_compiler_dump_env_var() { + "true" + } else { + "false" + } +} diff --git a/third_party/move/move-compiler/src/command_line/mod.rs b/third_party/move/move-compiler/src/command_line/mod.rs index 640169ee07af8..f9a0dab985a5a 100644 --- a/third_party/move/move-compiler/src/command_line/mod.rs +++ b/third_party/move/move-compiler/src/command_line/mod.rs @@ -38,10 +38,17 @@ pub const MOVE_COMPILED_INTERFACES_DIR: &str = "mv_interfaces"; pub const COMPILED_NAMED_ADDRESS_MAPPING: &str = "compiled-module-address-name"; +// default value for compiler --debug flag (1 or true to set) +// (usually for debugging situations where compiler flags are hard to reach) pub const MOVE_COMPILER_DEBUG_ENV_VAR: &str = "MOVE_COMPILER_DEBUG"; +// Name of compiler CLI debug clap flag (in CLI, looks like --debug): pub const DEBUG_FLAG: &str = "debug"; +// default value for boolean --dump-bytecode flag (1 or true to set) +// (usually for debugging situations where compiler flags are hard to reach) +pub const MOVE_COMPILER_DUMP_ENV_VAR: &str = "MOVE_COMPILER_DUMP"; + pub const MOVE_COMPILER_WARN_OF_DEPRECATION_USE: &str = "MOVE_COMPILER_WARN_OF_DEPRECATION_USE"; pub const MOVE_COMPILER_WARN_OF_DEPRECATION_USE_FLAG: &str = "Wdeprecation"; From a47719ba234f6941d55094eb49f26368d7e02f64 Mon Sep 17 00:00:00 2001 From: Bo Wu Date: Mon, 29 Jan 2024 11:54:05 -0800 Subject: [PATCH 22/44] Add feature flag for removing detailed error from hash calculation --- .../aptos-release-builder/src/components/feature_flags.rs | 3 +++ types/src/on_chain_config/aptos_features.rs | 5 +++++ 2 files changed, 8 insertions(+) diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index e16f7be77fa57..128aba7148b27 100644 --- a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -98,6 +98,7 @@ pub enum FeatureFlag { ReconfigureWithDKG, ZkIdSignature, OpenIdSignature, + RemoveDetailedError, } fn generate_features_blob(writer: &CodeWriter, data: &[u64]) { @@ -254,6 +255,7 @@ impl From<FeatureFlag> for AptosFeatureFlag { FeatureFlag::ReconfigureWithDKG => AptosFeatureFlag::RECONFIGURE_WITH_DKG, FeatureFlag::ZkIdSignature => AptosFeatureFlag::ZK_ID_SIGNATURE, FeatureFlag::OpenIdSignature => AptosFeatureFlag::OPEN_ID_SIGNATURE, + FeatureFlag::RemoveDetailedError => AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH, } } } @@ -333,6 +335,7 @@ impl From<AptosFeatureFlag> for FeatureFlag { AptosFeatureFlag::RECONFIGURE_WITH_DKG => FeatureFlag::ReconfigureWithDKG, AptosFeatureFlag::ZK_ID_SIGNATURE => FeatureFlag::ZkIdSignature, AptosFeatureFlag::OPEN_ID_SIGNATURE => FeatureFlag::OpenIdSignature, + AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH => FeatureFlag::RemoveDetailedError, } } } diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs index 8f8bbb7c12d23..e5fe29864572e 100644 ---
a/types/src/on_chain_config/aptos_features.rs +++ b/types/src/on_chain_config/aptos_features.rs @@ -55,6 +55,7 @@ pub enum FeatureFlag { RECONFIGURE_WITH_DKG = 45, ZK_ID_SIGNATURE = 46, OPEN_ID_SIGNATURE = 47, + REMOVE_DETAILED_ERROR_FROM_HASH = 48, } /// Representation of features on chain as a bitset. @@ -158,4 +159,8 @@ impl Features { pub fn is_open_id_signature_enabled(&self) -> bool { self.is_enabled(FeatureFlag::OPEN_ID_SIGNATURE) } + + pub fn is_remove_detailed_error_from_hash_enabled(&self) -> bool { + self.is_enabled(FeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH) + } } From 6a447c4f6a89396c154d52f13f12b4c09b1a40af Mon Sep 17 00:00:00 2001 From: larry-aptos <112209412+larry-aptos@users.noreply.github.com> Date: Tue, 30 Jan 2024 12:50:28 -0800 Subject: [PATCH 23/44] [indexer] If the channel is closed, don't panic. (#11827) * fix the broken E2E TS tests. --- .../indexer-grpc-fullnode/src/fullnode_data_service.rs | 8 ++++++++ .../indexer-grpc-fullnode/src/localnet_data_service.rs | 10 +++++++++- .../indexer-grpc-fullnode/src/stream_coordinator.rs | 8 +++----- 3 files changed, 20 insertions(+), 6 deletions(-) diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs index fe9f1364ea0d9..6924540f20cae 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/fullnode_data_service.rs @@ -92,6 +92,14 @@ impl FullnodeData for FullnodeDataService { let start_time = std::time::Instant::now(); // Processes and sends batch of transactions to client let results = coordinator.process_next_batch().await; + if results.is_empty() { + info!( + start_version = starting_version, + chain_id = ledger_chain_id, + "[Indexer Fullnode] Client disconnected." + ); + break; + } let max_version = match IndexerStreamCoordinator::get_max_batch_version(results) { Ok(max_version) => max_version, Err(e) => { diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs index 8e72f82a8f53b..2235645878258 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/localnet_data_service.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{stream_coordinator::IndexerStreamCoordinator, ServiceContext}; -use aptos_logger::error; +use aptos_logger::{error, info}; use aptos_protos::{ indexer::v1::{raw_data_server::RawData, GetTransactionsRequest, TransactionsResponse}, internal::fullnode::v1::transactions_from_node_response, @@ -63,6 +63,14 @@ impl RawData for LocalnetDataService { loop { // Processes and sends batch of transactions to client let results = coordinator.process_next_batch().await; + if results.is_empty() { + info!( + start_version = starting_version, + chain_id = ledger_chain_id, + "[Indexer Fullnode] Client disconnected." 
+ ); + break; + } let max_version = match IndexerStreamCoordinator::get_max_batch_version(results) { Ok(max_version) => max_version, Err(e) => { diff --git a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs index a772948aba129..bc8f68da5119c 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-fullnode/src/stream_coordinator.rs @@ -187,11 +187,9 @@ impl IndexerStreamCoordinator { // Stage 3: send responses to stream let sending_start_time = std::time::Instant::now(); for response in responses { - if let Err(err) = self.transactions_sender.send(Ok(response)).await { - panic!( - "[Indexer Fullnode] Error sending transaction response to stream: {:?}", - err - ); + if self.transactions_sender.send(Ok(response)).await.is_err() { + // Error from closed channel. This means the client has disconnected. + return vec![]; } } log_grpc_step_fullnode( From 3aba7d654de56f8701a327255996f0a5c1d69e59 Mon Sep 17 00:00:00 2001 From: rtso <8248583+rtso@users.noreply.github.com> Date: Tue, 30 Jan 2024 10:54:27 -0800 Subject: [PATCH 24/44] Remove log --- .../indexer-grpc-data-service/src/service.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs index 955e375ccffa0..1a18ee381e00e 100644 --- a/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs +++ b/ecosystem/indexer-grpc/indexer-grpc-data-service/src/service.rs @@ -246,7 +246,6 @@ impl RawData for RawDataServerWrapper { let mut tps_calculator = MovingAverage::new(MOVING_AVERAGE_WINDOW_SIZE); loop { // 1. Fetch data from cache and file store. - let current_batch_start_time = std::time::Instant::now(); let mut transaction_data = match data_fetch( current_version, &mut cache_operator, @@ -258,20 +257,6 @@ impl RawData for RawDataServerWrapper { { Ok(TransactionsDataStatus::Success(transactions)) => transactions, Ok(TransactionsDataStatus::AheadOfCache) => { - info!( - start_version = current_version, - request_name = request_metadata.processor_name.as_str(), - request_email = request_metadata.request_email.as_str(), - request_api_key_name = request_metadata.request_api_key_name.as_str(), - processor_name = request_metadata.processor_name.as_str(), - connection_id = request_metadata.request_connection_id.as_str(), - request_user_classification = - request_metadata.request_user_classification.as_str(), - duration_in_secs = current_batch_start_time.elapsed().as_secs_f64(), - service_type = SERVICE_TYPE, - "[Data Service] Requested data is ahead of cache. Sleeping for {} ms.", - AHEAD_OF_CACHE_RETRY_SLEEP_DURATION_MS, - ); ahead_of_cache_data_handling().await; // Retry after a short sleep. 
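// Context for the disconnect handling introduced above: an mpsc Sender's
// send() errs only when every Receiver has been dropped, so a send error is
// a reliable "client went away" signal and a clean-shutdown path, not a bug.
// A minimal sketch, assuming the tokio crate; names are illustrative:
use tokio::sync::mpsc;

async fn stream_batches(tx: mpsc::Sender<u64>) {
    for batch in 0u64.. {
        if tx.send(batch).await.is_err() {
            // Closed channel: the receiver side disconnected; stop streaming.
            println!("client disconnected, stopping stream");
            return;
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel(8);
    let producer = tokio::spawn(stream_batches(tx));
    // Take a few batches, then drop the receiver to simulate a disconnect.
    for _ in 0..3 {
        let _ = rx.recv().await;
    }
    drop(rx);
    let _ = producer.await; // producer exits via the is_err() branch
}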
continue; From 3f2980c033761c37721c9b386f8a02cceb59a990 Mon Sep 17 00:00:00 2001 From: igor-aptos <110557261+igor-aptos@users.noreply.github.com> Date: Tue, 30 Jan 2024 18:01:23 -0800 Subject: [PATCH 25/44] Recalibrate single_node_performance (#11308) * Recalibrate single_node_performance * new recalibration --------- --- testsuite/single_node_performance.py | 50 ++++++++++++++-------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/testsuite/single_node_performance.py b/testsuite/single_node_performance.py index 0e25465b53aea..55fdb4d6b74b8 100755 --- a/testsuite/single_node_performance.py +++ b/testsuite/single_node_performance.py @@ -85,33 +85,33 @@ class RunGroupConfig: # https://app.axiom.co/aptoslabs-hghf/explorer?qid=29zYzeVi7FX-s4ukl5&relative=1 # fmt: off TESTS = [ - RunGroupConfig(expected_tps=25300, key=RunGroupKey("no-op"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=3500, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=16200, key=RunGroupKey("coin-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=24000, key=RunGroupKey("no-op"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=3600, key=RunGroupKey("no-op", module_working_set_size=1000), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=15000, key=RunGroupKey("coin-transfer"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), # this was changed from 42000 to make landings not flaky, needs follow up - RunGroupConfig(expected_tps=35500, key=RunGroupKey("coin-transfer", executor_type="native"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=13500, key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=33200, key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=22000, key=RunGroupKey("account-resource32-b"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=4150, key=RunGroupKey("modify-global-resource"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=13000, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=140, key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=2600, key=RunGroupKey( + RunGroupConfig(expected_tps=37500, key=RunGroupKey("coin-transfer", executor_type="native"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=12800, key=RunGroupKey("account-generation"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=30000, key=RunGroupKey("account-generation", executor_type="native"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=20700, key=RunGroupKey("account-resource32-b"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=4500, key=RunGroupKey("modify-global-resource"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=16500, key=RunGroupKey("modify-global-resource", module_working_set_size=10), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=135, key=RunGroupKey("publish-package"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=2620, key=RunGroupKey( "mix_publish_transfer", 
transaction_type_override="publish-package coin-transfer", transaction_weights_override="1 500", ), included_in=LAND_BLOCKING_AND_C), RunGroupConfig(expected_tps=365, key=RunGroupKey("batch100-transfer"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=1100, key=RunGroupKey("batch100-transfer", executor_type="native"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=995, key=RunGroupKey("batch100-transfer", executor_type="native"), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=165, key=RunGroupKey("vector-picture40"), included_in=Flow(0), waived=True), RunGroupConfig(expected_tps=1000, key=RunGroupKey("vector-picture40", module_working_set_size=20), included_in=Flow(0), waived=True), - RunGroupConfig(expected_tps=160, key=RunGroupKey("vector-picture30k"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=1000, key=RunGroupKey("vector-picture30k", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=165, key=RunGroupKey("vector-picture30k"), included_in=LAND_BLOCKING_AND_C), + RunGroupConfig(expected_tps=995, key=RunGroupKey("vector-picture30k", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=17, key=RunGroupKey("smart-table-picture30-k-with200-change"), included_in=LAND_BLOCKING_AND_C), - RunGroupConfig(expected_tps=82, key=RunGroupKey("smart-table-picture30-k-with200-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), - # RunGroupConfig(expected_tps=3, key=RunGroupKey("smart-table-picture1-m-with1-k-change"), included_in=LAND_BLOCKING_AND_C, waived=True), - # RunGroupConfig(expected_tps=12, key=RunGroupKey("smart-table-picture1-m-with1-k-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=86, key=RunGroupKey("smart-table-picture30-k-with200-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), + # RunGroupConfig(expected_tps=3.6, key=RunGroupKey("smart-table-picture1-m-with1-k-change"), included_in=LAND_BLOCKING_AND_C), + # RunGroupConfig(expected_tps=12.8, key=RunGroupKey("smart-table-picture1-m-with1-k-change", module_working_set_size=20), included_in=Flow.CONTINUOUS), # RunGroupConfig(expected_tps=5, key=RunGroupKey("smart-table-picture1-b-with1-k-change"), included_in=Flow(0), waived=True), # RunGroupConfig(expected_tps=10, key=RunGroupKey("smart-table-picture1-b-with1-k-change", module_working_set_size=20), included_in=Flow(0), waived=True), @@ -122,19 +122,19 @@ class RunGroupConfig: RunGroupConfig(expected_tps=4050, key=RunGroupKey("modify-global-bounded-agg-v2"), included_in=Flow.AGG_V2), RunGroupConfig(expected_tps=12500, key=RunGroupKey("modify-global-bounded-agg-v2", module_working_set_size=50), included_in=Flow.AGG_V2), - RunGroupConfig(expected_tps=4000, key=RunGroupKey("resource-groups-global-write-tag1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS, waived=True), + RunGroupConfig(expected_tps=3800, key=RunGroupKey("resource-groups-global-write-tag1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-global-write-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=4000, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS, waived=True), + RunGroupConfig(expected_tps=3500, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb"), 
included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-global-write-and-read-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-write-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS, waived=True), + RunGroupConfig(expected_tps=17900, key=RunGroupKey("resource-groups-sender-write-tag1-kb"), included_in=Flow.CONTINUOUS | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-write-tag1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), - RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-multi-change1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS, waived=True), + RunGroupConfig(expected_tps=16230, key=RunGroupKey("resource-groups-sender-multi-change1-kb"), included_in=LAND_BLOCKING_AND_C | Flow.RESOURCE_GROUPS), RunGroupConfig(expected_tps=8000, key=RunGroupKey("resource-groups-sender-multi-change1-kb", module_working_set_size=20), included_in=Flow.RESOURCE_GROUPS, waived=True), RunGroupConfig(expected_tps=1890, key=RunGroupKey("token-v1ft-mint-and-transfer"), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=9250, key=RunGroupKey("token-v1ft-mint-and-transfer", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=1100, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=5900, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential", module_working_set_size=20), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=6100, key=RunGroupKey("token-v1nft-mint-and-transfer-sequential", module_working_set_size=20), included_in=Flow.CONTINUOUS), RunGroupConfig(expected_tps=1300, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel"), included_in=Flow(0)), RunGroupConfig(expected_tps=5300, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel", module_working_set_size=20), included_in=Flow(0)), @@ -142,10 +142,10 @@ class RunGroupConfig: # RunGroupConfig(expected_tps=1000, key=RunGroupKey("token-v1nft-mint-and-store-sequential"), included_in=Flow(0)), # RunGroupConfig(expected_tps=1000, key=RunGroupKey("token-v1nft-mint-and-transfer-parallel"), included_in=Flow(0)), - RunGroupConfig(expected_tps=25500, key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), + RunGroupConfig(expected_tps=23500, key=RunGroupKey("no-op5-signers"), included_in=Flow.CONTINUOUS), - RunGroupConfig(expected_tps=1710, key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), - RunGroupConfig(expected_tps=6900, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=1610, key=RunGroupKey("token-v2-ambassador-mint"), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), + RunGroupConfig(expected_tps=6800, key=RunGroupKey("token-v2-ambassador-mint", module_working_set_size=20), included_in=LAND_BLOCKING_AND_C | Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_connected_components", executor_type="sharded", sharding_traffic_flags="--connected-tx-grps 5000", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), RunGroupConfig(expected_tps=50000, key=RunGroupKey("coin_transfer_hotspot", executor_type="sharded", 
sharding_traffic_flags="--hotspot-probability 0.8", transaction_type_override=""), included_in=Flow.REPRESENTATIVE), From cae3d44a0e5fc4362cc0660c48112d7e92d83427 Mon Sep 17 00:00:00 2001 From: Stelian Ionescu Date: Tue, 30 Jan 2024 20:37:27 -0500 Subject: [PATCH 26/44] [GHA] Remove jobs `sequential-execution-performance` and `parallel-execution-performance` They're unreliable. --- .../workflow-run-execution-performance.yaml | 48 ------------------- 1 file changed, 48 deletions(-) diff --git a/.github/workflows/workflow-run-execution-performance.yaml b/.github/workflows/workflow-run-execution-performance.yaml index c8c0fce1f55cd..b67eb1c637abf 100644 --- a/.github/workflows/workflow-run-execution-performance.yaml +++ b/.github/workflows/workflow-run-execution-performance.yaml @@ -50,54 +50,6 @@ jobs: id: determine_file_changes uses: ./.github/actions/file-change-determinator - # Run sequential execution performance tests - sequential-execution-performance: - needs: file_change_determinator - timeout-minutes: 30 - runs-on: ${{ inputs.RUNNER_NAME }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ inputs.GIT_SHA }} - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main - with: - GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - name: Run sequential execution benchmark in performance build mode - shell: bash - run: testsuite/sequential_execution_performance.py - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - run: echo "Skipping sequential execution performance!" - if: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' || !inputs.IS_FULL_RUN }} - - # Run parallel execution performance tests - parallel-execution-performance: - needs: file_change_determinator - timeout-minutes: 60 - runs-on: ${{ inputs.RUNNER_NAME }} - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ inputs.GIT_SHA }} - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - uses: aptos-labs/aptos-core/.github/actions/rust-setup@main - with: - GIT_CREDENTIALS: ${{ secrets.GIT_CREDENTIALS }} - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - name: Run parallel execution benchmark in performance build mode - shell: bash - run: testsuite/parallel_execution_performance.py - if: ${{ needs.file_change_determinator.outputs.only_docs_changed != 'true' && inputs.IS_FULL_RUN }} - - - run: echo "Skipping parallel execution performance!" - if: ${{ needs.file_change_determinator.outputs.only_docs_changed == 'true' || !inputs.IS_FULL_RUN }} - # Run single node execution performance tests single-node-performance: needs: file_change_determinator From d8dde3347940fe3aa95b0816de4bcf3a8f008ea7 Mon Sep 17 00:00:00 2001 From: "Brian R. Murphy" <132495859+brmataptos@users.noreply.github.com> Date: Wed, 31 Jan 2024 12:21:07 -0800 Subject: [PATCH 27/44] [move-compiler-v2] Implement unused parameter and unused local warnings as an AST pass (#11756) Check for and warn about unused parameters and local variables in compiler-v2 AST (flow-insensitive) pass. 
(#11756) --- .../src/flow_insensitive_checkers.rs | 198 ++++++++++++++ .../move/move-compiler-v2/src/inliner.rs | 4 +- third_party/move/move-compiler-v2/src/lib.rs | 12 +- .../tests/bytecode-generator/borrow.exp | 14 + .../tests/bytecode-generator/globals.exp | 8 + .../tests/bytecode-generator/tuple.exp | 8 + .../bytecode-generator/tuple_invalid.exp | 8 + .../checking/access_specifiers/access_ok.exp | 20 ++ .../inlining/shadowing_unused_nodecl.exp | 8 + .../checking/naming/generics_shadowing.exp | 8 + .../specs/inline_spec_inference_bitvector.exp | 8 + .../specs/inline_spec_inference_vector.exp | 8 + .../tests/checking/typing/assign_nested.exp | 8 +- .../tests/checking/typing/assign_nested.move | 2 +- .../tests/checking/typing/assign_tuple.exp | 8 + .../typing/declare_with_type_annot.exp | 26 ++ .../tests/checking/typing/eq_inline.exp | 8 + .../checking/typing/unused_lambda_param.exp | 71 +++++ .../checking/typing/unused_lambda_param.move | 17 ++ .../tests/checking/typing/unused_local.exp | 257 ++++++++++++++++++ .../tests/checking/typing/unused_local.move | 46 ++++ .../tests/copy-propagation/call_1.exp | 8 + .../copy-propagation/dead_assignment_1.exp | 8 + .../tests/copy-propagation/immut_refs_2.exp | 8 + .../tests/explicit-drop/unused_var.exp | 6 + .../tests/file-format-generator/borrow.exp | 14 + .../tests/file-format-generator/const.exp | 68 +++++ .../tests/file-format-generator/globals.exp | 8 + .../move/move-compiler-v2/tests/testsuite.rs | 21 +- .../use_twice_before_assign.exp | 8 + .../uninit-use-checker/uses_before_assign.exp | 8 + .../v1-commands/move_before_assign.exp | 8 + .../v1-commands/use_before_assign.exp | 8 + .../tests/inlining/multi_param.exp | 15 +- .../tests/no-safety/simple_map_keys.exp | 15 +- third_party/move/move-model/src/symbol.rs | 4 + 36 files changed, 931 insertions(+), 23 deletions(-) create mode 100644 third_party/move/move-compiler-v2/src/flow_insensitive_checkers.rs create mode 100644 third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.move create mode 100644 third_party/move/move-compiler-v2/tests/checking/typing/unused_local.exp create mode 100644 third_party/move/move-compiler-v2/tests/checking/typing/unused_local.move diff --git a/third_party/move/move-compiler-v2/src/flow_insensitive_checkers.rs b/third_party/move/move-compiler-v2/src/flow_insensitive_checkers.rs new file mode 100644 index 0000000000000..859324fa8408b --- /dev/null +++ b/third_party/move/move-compiler-v2/src/flow_insensitive_checkers.rs @@ -0,0 +1,198 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +//! Flow-insensitive checks can be done on the AST. +//! +//! Warnings about Unused parameter and local variable +//! "Unused assignment or binding for local 's'. Consider removing, replacing with '_' or prefixing with '_' (e.g., '_r_ref') + +use codespan_reporting::diagnostic::Severity; +use move_model::{ + ast::{ExpData, TempIndex}, + model::{GlobalEnv, Loc, NodeId, Parameter}, + symbol::Symbol, +}; +use std::{collections::BTreeSet, iter::Iterator}; + +/// Warns about all parameters and local variables that are unused. 
+pub fn check_for_unused_vars_and_params(env: &mut GlobalEnv) { + for module in env.get_modules() { + if module.is_target() { + for func in module.get_functions() { + if let Some(def) = func.get_def() { + let params = &func.get_parameters(); + find_unused_params_and_vars(env, params, def) + } + } + } + } +} + +fn find_unused_params_and_vars(env: &GlobalEnv, params: &[Parameter], exp: &ExpData) { + let mut visitor = SymbolVisitor::new(env, params); + exp.visit_pre_post(&mut |post, exp_data| visitor.entry(post, exp_data)); + visitor.check_parameter_usage(); +} + +/// Tracks things of type `V` which are visible from below in a tree, such as free/used variables, +/// etc. `values` is cleared when entering a scope, but the old value is saved so it can be re-added +/// once the scope is finished. +struct ScopedVisibleSet<V> { + saved: Vec<BTreeSet<V>>, + values: BTreeSet<V>, +} + +impl<V> ScopedVisibleSet<V> +where + V: Ord + Copy, +{ + pub fn new() -> Self { + Self { + saved: Vec::new(), + values: BTreeSet::new(), + } + } + + /// Save and clear the current set. + pub fn enter_scope(&mut self) { + self.saved.push(std::mem::take(&mut self.values)); + } + + /// Combine the current values with that previously saved + /// in a corresponding `enter_scope` call. + pub fn exit_scope(&mut self) { + let mut saved_values = self + .saved + .pop() + .expect("exit_scope calls should balance enter_scope calls"); + self.values.append(&mut saved_values); + } + + /// Add a value to the current values. + pub fn insert(&mut self, value: V) { + self.values.insert(value); + } + + /// Remove a value from the current scope. + pub fn remove(&mut self, value: &V) { + self.values.remove(value); + } + + pub fn contains(&self, value: &V) -> bool { + self.values.contains(value) + } +} + +// Visits all symbols in a function. +struct SymbolVisitor<'env, 'params> { + env: &'env GlobalEnv, + params: &'params [Parameter], + seen_uses: ScopedVisibleSet<Symbol>, +} + +impl<'env, 'params> SymbolVisitor<'env, 'params> { + fn new(env: &'env GlobalEnv, params: &'params [Parameter]) -> SymbolVisitor<'env, 'params> { + SymbolVisitor { + env, + params, + seen_uses: ScopedVisibleSet::new(), + } + } + + fn entry(&mut self, post: bool, e: &ExpData) -> bool { + use ExpData::*; + match e { + Block(_, pat, _, _) => { + if !post { + self.seen_uses.enter_scope(); + } else { + // postorder + for (id, var) in pat.vars() { + self.node_symbol_decl_visitor(post, &id, &var, "local variable"); + } + self.seen_uses.exit_scope(); + } + }, + Lambda(_, pat, _) => { + if !post { + self.seen_uses.enter_scope(); + } else { + // postorder + for (id, var) in pat.vars() { + self.node_symbol_decl_visitor(post, &id, &var, "parameter"); + } + self.seen_uses.exit_scope(); + } + }, + Quant(_, _, ranges, ..)
=> { + if !post { + self.seen_uses.enter_scope(); + } else { + // postorder + for (id, var) in ranges.iter().flat_map(|(pat, _)| pat.vars().into_iter()) { + self.node_symbol_decl_visitor(post, &id, &var, "range parameter"); + } + self.seen_uses.exit_scope(); + } + }, + Assign(_, pat, _) => { + for (id, sym) in pat.vars().iter() { + self.node_symbol_use_visitor(post, id, sym); + } + }, + LocalVar(id, sym) => { + self.node_symbol_use_visitor(post, id, sym); + }, + Temporary(id, idx) => { + self.node_tmp_use_visitor(post, id, idx); + }, + _ => {}, + } + true // always continue + } + + fn check_symbol_usage(&mut self, loc: &Loc, sym: &Symbol, kind: &str) { + let symbol_pool = self.env.symbol_pool(); + if !symbol_pool.symbol_starts_with_underscore(*sym) && !self.seen_uses.contains(sym) { + let msg = format!( + "Unused {} `{}`. Consider removing or prefixing with an underscore: `_{}`", + kind, + sym.display(symbol_pool), + sym.display(symbol_pool) + ); + self.env.diag(Severity::Warning, loc, &msg); + } + } + + fn check_parameter_usage(&mut self) { + for Parameter(sym, _atype, loc) in self.params.iter() { + self.check_symbol_usage(loc, sym, "parameter"); + } + } + + fn node_symbol_decl_visitor(&mut self, post: bool, id: &NodeId, sym: &Symbol, kind: &str) { + if post { + let loc = self.env.get_node_loc(*id); + self.check_symbol_usage(&loc, sym, kind); + self.seen_uses.remove(sym); + } + } + + fn node_symbol_use_visitor(&mut self, post: bool, _id: &NodeId, sym: &Symbol) { + if post { + self.seen_uses.insert(*sym); + } + } + + fn node_tmp_use_visitor(&mut self, post: bool, id: &NodeId, idx: &TempIndex) { + if post { + if let Some(sym) = self.params.get(*idx).map(|p| p.0) { + self.node_symbol_use_visitor(post, id, &sym) + } else { + let loc = self.env.get_node_loc(*id); + let msg = format!("Temporary `{}` has no associated user symbol.", idx); + self.env.diag(Severity::Bug, &loc, &msg); + } + } + } +} diff --git a/third_party/move/move-compiler-v2/src/inliner.rs b/third_party/move/move-compiler-v2/src/inliner.rs index b990054224b64..276c52336e843 100644 --- a/third_party/move/move-compiler-v2/src/inliner.rs +++ b/third_party/move/move-compiler-v2/src/inliner.rs @@ -382,9 +382,7 @@ impl<'env> Inliner<'env> { fn do_inlining_in(&mut self, func_id: QualifiedFunId) { assert!(!self.funexprs_after_inlining.contains_key(&func_id)); let func_env = self.env.get_function(func_id); - - let optional_def = func_env.get_def(); - if let Some(def) = optional_def { + if let Some(def) = func_env.get_def() { let mut rewriter = OuterInlinerRewriter::new(self.env, self); let rewritten = rewriter.rewrite_exp(def.clone()); diff --git a/third_party/move/move-compiler-v2/src/lib.rs b/third_party/move/move-compiler-v2/src/lib.rs index 17fd58f1af75e..de47c3314299a 100644 --- a/third_party/move/move-compiler-v2/src/lib.rs +++ b/third_party/move/move-compiler-v2/src/lib.rs @@ -5,6 +5,7 @@ mod bytecode_generator; mod experiments; mod file_format_generator; +pub mod flow_insensitive_checkers; pub mod function_checker; pub mod inliner; mod options; @@ -55,12 +56,21 @@ pub fn run_move_compiler( let mut env = run_checker(options.clone())?; check_errors(&env, error_writer, "checking errors")?; + if options.debug { + eprintln!("After error check, GlobalEnv={}", env.dump_env()); + } + + // Flow-insensitive checks on AST + flow_insensitive_checkers::check_for_unused_vars_and_params(&mut env); function_checker::check_for_function_typed_parameters(&mut env); function_checker::check_access_and_use(&mut env); check_errors(&env, error_writer, 
"checking errors")?; if options.debug { - eprintln!("After error check, GlobalEnv={}", env.dump_env()); + eprintln!( + "After flow-insensitive checks, GlobalEnv={}", + env.dump_env() + ); } // Run inlining. diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/borrow.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/borrow.exp index 575e0f57d86f2..c581a64671f66 100644 --- a/third_party/move/move-compiler-v2/tests/bytecode-generator/borrow.exp +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/borrow.exp @@ -1,3 +1,17 @@ + +Diagnostics: +warning: Unused parameter `param`. Consider removing or prefixing with an underscore: `_param` + ┌─ tests/bytecode-generator/borrow.move:12:15 + │ +12 │ fun local(param: u64): u64 { + │ ^^^^^ + +warning: Unused parameter `param`. Consider removing or prefixing with an underscore: `_param` + ┌─ tests/bytecode-generator/borrow.move:29:19 + │ +29 │ fun mut_local(param: u64): u64 { + │ ^^^^^ + // ---- Model Dump module 0x42::borrow { struct S { diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/globals.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/globals.exp index cbae4c1db03f3..c500bf28a4487 100644 --- a/third_party/move/move-compiler-v2/tests/bytecode-generator/globals.exp +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/globals.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/bytecode-generator/globals.move:18:27 + │ +18 │ fun write(a: address, x: u64): u64 { + │ ^ + // ---- Model Dump module 0x42::globals { struct R { diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple.exp index c0c82a2c5cbdd..c1a5b3d1abf50 100644 --- a/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple.exp +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/bytecode-generator/tuple.move:11:19 + │ +11 │ fun use_tuple(x: u64): u64 { + │ ^ + // ---- Model Dump module 0x42::tuple { struct S { diff --git a/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple_invalid.exp b/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple_invalid.exp index 3e78e33e385d0..b694a72b8b72f 100644 --- a/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple_invalid.exp +++ b/third_party/move/move-compiler-v2/tests/bytecode-generator/tuple_invalid.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/bytecode-generator/tuple_invalid.move:11:20 + │ +11 │ fun use_tuple1(x: u64): u64 { + │ ^ + // ---- Model Dump module 0x42::tuple_invalid { struct S { diff --git a/third_party/move/move-compiler-v2/tests/checking/access_specifiers/access_ok.exp b/third_party/move/move-compiler-v2/tests/checking/access_specifiers/access_ok.exp index 6be3919df7927..ecd3e8f5e7f25 100644 --- a/third_party/move/move-compiler-v2/tests/checking/access_specifiers/access_ok.exp +++ b/third_party/move/move-compiler-v2/tests/checking/access_specifiers/access_ok.exp @@ -1,3 +1,23 @@ + +Diagnostics: +warning: Unused parameter `a`. 
Consider removing or prefixing with an underscore: `_a` + ┌─ tests/checking/access_specifiers/access_ok.move:35:12 + │ +35 │ fun f9(a: address) acquires *(a) { + │ ^ + +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/access_specifiers/access_ok.move:38:13 + │ +38 │ fun f10(x: u64) acquires *(make_up_address(x)) { + │ ^ + +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/access_specifiers/access_ok.move:41:25 + │ +41 │ fun make_up_address(x: u64): address { + │ ^ + // ---- Model Dump module 0x42::m { struct T { diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/shadowing_unused_nodecl.exp b/third_party/move/move-compiler-v2/tests/checking/inlining/shadowing_unused_nodecl.exp index 30d49ec0fc9c1..b3618714a6e89 100644 --- a/third_party/move/move-compiler-v2/tests/checking/inlining/shadowing_unused_nodecl.exp +++ b/third_party/move/move-compiler-v2/tests/checking/inlining/shadowing_unused_nodecl.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `z`. Consider removing or prefixing with an underscore: `_z` + ┌─ tests/checking/inlining/shadowing_unused_nodecl.move:6:42 + │ +6 │ public inline fun quux(f:|u64, u64|, z: u64) { + │ ^ + // ---- Model Dump module 0x42::Test { public fun test_shadowing() { diff --git a/third_party/move/move-compiler-v2/tests/checking/naming/generics_shadowing.exp b/third_party/move/move-compiler-v2/tests/checking/naming/generics_shadowing.exp index eab4f41536ed7..db1ac8c729b7f 100644 --- a/third_party/move/move-compiler-v2/tests/checking/naming/generics_shadowing.exp +++ b/third_party/move/move-compiler-v2/tests/checking/naming/generics_shadowing.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `s`. Consider removing or prefixing with an underscore: `_s` + ┌─ tests/checking/naming/generics_shadowing.move:6:29 + │ +6 │ fun foo(s: S): S { + │ ^ + // ---- Model Dump module 0x2::M { struct S { diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_bitvector.exp b/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_bitvector.exp index 789c07e9447d9..1ca840a26998f 100644 --- a/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_bitvector.exp +++ b/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_bitvector.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `length`. Consider removing or prefixing with an underscore: `_length` + ┌─ tests/checking/specs/inline_spec_inference_bitvector.move:3:18 + │ +3 │ public fun new(length: u64) { + │ ^^^^^^ + // ---- Model Dump module 0x42::bit_vector_infer { use std::vector; diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_vector.exp b/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_vector.exp index 0e107f4ddc121..9f599daa2a6e3 100644 --- a/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_vector.exp +++ b/third_party/move/move-compiler-v2/tests/checking/specs/inline_spec_inference_vector.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `length`. 
Consider removing or prefixing with an underscore: `_length` + ┌─ tests/checking/specs/inline_spec_inference_vector.move:3:18 + │ +3 │ public fun new(length: u64) { + │ ^^^^^^ + // ---- Model Dump module 0x42::bit_vector { use std::vector; diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.exp b/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.exp index d503528ee9b0f..6e9994af31930 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.exp @@ -1,7 +1,7 @@ Diagnostics: -error: invalid assignment - ┌─ tests/checking/typing/assign_nested.move:14:19 +error: expected 4 item(s), found 3 + ┌─ tests/checking/typing/assign_nested.move:14:9 │ -14 │ (_, x, _, s.f) = four(); - │ ^^^ Invalid assignment syntax. Expected: a local, a field write, or a deconstructing assignment +14 │ (_, x, _) = four(); + │ ^^^^^^^^^ diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.move b/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.move index 29c3340c4b0fe..f492d8242081c 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.move +++ b/third_party/move/move-compiler-v2/tests/checking/typing/assign_nested.move @@ -11,7 +11,7 @@ module 0x8675309::A { let r = 0; let r_ref = &mut r; let s = S { f: 0 }; - (_, x, _, s.f) = four(); + (_, x, _) = four(); } } diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/assign_tuple.exp b/third_party/move/move-compiler-v2/tests/checking/typing/assign_tuple.exp index 4b4e13f06a8f7..9a2ccc6fdd700 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/assign_tuple.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/assign_tuple.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/assign_tuple.move:11:20 + │ +11 │ fun use_tuple1(x: u64): u64 { + │ ^ + // ---- Model Dump module 0x42::tuple_invalid { struct S { diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/declare_with_type_annot.exp b/third_party/move/move-compiler-v2/tests/checking/typing/declare_with_type_annot.exp index de44d54510fb5..5f02d585c1c91 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/declare_with_type_annot.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/declare_with_type_annot.exp @@ -1,3 +1,29 @@ + +Diagnostics: +warning: Unused local variable `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/declare_with_type_annot.move:6:13 + │ +6 │ let x: u64; + │ ^ + +warning: Unused local variable `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/declare_with_type_annot.move:7:14 + │ +7 │ let (x, b, R{f}): (u64, bool, R); + │ ^ + +warning: Unused local variable `b`. Consider removing or prefixing with an underscore: `_b` + ┌─ tests/checking/typing/declare_with_type_annot.move:7:17 + │ +7 │ let (x, b, R{f}): (u64, bool, R); + │ ^ + +warning: Unused local variable `f`. 
Consider removing or prefixing with an underscore: `_f` + ┌─ tests/checking/typing/declare_with_type_annot.move:7:22 + │ +7 │ let (x, b, R{f}): (u64, bool, R); + │ ^ + // ---- Model Dump module 0x8675309::M { struct R { diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/eq_inline.exp b/third_party/move/move-compiler-v2/tests/checking/typing/eq_inline.exp index 6ede7f9161abf..b8e8f93b8dd3d 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/eq_inline.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/eq_inline.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `f`. Consider removing or prefixing with an underscore: `_f` + ┌─ tests/checking/typing/eq_inline.move:3:20 + │ +3 │ inline fun foo(f: |&u64|) { + │ ^ + // ---- Model Dump module 0x42::m { private fun g() { diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.exp b/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.exp new file mode 100644 index 0000000000000..2d716459b51bc --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.exp @@ -0,0 +1,71 @@ + +Diagnostics: +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_lambda_param.move:7:18 + │ +7 │ test(0, |x| 1); + │ ^ + +warning: Function `0xc0ffee::m::unused_lambda` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_lambda_param.move:6:5 + │ +6 │ ╭ fun unused_lambda() { +7 │ │ test(0, |x| 1); +8 │ │ } + │ ╰─────^ + +warning: Function `0xc0ffee::m::unused_lambda_suppressed1` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_lambda_param.move:10:5 + │ +10 │ ╭ fun unused_lambda_suppressed1() { +11 │ │ test(0, |_x| 1); +12 │ │ } + │ ╰─────^ + +warning: Function `0xc0ffee::m::unused_lambda_suppressed2` is unused: it has no current callers and is private to its module. 
+ ┌─ tests/checking/typing/unused_lambda_param.move:14:5 + │ +14 │ ╭ fun unused_lambda_suppressed2() { +15 │ │ test(0, |_| 1); +16 │ │ } + │ ╰─────^ + +// ---- Model Dump +module 0xc0ffee::m { + private fun unused_lambda() { + { + let (p: u64) = Tuple(0); + { + let (x: u64) = Tuple(p); + 1 + } + }; + Tuple() + } + private fun unused_lambda_suppressed1() { + { + let (p: u64) = Tuple(0); + { + let (_x: u64) = Tuple(p); + 1 + } + }; + Tuple() + } + private fun unused_lambda_suppressed2() { + { + let (p: u64) = Tuple(0); + { + let (_) = Tuple(p); + 1 + } + }; + Tuple() + } + spec fun $test(p: u64,f: |u64|u64): u64 { + (f)(p) + } + spec fun $unused_lambda(); + spec fun $unused_lambda_suppressed1(); + spec fun $unused_lambda_suppressed2(); +} // end 0xc0ffee::m diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.move b/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.move new file mode 100644 index 0000000000000..17079c81ec075 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/typing/unused_lambda_param.move @@ -0,0 +1,17 @@ +module 0xc0ffee::m { + inline fun test(p: u64, f: |u64| u64): u64 { + f(p) + } + + fun unused_lambda() { + test(0, |x| 1); + } + + fun unused_lambda_suppressed1() { + test(0, |_x| 1); + } + + fun unused_lambda_suppressed2() { + test(0, |_| 1); + } +} diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.exp b/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.exp new file mode 100644 index 0000000000000..59da02c9cb9e0 --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.exp @@ -0,0 +1,257 @@ + +Diagnostics: +warning: Unused local variable `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_local.move:5:13 + │ +5 │ let x: u64; + │ ^ + +warning: Function `0x8675309::M::t0` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:4:5 + │ +4 │ ╭ fun t0() { +5 │ │ let x: u64; +6 │ │ } + │ ╰─────^ + +warning: Unused local variable `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_local.move:9:14 + │ +9 │ let (x, y): (u64, u64); + │ ^ + +warning: Unused local variable `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/checking/typing/unused_local.move:9:17 + │ +9 │ let (x, y): (u64, u64); + │ ^ + +warning: Function `0x8675309::M::t1` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:8:5 + │ + 8 │ ╭ fun t1() { + 9 │ │ let (x, y): (u64, u64); +10 │ │ } + │ ╰─────^ + +warning: Unused local variable `f`. Consider removing or prefixing with an underscore: `_f` + ┌─ tests/checking/typing/unused_local.move:13:16 + │ +13 │ let S{ f, g }: S; + │ ^ + +warning: Unused local variable `g`. Consider removing or prefixing with an underscore: `_g` + ┌─ tests/checking/typing/unused_local.move:13:19 + │ +13 │ let S{ f, g }: S; + │ ^ + +warning: Function `0x8675309::M::t2` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:12:5 + │ +12 │ ╭ fun t2() { +13 │ │ let S{ f, g }: S; +14 │ │ } + │ ╰─────^ + +warning: Function `0x8675309::M::unused_local_suppressed1` is unused: it has no current callers and is private to its module. 
+ ┌─ tests/checking/typing/unused_local.move:16:5 + │ +16 │ ╭ fun unused_local_suppressed1() { +17 │ │ let _x: u64; +18 │ │ } + │ ╰─────^ + +warning: Function `0x8675309::M::unused_local_suppressed2` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:20:5 + │ +20 │ ╭ fun unused_local_suppressed2() { +21 │ │ let _: u64; +22 │ │ } + │ ╰─────^ + +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_local.move:25:22 + │ +25 │ fun unused_param(x: u64) { + │ ^ + +warning: Function `0x8675309::M::unused_param` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:25:5 + │ +25 │ ╭ fun unused_param(x: u64) { +26 │ │ } + │ ╰─────^ + +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_local.move:28:20 + │ +28 │ fun two_unused(x: u64, y: bool) { + │ ^ + +warning: Unused parameter `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/checking/typing/unused_local.move:28:28 + │ +28 │ fun two_unused(x: u64, y: bool) { + │ ^ + +warning: Function `0x8675309::M::two_unused` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:28:5 + │ +28 │ ╭ fun two_unused(x: u64, y: bool) { +29 │ │ } + │ ╰─────^ + +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/checking/typing/unused_local.move:31:35 + │ +31 │ fun unused_param1_used_param2(x: u64, y: bool): bool { + │ ^ + +warning: Function `0x8675309::M::unused_param1_used_param2` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:31:5 + │ +31 │ ╭ fun unused_param1_used_param2(x: u64, y: bool): bool { +32 │ │ y +33 │ │ } + │ ╰─────^ + +warning: Unused parameter `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/checking/typing/unused_local.move:35:43 + │ +35 │ fun unused_param2_used_param1(x: u64, y: bool): u64 { + │ ^ + +warning: Function `0x8675309::M::unused_param2_used_param1` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:35:5 + │ +35 │ ╭ fun unused_param2_used_param1(x: u64, y: bool): u64 { +36 │ │ x +37 │ │ } + │ ╰─────^ + +warning: Function `0x8675309::M::unused_param_suppressed1` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:39:5 + │ +39 │ ╭ fun unused_param_suppressed1(_: u64) { +40 │ │ } + │ ╰─────^ + +warning: Function `0x8675309::M::unused_param_suppressed2` is unused: it has no current callers and is private to its module. + ┌─ tests/checking/typing/unused_local.move:42:5 + │ +42 │ ╭ fun unused_param_suppressed2(_x: u64) { +43 │ │ } + │ ╰─────^ + +warning: Function `0x8675309::M::unused_native_ok` is unused: it has no current callers and is private to its module. 
+ ┌─ tests/checking/typing/unused_local.move:45:5 + │ +45 │ native fun unused_native_ok(x: u64, y: bool); + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +// ---- Model Dump +module 0x8675309::M { + struct S { + f: u64, + g: bool, + } + private fun t0() { + { + let x: u64; + Tuple() + } + } + private fun t1() { + { + let (x: u64, y: u64); + Tuple() + } + } + private fun t2() { + { + let M::S{ f: f: u64, g: g: bool }; + Tuple() + } + } + private fun two_unused(x: u64,y: bool) { + Tuple() + } + private fun unused_local_suppressed1() { + { + let _x: u64; + Tuple() + } + } + private fun unused_local_suppressed2() { + { + let _; + Tuple() + } + } + private native fun unused_native_ok(x: u64,y: bool); + private fun unused_param(x: u64) { + Tuple() + } + private fun unused_param1_used_param2(x: u64,y: bool): bool { + y + } + private fun unused_param2_used_param1(x: u64,y: bool): u64 { + x + } + private fun unused_param_suppressed1(_: u64) { + Tuple() + } + private fun unused_param_suppressed2(_x: u64) { + Tuple() + } + spec fun $t0() { + { + let x: u64; + Tuple() + } + } + spec fun $t1() { + { + let (x: u64, y: u64); + Tuple() + } + } + spec fun $t2() { + { + let M::S{ f: f: u64, g: g: bool }; + Tuple() + } + } + spec fun $two_unused(x: u64,y: bool) { + Tuple() + } + spec fun $unused_local_suppressed1() { + { + let _x: u64; + Tuple() + } + } + spec fun $unused_local_suppressed2() { + { + let _; + Tuple() + } + } + spec fun $unused_native_ok(x: u64,y: bool); + spec fun $unused_param(x: u64) { + Tuple() + } + spec fun $unused_param1_used_param2(x: u64,y: bool): bool { + y + } + spec fun $unused_param2_used_param1(x: u64,y: bool): u64 { + x + } + spec fun $unused_param_suppressed1(_: u64) { + Tuple() + } + spec fun $unused_param_suppressed2(_x: u64) { + Tuple() + } +} // end 0x8675309::M diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.move b/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.move new file mode 100644 index 0000000000000..f2c060a717fbe --- /dev/null +++ b/third_party/move/move-compiler-v2/tests/checking/typing/unused_local.move @@ -0,0 +1,46 @@ +module 0x8675309::M { + struct S { f: u64, g: bool } + + fun t0() { + let x: u64; + } + + fun t1() { + let (x, y): (u64, u64); + } + + fun t2() { + let S{ f, g }: S; + } + + fun unused_local_suppressed1() { + let _x: u64; + } + + fun unused_local_suppressed2() { + let _: u64; + } + + + fun unused_param(x: u64) { + } + + fun two_unused(x: u64, y: bool) { + } + + fun unused_param1_used_param2(x: u64, y: bool): bool { + y + } + + fun unused_param2_used_param1(x: u64, y: bool): u64 { + x + } + + fun unused_param_suppressed1(_: u64) { + } + + fun unused_param_suppressed2(_x: u64) { + } + + native fun unused_native_ok(x: u64, y: bool); +} diff --git a/third_party/move/move-compiler-v2/tests/copy-propagation/call_1.exp b/third_party/move/move-compiler-v2/tests/copy-propagation/call_1.exp index 829293941de02..27b0ad25d8e4b 100644 --- a/third_party/move/move-compiler-v2/tests/copy-propagation/call_1.exp +++ b/third_party/move/move-compiler-v2/tests/copy-propagation/call_1.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `a`. 
Consider removing or prefixing with an underscore: `_a` + ┌─ tests/copy-propagation/call_1.move:7:13 + │ +7 │ let a = p; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/copy-propagation/dead_assignment_1.exp b/third_party/move/move-compiler-v2/tests/copy-propagation/dead_assignment_1.exp index 3857e27ad61fa..9be6936bbb747 100644 --- a/third_party/move/move-compiler-v2/tests/copy-propagation/dead_assignment_1.exp +++ b/third_party/move/move-compiler-v2/tests/copy-propagation/dead_assignment_1.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `a`. Consider removing or prefixing with an underscore: `_a` + ┌─ tests/copy-propagation/dead_assignment_1.move:3:13 + │ +3 │ let a = p; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/copy-propagation/immut_refs_2.exp b/third_party/move/move-compiler-v2/tests/copy-propagation/immut_refs_2.exp index ddfe4a0b5fae1..e1d11fb8d12b9 100644 --- a/third_party/move/move-compiler-v2/tests/copy-propagation/immut_refs_2.exp +++ b/third_party/move/move-compiler-v2/tests/copy-propagation/immut_refs_2.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `a`. Consider removing or prefixing with an underscore: `_a` + ┌─ tests/copy-propagation/immut_refs_2.move:4:13 + │ +4 │ let a = &p; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/explicit-drop/unused_var.exp b/third_party/move/move-compiler-v2/tests/explicit-drop/unused_var.exp index 756594e3a6320..6f25e5b10fb56 100644 --- a/third_party/move/move-compiler-v2/tests/explicit-drop/unused_var.exp +++ b/third_party/move/move-compiler-v2/tests/explicit-drop/unused_var.exp @@ -8,6 +8,12 @@ warning: Function `0x42::explicate_drop::unused_var` is unused: it has no curren 4 │ │ } │ ╰─────^ +warning: Unused parameter `x`. Consider removing or prefixing with an underscore: `_x` + ┌─ tests/explicit-drop/unused_var.move:6:26 + │ +6 │ fun unused_arg(x: T) { + │ ^ + warning: Function `0x42::explicate_drop::unused_arg` is unused: it has no current callers and is private to its module. ┌─ tests/explicit-drop/unused_var.move:6:2 │ diff --git a/third_party/move/move-compiler-v2/tests/file-format-generator/borrow.exp b/third_party/move/move-compiler-v2/tests/file-format-generator/borrow.exp index 13f5a90b2d757..a8c5a74d3c26d 100644 --- a/third_party/move/move-compiler-v2/tests/file-format-generator/borrow.exp +++ b/third_party/move/move-compiler-v2/tests/file-format-generator/borrow.exp @@ -1,3 +1,17 @@ + +Diagnostics: +warning: Unused parameter `param`. Consider removing or prefixing with an underscore: `_param` + ┌─ tests/file-format-generator/borrow.move:12:15 + │ +12 │ fun local(param: u64): u64 { + │ ^^^^^ + +warning: Unused parameter `param`. 
Consider removing or prefixing with an underscore: `_param` + ┌─ tests/file-format-generator/borrow.move:29:19 + │ +29 │ fun mut_local(param: u64): u64 { + │ ^^^^^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/file-format-generator/const.exp b/third_party/move/move-compiler-v2/tests/file-format-generator/const.exp index 21ab2c4ee6cd4..671d7e26de1c0 100644 --- a/third_party/move/move-compiler-v2/tests/file-format-generator/const.exp +++ b/third_party/move/move-compiler-v2/tests/file-format-generator/const.exp @@ -1,3 +1,71 @@ + +Diagnostics: +warning: Unused local variable `const_true`. Consider removing or prefixing with an underscore: `_const_true` + ┌─ tests/file-format-generator/const.move:3:13 + │ +3 │ let const_true = true; + │ ^^^^^^^^^^ + +warning: Unused local variable `const_false`. Consider removing or prefixing with an underscore: `_const_false` + ┌─ tests/file-format-generator/const.move:4:13 + │ +4 │ let const_false = false; + │ ^^^^^^^^^^^ + +warning: Unused local variable `hex_u8`. Consider removing or prefixing with an underscore: `_hex_u8` + ┌─ tests/file-format-generator/const.move:5:13 + │ +5 │ let hex_u8: u8 = 0x1; + │ ^^^^^^ + +warning: Unused local variable `hex_u16`. Consider removing or prefixing with an underscore: `_hex_u16` + ┌─ tests/file-format-generator/const.move:6:13 + │ +6 │ let hex_u16: u16 = 0x1BAE; + │ ^^^^^^^ + +warning: Unused local variable `hex_u32`. Consider removing or prefixing with an underscore: `_hex_u32` + ┌─ tests/file-format-generator/const.move:7:13 + │ +7 │ let hex_u32: u32 = 0xDEAD80; + │ ^^^^^^^ + +warning: Unused local variable `hex_u64`. Consider removing or prefixing with an underscore: `_hex_u64` + ┌─ tests/file-format-generator/const.move:8:13 + │ +8 │ let hex_u64: u64 = 0xCAFE; + │ ^^^^^^^ + +warning: Unused local variable `hex_u128`. Consider removing or prefixing with an underscore: `_hex_u128` + ┌─ tests/file-format-generator/const.move:9:13 + │ +9 │ let hex_u128: u128 = 0xDEADBEEF; + │ ^^^^^^^^ + +warning: Unused local variable `hex_u256`. Consider removing or prefixing with an underscore: `_hex_u256` + ┌─ tests/file-format-generator/const.move:10:13 + │ +10 │ let hex_u256: u256 = 0x1123_456A_BCDE_F; + │ ^^^^^^^^ + +warning: Unused local variable `a`. Consider removing or prefixing with an underscore: `_a` + ┌─ tests/file-format-generator/const.move:11:13 + │ +11 │ let a = @0x42; + │ ^ + +warning: Unused local variable `vec`. Consider removing or prefixing with an underscore: `_vec` + ┌─ tests/file-format-generator/const.move:12:13 + │ +12 │ let vec = vector[1, 2, 3]; + │ ^^^ + +warning: Unused local variable `s`. Consider removing or prefixing with an underscore: `_s` + ┌─ tests/file-format-generator/const.move:13:13 + │ +13 │ let s = b"Hello!\n"; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/file-format-generator/globals.exp b/third_party/move/move-compiler-v2/tests/file-format-generator/globals.exp index 5ea9f5ee1bbc1..0ad865102c8d2 100644 --- a/third_party/move/move-compiler-v2/tests/file-format-generator/globals.exp +++ b/third_party/move/move-compiler-v2/tests/file-format-generator/globals.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused parameter `x`. 
Consider removing or prefixing with an underscore: `_x` + ┌─ tests/file-format-generator/globals.move:18:27 + │ +18 │ fun write(a: address, x: u64): u64 { + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/testsuite.rs b/third_party/move/move-compiler-v2/tests/testsuite.rs index 34e87bb59cdca..4b2217bbf5d55 100644 --- a/third_party/move/move-compiler-v2/tests/testsuite.rs +++ b/third_party/move/move-compiler-v2/tests/testsuite.rs @@ -7,7 +7,7 @@ use move_binary_format::binary_views::BinaryIndexedView; use move_command_line_common::files::FileHash; use move_compiler::compiled_unit::CompiledUnit; use move_compiler_v2::{ - function_checker, inliner, pipeline, + flow_insensitive_checkers, function_checker, inliner, pipeline, pipeline::{ ability_checker::AbilityChecker, avail_copies_analysis::AvailCopiesAnalysisProcessor, copy_propagation::CopyPropagation, dead_store_elimination::DeadStoreElimination, @@ -310,23 +310,28 @@ impl TestConfig { let mut ok = Self::check_diags(&mut test_output.borrow_mut(), &env); if ok { + if options.debug { + eprint!("After error check, GlobalEnv={}", env.dump_env()); + } + // Flow-insensitive checks on AST + flow_insensitive_checkers::check_for_unused_vars_and_params(&mut env); function_checker::check_for_function_typed_parameters(&mut env); function_checker::check_access_and_use(&mut env); ok = Self::check_diags(&mut test_output.borrow_mut(), &env); } - if ok { if options.debug { - eprint!("After error check, GlobalEnv={}", env.dump_env()); + eprint!( + "After flow-insensitive checks, GlobalEnv={}", + env.dump_env() + ); } - // Run inlining. inliner::run_inlining(&mut env); ok = Self::check_diags(&mut test_output.borrow_mut(), &env); - - if ok && options.debug { - eprint!("After inlining, GlobalEnv={}", env.dump_env()); - } + } + if ok && options.debug { + eprint!("After inlining, GlobalEnv={}", env.dump_env()); } if ok && self.dump_ast { diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_twice_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_twice_before_assign.exp index 02bf766afbb73..b6dcbf64bab6a 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_twice_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/use_twice_before_assign.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/uninit-use-checker/use_twice_before_assign.move:4:9 + │ +4 │ let y = x + x; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/uses_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/uses_before_assign.exp index f485484414f5b..9eda63a3143a7 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/uses_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/uses_before_assign.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `z`. 
Consider removing or prefixing with an underscore: `_z` + ┌─ tests/uninit-use-checker/uses_before_assign.move:5:9 + │ +5 │ let z = x + y; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp index cfe205ea1b841..10d9fec61d88f 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/move_before_assign.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/uninit-use-checker/v1-commands/move_before_assign.move:4:9 + │ +4 │ let y = move x; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp index e2eb97f86e206..5ccf7530d5fde 100644 --- a/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp +++ b/third_party/move/move-compiler-v2/tests/uninit-use-checker/v1-commands/use_before_assign.exp @@ -1,3 +1,11 @@ + +Diagnostics: +warning: Unused local variable `y`. Consider removing or prefixing with an underscore: `_y` + ┌─ tests/uninit-use-checker/v1-commands/use_before_assign.move:4:9 + │ +4 │ let y = x; + │ ^ + ============ initial bytecode ================ [variant baseline] diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/inlining/multi_param.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/inlining/multi_param.exp index a6db107b3b9ca..3aafe113fad6b 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/inlining/multi_param.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/inlining/multi_param.exp @@ -1,3 +1,12 @@ -processed 2 tasks - -==> Compiler v2 delivered same results! +comparison between v1 and v2 failed: += processed 2 tasks += ++ task 0 'publish'. lines 1-29: ++ warning: Unused parameter `elem`. Consider removing or prefixing with an underscore: `_elem` ++ ┌─ TEMPFILE:17:30 ++ │ ++ 17 │ for_each_ref_mut(v, |elem| { ++ │ ^^^^ ++ ++ ++ diff --git a/third_party/move/move-compiler-v2/transactional-tests/tests/no-safety/simple_map_keys.exp b/third_party/move/move-compiler-v2/transactional-tests/tests/no-safety/simple_map_keys.exp index a6db107b3b9ca..618e0759dca45 100644 --- a/third_party/move/move-compiler-v2/transactional-tests/tests/no-safety/simple_map_keys.exp +++ b/third_party/move/move-compiler-v2/transactional-tests/tests/no-safety/simple_map_keys.exp @@ -1,3 +1,12 @@ -processed 2 tasks - -==> Compiler v2 delivered same results! +comparison between v1 and v2 failed: += processed 2 tasks += ++ task 0 'publish'. lines 1-48: ++ warning: Unused parameter `e`. 
Consider removing or prefixing with an underscore: `_e` ++ ┌─ TEMPFILE:13:29 ++ │ ++ 13 │ map_ref(&map.data, |e| { ++ │ ^ ++ ++ ++ diff --git a/third_party/move/move-model/src/symbol.rs b/third_party/move/move-model/src/symbol.rs index 50741f536d92c..f1e09b039774e 100644 --- a/third_party/move/move-model/src/symbol.rs +++ b/third_party/move/move-model/src/symbol.rs @@ -82,6 +82,10 @@ impl SymbolPool { pub fn string(&self, sym: Symbol) -> Rc { self.inner.borrow().strings[sym.0].clone() } + + pub fn symbol_starts_with_underscore(&self, sym: Symbol) -> bool { + self.string(sym).starts_with('_') + } } impl Default for SymbolPool { From 3557fb35fd0708ef8c5d76fc6d78738a94f7b4f0 Mon Sep 17 00:00:00 2001 From: "Brian R. Murphy" <132495859+brmataptos@users.noreply.github.com> Date: Wed, 31 Jan 2024 12:23:29 -0800 Subject: [PATCH 28/44] Improve Diagnostic ordering further for multiple diagnostics with some of the same labels (#11717) Improve Diagnostic ordering for move-model and move-compiler-v2 diagnostics with multiple labels. --- .../checking/inlining/private_call_3.exp | 96 +++++++++---------- .../typing/bad_type_argument_arity_const.exp | 64 ++++++------- .../typing/constant_invalid_base_type.exp | 54 +++++------ .../typing/constant_non_base_type.exp | 22 ++--- .../folding/bad_type_argument_arity_const.exp | 64 ++++++------- .../tests/folding/constants_blocks.exp | 24 ++--- .../return_borrowed_local_invalid.exp | 80 ++++++++-------- third_party/move/move-model/src/model.rs | 56 ++++------- 8 files changed, 221 insertions(+), 239 deletions(-) diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/private_call_3.exp b/third_party/move/move-compiler-v2/tests/checking/inlining/private_call_3.exp index 67d9226e41192..534964c3491d3 100644 --- a/third_party/move/move-compiler-v2/tests/checking/inlining/private_call_3.exp +++ b/third_party/move/move-compiler-v2/tests/checking/inlining/private_call_3.exp @@ -11,17 +11,6 @@ error: `public(friend)` inline function `0x42::m::friend_accessible` cannot be c 81 │ m::friend_accessible(); │ ---------------------- called here -error: `public(friend)` inline function `0x42::m::friend_accessible` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` - ┌─ tests/checking/inlining/private_call_3.move:12:5 - │ - 12 │ ╭ public(friend) inline fun friend_accessible(): u64 { - 13 │ │ bar() - 14 │ │ } - │ ╰─────^ - · │ -101 │ m::friend_accessible(); - │ ---------------------- called here - error: `public(friend)` inline function `0x42::m::friend_accessible` cannot be called from 0x42::o_nonfriend::inaccessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` ┌─ tests/checking/inlining/private_call_3.move:12:5 │ @@ -33,6 +22,17 @@ error: `public(friend)` inline function `0x42::m::friend_accessible` cannot be c 91 │ m::friend_accessible(); │ ---------------------- called here +error: `public(friend)` inline function `0x42::m::friend_accessible` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` + ┌─ tests/checking/inlining/private_call_3.move:12:5 + │ + 12 │ ╭ public(friend) inline fun friend_accessible(): u64 { + 13 │ │ bar() + 14 │ │ } + │ ╰─────^ + · │ +101 │ m::friend_accessible(); + │ ---------------------- called here + error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_nonfriend::foofunction `inline ` because 
module `0x42::o_nonfriend` is not a `friend` of `0x42::m` ┌─ tests/checking/inlining/private_call_3.move:16:5 │ @@ -42,15 +42,6 @@ error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_no 83 │ m::bar(); │ -------- called here -error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` - ┌─ tests/checking/inlining/private_call_3.move:16:5 - │ - 16 │ public(friend) fun bar(): u64 { 42 } - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - · -103 │ m::bar(); - │ -------- called here - error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_nonfriend::inaccessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` ┌─ tests/checking/inlining/private_call_3.move:16:5 │ @@ -60,6 +51,15 @@ error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_no 93 │ m::bar(); │ -------- called here +error: `public(friend)` function `0x42::m::bar` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m` + ┌─ tests/checking/inlining/private_call_3.move:16:5 + │ + 16 │ public(friend) fun bar(): u64 { 42 } + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + · +103 │ m::bar(); + │ -------- called here + error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o::foofunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:28:5 │ @@ -71,7 +71,7 @@ error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` c 45 │ m_nonfriend::friend_accessible(); │ -------------------------------- called here -error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o::friend_accessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` +error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o::inaccessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:28:5 │ 28 │ ╭ public(friend) inline fun friend_accessible(): u64 { @@ -79,10 +79,10 @@ error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` c 30 │ │ } │ ╰─────^ · │ -65 │ m_nonfriend::friend_accessible(); +55 │ m_nonfriend::friend_accessible(); │ -------------------------------- called here -error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o::inaccessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` +error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o::friend_accessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:28:5 │ 28 │ ╭ public(friend) inline fun friend_accessible(): u64 { @@ -90,7 +90,7 @@ error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` c 30 │ │ } │ ╰─────^ · │ -55 │ m_nonfriend::friend_accessible(); +65 │ m_nonfriend::friend_accessible(); │ -------------------------------- called here error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o_nonfriend::foofunction `inline ` because 
module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` @@ -104,17 +104,6 @@ error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` c 82 │ m_nonfriend::friend_accessible(); │ -------------------------------- called here -error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` - ┌─ tests/checking/inlining/private_call_3.move:28:5 - │ - 28 │ ╭ public(friend) inline fun friend_accessible(): u64 { - 29 │ │ bar() - 30 │ │ } - │ ╰─────^ - · │ -102 │ m_nonfriend::friend_accessible(); - │ -------------------------------- called here - error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o_nonfriend::inaccessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:28:5 │ @@ -126,6 +115,17 @@ error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` c 92 │ m_nonfriend::friend_accessible(); │ -------------------------------- called here +error: `public(friend)` inline function `0x42::m_nonfriend::friend_accessible` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` + ┌─ tests/checking/inlining/private_call_3.move:28:5 + │ + 28 │ ╭ public(friend) inline fun friend_accessible(): u64 { + 29 │ │ bar() + 30 │ │ } + │ ╰─────^ + · │ +102 │ m_nonfriend::friend_accessible(); + │ -------------------------------- called here + error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o::foofunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:32:5 │ @@ -135,22 +135,22 @@ error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 47 │ m_nonfriend::bar() │ ------------------ called here -error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o::friend_accessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` +error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o::inaccessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:32:5 │ 32 │ public(friend) fun bar(): u64 { 42 } │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ · -67 │ m_nonfriend::bar() +57 │ m_nonfriend::bar() │ ------------------ called here -error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o::inaccessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` +error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o::friend_accessiblefunction `inline ` because module `0x42::o` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:32:5 │ 32 │ public(friend) fun bar(): u64 { 42 } │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ · -57 │ m_nonfriend::bar() +67 │ m_nonfriend::bar() │ ------------------ called here error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o_nonfriend::foofunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` @@ -162,15 +162,6 @@ error: `public(friend)` function 
`0x42::m_nonfriend::bar` cannot be called from 84 │ m_nonfriend::bar() │ ------------------ called here -error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` - ┌─ tests/checking/inlining/private_call_3.move:32:5 - │ - 32 │ public(friend) fun bar(): u64 { 42 } - │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - · -104 │ m_nonfriend::bar() - │ ------------------ called here - error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o_nonfriend::inaccessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` ┌─ tests/checking/inlining/private_call_3.move:32:5 │ @@ -180,6 +171,15 @@ error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 94 │ m_nonfriend::bar() │ ------------------ called here +error: `public(friend)` function `0x42::m_nonfriend::bar` cannot be called from 0x42::o_nonfriend::friend_accessiblefunction `inline ` because module `0x42::o_nonfriend` is not a `friend` of `0x42::m_nonfriend` + ┌─ tests/checking/inlining/private_call_3.move:32:5 + │ + 32 │ public(friend) fun bar(): u64 { 42 } + │ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + · +104 │ m_nonfriend::bar() + │ ------------------ called here + error: inline function `0x42::o::inaccessible` cannot be called from function `0x42::n::test` because it is private to module `0x42::o` ┌─ tests/checking/inlining/private_call_3.move:50:5 │ diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/bad_type_argument_arity_const.exp b/third_party/move/move-compiler-v2/tests/checking/typing/bad_type_argument_arity_const.exp index 8a765ec286cac..4fa0556d47d86 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/bad_type_argument_arity_const.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/bad_type_argument_arity_const.exp @@ -6,14 +6,6 @@ error: type argument count mismatch (expected 1 but got 0) 6 │ const S1: S = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/checking/typing/bad_type_argument_arity_const.move:6:19 - │ -6 │ const S1: S = S { f: 0 }; - │ --------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/bad_type_argument_arity_const.move:6:19 │ @@ -22,20 +14,20 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/checking/typing/bad_type_argument_arity_const.move:6:19 + │ +6 │ const S1: S = S { f: 0 }; + │ --------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: type argument count mismatch (expected 1 but got 0) ┌─ tests/checking/typing/bad_type_argument_arity_const.move:7:15 │ 7 │ const S2: S<> = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/checking/typing/bad_type_argument_arity_const.move:7:21 - │ -7 │ const S2: S<> = S { f: 0 }; - │ ----------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/bad_type_argument_arity_const.move:7:21 │ @@ -44,20 +36,20 @@ error: Not a valid constant expression. 
│ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/checking/typing/bad_type_argument_arity_const.move:7:21 + │ +7 │ const S2: S<> = S { f: 0 }; + │ ----------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: type argument count mismatch (expected 1 but got 2) ┌─ tests/checking/typing/bad_type_argument_arity_const.move:8:15 │ 8 │ const S3: S = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/checking/typing/bad_type_argument_arity_const.move:8:30 - │ -8 │ const S3: S = S { f: 0 }; - │ -------------------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/bad_type_argument_arity_const.move:8:30 │ @@ -66,20 +58,20 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/checking/typing/bad_type_argument_arity_const.move:8:30 + │ +8 │ const S3: S = S { f: 0 }; + │ -------------------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: type argument count mismatch (expected 1 but got 2) ┌─ tests/checking/typing/bad_type_argument_arity_const.move:9:17 │ 9 │ const S4: S> = S { f: S { f: 0 } }; │ ^ -error: Invalid type for constant - ┌─ tests/checking/typing/bad_type_argument_arity_const.move:9:33 - │ -9 │ const S4: S> = S { f: S { f: 0 } }; - │ ----------------------------^^^^^^^^^^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/bad_type_argument_arity_const.move:9:33 │ @@ -88,3 +80,11 @@ error: Not a valid constant expression. │ │ │ │ │ Invalid call or operation in constant │ Invalid call or operation in constant + +error: Invalid type for constant + ┌─ tests/checking/typing/bad_type_argument_arity_const.move:9:33 + │ +9 │ const S4: S> = S { f: S { f: 0 } }; + │ ----------------------------^^^^^^^^^^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/constant_invalid_base_type.exp b/third_party/move/move-compiler-v2/tests/checking/typing/constant_invalid_base_type.exp index 1785014167b76..a4828d3de571c 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/constant_invalid_base_type.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/constant_invalid_base_type.exp @@ -1,13 +1,5 @@ Diagnostics: -error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:6:24 - │ -6 │ const C1: signer = abort 0; - │ -------------------^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/constant_invalid_base_type.move:6:24 │ @@ -17,10 +9,10 @@ error: Not a valid constant expression. 
│ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:7:19 + ┌─ tests/checking/typing/constant_invalid_base_type.move:6:24 │ -7 │ const C2: S = S{}; - │ --------------^^^- +6 │ const C1: signer = abort 0; + │ -------------------^^^^^^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -33,9 +25,9 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:8:19 + ┌─ tests/checking/typing/constant_invalid_base_type.move:7:19 │ -8 │ const C3: R = R{}; +7 │ const C2: S = S{}; │ --------------^^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -49,10 +41,10 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:9:27 + ┌─ tests/checking/typing/constant_invalid_base_type.move:8:19 │ -9 │ const C4: vector = abort 0; - │ ----------------------^^^^^^^- +8 │ const C3: R = R{}; + │ --------------^^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -65,12 +57,12 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:10:27 - │ -10 │ const C5: vector = abort 0; - │ ----------------------^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + ┌─ tests/checking/typing/constant_invalid_base_type.move:9:27 + │ +9 │ const C4: vector = abort 0; + │ ----------------------^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. error: Not a valid constant expression. ┌─ tests/checking/typing/constant_invalid_base_type.move:10:27 @@ -81,10 +73,10 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:11:35 + ┌─ tests/checking/typing/constant_invalid_base_type.move:10:27 │ -11 │ const C6: vector> = abort 0; - │ ------------------------------^^^^^^^- +10 │ const C5: vector = abort 0; + │ ----------------------^^^^^^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -97,9 +89,9 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_invalid_base_type.move:12:35 + ┌─ tests/checking/typing/constant_invalid_base_type.move:11:35 │ -12 │ const C7: vector> = abort 0; +11 │ const C6: vector> = abort 0; │ ------------------------------^^^^^^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -111,3 +103,11 @@ error: Not a valid constant expression. 
│ ^^^^^^^ │ │ │ Invalid call or operation in constant + +error: Invalid type for constant + ┌─ tests/checking/typing/constant_invalid_base_type.move:12:35 + │ +12 │ const C7: vector> = abort 0; + │ ------------------------------^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. diff --git a/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp b/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp index 23b87aaa630a5..b1c08a78c1fc1 100644 --- a/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp +++ b/third_party/move/move-compiler-v2/tests/checking/typing/constant_non_base_type.exp @@ -1,13 +1,5 @@ Diagnostics: -error: Invalid type for constant - ┌─ tests/checking/typing/constant_non_base_type.move:3:22 - │ -3 │ const C1: &u64 = &0; - │ -----------------^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/checking/typing/constant_non_base_type.move:3:22 │ @@ -17,10 +9,10 @@ error: Not a valid constant expression. │ Invalid call or operation in constant error: Invalid type for constant - ┌─ tests/checking/typing/constant_non_base_type.move:4:26 + ┌─ tests/checking/typing/constant_non_base_type.move:3:22 │ -4 │ const C2: &mut u64 = &0; - │ ---------------------^^- +3 │ const C1: &u64 = &0; + │ -----------------^^- │ │ │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. @@ -32,6 +24,14 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/checking/typing/constant_non_base_type.move:4:26 + │ +4 │ const C2: &mut u64 = &0; + │ ---------------------^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: mutability mismatch (&mut != &) ┌─ tests/checking/typing/constant_non_base_type.move:4:26 │ diff --git a/third_party/move/move-compiler-v2/tests/folding/bad_type_argument_arity_const.exp b/third_party/move/move-compiler-v2/tests/folding/bad_type_argument_arity_const.exp index fdb045589b2a1..e3e26a77e6c71 100644 --- a/third_party/move/move-compiler-v2/tests/folding/bad_type_argument_arity_const.exp +++ b/third_party/move/move-compiler-v2/tests/folding/bad_type_argument_arity_const.exp @@ -6,14 +6,6 @@ error: type argument count mismatch (expected 1 but got 0) 6 │ const S1: S = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/folding/bad_type_argument_arity_const.move:6:19 - │ -6 │ const S1: S = S { f: 0 }; - │ --------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/folding/bad_type_argument_arity_const.move:6:19 │ @@ -22,20 +14,20 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/folding/bad_type_argument_arity_const.move:6:19 + │ +6 │ const S1: S = S { f: 0 }; + │ --------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. 
+ error: type argument count mismatch (expected 1 but got 0) ┌─ tests/folding/bad_type_argument_arity_const.move:7:15 │ 7 │ const S2: S<> = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/folding/bad_type_argument_arity_const.move:7:21 - │ -7 │ const S2: S<> = S { f: 0 }; - │ ----------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/folding/bad_type_argument_arity_const.move:7:21 │ @@ -44,20 +36,20 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/folding/bad_type_argument_arity_const.move:7:21 + │ +7 │ const S2: S<> = S { f: 0 }; + │ ----------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: type argument count mismatch (expected 1 but got 2) ┌─ tests/folding/bad_type_argument_arity_const.move:8:15 │ 8 │ const S3: S = S { f: 0 }; │ ^ -error: Invalid type for constant - ┌─ tests/folding/bad_type_argument_arity_const.move:8:30 - │ -8 │ const S3: S = S { f: 0 }; - │ -------------------------^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/folding/bad_type_argument_arity_const.move:8:30 │ @@ -66,20 +58,20 @@ error: Not a valid constant expression. │ │ │ Invalid call or operation in constant +error: Invalid type for constant + ┌─ tests/folding/bad_type_argument_arity_const.move:8:30 + │ +8 │ const S3: S = S { f: 0 }; + │ -------------------------^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: type argument count mismatch (expected 1 but got 2) ┌─ tests/folding/bad_type_argument_arity_const.move:9:17 │ 9 │ const S4: S> = S { f: S { f: 0 } }; │ ^ -error: Invalid type for constant - ┌─ tests/folding/bad_type_argument_arity_const.move:9:33 - │ -9 │ const S4: S> = S { f: S { f: 0 } }; - │ ----------------------------^^^^^^^^^^^^^^^^^^^- - │ │ - │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/folding/bad_type_argument_arity_const.move:9:33 │ @@ -88,3 +80,11 @@ error: Not a valid constant expression. │ │ │ │ │ Invalid call or operation in constant │ Invalid call or operation in constant + +error: Invalid type for constant + ┌─ tests/folding/bad_type_argument_arity_const.move:9:33 + │ +9 │ const S4: S> = S { f: S { f: 0 } }; + │ ----------------------------^^^^^^^^^^^^^^^^^^^- + │ │ + │ Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. diff --git a/third_party/move/move-compiler-v2/tests/folding/constants_blocks.exp b/third_party/move/move-compiler-v2/tests/folding/constants_blocks.exp index bd2dff3318a5d..407b7fe439c0b 100644 --- a/third_party/move/move-compiler-v2/tests/folding/constants_blocks.exp +++ b/third_party/move/move-compiler-v2/tests/folding/constants_blocks.exp @@ -32,18 +32,6 @@ error: Not a valid constant expression. 
│ │ │ Invalid call or operation in constant -error: Invalid type for constant - ┌─ tests/folding/constants_blocks.move:11:20 - │ -11 │ ╭ const C7: () = { - │ ╭──────────────────────^ -12 │ │ │ let x = 0; -13 │ │ │ let y = 1; -14 │ │ │ x + y; -15 │ │ │ }; - │ ╰─│─────^ - │ ╰──────' Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. - error: Not a valid constant expression. ┌─ tests/folding/constants_blocks.move:11:20 │ @@ -62,6 +50,18 @@ error: Not a valid constant expression. │ ╰─│─────' Invalid statement or expression in constant │ ╰─────' Invalid statement or expression in constant +error: Invalid type for constant + ┌─ tests/folding/constants_blocks.move:11:20 + │ +11 │ ╭ const C7: () = { + │ ╭──────────────────────^ +12 │ │ │ let x = 0; +13 │ │ │ let y = 1; +14 │ │ │ x + y; +15 │ │ │ }; + │ ╰─│─────^ + │ ╰──────' Expected one of `u8`, `u16, `u32`, `u64`, `u128`, `u256`, `bool`, `address`, or `vector<_>` with valid element type. + error: Not a valid constant expression. ┌─ tests/folding/constants_blocks.move:16:25 │ diff --git a/third_party/move/move-compiler-v2/tests/reference-safety/v1-tests/return_borrowed_local_invalid.exp b/third_party/move/move-compiler-v2/tests/reference-safety/v1-tests/return_borrowed_local_invalid.exp index d58ba8d5eebc3..5e324db087c18 100644 --- a/third_party/move/move-compiler-v2/tests/reference-safety/v1-tests/return_borrowed_local_invalid.exp +++ b/third_party/move/move-compiler-v2/tests/reference-safety/v1-tests/return_borrowed_local_invalid.exp @@ -1,125 +1,125 @@ Diagnostics: -error: cannot return a reference derived from local `s1` since it is not a parameter +error: cannot return a reference derived from local `v1` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, + │ ------- previous mutable local borrow 20 │ │ &v2, 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), -23 │ │ &mut s1.f, - │ │ --------- - │ │ │ │ - │ │ │ previous mutable local borrow - │ │ used by mutable field borrow -24 │ │ &s2.f, + · │ 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `s2` since it is not a parameter +error: cannot return a reference derived from local `v2` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, + │ │ --- previous local borrow 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), -23 │ │ &mut s1.f, -24 │ │ &s2.f, - │ │ ----- - │ │ ││ - │ │ │previous local borrow - │ │ used by field borrow + · │ 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `s3` since it is not a parameter +error: cannot return a reference derived from local `v3` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, 21 │ │ id_mut(&mut v3), + │ │ --------------- + │ │ │ │ + │ │ │ previous mutable local borrow + │ │ used by mutable call result 22 │ │ id(&v4), · │ 25 │ │ id_mut(&mut s3.f), - │ │ --------- - │ │ │ │ - │ │ │ previous mutable local borrow - │ │ used by mutable field borrow 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `s4` since it is not a parameter +error: cannot return a reference derived from local `v4` since it is not a parameter ┌─ 
tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), + │ │ ------- + │ │ │ │ + │ │ │ previous local borrow + │ │ used by call result · │ 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) - │ │ ----- - │ │ ││ - │ │ │previous local borrow - │ │ used by field borrow │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `v1` since it is not a parameter +error: cannot return a reference derived from local `s1` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, - │ ------- previous mutable local borrow 20 │ │ &v2, 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), - · │ +23 │ │ &mut s1.f, + │ │ --------- + │ │ │ │ + │ │ │ previous mutable local borrow + │ │ used by mutable field borrow +24 │ │ &s2.f, 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `v2` since it is not a parameter +error: cannot return a reference derived from local `s2` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, - │ │ --- previous local borrow 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), - · │ +23 │ │ &mut s1.f, +24 │ │ &s2.f, + │ │ ----- + │ │ ││ + │ │ │previous local borrow + │ │ used by field borrow 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `v3` since it is not a parameter +error: cannot return a reference derived from local `s3` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, 21 │ │ id_mut(&mut v3), - │ │ --------------- - │ │ │ │ - │ │ │ previous mutable local borrow - │ │ used by mutable call result 22 │ │ id(&v4), · │ 25 │ │ id_mut(&mut s3.f), + │ │ --------- + │ │ │ │ + │ │ │ previous mutable local borrow + │ │ used by mutable field borrow 26 │ │ id(&s4.f)) │ ╰──────────────────^ returned here -error: cannot return a reference derived from local `v4` since it is not a parameter +error: cannot return a reference derived from local `s4` since it is not a parameter ┌─ tests/reference-safety/v1-tests/return_borrowed_local_invalid.move:19:9 │ 19 │ ╭ (&mut v1, 20 │ │ &v2, 21 │ │ id_mut(&mut v3), 22 │ │ id(&v4), - │ │ ------- - │ │ │ │ - │ │ │ previous local borrow - │ │ used by call result · │ 25 │ │ id_mut(&mut s3.f), 26 │ │ id(&s4.f)) + │ │ ----- + │ │ ││ + │ │ │previous local borrow + │ │ used by field borrow │ ╰──────────────────^ returned here diff --git a/third_party/move/move-model/src/model.rs b/third_party/move/move-model/src/model.rs index 5a7636746eea1..fcbcead4451f5 100644 --- a/third_party/move/move-model/src/model.rs +++ b/third_party/move/move-model/src/model.rs @@ -1130,48 +1130,30 @@ impl GlobalEnv { } } + // Label comparison within a list of labels for a given diagnostic, which orders by priority + // first, then files and line numbers. + fn cmp_label_priority(label1: &Label, label2: &Label) -> Ordering { + use LabelStyle::*; + match (label1.style, label2.style) { + (Primary, Secondary) => Ordering::Less, + (Secondary, Primary) => Ordering::Greater, + (_, _) => GlobalEnv::cmp_label(label1, label2), + } + } + // Comparison for sets of labels that orders them based on program ordering, using // the earliest label found. 
If a `Primary` label is found then `Secondary` labels // are ignored, but if all are `Secondary` then the earliest of those is used in // the ordering. fn cmp_labels(labels1: &[Label], labels2: &[Label]) -> Ordering { - let primary1 = labels1 - .iter() - .filter(|l| l.style == LabelStyle::Primary) - .min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - let primary2 = labels2 - .iter() - .filter(|l| l.style == LabelStyle::Primary) - .min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - match (primary1, primary2) { - (Some(prim1), Some(prim2)) => GlobalEnv::cmp_label(prim1, prim2), - (Some(prim1), None) => { - let second2 = labels2.iter().min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - if let Some(sec2) = second2 { - GlobalEnv::cmp_label(prim1, sec2) - } else { - Ordering::Less // Label beats none - } - }, - (None, Some(prim2)) => { - let second1 = labels1.iter().min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - if let Some(sec1) = second1 { - GlobalEnv::cmp_label(sec1, prim2) - } else { - Ordering::Greater // None is beaten by Label - } - }, - (None, None) => { - let second1 = labels1.iter().min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - let second2 = labels2.iter().min_by(|l1, l2| GlobalEnv::cmp_label(l1, l2)); - match (second1, second2) { - (Some(sec1), Some(sec2)) => GlobalEnv::cmp_label(sec1, sec2), - (Some(_), None) => Ordering::Less, // Label beats None - (None, Some(_)) => Ordering::Greater, // None is beaten by Label - (None, None) => Ordering::Equal, - } - }, - } + let mut sorted_labels1 = labels1.iter().collect_vec(); + sorted_labels1.sort_by(|l1, l2| GlobalEnv::cmp_label_priority(l1, l2)); + let mut sorted_labels2 = labels2.iter().collect_vec(); + sorted_labels2.sort_by(|l1, l2| GlobalEnv::cmp_label_priority(l1, l2)); + std::iter::zip(sorted_labels1, sorted_labels2) + .map(|(l1, l2)| GlobalEnv::cmp_label(l1, l2)) + .find(|r| Ordering::Equal != *r) + .unwrap_or(Ordering::Equal) } /// Writes accumulated diagnostics that pass through `filter` From 9322aae3b72af9739c469537e1683686c36ac052 Mon Sep 17 00:00:00 2001 From: Rati Gelashvili Date: Wed, 31 Jan 2024 15:59:49 -0500 Subject: [PATCH 29/44] [Executor] Refactor errors, prepare for resource group bcs fallback (#11832) --- aptos-move/aptos-aggregator/src/types.rs | 2 +- aptos-move/aptos-vm/src/block_executor/mod.rs | 6 +- aptos-move/block-executor/src/errors.rs | 24 +- aptos-move/block-executor/src/executor.rs | 221 +++++++++--------- .../src/proptest_types/baseline.rs | 9 +- .../src/proptest_types/tests.rs | 6 +- .../src/proptest_types/types.rs | 14 +- .../src/txn_last_input_output.rs | 35 ++- 8 files changed, 164 insertions(+), 153 deletions(-) diff --git a/aptos-move/aptos-aggregator/src/types.rs b/aptos-move/aptos-aggregator/src/types.rs index c152902aba68c..8d6c578d17bf8 100644 --- a/aptos-move/aptos-aggregator/src/types.rs +++ b/aptos-move/aptos-aggregator/src/types.rs @@ -36,7 +36,7 @@ impl PanicOr { pub fn code_invariant_error(message: M) -> PanicError { let msg = format!( - "Delayed logic code invariant broken (there is a bug in the code), {:?}", + "Delayed field / resource group code invariant broken (a bug in the code), {:?}", message ); error!("{}", msg); diff --git a/aptos-move/aptos-vm/src/block_executor/mod.rs b/aptos-move/aptos-vm/src/block_executor/mod.rs index 59aec76825b63..e7bd292fb3c61 100644 --- a/aptos-move/aptos-vm/src/block_executor/mod.rs +++ b/aptos-move/aptos-vm/src/block_executor/mod.rs @@ -13,7 +13,7 @@ use aptos_aggregator::{ types::DelayedFieldID, }; use aptos_block_executor::{ - errors::Error, 
executor::BlockExecutor, + errors::BlockExecutionError, executor::BlockExecutor, task::TransactionOutput as BlockExecutorTransactionOutput, txn_commit_hook::TransactionCommitHook, types::InputOutputKey, }; @@ -426,13 +426,13 @@ impl BlockAptosVM { Ok(BlockOutput::new(output_vec)) }, - Err(Error::FallbackToSequential(e)) => { + Err(BlockExecutionError::FallbackToSequential(e)) => { unreachable!( "[Execution]: Must be handled by sequential fallback: {:?}", e ) }, - Err(Error::UserError(err)) => Err(err), + Err(BlockExecutionError::FatalVMError((err, _))) => Err(err), } } } diff --git a/aptos-move/block-executor/src/errors.rs b/aptos-move/block-executor/src/errors.rs index ead50d2c7749f..8b23c93efe567 100644 --- a/aptos-move/block-executor/src/errors.rs +++ b/aptos-move/block-executor/src/errors.rs @@ -3,6 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use aptos_aggregator::types::PanicOr; +use aptos_mvhashmap::types::TxnIndex; use aptos_types::aggregator::PanicError; #[derive(Clone, Debug, PartialEq, Eq)] @@ -13,28 +14,29 @@ pub enum IntentionalFallbackToSequential { /// TODO: (short-mid term) relax the limitation, and (mid-long term) provide proper multi-versioning /// for code (like data) for the cache. ModulePathReadWrite, - /// We defensively check certain resource group related invariant violations. - ResourceGroupError(String), + /// We defensively check resource group serialization error in the commit phase. + /// TODO: should trigger invariant violation in the transaction itself. + ResourceGroupSerializationError(String), } #[derive(Clone, Debug, PartialEq, Eq)] -pub enum Error { +pub enum BlockExecutionError { FallbackToSequential(PanicOr), - /// Execution of a thread yields a non-recoverable error, such error will be propagated back to - /// the caller (leading to the block execution getting aborted). TODO: revisit name (UserError). - UserError(E), + /// Execution of a thread yields a non-recoverable error from the VM. Such an error will be propagated + /// back to the caller (leading to the block execution getting aborted). + FatalVMError((E, TxnIndex)), } -pub type Result = ::std::result::Result>; +pub type BlockExecutionResult = Result>; -impl From> for Error { +impl From> for BlockExecutionError { fn from(err: PanicOr) -> Self { - Error::FallbackToSequential(err) + BlockExecutionError::FallbackToSequential(err) } } -impl From for Error { +impl From for BlockExecutionError { fn from(err: PanicError) -> Self { - Error::FallbackToSequential(err.into()) + BlockExecutionError::FallbackToSequential(err.into()) } } diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs index a1cb8daac55c9..0c67be88bab0c 100644 --- a/aptos-move/block-executor/src/executor.rs +++ b/aptos-move/block-executor/src/executor.rs @@ -41,7 +41,7 @@ use aptos_types::{ transaction::{BlockExecutableTransaction as Transaction, BlockOutput}, write_set::{TransactionWrite, WriteOp}, }; -use aptos_vm_logging::{clear_speculative_txn_logs, init_speculative_logs}; +use aptos_vm_logging::{alert, clear_speculative_txn_logs, init_speculative_logs, prelude::*}; use bytes::Bytes; use claims::assert_none; use core::panic; @@ -102,7 +102,7 @@ where executor: &E, base_view: &S, latest_view: ParallelState, - ) -> ::std::result::Result> { + ) -> Result> { let _timer = TASK_EXECUTE_SECONDS.start_timer(); let txn = &signature_verified_block[idx_to_execute as usize]; @@ -122,7 +122,7 @@ where // For tracking whether the recent execution wrote outside of the previous write/delta set. 
let mut updates_outside = false; - let mut apply_updates = |output: &E::Output| -> ::std::result::Result<(), PanicError> { + let mut apply_updates = |output: &E::Output| -> Result<(), PanicError> { for (group_key, group_metadata_op, group_ops) in output.resource_group_write_set().into_iter() { @@ -239,7 +239,7 @@ where }, ExecutionStatus::Abort(err) => { // Record the status indicating abort. - ExecutionStatus::Abort(Error::UserError(err)) + ExecutionStatus::Abort(BlockExecutionError::FatalVMError((err, idx_to_execute))) }, ExecutionStatus::DirectWriteSetTransactionNotCapableError => { // TODO[agg_v2](fix) decide how to handle/propagate. @@ -289,7 +289,7 @@ where idx_to_validate: TxnIndex, last_input_output: &TxnLastInputOutput, versioned_cache: &MVHashMap, - ) -> ::std::result::Result { + ) -> Result { let _timer = TASK_VALIDATE_SECONDS.start_timer(); let read_set = last_input_output .read_set(idx_to_validate) @@ -376,7 +376,7 @@ where txn_idx: TxnIndex, versioned_cache: &MVHashMap, last_input_output: &TxnLastInputOutput, - ) -> ::std::result::Result { + ) -> Result { let read_set = last_input_output .read_set(txn_idx) .expect("Read set must be recorded"); @@ -419,19 +419,14 @@ where versioned_cache: &MVHashMap, scheduler_task: &mut SchedulerTask, last_input_output: &TxnLastInputOutput, - shared_commit_state: &ExplicitSyncWrapper<( - BlockGasLimitProcessor, - Option>, - )>, + shared_commit_state: &ExplicitSyncWrapper>, base_view: &S, start_shared_counter: u32, shared_counter: &AtomicU32, executor: &E, block: &[T], - ) -> ::std::result::Result<(), PanicOr> { - let mut shared_commit_state_guard = shared_commit_state.acquire(); - let (block_limit_processor, shared_maybe_error) = - shared_commit_state_guard.dereference_mut(); + ) -> BlockExecutionResult<(), E::Error> { + let mut block_limit_processor = shared_commit_state.acquire(); while let Some((txn_idx, incarnation)) = scheduler.try_commit() { if !Self::validate_commit_ready(txn_idx, versioned_cache, last_input_output)? { @@ -518,26 +513,24 @@ where Ok(finalized_group) => { // finalize_group already applies the deletions. if finalized_group.is_empty() != metadata_is_deletion { - return Err(Error::FallbackToSequential(resource_group_error( - format!( + return Err(code_invariant_error(format!( "Group is empty = {} but op is deletion = {} in parallel execution", finalized_group.is_empty(), metadata_is_deletion - ), - ))); + ))); } Ok(finalized_group) }, - Err(e) => Err(Error::FallbackToSequential(resource_group_error(format!( + Err(e) => Err(code_invariant_error(format!( "Error committing resource group {:?}", e - )))), + ))), } }; let group_metadata_ops = last_input_output.group_metadata_ops(txn_idx); let mut finalized_groups = Vec::with_capacity(group_metadata_ops.len()); - let mut maybe_err = None; + let mut maybe_code_error = Ok(()); for (group_key, metadata_op) in group_metadata_ops.into_iter() { // finalize_group copies Arc of values and the Tags (TODO: optimize as needed). 
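
// A stand-alone sketch (simplified names) of the accumulate-or-stop pattern used
// in the commit path here: the first invariant violation is kept, later groups
// are not finalized, but everything finalized so far is still recorded before
// the error propagates.
fn finalize_groups_sketch(keys: Vec<u32>) -> (Vec<u32>, Result<(), String>) {
    let mut finalized = Vec::with_capacity(keys.len());
    let mut maybe_code_error: Result<(), String> = Ok(());
    for key in keys {
        // Stand-in for versioned_cache.group_data().finalize_group(..).
        match finalize_one_sketch(key) {
            Ok(group) => finalized.push(group),
            Err(e) => {
                maybe_code_error = Err(e);
                break;
            },
        }
    }
    (finalized, maybe_code_error)
}

fn finalize_one_sketch(key: u32) -> Result<u32, String> {
    if key == 0 {
        Err("Error committing resource group".to_string())
    } else {
        Ok(key)
    }
}
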
let finalized_result = versioned_cache @@ -548,16 +541,13 @@ where finalized_groups.push((group_key, metadata_op, finalized_group)); }, Err(err) => { - maybe_err = Some(err); + maybe_code_error = Err(err.into()); break; }, } - if maybe_err.is_some() { - break; - } } - if maybe_err.is_none() { + if maybe_code_error.is_ok() { if let Some(group_reads_needing_delayed_field_exchange) = last_input_output.group_reads_needing_delayed_field_exchange(txn_idx) { @@ -572,45 +562,38 @@ where finalized_groups.push((group_key, metadata_op, finalized_group)); }, Err(err) => { - maybe_err = Some(err); + maybe_code_error = Err(err.into()); break; }, } - if maybe_err.is_some() { + if maybe_code_error.is_err() { break; } } } } - last_input_output.record_finalized_group(txn_idx, finalized_groups); - - maybe_err = maybe_err.or_else(|| last_input_output.maybe_execution_error(txn_idx)); + // We return an error (leads to halting the execution) in the following cases: + // 1) Code invariant violation. + // 2) We detect module read/write intersection + // 3) A transaction triggered an Abort - // We `halt` the execution in the following 4 cases: - // 1) We detect module read/write intersection - // 2) A transaction triggered an Abort - // 3) All transactions are scheduled for committing - // 4) We skip_rest after a transaction + last_input_output.record_finalized_group(txn_idx, finalized_groups); // We cover cases 1 and 2 here - if let Some(err) = maybe_err { - *shared_maybe_error = Some(err); - if scheduler.halt() { - info!( - "Block execution was aborted due to {:?}", - shared_maybe_error.as_ref().unwrap() - ); - block_limit_processor.finish_parallel_update_counters_and_log_info( - txn_idx + 1, - scheduler.num_txns(), - ); - } // else it's already halted - break; + maybe_code_error.and_then(|_| last_input_output.module_rw_intersection_ok())?; + + // Next, we handle 3, an abort / an unrecoverable VM error. + if let Some(err) = last_input_output.aborted_execution_status(txn_idx) { + assert!(matches!(err, BlockExecutionError::FatalVMError(_))); + return Err(err); } - // We cover cases 3 and 4 here: Either all txn committed, - // or a committed txn caused an early halt. + // While the above propagate errors and lead to eventually halting parallel execution, + // below we may halt the execution without an error in cases when: + // a) all transactions are scheduled for committing + // b) we skip_rest after a transaction + // Either all txn committed, or a committed txn caused an early halt. if txn_idx + 1 == scheduler.num_txns() || last_input_output.block_skips_rest_at_idx(txn_idx) { @@ -795,8 +778,7 @@ where fn serialize_groups( finalized_groups: Vec<(T::Key, T::Value, Vec<(T::Tag, Arc)>)>, - ) -> ::std::result::Result, PanicOr> - { + ) -> Result, PanicOr> { finalized_groups .into_iter() .map(|(group_key, mut metadata_op, finalized_group)| { @@ -811,14 +793,27 @@ where }) .collect(); - bcs::to_bytes(&btree) + let res = bcs::to_bytes(&btree) .map_err(|e| { - resource_group_error(format!("Unexpected resource group error {:?}", e)) + PanicOr::Or( + IntentionalFallbackToSequential::ResourceGroupSerializationError( + format!("Unexpected resource group error {:?}", e), + ), + ) }) .map(|group_bytes| { metadata_op.set_bytes(group_bytes.into()); (group_key, metadata_op) - }) + }); + + if res.is_err() { + alert!("Failed to serialize resource group"); + // Alert first, then log an error with actual btree, to make sure + // printing it can't possibly fail during alert. 
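
// A sketch of the serialization step below, assuming only the public bcs API:
// a BCS failure on a resource group is mapped to the sequential-fallback error
// instead of panicking, and is alerted before the (potentially large) map is logged.
use std::collections::BTreeMap;

fn serialize_group_sketch(group: &BTreeMap<u64, Vec<u8>>) -> Result<Vec<u8>, String> {
    bcs::to_bytes(group).map_err(|e| format!("Unexpected resource group error {:?}", e))
}
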
+ error!("Failed to serialize resource group BTreeMap {:?}", btree); + } + + res }) .collect() } @@ -833,7 +828,7 @@ where last_input_output: &TxnLastInputOutput, base_view: &S, final_results: &ExplicitSyncWrapper>, - ) -> ::std::result::Result<(), PanicOr> { + ) -> Result<(), PanicOr> { let parallel_state = ParallelState::::new( versioned_cache, scheduler, @@ -941,12 +936,9 @@ where base_view: &S, start_shared_counter: u32, shared_counter: &AtomicU32, - shared_commit_state: &ExplicitSyncWrapper<( - BlockGasLimitProcessor, - Option>, - )>, + shared_commit_state: &ExplicitSyncWrapper>, final_results: &ExplicitSyncWrapper>, - ) -> ::std::result::Result<(), PanicOr> { + ) -> BlockExecutionResult<(), E::Error> { // Make executor for each task. TODO: fast concurrent executor. let init_timer = VM_INIT_SECONDS.start_timer(); let executor = E::init(*executor_arguments); @@ -955,25 +947,24 @@ where let _timer = WORK_WITH_TASK_SECONDS.start_timer(); let mut scheduler_task = SchedulerTask::NoTask; - let drain_commit_queue = - || -> ::std::result::Result<(), PanicOr> { - while let Ok(txn_idx) = scheduler.pop_from_commit_queue() { - self.materialize_txn_commit( - txn_idx, - versioned_cache, - scheduler, - start_shared_counter, - shared_counter, - last_input_output, - base_view, - final_results, - )?; - } - Ok(()) - }; + let drain_commit_queue = || -> Result<(), PanicOr> { + while let Ok(txn_idx) = scheduler.pop_from_commit_queue() { + self.materialize_txn_commit( + txn_idx, + versioned_cache, + scheduler, + start_shared_counter, + shared_counter, + last_input_output, + base_view, + final_results, + )?; + } + Ok(()) + }; loop { - // Priorotize committing validated transactions + // Prioritize committing validated transactions while scheduler.should_coordinate_commits() { self.prepare_and_queue_commit_ready_txns( &self.config.onchain.block_gas_limit_type, @@ -1052,7 +1043,7 @@ where executor_initial_arguments: E::Argument, signature_verified_block: &[T], base_view: &S, - ) -> Result, E::Error> { + ) -> BlockExecutionResult, E::Error> { let _timer = PARALLEL_EXECUTION_SECONDS.start_timer(); // Using parallel execution with 1 thread currently will not work as it // will only have a coordinator role but no workers for rolling commit. @@ -1073,10 +1064,11 @@ where let num_txns = signature_verified_block.len(); - let shared_commit_state = ExplicitSyncWrapper::new(( - BlockGasLimitProcessor::new(self.config.onchain.block_gas_limit_type.clone(), num_txns), - None, + let shared_commit_state = ExplicitSyncWrapper::new(BlockGasLimitProcessor::new( + self.config.onchain.block_gas_limit_type.clone(), + num_txns, )); + let shared_maybe_error = ExplicitSyncWrapper::new(Ok(())); let final_results = ExplicitSyncWrapper::new(Vec::with_capacity(num_txns)); @@ -1108,9 +1100,19 @@ where &final_results, ) { if scheduler.halt() { - let mut shared_commit_state_guard = shared_commit_state.acquire(); - let (_, maybe_error) = shared_commit_state_guard.dereference_mut(); - *maybe_error = Some(Error::FallbackToSequential(e)); + // Only one thread / worker will successfully halt, hence below + // ExplicitSyncWrapper acquires are safe. 
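
// A sketch (an AtomicBool standing in for the scheduler) of the "only one worker
// wins the halt" protocol relied on below: the first caller of halt() gets true
// and is therefore the unique writer of the shared error slot.
use std::sync::{
    atomic::{AtomicBool, Ordering as AtomicOrdering},
    Mutex,
};

struct HaltSketch {
    halted: AtomicBool,
    error: Mutex<Option<String>>,
}

impl HaltSketch {
    // Returns true only for the caller that flipped the flag.
    fn halt(&self) -> bool {
        !self.halted.swap(true, AtomicOrdering::SeqCst)
    }

    fn record_failure(&self, err: String) {
        if self.halt() {
            // Safe: we are the unique halter.
            *self.error.lock().unwrap() = Some(err);
        }
    }
}
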
+ + if let BlockExecutionError::FatalVMError((inner_err, txn_idx)) = &e { + let block_limit_processor = shared_commit_state.acquire(); + info!("Block execution was aborted due to {:?}", inner_err); + block_limit_processor.finish_parallel_update_counters_and_log_info( + txn_idx + 1, + scheduler.num_txns(), + ); + } + + *shared_maybe_error.acquire() = Err(e); } } }); @@ -1119,21 +1121,19 @@ where drop(timer); // Explicit async drops. DEFAULT_DROPPER.schedule_drop((last_input_output, scheduler, versioned_cache)); - let (_block_limit_processor, maybe_error) = shared_commit_state.into_inner(); // TODO add block end info to output. // block_limit_processor.is_block_limit_reached(); - match maybe_error { - Some(err) => Err(err), - None => Ok(BlockOutput::new(final_results.into_inner())), - } + shared_maybe_error + .into_inner() + .map(|()| BlockOutput::new(final_results.into_inner())) } fn apply_output_sequential( unsync_map: &UnsyncMap, output: &E::Output, - ) -> ::std::result::Result<(), PanicOr> { + ) -> Result<(), PanicOr> { for (key, (write_op, layout)) in output.resource_write_set().into_iter() { unsync_map.write(key, write_op, layout); } @@ -1143,7 +1143,7 @@ where unsync_map .insert_group_op(&group_key, value_tag, group_op, maybe_layout) .map_err(|e| { - resource_group_error(format!("Unexpected resource group error {:?}", e)) + code_invariant_error(format!("Unexpected resource group error {:?}", e)) })?; } unsync_map.write(group_key, metadata_op, None); @@ -1214,7 +1214,7 @@ where signature_verified_block: &[T], base_view: &S, dynamic_change_set_optimizations_enabled: bool, - ) -> Result, E::Error> { + ) -> BlockExecutionResult, E::Error> { let num_txns = signature_verified_block.len(); let init_timer = VM_INIT_SECONDS.start_timer(); let executor = E::init(executor_arguments); @@ -1314,7 +1314,7 @@ where if finalized_group.is_empty() != group_metadata_op.is_deletion() { // TODO[agg_v2](fix): code invariant error if dynamic change set optimizations disabled. // TODO[agg_v2](fix): make sure this cannot be triggered by an user transaction - return Err(resource_group_error(format!( + return Err(code_invariant_error(format!( "Group is empty = {} but op is deletion = {} in sequential execution", finalized_group.is_empty(), group_metadata_op.is_deletion() @@ -1328,7 +1328,7 @@ where { let finalized_group = unsync_map.finalize_group(&group_key); if finalized_group.is_empty() != group_metadata_op.is_deletion() { - return Err(resource_group_error(format!( + return Err(code_invariant_error(format!( "Group is empty = {} but op is deletion = {} in sequential execution", finalized_group.is_empty(), group_metadata_op.is_deletion() @@ -1359,7 +1359,7 @@ where ) .is_some() { - return Err(Error::FallbackToSequential(code_invariant_error( + return Err(BlockExecutionError::FallbackToSequential(code_invariant_error( "reads_needing_delayed_field_exchange already in the write set for key", ).into())); } @@ -1376,7 +1376,7 @@ where ); let serialized_groups = Self::serialize_groups(patched_finalized_groups) - .map_err(Error::FallbackToSequential)?; + .map_err(BlockExecutionError::FallbackToSequential)?; // TODO[agg_v2] patch resources in groups and provide explicitly output.incorporate_materialized_txn_output( @@ -1408,7 +1408,7 @@ where commit_hook.on_execution_aborted(idx as TxnIndex); } // Record the status indicating abort. 
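
// A sketch of the invariant enforced when finalizing a group, in both the
// parallel commit above and the sequential path: an empty finalized group must
// coincide with the metadata op being a deletion.
fn check_group_invariant_sketch(group_is_empty: bool, op_is_deletion: bool) -> Result<(), String> {
    if group_is_empty != op_is_deletion {
        return Err(format!(
            "Group is empty = {} but op is deletion = {}",
            group_is_empty, op_is_deletion
        ));
    }
    Ok(())
}
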
- return Err(Error::UserError(err)); + return Err(BlockExecutionError::FatalVMError((err, idx as TxnIndex))); }, ExecutionStatus::DirectWriteSetTransactionNotCapableError => { panic!("PayloadWriteSet::Direct transaction not alone in a block, in sequential execution") @@ -1424,9 +1424,9 @@ where "Sequential execution failed with DelayedFieldsCodeInvariantError: {:?}", msg ); - return Err(Error::FallbackToSequential(PanicOr::CodeInvariantError( - msg, - ))); + return Err(BlockExecutionError::FallbackToSequential( + PanicOr::CodeInvariantError(msg), + )); }, }; // When the txn is a SkipRest txn, halt sequential execution. @@ -1455,7 +1455,7 @@ where executor_arguments: E::Argument, signature_verified_block: &[T], base_view: &S, - ) -> Result, E::Error> { + ) -> BlockExecutionResult, E::Error> { let dynamic_change_set_optimizations_enabled = signature_verified_block.len() != 1 || E::is_transaction_dynamic_change_set_capable(&signature_verified_block[0]); @@ -1479,14 +1479,16 @@ where // Sequential execution fallback // Only worth doing if we did parallel before, i.e. if we did a different pass. if self.config.local.concurrency_level > 1 { - if let Err(Error::FallbackToSequential(e)) = &ret { + if let Err(BlockExecutionError::FallbackToSequential(e)) = &ret { match e { PanicOr::Or(IntentionalFallbackToSequential::ModulePathReadWrite) => { debug!("[Execution]: Module read & written, sequential fallback"); }, - PanicOr::Or(IntentionalFallbackToSequential::ResourceGroupError(msg)) => { + PanicOr::Or( + IntentionalFallbackToSequential::ResourceGroupSerializationError(msg), + ) => { error!( - "[Execution]: ResourceGroupError({:?}), sequential fallback", + "[Execution]: ResourceGroupSerializationError {}, sequential fallback", msg ); }, @@ -1513,7 +1515,7 @@ where // If after trying available fallbacks, we still are askign to do a fallback, // something unrecoverable went wrong. - if let Err(Error::FallbackToSequential(e)) = &ret { + if let Err(BlockExecutionError::FallbackToSequential(e)) = &ret { // TODO[agg_v2][fix] make sure this can never happen - we have sequential raising // this error often when something that should never happen goes wrong panic!("Sequential execution failed with {:?}", e); @@ -1523,11 +1525,6 @@ where } } -fn resource_group_error(err_msg: String) -> PanicOr { - error!("resource_group_error: {:?}", err_msg); - PanicOr::Or(IntentionalFallbackToSequential::ResourceGroupError(err_msg)) -} - fn gen_id_start_value(sequential: bool) -> u32 { // IDs are ephemeral. Pick a random prefix, and different each time, // in case exchange is mistakenly not performed - to more easily catch it. diff --git a/aptos-move/block-executor/src/proptest_types/baseline.rs b/aptos-move/block-executor/src/proptest_types/baseline.rs index 24adc209eacc5..6e9e6e65b2e66 100644 --- a/aptos-move/block-executor/src/proptest_types/baseline.rs +++ b/aptos-move/block-executor/src/proptest_types/baseline.rs @@ -12,7 +12,7 @@ /// number, and hence it is crucial for the baseline to know the final incarnation number /// of each transaction of the tested block executor execution. use crate::{ - errors::{Error as BlockExecutorError, Result as BlockExecutorResult}, + errors::{BlockExecutionError, BlockExecutionResult}, proptest_types::types::{ MockOutput, MockTransaction, ValueType, RESERVED_TAG, STORAGE_AGGREGATOR_VALUE, }, @@ -222,7 +222,7 @@ impl BaselineOutput { // itself to be easily traceable in case of an error. 
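
// A sketch of the triage performed in the fallback path above (simplified enum;
// the real type is PanicOr<IntentionalFallbackToSequential>): an expected module
// read/write conflict is logged at debug level, everything else is an error.
#[derive(Debug)]
enum FallbackReasonSketch {
    ModulePathReadWrite,
    ResourceGroupSerializationError(String),
    CodeInvariantError(String),
}

fn log_fallback_sketch(reason: &FallbackReasonSketch) {
    match reason {
        FallbackReasonSketch::ModulePathReadWrite => {
            println!("debug: module read & written, sequential fallback");
        },
        FallbackReasonSketch::ResourceGroupSerializationError(msg)
        | FallbackReasonSketch::CodeInvariantError(msg) => {
            println!("error: {}, sequential fallback", msg);
        },
    }
}
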
pub(crate) fn assert_output( &self, - results: &BlockExecutorResult>, usize>, + results: &BlockExecutionResult>, usize>, ) { let base_map: HashMap = HashMap::from([(RESERVED_TAG, vec![0].into())]); let mut group_world = HashMap::new(); @@ -362,12 +362,13 @@ impl BaselineOutput { assert_none!(output.materialized_delta_writes.get()); }); }, - Err(BlockExecutorError::UserError(idx)) => { + Err(BlockExecutionError::FatalVMError((idx, executor_idx))) => { + assert_eq!(*idx, *executor_idx as usize); assert_matches!(&self.status, BaselineStatus::Aborted); assert_eq!(*idx, self.read_values.len()); assert_eq!(*idx, self.resolved_deltas.len()); }, - Err(BlockExecutorError::FallbackToSequential(e)) => { + Err(BlockExecutionError::FallbackToSequential(e)) => { unimplemented!("not tested here FallbackToSequential({:?})", e) }, } diff --git a/aptos-move/block-executor/src/proptest_types/tests.rs b/aptos-move/block-executor/src/proptest_types/tests.rs index 1eb41da3a4c3c..b51c877e223db 100644 --- a/aptos-move/block-executor/src/proptest_types/tests.rs +++ b/aptos-move/block-executor/src/proptest_types/tests.rs @@ -3,7 +3,7 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - errors::{Error, IntentionalFallbackToSequential}, + errors::{BlockExecutionError, IntentionalFallbackToSequential}, executor::BlockExecutor, proptest_types::{ baseline::BaselineOutput, @@ -88,7 +88,7 @@ fn run_transactions( if module_access.0 && module_access.1 { assert_eq!( output.unwrap_err(), - Error::FallbackToSequential(PanicOr::Or( + BlockExecutionError::FallbackToSequential(PanicOr::Or( IntentionalFallbackToSequential::ModulePathReadWrite )) ); @@ -476,7 +476,7 @@ fn publishing_fixed_params_with_block_gas_limit( assert_eq!( output.unwrap_err(), - Error::FallbackToSequential(PanicOr::Or( + BlockExecutionError::FallbackToSequential(PanicOr::Or( IntentionalFallbackToSequential::ModulePathReadWrite )) ); diff --git a/aptos-move/block-executor/src/proptest_types/types.rs b/aptos-move/block-executor/src/proptest_types/types.rs index df1a57d112126..48efb8faef094 100644 --- a/aptos-move/block-executor/src/proptest_types/types.rs +++ b/aptos-move/block-executor/src/proptest_types/types.rs @@ -48,8 +48,6 @@ use std::{ }, }; -type Result = std::result::Result; - // Should not be possible to overflow or underflow, as each delta is at most 100 in the tests. // TODO: extend to delta failures. pub(crate) const STORAGE_AGGREGATOR_VALUE: u128 = 100001; @@ -69,7 +67,7 @@ where type Key = K; // Contains mock storage value with STORAGE_AGGREGATOR_VALUE. - fn get_state_value(&self, _: &K) -> Result> { + fn get_state_value(&self, _: &K) -> Result, StateviewError> { Ok(Some(StateValue::new_legacy( serialize(&STORAGE_AGGREGATOR_VALUE).into(), ))) @@ -79,7 +77,7 @@ where StateViewId::Miscellaneous } - fn get_usage(&self) -> Result { + fn get_usage(&self) -> Result { unreachable!("Not used in tests"); } } @@ -95,7 +93,7 @@ where type Key = K; // Contains mock storage value with a non-empty group (w. value at RESERVED_TAG). - fn get_state_value(&self, key: &K) -> Result> { + fn get_state_value(&self, key: &K) -> Result, StateviewError> { if self.group_keys.contains(key) { let group: BTreeMap = BTreeMap::from([(RESERVED_TAG, vec![0].into())]); @@ -110,7 +108,7 @@ where StateViewId::Miscellaneous } - fn get_usage(&self) -> Result { + fn get_usage(&self) -> Result { unreachable!("Not used in tests"); } } @@ -126,7 +124,7 @@ where type Key = K; /// Gets the state value for a given state key. 
- fn get_state_value(&self, _: &K) -> Result> { + fn get_state_value(&self, _: &K) -> Result, StateviewError> { Ok(None) } @@ -134,7 +132,7 @@ where StateViewId::Miscellaneous } - fn get_usage(&self) -> Result { + fn get_usage(&self) -> Result { unreachable!("Not used in tests"); } } diff --git a/aptos-move/block-executor/src/txn_last_input_output.rs b/aptos-move/block-executor/src/txn_last_input_output.rs index 972d588160e7b..131b5cdca92e0 100644 --- a/aptos-move/block-executor/src/txn_last_input_output.rs +++ b/aptos-move/block-executor/src/txn_last_input_output.rs @@ -3,7 +3,7 @@ use crate::{ captured_reads::CapturedReads, - errors::{Error, IntentionalFallbackToSequential}, + errors::{BlockExecutionError, IntentionalFallbackToSequential}, explicit_sync_wrapper::ExplicitSyncWrapper, task::{ExecutionStatus, TransactionOutput}, types::{InputOutputKey, ReadWriteSummary}, @@ -34,7 +34,7 @@ type TxnInput = CapturedReads; // the WriteOps corresponding to the deltas in the corresponding outputs. #[derive(Debug)] pub(crate) struct TxnOutput { - output_status: ExecutionStatus>, + output_status: ExecutionStatus>, } pub(crate) enum KeyKind { @@ -44,11 +44,11 @@ pub(crate) enum KeyKind { } impl TxnOutput { - pub fn from_output_status(output_status: ExecutionStatus>) -> Self { + pub fn from_output_status(output_status: ExecutionStatus>) -> Self { Self { output_status } } - pub fn output_status(&self) -> &ExecutionStatus> { + pub fn output_status(&self) -> &ExecutionStatus> { &self.output_status } } @@ -126,7 +126,7 @@ impl, E: Debug + Send + Clone> &self, txn_idx: TxnIndex, input: CapturedReads, - output: ExecutionStatus>, + output: ExecutionStatus>, ) -> bool { let written_modules = match &output { ExecutionStatus::Success(output) | ExecutionStatus::SkipRest(output) => { @@ -212,21 +212,31 @@ impl, E: Debug + Send + Clone> ) } - pub(crate) fn maybe_execution_error(&self, txn_idx: TxnIndex) -> Option> { + pub(crate) fn module_rw_intersection_ok( + &self, + ) -> Result<(), PanicOr> { if self.module_read_write_intersection.load(Ordering::Acquire) { - return Some(Error::FallbackToSequential(PanicOr::Or( + Err(PanicOr::Or( IntentionalFallbackToSequential::ModulePathReadWrite, - ))); + )) + } else { + Ok(()) } + } + pub(crate) fn aborted_execution_status( + &self, + txn_idx: TxnIndex, + ) -> Option> { if let ExecutionStatus::Abort(err) = &self.outputs[txn_idx as usize] .load_full() .expect("[BlockSTM]: Execution output must be recorded after execution") .output_status { - return Some(err.clone()); + Some(err.clone()) + } else { + None } - None } pub(crate) fn update_to_skip_rest(&self, txn_idx: TxnIndex) { @@ -470,7 +480,10 @@ impl, E: Debug + Send + Clone> // Must be executed after parallel execution is done, grabs outputs. Will panic if // other outstanding references to the recorded outputs exist. 
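
// A sketch of the commit-time check order the split API above enables: the
// global module-R/W flag is consulted first (a sequential-fallback condition),
// and only then the per-transaction abort status (a fatal condition).
fn commit_checks_sketch(
    module_rw_conflict: bool,
    aborted: Option<String>,
) -> Result<(), String> {
    if module_rw_conflict {
        return Err("ModulePathReadWrite: fall back to sequential".to_string());
    }
    if let Some(err) = aborted {
        return Err(err);
    }
    Ok(())
}
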
- pub(crate) fn take_output(&self, txn_idx: TxnIndex) -> ExecutionStatus> { + pub(crate) fn take_output( + &self, + txn_idx: TxnIndex, + ) -> ExecutionStatus> { let owning_ptr = self.outputs[txn_idx as usize] .swap(None) .expect("[BlockSTM]: Output must be recorded after execution"); From 7c1bed1e96cb8c95f7c2d92d1e00b6733061f8b3 Mon Sep 17 00:00:00 2001 From: Oliver He Date: Thu, 1 Feb 2024 06:39:53 +0700 Subject: [PATCH 30/44] Add groth16 verification for zkid transaction signatures (#11772) Co-authored-by: Alin Tomescu --- Cargo.lock | 99 ++++- Cargo.toml | 2 + .../src/components/feature_flags.rs | 10 +- aptos-move/aptos-vm/src/aptos_vm.rs | 50 ++- aptos-move/aptos-vm/src/move_vm_ext/vm.rs | 9 +- aptos-move/aptos-vm/src/zkid_validation.rs | 121 +++--- .../framework/move-stdlib/doc/features.md | 120 ++++++ .../move-stdlib/sources/configs/features.move | 23 ++ aptos-move/vm-genesis/src/lib.rs | 4 +- crates/aptos-crypto/src/poseidon_bn254.rs | 11 +- .../generate-format/tests/staged/api.yaml | 25 +- .../generate-format/tests/staged/aptos.yaml | 25 +- .../tests/staged/consensus.yaml | 25 +- testsuite/smoke-test/src/zkid.rs | 387 ++++++++++++++---- types/Cargo.toml | 2 + types/src/bn254_circom.rs | 297 ++++++++++++++ types/src/jwks/rsa.rs | 25 +- types/src/lib.rs | 1 + types/src/on_chain_config/aptos_features.rs | 20 +- types/src/transaction/authenticator.rs | 115 +++++- types/src/zkid.rs | 212 +++++++++- 21 files changed, 1328 insertions(+), 255 deletions(-) create mode 100644 types/src/bn254_circom.rs diff --git a/Cargo.lock b/Cargo.lock index 67bd69210c8fc..f5160a8fcec30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -302,7 +302,7 @@ dependencies = [ "toml 0.7.8", "tonic 0.10.2", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "version-compare", "walkdir", ] @@ -2142,7 +2142,7 @@ dependencies = [ "toml 0.7.8", "tonic 0.10.2", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "warp", ] @@ -2321,7 +2321,7 @@ dependencies = [ "tokio", "toml 0.7.8", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "warp", ] @@ -2415,7 +2415,7 @@ dependencies = [ "toml 0.7.8", "tonic 0.10.2", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "url", "warp", ] @@ -2596,7 +2596,7 @@ dependencies = [ "strum_macros 0.24.3", "tokio", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", ] [[package]] @@ -4133,6 +4133,7 @@ dependencies = [ "aptos-experimental-runtimes", "ark-bn254", "ark-ff", + "ark-groth16", "ark-serialize", "arr_macro", "async-trait", @@ -4165,6 +4166,7 @@ dependencies = [ "rayon", "regex", "serde", + "serde-big-array", "serde_bytes", "serde_json", "serde_with", @@ -4495,6 +4497,25 @@ dependencies = [ "ark-std", ] +[[package]] +name = "ark-crypto-primitives" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3a13b34da09176a8baba701233fdffbaa7c1b1192ce031a3da4e55ce1f1a56" +dependencies = [ + "ark-ec", + "ark-ff", + "ark-relations", + "ark-serialize", + "ark-snark", + "ark-std", + "blake2", + "derivative", + "digest 0.10.7", + "rayon", + "sha2 0.10.8", +] + [[package]] name = "ark-ec" version = "0.4.2" @@ -4509,6 +4530,7 @@ dependencies = [ "hashbrown 0.13.2", "itertools 0.10.5", "num-traits", + "rayon", "zeroize", ] @@ -4528,6 +4550,7 @@ dependencies = [ "num-bigint 0.4.4", "num-traits", "paste", + "rayon", "rustc_version", "zeroize", ] @@ -4555,6 +4578,22 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-groth16" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "20ceafa83848c3e390f1cbf124bc3193b3e639b3f02009e0e290809a501b95fc" +dependencies = [ + "ark-crypto-primitives", + "ark-ec", + "ark-ff", + "ark-poly", + "ark-relations", + "ark-serialize", + "ark-std", + "rayon", +] + [[package]] name = "ark-poly" version = "0.4.2" @@ -4566,6 +4605,19 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", + "rayon", +] + +[[package]] +name = "ark-relations" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00796b6efc05a3f48225e59cb6a2cda78881e7c390872d5786aaf112f31fb4f0" +dependencies = [ + "ark-ff", + "ark-std", + "tracing", + "tracing-subscriber 0.2.25", ] [[package]] @@ -4591,6 +4643,18 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "ark-snark" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84d3cc6833a335bb8a600241889ead68ee89a3cf8448081fb7694c0fe503da63" +dependencies = [ + "ark-ff", + "ark-relations", + "ark-serialize", + "ark-std", +] + [[package]] name = "ark-std" version = "0.4.0" @@ -4599,6 +4663,7 @@ checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" dependencies = [ "num-traits", "rand 0.8.5", + "rayon", ] [[package]] @@ -6123,7 +6188,7 @@ dependencies = [ "tonic 0.9.2", "tracing", "tracing-core", - "tracing-subscriber", + "tracing-subscriber 0.3.18", ] [[package]] @@ -14179,6 +14244,15 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-big-array" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11fc7cc2c76d73e0f27ee52abbd64eec84d46f370c88371120433196934e4b7f" +dependencies = [ + "serde", +] + [[package]] name = "serde-generate" version = "0.20.6" @@ -14413,7 +14487,7 @@ dependencies = [ "tokio", "toml 0.7.8", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", "warp", ] @@ -15300,7 +15374,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.3.18", ] [[package]] @@ -15984,6 +16058,15 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-subscriber" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +dependencies = [ + "tracing-core", +] + [[package]] name = "tracing-subscriber" version = "0.3.18" diff --git a/Cargo.toml b/Cargo.toml index 316743829824c..a4d6be9b4cff1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -448,6 +448,7 @@ ark-bls12-381 = "0.4.0" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" +ark-groth16 = "0.4.0" ark-serialize = "0.4.0" ark-std = { version = "0.4.0", features = ["getrandom"] } aptos-moving-average = { git = "https://github.com/aptos-labs/aptos-indexer-processors.git", rev = "4801acae7aea30d7e96bbfbe5ec5b04056dfa4cf" } @@ -643,6 +644,7 @@ sha2_0_10_6 = { package = "sha2", version = "0.10.6" } sha3 = "0.9.1" siphasher = "0.3.10" serde = { version = "1.0.193", features = ["derive", "rc"] } +serde-big-array = "0.5.1" serde_bytes = "0.11.6" serde_json = { version = "1.0.81", features = ["preserve_order", "arbitrary_precision"] } # Note: arbitrary_precision is required to parse u256 in JSON serde_repr = "0.1" diff --git a/aptos-move/aptos-release-builder/src/components/feature_flags.rs b/aptos-move/aptos-release-builder/src/components/feature_flags.rs index 128aba7148b27..6f001fc75e15d 100644 --- 
a/aptos-move/aptos-release-builder/src/components/feature_flags.rs +++ b/aptos-move/aptos-release-builder/src/components/feature_flags.rs @@ -97,7 +97,7 @@ pub enum FeatureFlag { WebAuthnSignature, ReconfigureWithDKG, ZkIdSignature, - OpenIdSignature, + ZkIdZkLessSignature, RemoveDetailedError, } @@ -253,8 +253,8 @@ impl From for AptosFeatureFlag { FeatureFlag::BN254Structures => AptosFeatureFlag::BN254_STRUCTURES, FeatureFlag::WebAuthnSignature => AptosFeatureFlag::WEBAUTHN_SIGNATURE, FeatureFlag::ReconfigureWithDKG => AptosFeatureFlag::RECONFIGURE_WITH_DKG, - FeatureFlag::ZkIdSignature => AptosFeatureFlag::ZK_ID_SIGNATURE, - FeatureFlag::OpenIdSignature => AptosFeatureFlag::OPEN_ID_SIGNATURE, + FeatureFlag::ZkIdSignature => AptosFeatureFlag::ZK_ID_SIGNATURES, + FeatureFlag::ZkIdZkLessSignature => AptosFeatureFlag::ZK_ID_ZKLESS_SIGNATURE, FeatureFlag::RemoveDetailedError => AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH, } } @@ -333,8 +333,8 @@ impl From for FeatureFlag { AptosFeatureFlag::BN254_STRUCTURES => FeatureFlag::BN254Structures, AptosFeatureFlag::WEBAUTHN_SIGNATURE => FeatureFlag::WebAuthnSignature, AptosFeatureFlag::RECONFIGURE_WITH_DKG => FeatureFlag::ReconfigureWithDKG, - AptosFeatureFlag::ZK_ID_SIGNATURE => FeatureFlag::ZkIdSignature, - AptosFeatureFlag::OPEN_ID_SIGNATURE => FeatureFlag::OpenIdSignature, + AptosFeatureFlag::ZK_ID_SIGNATURES => FeatureFlag::ZkIdSignature, + AptosFeatureFlag::ZK_ID_ZKLESS_SIGNATURE => FeatureFlag::ZkIdZkLessSignature, AptosFeatureFlag::REMOVE_DETAILED_ERROR_FROM_HASH => FeatureFlag::RemoveDetailedError, } } diff --git a/aptos-move/aptos-vm/src/aptos_vm.rs b/aptos-move/aptos-vm/src/aptos_vm.rs index fee3fcdadde0f..1ca09f430e2ef 100644 --- a/aptos-move/aptos-vm/src/aptos_vm.rs +++ b/aptos-move/aptos-vm/src/aptos_vm.rs @@ -1386,8 +1386,37 @@ impl AptosVM { )); } - zkid_validation::validate_zkid_authenticators(transaction, resolver, session, log_context)?; + // zkID feature gating + let authenticators = aptos_types::zkid::get_zkid_authenticators(transaction); + match &authenticators { + Ok(authenticators) => { + for (_, sig) in authenticators { + if !self.features.is_zkid_enabled() + && matches!(sig.sig, ZkpOrOpenIdSig::Groth16Zkp { .. }) + { + return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); + } + if (!self.features.is_zkid_enabled() || !self.features.is_zkid_zkless_enabled()) + && matches!(sig.sig, ZkpOrOpenIdSig::OpenIdSig { .. }) + { + return Err(VMStatus::error(StatusCode::FEATURE_UNDER_GATING, None)); + } + } + }, + Err(_) => { + return Err(VMStatus::error(StatusCode::INVALID_SIGNATURE, None)); + }, + } + zkid_validation::validate_zkid_authenticators( + &authenticators.unwrap(), + resolver, + self.move_vm.get_chain_id(), + )?; + + // The prologue MUST be run AFTER any validation. Otherwise you may run prologue and hit + // SEQUENCE_NUMBER_TOO_NEW if there is more than one transaction from the same sender and + // end up skipping validation. self.run_prologue_with_payload( session, resolver, @@ -2237,25 +2266,6 @@ impl VMValidator for AptosVM { } } - if !self.features.is_zkid_enabled() || !self.features.is_open_id_signature_enabled() { - if let Ok(authenticators) = aptos_types::zkid::get_zkid_authenticators(&transaction) { - for (_, sig) in authenticators { - if !self.features.is_zkid_enabled() - && matches!(sig.sig, ZkpOrOpenIdSig::Groth16Zkp { .. 
}) - { - return VMValidatorResult::error(StatusCode::FEATURE_UNDER_GATING); - } - if !self.features.is_open_id_signature_enabled() - && matches!(sig.sig, ZkpOrOpenIdSig::OpenIdSig { .. }) - { - return VMValidatorResult::error(StatusCode::FEATURE_UNDER_GATING); - } - } - } else { - return VMValidatorResult::error(StatusCode::INVALID_SIGNATURE); - }; - }; - let txn = match transaction.check_signature() { Ok(t) => t, _ => { diff --git a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs index f0b0dfbfac840..de2742e714c50 100644 --- a/aptos-move/aptos-vm/src/move_vm_ext/vm.rs +++ b/aptos-move/aptos-vm/src/move_vm_ext/vm.rs @@ -14,7 +14,10 @@ use aptos_gas_algebra::DynamicExpression; use aptos_gas_schedule::{MiscGasParameters, NativeGasParameters}; use aptos_native_interface::SafeNativeBuilder; use aptos_table_natives::NativeTableContext; -use aptos_types::on_chain_config::{FeatureFlag, Features, TimedFeatureFlag, TimedFeatures}; +use aptos_types::{ + chain_id::ChainId, + on_chain_config::{FeatureFlag, Features, TimedFeatureFlag, TimedFeatures}, +}; use move_binary_format::{ deserializer::DeserializerConfig, errors::VMResult, @@ -240,6 +243,10 @@ impl MoveVmExt { self.features.clone(), ) } + + pub fn get_chain_id(&self) -> ChainId { + ChainId::new(self.chain_id) + } } impl Deref for MoveVmExt { diff --git a/aptos-move/aptos-vm/src/zkid_validation.rs b/aptos-move/aptos-vm/src/zkid_validation.rs index ef7fcb0cde9fa..019bcf082e611 100644 --- a/aptos-move/aptos-vm/src/zkid_validation.rs +++ b/aptos-move/aptos-vm/src/zkid_validation.rs @@ -2,18 +2,24 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::move_vm_ext::{AptosMoveResolver, SessionExt}; +use crate::move_vm_ext::AptosMoveResolver; use aptos_types::{ + bn254_circom::get_public_inputs_hash, + chain_id::ChainId, jwks::{jwk::JWK, PatchedJWKs}, on_chain_config::{CurrentTimeMicroseconds, OnChainConfig}, - transaction::SignedTransaction, vm_status::{StatusCode, VMStatus}, - zkid::{ZkpOrOpenIdSig, MAX_ZK_ID_AUTHENTICATORS_ALLOWED}, + zkid::{ZkIdPublicKey, ZkIdSignature, ZkpOrOpenIdSig, MAX_ZK_ID_AUTHENTICATORS_ALLOWED}, }; -use aptos_vm_logging::log_schema::AdapterLogSchema; use move_binary_format::errors::Location; use move_core_types::{language_storage::CORE_CODE_ADDRESS, move_resource::MoveStructType}; +macro_rules! invalid_signature { + ($message:expr) => { + VMStatus::error(StatusCode::INVALID_SIGNATURE, Some($message.to_owned())) + }; +} + fn get_current_time_onchain( resolver: &impl AptosMoveResolver, ) -> anyhow::Result { @@ -38,74 +44,62 @@ fn get_jwks_onchain(resolver: &impl AptosMoveResolver) -> anyhow::Result Result { + let jwt_header_parsed = zkid_sig + .parse_jwt_header() + .map_err(|_| invalid_signature!("Failed to get JWT header"))?; + let jwk_move_struct = jwks + .get_jwk(&zkid_pub_key.iss, &jwt_header_parsed.kid) + .map_err(|_| invalid_signature!("JWK not found"))?; + + let jwk = + JWK::try_from(jwk_move_struct).map_err(|_| invalid_signature!("Could not parse JWK"))?; + Ok(jwk) +} + pub fn validate_zkid_authenticators( - transaction: &SignedTransaction, + authenticators: &Vec<(ZkIdPublicKey, ZkIdSignature)>, resolver: &impl AptosMoveResolver, - _session: &mut SessionExt, - _log_context: &AdapterLogSchema, -) -> anyhow::Result<(), VMStatus> { - // TODO(ZkIdGroth16Zkp): The ZKP/OpenID sig verification does not charge gas. So, we could have DoS attacks. 
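
// A sketch of the gating decision introduced above, assuming the two feature
// bits are independent: a Groth16 proof only needs the zkID flag, while the
// ZK-less OpenID path needs both the zkID flag and the ZK-less flag.
enum ZkSigKindSketch {
    Groth16Zkp,
    OpenIdSig,
}

fn zkid_sig_allowed_sketch(sig: &ZkSigKindSketch, zkid_on: bool, zkless_on: bool) -> bool {
    match sig {
        ZkSigKindSketch::Groth16Zkp => zkid_on,
        ZkSigKindSketch::OpenIdSig => zkid_on && zkless_on,
    }
}
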
- let zkid_authenticators = - aptos_types::zkid::get_zkid_authenticators(transaction).map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("Failed to fetch zkid authenticators".to_owned()), - ) - })?; - - if zkid_authenticators.is_empty() { + chain_id: ChainId, +) -> Result<(), VMStatus> { + if authenticators.is_empty() { return Ok(()); } - if zkid_authenticators.len() > MAX_ZK_ID_AUTHENTICATORS_ALLOWED { - return Err(VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("Too many zkid authenticators".to_owned()), - )); + if authenticators.len() > MAX_ZK_ID_AUTHENTICATORS_ALLOWED { + return Err(invalid_signature!("Too many zkID authenticators")); } let onchain_timestamp_obj = get_current_time_onchain(resolver)?; // Check the expiry timestamp on all authenticators first to fail fast - for (_, zkid_sig) in &zkid_authenticators { + for (_, zkid_sig) in authenticators { zkid_sig .verify_expiry(&onchain_timestamp_obj) - .map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("The ephemeral keypair has expired".to_owned()), - ) - })?; + .map_err(|_| invalid_signature!("The ephemeral keypair has expired"))?; } let patched_jwks = get_jwks_onchain(resolver)?; - for (zkid_pub_key, zkid_sig) in &zkid_authenticators { - let jwt_header_parsed = zkid_sig.parse_jwt_header().map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("Failed to get JWT header".to_owned()), - ) - })?; - let jwk_move_struct = patched_jwks - .get_jwk(&zkid_pub_key.iss, &jwt_header_parsed.kid) - .map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("JWK not found".to_owned()), - ) - })?; - - let jwk = JWK::try_from(jwk_move_struct).map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("Could not parse JWK".to_owned()), - ) - })?; - - let jwt_header = &zkid_sig.jwt_header; + for (zkid_pub_key, zkid_sig) in authenticators { + let jwk = get_jwk_for_zkid_authenticator(&patched_jwks, zkid_pub_key, zkid_sig)?; match &zkid_sig.sig { - ZkpOrOpenIdSig::Groth16Zkp(_) => {}, + ZkpOrOpenIdSig::Groth16Zkp(proof) => match jwk { + JWK::RSA(rsa_jwk) => { + let public_inputs_hash = + get_public_inputs_hash(zkid_sig, zkid_pub_key, &rsa_jwk).map_err(|_| { + invalid_signature!("Could not compute public inputs hash") + })?; + proof + .verify_proof(public_inputs_hash, chain_id) + .map_err(|_| invalid_signature!("Proof verification failed"))?; + }, + JWK::Unsupported(_) => return Err(invalid_signature!("JWK is not supported")), + }, ZkpOrOpenIdSig::OpenIdSig(openid_sig) => { match jwk { JWK::RSA(rsa_jwk) => { @@ -121,23 +115,14 @@ pub fn validate_zkid_authenticators( // // We are now ready to verify the RSA signature openid_sig - .verify_jwt_signature(rsa_jwk, jwt_header) + .verify_jwt_signature(rsa_jwk, &zkid_sig.jwt_header) .map_err(|_| { - VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some( - "RSA Signature verification failed for OpenIdSig" - .to_owned(), - ), + invalid_signature!( + "RSA signature verification failed for OpenIdSig" ) })?; }, - JWK::Unsupported(_) => { - return Err(VMStatus::error( - StatusCode::INVALID_SIGNATURE, - Some("JWK is not supported".to_owned()), - )) - }, + JWK::Unsupported(_) => return Err(invalid_signature!("JWK is not supported")), } }, } diff --git a/aptos-move/framework/move-stdlib/doc/features.md b/aptos-move/framework/move-stdlib/doc/features.md index 83aef9bdcff3d..a56f3f691e13c 100644 --- a/aptos-move/framework/move-stdlib/doc/features.md +++ b/aptos-move/framework/move-stdlib/doc/features.md @@ -84,6 +84,10 @@ 
return true. - [Function `commission_change_delegation_pool_enabled`](#0x1_features_commission_change_delegation_pool_enabled) - [Function `get_bn254_strutures_feature`](#0x1_features_get_bn254_strutures_feature) - [Function `bn254_structures_enabled`](#0x1_features_bn254_structures_enabled) +- [Function `get_zkid_feature`](#0x1_features_get_zkid_feature) +- [Function `zkid_feature_enabled`](#0x1_features_zkid_feature_enabled) +- [Function `get_zkid_zkless_feature`](#0x1_features_get_zkid_zkless_feature) +- [Function `zkid_zkless_feature_enabled`](#0x1_features_zkid_zkless_feature_enabled) - [Function `change_feature_flags`](#0x1_features_change_feature_flags) - [Function `is_enabled`](#0x1_features_is_enabled) - [Function `set`](#0x1_features_set) @@ -577,6 +581,30 @@ Lifetime: transient + + +Whether the zkID feature is enabled, possibly with the ZK-less verification mode. + +Lifetime: transient + + +

const ZK_ID_SIGNATURES: u64 = 46;
+
+
+Whether the ZK-less mode of the zkID feature is enabled.
+
+Lifetime: transient
+
const ZK_ID_ZKLESS_SIGNATURE: u64 = 47;
+
+ + + ## Function `code_dependency_check_enabled` @@ -1798,6 +1826,98 @@ Lifetime: transient + + + + +## Function `get_zkid_feature` + + + +
public fun get_zkid_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_zkid_feature(): u64 { ZK_ID_SIGNATURES }
+
+ + + +
+ + + +## Function `zkid_feature_enabled` + + + +
public fun zkid_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun zkid_feature_enabled(): bool acquires Features {
+    is_enabled(ZK_ID_SIGNATURES)
+}
+
+ + + +
+ + + +## Function `get_zkid_zkless_feature` + + + +
public fun get_zkid_zkless_feature(): u64
+
+ + + +
+Implementation + + +
public fun get_zkid_zkless_feature(): u64 { ZK_ID_ZKLESS_SIGNATURE }
+
+ + + +
+ + + +## Function `zkid_zkless_feature_enabled` + + + +
public fun zkid_zkless_feature_enabled(): bool
+
+ + + +
+Implementation + + +
public fun zkid_zkless_feature_enabled(): bool acquires Features {
+    is_enabled(ZK_ID_ZKLESS_SIGNATURE)
+}
+
+ + +
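
A rough sketch of how these two on-chain flags are consumed from Rust. The constants mirror the numbering in features.move above; the helper and the flag-set shape are illustrative, not the actual Features API:

const ZK_ID_SIGNATURES: u64 = 46;
const ZK_ID_ZKLESS_SIGNATURE: u64 = 47;

fn zkid_paths_enabled(enabled_features: &[u64]) -> (bool, bool) {
    let on = |f: u64| enabled_features.contains(&f);
    let groth16_path = on(ZK_ID_SIGNATURES);
    let zkless_openid_path = on(ZK_ID_SIGNATURES) && on(ZK_ID_ZKLESS_SIGNATURE);
    (groth16_path, zkless_openid_path)
}
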
diff --git a/aptos-move/framework/move-stdlib/sources/configs/features.move b/aptos-move/framework/move-stdlib/sources/configs/features.move index e5ae45c6561c1..8db36f003ee42 100644 --- a/aptos-move/framework/move-stdlib/sources/configs/features.move +++ b/aptos-move/framework/move-stdlib/sources/configs/features.move @@ -333,6 +333,29 @@ module std::features { is_enabled(BN254_STRUCTURES) } + /// Whether the zkID feature is enabled, possibly with the ZK-less verification mode. + /// + /// Lifetime: transient + const ZK_ID_SIGNATURES: u64 = 46; + + public fun get_zkid_feature(): u64 { ZK_ID_SIGNATURES } + + public fun zkid_feature_enabled(): bool acquires Features { + is_enabled(ZK_ID_SIGNATURES) + } + + /// Whether the ZK-less mode of the zkID feature is enabled. + /// + /// Lifetime: transient + const ZK_ID_ZKLESS_SIGNATURE: u64 = 47; + + public fun get_zkid_zkless_feature(): u64 { ZK_ID_ZKLESS_SIGNATURE } + + public fun zkid_zkless_feature_enabled(): bool acquires Features { + is_enabled(ZK_ID_ZKLESS_SIGNATURE) + } + + // ============================================================================================ // Feature Flag Implementation diff --git a/aptos-move/vm-genesis/src/lib.rs b/aptos-move/vm-genesis/src/lib.rs index aa1fb78826292..17788f6d64cae 100644 --- a/aptos-move/vm-genesis/src/lib.rs +++ b/aptos-move/vm-genesis/src/lib.rs @@ -444,8 +444,8 @@ pub fn default_features() -> Vec { FeatureFlag::COMMISSION_CHANGE_DELEGATION_POOL, FeatureFlag::WEBAUTHN_SIGNATURE, // FeatureFlag::RECONFIGURE_WITH_DKG, //TODO: re-enable once randomness is ready. - FeatureFlag::ZK_ID_SIGNATURE, - FeatureFlag::OPEN_ID_SIGNATURE, + FeatureFlag::ZK_ID_SIGNATURES, + FeatureFlag::ZK_ID_ZKLESS_SIGNATURE, ] } diff --git a/crates/aptos-crypto/src/poseidon_bn254.rs b/crates/aptos-crypto/src/poseidon_bn254.rs index 65f67f8306e1d..3ee067a40ceff 100644 --- a/crates/aptos-crypto/src/poseidon_bn254.rs +++ b/crates/aptos-crypto/src/poseidon_bn254.rs @@ -46,7 +46,7 @@ pub fn hash_scalars(inputs: Vec) -> anyhow::Result /// /// This function calls `pad_and_pack_bytes_to_scalars_no_len` safely as strings will not contain the zero byte except to terminate. pub fn pad_and_hash_string(str: &str, max_bytes: usize) -> anyhow::Result { - pad_and_hash_bytes_no_len(str.as_bytes(), max_bytes) + pad_and_hash_bytes_with_len(str.as_bytes(), max_bytes) } /// Given $n$ bytes, this function returns $k$ field elements that pack those bytes as tightly as @@ -96,9 +96,9 @@ pub fn pad_and_pack_bytes_to_scalars_with_len( } let len_scalar = pack_bytes_to_one_scalar(&len.to_le_bytes())?; - let scalars = [len_scalar] + let scalars = pad_and_pack_bytes_to_scalars_no_len(bytes, max_bytes)? .into_iter() - .chain(pad_and_pack_bytes_to_scalars_no_len(bytes, max_bytes)?) + .chain([len_scalar]) .collect::>(); Ok(scalars) } @@ -152,6 +152,7 @@ fn hash_bytes(bytes: &[u8]) -> anyhow::Result { /// example ASCII strings. Otherwise unexpected collisions can occur. /// /// Due to risk of collisions due to improper use by the caller, it is not exposed. +#[allow(unused)] fn pad_and_hash_bytes_no_len(bytes: &[u8], max_bytes: usize) -> anyhow::Result { let scalars = pad_and_pack_bytes_to_scalars_no_len(bytes, max_bytes)?; hash_scalars(scalars) @@ -195,8 +196,8 @@ pub fn pack_bytes_to_one_scalar(chunk: &[u8]) -> anyhow::Result { if chunk.len() > BYTES_PACKED_PER_SCALAR { bail!( "Cannot convert chunk to scalar. Max chunk size is {} bytes. 
Was given {} bytes.", + BYTES_PACKED_PER_SCALAR, chunk.len(), - MAX_NUM_INPUT_BYTES, ); } let fr = ark_bn254::Fr::from_le_bytes_mod_order(chunk); @@ -246,7 +247,7 @@ mod test { let aud_val_hash = poseidon_bn254::pad_and_hash_string(aud, MAX_AUD_VAL_BYTES).unwrap(); assert_eq!( aud_val_hash.to_string(), - "17915006864839806432696532586295153111003299925560813222373957953553432368724" + "4022319167392179362271493931675371567039199401695470709241660273812313544045" ); } diff --git a/testsuite/generate-format/tests/staged/api.yaml b/testsuite/generate-format/tests/staged/api.yaml index 3829a0d6205e5..2ac72adbaa8ba 100644 --- a/testsuite/generate-format/tests/staged/api.yaml +++ b/testsuite/generate-format/tests/staged/api.yaml @@ -260,15 +260,24 @@ ExecutionStatus: MiscellaneousError: NEWTYPE: OPTION: U64 +G1Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 32 +G2Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 64 Groth16Zkp: STRUCT: - a: - SEQ: STR + TYPENAME: G1Bytes - b: - SEQ: - SEQ: STR + TYPENAME: G2Bytes - c: - SEQ: STR + TYPENAME: G1Bytes HashValue: STRUCT: - hash: @@ -413,6 +422,12 @@ Secp256r1EcdsaPublicKey: NEWTYPESTRUCT: BYTES Secp256r1EcdsaSignature: NEWTYPESTRUCT: BYTES +SignedGroth16Zkp: + STRUCT: + - proof: + TYPENAME: Groth16Zkp + - non_malleability_signature: + TYPENAME: EphemeralSignature SignedTransaction: STRUCT: - raw_txn: @@ -760,7 +775,7 @@ ZkpOrOpenIdSig: 0: Groth16Zkp: NEWTYPE: - TYPENAME: Groth16Zkp + TYPENAME: SignedGroth16Zkp 1: OpenIdSig: NEWTYPE: diff --git a/testsuite/generate-format/tests/staged/aptos.yaml b/testsuite/generate-format/tests/staged/aptos.yaml index e32c495d755de..7a34c8684e073 100644 --- a/testsuite/generate-format/tests/staged/aptos.yaml +++ b/testsuite/generate-format/tests/staged/aptos.yaml @@ -206,15 +206,24 @@ EventKey: - creation_number: U64 - account_address: TYPENAME: AccountAddress +G1Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 32 +G2Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 64 Groth16Zkp: STRUCT: - a: - SEQ: STR + TYPENAME: G1Bytes - b: - SEQ: - SEQ: STR + TYPENAME: G2Bytes - c: - SEQ: STR + TYPENAME: G1Bytes HashValue: STRUCT: - hash: @@ -345,6 +354,12 @@ Secp256r1EcdsaPublicKey: NEWTYPESTRUCT: BYTES Secp256r1EcdsaSignature: NEWTYPESTRUCT: BYTES +SignedGroth16Zkp: + STRUCT: + - proof: + TYPENAME: Groth16Zkp + - non_malleability_signature: + TYPENAME: EphemeralSignature SignedTransaction: STRUCT: - raw_txn: @@ -642,7 +657,7 @@ ZkpOrOpenIdSig: 0: Groth16Zkp: NEWTYPE: - TYPENAME: Groth16Zkp + TYPENAME: SignedGroth16Zkp 1: OpenIdSig: NEWTYPE: diff --git a/testsuite/generate-format/tests/staged/consensus.yaml b/testsuite/generate-format/tests/staged/consensus.yaml index eb1f1345da68a..6ff285ca8e0bf 100644 --- a/testsuite/generate-format/tests/staged/consensus.yaml +++ b/testsuite/generate-format/tests/staged/consensus.yaml @@ -473,15 +473,24 @@ EventKey: - creation_number: U64 - account_address: TYPENAME: AccountAddress +G1Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 32 +G2Bytes: + NEWTYPESTRUCT: + TUPLEARRAY: + CONTENT: U8 + SIZE: 64 Groth16Zkp: STRUCT: - a: - SEQ: STR + TYPENAME: G1Bytes - b: - SEQ: - SEQ: STR + TYPENAME: G2Bytes - c: - SEQ: STR + TYPENAME: G1Bytes HashValue: STRUCT: - hash: @@ -707,6 +716,12 @@ SignedBatchInfoMsg: - signed_infos: SEQ: TYPENAME: SignedBatchInfo +SignedGroth16Zkp: + STRUCT: + - proof: + TYPENAME: Groth16Zkp + - non_malleability_signature: + TYPENAME: EphemeralSignature SignedTransaction: STRUCT: - raw_txn: @@ -1067,7 +1082,7 @@ 
ZkpOrOpenIdSig: 0: Groth16Zkp: NEWTYPE: - TYPENAME: Groth16Zkp + TYPENAME: SignedGroth16Zkp 1: OpenIdSig: NEWTYPE: diff --git a/testsuite/smoke-test/src/zkid.rs b/testsuite/smoke-test/src/zkid.rs index d15fdbb63006e..d3be06aed3329 100644 --- a/testsuite/smoke-test/src/zkid.rs +++ b/testsuite/smoke-test/src/zkid.rs @@ -9,6 +9,7 @@ use aptos_logger::{debug, info}; use aptos_rest_client::Client; use aptos_sdk::types::{AccountKey, LocalAccount}; use aptos_types::{ + bn254_circom::{G1Bytes, G2Bytes}, jwks::{ jwk::{JWKMoveStruct, JWK}, rsa::RSA_JWK, @@ -18,87 +19,14 @@ use aptos_types::{ authenticator::{AnyPublicKey, EphemeralPublicKey, EphemeralSignature}, SignedTransaction, }, - zkid::{IdCommitment, OpenIdSig, Pepper, ZkIdPublicKey, ZkIdSignature, ZkpOrOpenIdSig}, + zkid::{ + Groth16Zkp, IdCommitment, OpenIdSig, Pepper, SignedGroth16Zkp, ZkIdPublicKey, + ZkIdSignature, ZkpOrOpenIdSig, + }, }; use move_core_types::account_address::AccountAddress; use std::time::Duration; -async fn get_latest_jwkset(rest_client: &Client) -> PatchedJWKs { - let maybe_response = rest_client - .get_account_resource_bcs::(AccountAddress::ONE, "0x1::jwks::PatchedJWKs") - .await; - let response = maybe_response.unwrap(); - response.into_inner() -} - -async fn test_setup(swarm: &mut LocalSwarm, cli: &mut CliTestFramework) { - let client = swarm.validators().next().unwrap().rest_client(); - let root_idx = cli.add_account_with_address_to_cli( - swarm.root_key(), - swarm.chain_info().root_account().address(), - ); - swarm - .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(60)) - .await - .expect("Epoch 2 taking too long to come!"); - - let iss = "https://accounts.google.com"; - let jwk = RSA_JWK { - kid:"test_jwk".to_owned(), - kty:"RSA".to_owned(), - alg:"RS256".to_owned(), - e:"AQAB".to_owned(), - n:"6S7asUuzq5Q_3U9rbs-PkDVIdjgmtgWreG5qWPsC9xXZKiMV1AiV9LXyqQsAYpCqEDM3XbfmZqGb48yLhb_XqZaKgSYaC_h2DjM7lgrIQAp9902Rr8fUmLN2ivr5tnLxUUOnMOc2SQtr9dgzTONYW5Zu3PwyvAWk5D6ueIUhLtYzpcB-etoNdL3Ir2746KIy_VUsDwAM7dhrqSK8U2xFCGlau4ikOTtvzDownAMHMrfE7q1B6WZQDAQlBmxRQsyKln5DIsKv6xauNsHRgBAKctUxZG8M4QJIx3S6Aughd3RZC4Ca5Ae9fd8L8mlNYBCrQhOZ7dS0f4at4arlLcajtw".to_owned(), - }; - - info!("Insert a JWK."); - let jwk_patch_script = format!( - r#" -script {{ -use aptos_framework::jwks; -use aptos_framework::aptos_governance; -use std::string::utf8; -fun main(core_resources: &signer) {{ - let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); - let google_jwk_0 = jwks::new_rsa_jwk( - utf8(b"{}"), - utf8(b"RS256"), - utf8(b"AQAB"), - utf8(b"{}") - ); - let patches = vector[ - jwks::new_patch_remove_all(), - jwks::new_patch_upsert_jwk(b"{}", google_jwk_0), - ]; - jwks::set_patches(&framework_signer, patches); -}} -}} -"#, - jwk.kid, jwk.n, iss - ); - - let txn_summary = cli.run_script(root_idx, &jwk_patch_script).await.unwrap(); - debug!("txn_summary={:?}", txn_summary); - - info!("Use resource API to check the patch result."); - let patched_jwks = get_latest_jwkset(&client).await; - debug!("patched_jwks={:?}", patched_jwks); - - let expected_providers_jwks = AllProvidersJWKs { - entries: vec![ProviderJWKs { - issuer: b"https://accounts.google.com".to_vec(), - version: 0, - jwks: vec![JWKMoveStruct::from(JWK::RSA(jwk))], - }], - }; - assert_eq!(expected_providers_jwks, patched_jwks.jwks); - - let mut info = swarm.aptos_public_info(); - - // Increment sequence number since we patched a JWK - info.root_account().increment_sequence_number(); -} - 
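
// A sketch of the non-malleability binding the tests below exercise: the
// ephemeral key signs the proof itself, separately from the transaction
// signature (simplified stand-ins for SignedGroth16Zkp's fields, not the
// real verification code).
struct SignedProofSketch {
    proof_bytes: Vec<u8>,
    // What the submitted non-malleability signature actually covers.
    covered_bytes: Vec<u8>,
}

fn non_malleability_ok_sketch(p: &SignedProofSketch) -> bool {
    // The negative test swaps in the transaction signature here, so the
    // covered bytes no longer match the proof and verification must fail.
    p.covered_bytes == p.proof_bytes
}
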
#[tokio::test] async fn test_openid_signature_transaction_submission() { let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) @@ -111,7 +39,7 @@ async fn test_openid_signature_transaction_submission() { let pepper = Pepper::new([0u8; 31]); let idc = - IdCommitment::new_from_preimage("test_client_id", "sub", "test_account", &pepper).unwrap(); + IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); let sender_zkid_public_key = ZkIdPublicKey { iss: "https://accounts.google.com".to_owned(), idc, @@ -155,8 +83,8 @@ async fn test_openid_signature_transaction_submission() { let epk_blinder: [u8; 31] = [0u8; 31]; let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiJFVVRhSE9HdDcwRTNxbk9QMUJibnUzbE03QjR5TTdzaHZTb1NvdXF1VVJ3IiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); - let jwt_sig = "CEgO4S7hRgASaINsGST5Ygtl_CY-mUn2GaQ6d7q9q1eGz1MjW0o0yusJQDU6Hi1nDfXlNSvCF2SgD9ayG3uDGC5-18H0AWo2QgyZ2rC_OUa36RCTmhdo-i_H8xmwPxa3yHZZsGC-gJy_vVX-rfMLIh-JgdIFFIzGVPN75MwXLP3bYUaB9Lw52g50rf_006Qg5ubkZ70I13vGUTVbRVWanQIN69naFqHreLCjVsGsEBVBoUtexZw6Ulr8s0VajBpcTUqlMvbvqMfQ33NXaBQYvu3YZivpkus8rcG_eAMrFbYFY9AZF7AaW2HUaYo5QjzMQDsIA1lpnAcOW3GzWvb0vw".to_string(); + let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxYlFsNF9YYzUtSXBDcFViS19BZVhwZ2Q2R1o0MGxVVjN1YjN5b19FTHhrIiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); + let jwt_sig = "oBdOiIUc-ioG2-sHV1hWDLjgk4NrVf3z6V-HmgbOrVAz3PV1CwdfyTXsmVaCqLzOHzcbFB6ZRDxShs3aR7PsqdlhI0Dh8WrfU8kBkyk1FAmx2nST4SoSJROXsnusaOpNFpgSl96Rq3SXgr-yPBE9dEwTfD00vq2gH_fH1JAIeJJhc6WicMcsEZ7iONT1RZOid_9FlDrg1GxlGtNmpn4nEAmIxqnT0JrCESiRvzmuuXUibwx9xvHgIxhyVuAA9amlzaD1DL6jEc5B_0YnGKN7DO_l2Hkj9MbQZvU0beR-Lfcz8jxCjojODTYmWgbtu5E7YWIyC6dsjiBnTxc-svCsmQ".to_string(); let openid_signature = OpenIdSig { jwt_sig, @@ -169,7 +97,7 @@ async fn test_openid_signature_transaction_submission() { let zk_sig = ZkIdSignature { sig: ZkpOrOpenIdSig::OpenIdSig(openid_signature), jwt_header, - exp_timestamp_secs: 2000000000, + exp_timestamp_secs: 1707812836, ephemeral_pubkey: ephemeral_public_key, ephemeral_signature, }; @@ -194,7 +122,7 @@ async fn test_openid_signature_transaction_submission_fails_jwt_verification() { let pepper = Pepper::new([0u8; 31]); let idc = - IdCommitment::new_from_preimage("test_client_id", "sub", "test_account", &pepper).unwrap(); + IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); let sender_zkid_public_key = ZkIdPublicKey { iss: "https://accounts.google.com".to_owned(), idc, @@ -238,7 +166,7 @@ async fn test_openid_signature_transaction_submission_fails_jwt_verification() { let epk_blinder: [u8; 31] = [0u8; 31]; let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = 
"eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiJFVVRhSE9HdDcwRTNxbk9QMUJibnUzbE03QjR5TTdzaHZTb1NvdXF1VVJ3IiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); + let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxYlFsNF9YYzUtSXBDcFViS19BZVhwZ2Q2R1o0MGxVVjN1YjN5b19FTHhrIiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); let jwt_sig = "bad_signature".to_string(); let openid_signature = OpenIdSig { @@ -252,7 +180,7 @@ async fn test_openid_signature_transaction_submission_fails_jwt_verification() { let zk_sig = ZkIdSignature { sig: ZkpOrOpenIdSig::OpenIdSig(openid_signature), jwt_header, - exp_timestamp_secs: 2000000000, + exp_timestamp_secs: 1707812836, ephemeral_pubkey: ephemeral_public_key, ephemeral_signature, }; @@ -278,7 +206,7 @@ async fn test_openid_signature_transaction_submission_epk_expired() { let pepper = Pepper::new([0u8; 31]); let idc = - IdCommitment::new_from_preimage("test_client_id", "sub", "test_account", &pepper).unwrap(); + IdCommitment::new_from_preimage(&pepper, "test_client_id", "sub", "test_account").unwrap(); let sender_zkid_public_key = ZkIdPublicKey { iss: "https://accounts.google.com".to_owned(), idc, @@ -322,8 +250,8 @@ async fn test_openid_signature_transaction_submission_epk_expired() { let epk_blinder: [u8; 31] = [0u8; 31]; let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); - let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiJIVEtvTDVGTDFOb0N1Vm1faHF1UWk2ZzAxckxPNjVhT2hQck5BVWxETVNNIiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); - let jwt_sig = "yX7vGd87u3O78GyBU7IuKnimM69yusEURgN4bXsXhJsujWTGQfvwVrXemO_gmWkykw2Awx-Vr8sNFD7vbNdbkLIdRAxoYow0hMNNvpcvAKriOiRX3ObGEJjpJNbiexQt6hJLh5sSfOW0wCmD_82KsOrNqDvegj1y-d_uemgrX9-I52tLemO76bplJQdFx5X-q2pC8y5HV4VsSgsigxpPfZ7lIwSB5db6vubTgPIYvzXnAajZkpAR-uMRFo1RoOtukeQjGBVxt104DIBh0sLW_9EH2f9j_7L6YWBtilpLSWBea2qDJ1dGPG_BvpBqVm5hcVy8qHRnX6fJXKMXnXvTKQ".to_string(); + let jwt_payload = "eyJpc3MiOiJodHRwczovL2FjY291bnRzLmdvb2dsZS5jb20iLCJhdWQiOiJ0ZXN0X2NsaWVudF9pZCIsInN1YiI6InRlc3RfYWNjb3VudCIsImVtYWlsIjoidGVzdEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6dHJ1ZSwibm9uY2UiOiIxYlFsNF9YYzUtSXBDcFViS19BZVhwZ2Q2R1o0MGxVVjN1YjN5b19FTHhrIiwibmJmIjoxNzAyODA4OTM2LCJpYXQiOjE3MDQ5MDkyMzYsImV4cCI6MTcwNzgxMjgzNiwianRpIjoiZjEwYWZiZjBlN2JiOTcyZWI4ZmE2M2YwMjQ5YjBhMzRhMjMxZmM0MCJ9".to_string(); + let jwt_sig = "oBdOiIUc-ioG2-sHV1hWDLjgk4NrVf3z6V-HmgbOrVAz3PV1CwdfyTXsmVaCqLzOHzcbFB6ZRDxShs3aR7PsqdlhI0Dh8WrfU8kBkyk1FAmx2nST4SoSJROXsnusaOpNFpgSl96Rq3SXgr-yPBE9dEwTfD00vq2gH_fH1JAIeJJhc6WicMcsEZ7iONT1RZOid_9FlDrg1GxlGtNmpn4nEAmIxqnT0JrCESiRvzmuuXUibwx9xvHgIxhyVuAA9amlzaD1DL6jEc5B_0YnGKN7DO_l2Hkj9MbQZvU0beR-Lfcz8jxCjojODTYmWgbtu5E7YWIyC6dsjiBnTxc-svCsmQ".to_string(); let openid_signature = OpenIdSig { jwt_sig, @@ -350,3 +278,288 @@ async fn test_openid_signature_transaction_submission_epk_expired() 
{ .await .unwrap_err(); } + +#[tokio::test] +async fn test_groth16_signature_transaction_submission() { + let (mut swarm, mut cli, _faucet) = SwarmBuilder::new_local(4) + .with_aptos() + .build_with_cli(0) + .await; + test_setup(&mut swarm, &mut cli).await; + let mut info = swarm.aptos_public_info(); + + let pepper = Pepper::from_number(76); + let idc = IdCommitment::new_from_preimage( + &pepper, + "407408718192.apps.googleusercontent.com", + "sub", + "113990307082899718775", + ) + .unwrap(); + let sender_zkid_public_key = ZkIdPublicKey { + iss: "https://accounts.google.com".to_owned(), + idc, + }; + let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); + let account_address = info + .create_user_account_with_any_key(&sender_any_public_key) + .await + .unwrap(); + info.mint(account_address, 10_000_000_000).await.unwrap(); + + let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex + .decode_key( + "zkid test ephemeral private key", + "0x76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7" + .as_bytes() + .to_vec(), + ) + .unwrap(); + let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( + account_address, + AccountKey::from_private_key(ephemeral_private_key), + 0, + ); + let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); + + let recipient = info + .create_and_fund_user_account(20_000_000_000) + .await + .unwrap(); + + let raw_txn = info + .transaction_factory() + .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) + .sender(account_address) + .sequence_number(1) + .build(); + + let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); + let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); + + let a = G1Bytes::new_unchecked( + "11685701338011120485255682535216931952523490513574344095859176729155974193429", + "19570000702948951151001315672614758851000529478920585316943681012227747910337", + ) + .unwrap(); + let b = G2Bytes::new_unchecked( + [ + "10039243553158378944380740968043887743081233734014916979736214569065002261361", + "4926621746570487391149084476602889692047252928870676314074045787488022393462", + ], + [ + "8151326214925440719229499872086146990795191649649968979609056373308460653969", + "12483309147304635788397060225283577172417980480151834869358925058077916828359", + ], + ) + .unwrap(); + let c = G1Bytes::new_unchecked( + "17509024307642709963307435885289611077932619305068428354097243520217914637634", + "17824783754604065652634030354434350582834434348663254057492956883323214722668", + ) + .unwrap(); + let proof = Groth16Zkp::new(a, b, c); + + let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); + + let proof_sig = ephemeral_account.private_key().sign(&proof).unwrap(); + let ephem_proof_sig = EphemeralSignature::ed25519(proof_sig); + + let zk_sig = ZkIdSignature { + sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { + proof: proof.clone(), + non_malleability_signature: ephem_proof_sig, + }), + jwt_header, + exp_timestamp_secs: 1900255944, + ephemeral_pubkey: ephemeral_public_key, + ephemeral_signature, + }; + + let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); + + info!("Submit zero knowledge transaction"); + info.client() + .submit_without_serializing_response(&signed_txn) + .await + .unwrap(); +} + +#[tokio::test] +async fn test_groth16_signature_transaction_submission_proof_signature_check_fails() { + let (mut swarm, mut cli, _faucet) = 
SwarmBuilder::new_local(4) + .with_aptos() + .build_with_cli(0) + .await; + test_setup(&mut swarm, &mut cli).await; + let mut info = swarm.aptos_public_info(); + + let pepper = Pepper::from_number(76); + let idc = IdCommitment::new_from_preimage( + &pepper, + "407408718192.apps.googleusercontent.com", + "sub", + "113990307082899718775", + ) + .unwrap(); + let sender_zkid_public_key = ZkIdPublicKey { + iss: "https://accounts.google.com".to_owned(), + idc, + }; + let sender_any_public_key = AnyPublicKey::zkid(sender_zkid_public_key.clone()); + let account_address = info + .create_user_account_with_any_key(&sender_any_public_key) + .await + .unwrap(); + info.mint(account_address, 10_000_000_000).await.unwrap(); + + let ephemeral_private_key: Ed25519PrivateKey = EncodingType::Hex + .decode_key( + "zkid test ephemeral private key", + "0x76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7" + .as_bytes() + .to_vec(), + ) + .unwrap(); + let ephemeral_account: aptos_sdk::types::LocalAccount = LocalAccount::new( + account_address, + AccountKey::from_private_key(ephemeral_private_key), + 0, + ); + let ephemeral_public_key = EphemeralPublicKey::ed25519(ephemeral_account.public_key().clone()); + + let recipient = info + .create_and_fund_user_account(20_000_000_000) + .await + .unwrap(); + + let raw_txn = info + .transaction_factory() + .payload(aptos_stdlib::aptos_coin_transfer(recipient.address(), 100)) + .sender(account_address) + .sequence_number(1) + .build(); + + let sender_sig = ephemeral_account.private_key().sign(&raw_txn).unwrap(); + let ephemeral_signature = EphemeralSignature::ed25519(sender_sig); + + let a = G1Bytes::new_unchecked( + "11685701338011120485255682535216931952523490513574344095859176729155974193429", + "19570000702948951151001315672614758851000529478920585316943681012227747910337", + ) + .unwrap(); + let b = G2Bytes::new_unchecked( + [ + "10039243553158378944380740968043887743081233734014916979736214569065002261361", + "4926621746570487391149084476602889692047252928870676314074045787488022393462", + ], + [ + "8151326214925440719229499872086146990795191649649968979609056373308460653969", + "12483309147304635788397060225283577172417980480151834869358925058077916828359", + ], + ) + .unwrap(); + let c = G1Bytes::new_unchecked( + "17509024307642709963307435885289611077932619305068428354097243520217914637634", + "17824783754604065652634030354434350582834434348663254057492956883323214722668", + ) + .unwrap(); + let proof = Groth16Zkp::new(a, b, c); + + let jwt_header = "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_string(); + + let zk_sig = ZkIdSignature { + sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { + proof: proof.clone(), + non_malleability_signature: ephemeral_signature.clone(), // Wrong signature + }), + jwt_header, + exp_timestamp_secs: 1900255944, + ephemeral_pubkey: ephemeral_public_key, + ephemeral_signature, + }; + + let signed_txn = SignedTransaction::new_zkid(raw_txn, sender_zkid_public_key, zk_sig); + + info!("Submit zero knowledge transaction"); + info.client() + .submit_without_serializing_response(&signed_txn) + .await + .unwrap_err(); +} + +async fn test_setup(swarm: &mut LocalSwarm, cli: &mut CliTestFramework) { + let client = swarm.validators().next().unwrap().rest_client(); + let root_idx = cli.add_account_with_address_to_cli( + swarm.root_key(), + swarm.chain_info().root_account().address(), + ); + swarm + .wait_for_all_nodes_to_catchup_to_epoch(2, Duration::from_secs(60)) + .await + .expect("Epoch 2 taking too long to 
come!"); + + let iss = "https://accounts.google.com"; + let jwk = RSA_JWK { + kid:"test_jwk".to_owned(), + kty:"RSA".to_owned(), + alg:"RS256".to_owned(), + e:"AQAB".to_owned(), + n:"6S7asUuzq5Q_3U9rbs-PkDVIdjgmtgWreG5qWPsC9xXZKiMV1AiV9LXyqQsAYpCqEDM3XbfmZqGb48yLhb_XqZaKgSYaC_h2DjM7lgrIQAp9902Rr8fUmLN2ivr5tnLxUUOnMOc2SQtr9dgzTONYW5Zu3PwyvAWk5D6ueIUhLtYzpcB-etoNdL3Ir2746KIy_VUsDwAM7dhrqSK8U2xFCGlau4ikOTtvzDownAMHMrfE7q1B6WZQDAQlBmxRQsyKln5DIsKv6xauNsHRgBAKctUxZG8M4QJIx3S6Aughd3RZC4Ca5Ae9fd8L8mlNYBCrQhOZ7dS0f4at4arlLcajtw".to_owned(), + }; + + info!("Insert a JWK."); + let jwk_patch_script = format!( + r#" +script {{ +use aptos_framework::jwks; +use aptos_framework::aptos_governance; +use std::string::utf8; +fun main(core_resources: &signer) {{ + let framework_signer = aptos_governance::get_signer_testnet_only(core_resources, @0000000000000000000000000000000000000000000000000000000000000001); + let google_jwk_0 = jwks::new_rsa_jwk( + utf8(b"{}"), + utf8(b"RS256"), + utf8(b"AQAB"), + utf8(b"{}") + ); + let patches = vector[ + jwks::new_patch_remove_all(), + jwks::new_patch_upsert_jwk(b"{}", google_jwk_0), + ]; + jwks::set_patches(&framework_signer, patches); +}} +}} +"#, + jwk.kid, jwk.n, iss + ); + + let txn_summary = cli.run_script(root_idx, &jwk_patch_script).await.unwrap(); + debug!("txn_summary={:?}", txn_summary); + + info!("Use resource API to check the patch result."); + let patched_jwks = get_latest_jwkset(&client).await; + debug!("patched_jwks={:?}", patched_jwks); + + let expected_providers_jwks = AllProvidersJWKs { + entries: vec![ProviderJWKs { + issuer: b"https://accounts.google.com".to_vec(), + version: 0, + jwks: vec![JWKMoveStruct::from(JWK::RSA(jwk))], + }], + }; + assert_eq!(expected_providers_jwks, patched_jwks.jwks); + + let mut info = swarm.aptos_public_info(); + + // Increment sequence number since we patched a JWK + info.root_account().increment_sequence_number(); +} + +async fn get_latest_jwkset(rest_client: &Client) -> PatchedJWKs { + let maybe_response = rest_client + .get_account_resource_bcs::(AccountAddress::ONE, "0x1::jwks::PatchedJWKs") + .await; + let response = maybe_response.unwrap(); + response.into_inner() +} diff --git a/types/Cargo.toml b/types/Cargo.toml index 6297bb0fb4a29..20b73cf3b09b2 100644 --- a/types/Cargo.toml +++ b/types/Cargo.toml @@ -20,6 +20,7 @@ aptos-crypto-derive = { workspace = true } aptos-experimental-runtimes = { workspace = true } ark-bn254 = { workspace = true } ark-ff = { workspace = true } +ark-groth16 = { workspace = true } ark-serialize = { workspace = true } arr_macro = { workspace = true } base64 = { workspace = true } @@ -44,6 +45,7 @@ proptest-derive = { workspace = true, optional = true } rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +serde-big-array = { workspace = true } serde_bytes = { workspace = true } serde_json = { workspace = true } serde_with = { workspace = true } diff --git a/types/src/bn254_circom.rs b/types/src/bn254_circom.rs new file mode 100644 index 0000000000000..4505ad7edc3dc --- /dev/null +++ b/types/src/bn254_circom.rs @@ -0,0 +1,297 @@ +// Copyright © Aptos Foundation + +use crate::{ + jwks::rsa::RSA_JWK, + zkid::{ + ZkIdPublicKey, ZkIdSignature, MAX_EPK_BYTES, MAX_EXPIRY_HORIZON_SECS, MAX_ISS_BYTES, + MAX_JWT_HEADER_BYTES, + }, +}; +use anyhow::bail; +use aptos_crypto::{poseidon_bn254, CryptoMaterialError}; +use ark_bn254::{Fq, Fq2}; +use ark_ff::PrimeField; +use ark_groth16::{PreparedVerifyingKey, VerifyingKey}; +use ark_serialize::{CanonicalDeserialize, 
CanonicalSerialize}; +use once_cell::sync::Lazy; +use serde::{Deserialize, Serialize}; +use serde_big_array::BigArray; + +// TODO(zkid): Some of this stuff, if not all, belongs to the aptos-crypto crate + +pub const G1_PROJECTIVE_COMPRESSED_NUM_BYTES: usize = 32; +pub const G2_PROJECTIVE_COMPRESSED_NUM_BYTES: usize = 64; + +pub static DEVNET_VERIFYING_KEY: Lazy> = + Lazy::new(devnet_pvk); + +/// This will do the proper subgroup membership checks. +fn g1_projective_str_to_affine(x: &str, y: &str) -> anyhow::Result { + let g1_affine = G1Bytes::new_unchecked(x, y)?.deserialize_into_affine()?; + Ok(g1_affine) +} + +/// This will do the proper subgroup membership checks. +fn g2_projective_str_to_affine(x: [&str; 2], y: [&str; 2]) -> anyhow::Result { + let g2_affine = G2Bytes::new_unchecked(x, y)?.to_affine()?; + Ok(g2_affine) +} + +fn devnet_pvk() -> PreparedVerifyingKey { + // Convert the projective points to affine. + let alpha_g1 = g1_projective_str_to_affine( + "16672231080302629756836614130913173861541009360974119524782950408048375831661", + "1076145001163048025135533382088266750240489485046298539187659509488738517245", + ) + .unwrap(); + + let beta_g2 = g2_projective_str_to_affine( + [ + "1125365732643211423779651913319958385653115422366520671538751860820509133538", + "10055196097002324305342942912758079446356594743098794928675544207400347950287", + ], + [ + "10879716754714953827605171295191459580695363989155343984818520267224463075503", + "440220374146936557739765173414663598678359360031905981547938788314460390904", + ], + ) + .unwrap(); + + let gamma_g2 = g2_projective_str_to_affine( + [ + "10857046999023057135944570762232829481370756359578518086990519993285655852781", + "11559732032986387107991004021392285783925812861821192530917403151452391805634", + ], + [ + "8495653923123431417604973247489272438418190587263600148770280649306958101930", + "4082367875863433681332203403145435568316851327593401208105741076214120093531", + ], + ) + .unwrap(); + + let delta_g2 = g2_projective_str_to_affine( + [ + "19799867077440075892798570892827678991452882191483986973420950266983588147526", + "7261406229996412667156189606964369006242293247396567701023787052439810543589", + ], + [ + "15618356441847575237880159451782511420373837463064250522093342825487687558812", + "20490123502151072560031041764173142979409281632225526952209676367033524880945", + ], + ) + .unwrap(); + + let mut gamma_abc_g1 = Vec::new(); + for points in [ + g1_projective_str_to_affine( + "16119992548622948701752093197035559180088659648245261797962160821523395857787", + "10895012769720065848112628781322097989082134121307195027616506940584635557433", + ) + .unwrap(), + g1_projective_str_to_affine( + "12743680909720798417558674763081930985009983383780261525309863653205478749832", + "10808093222645961212778297519773755506856954740368509958745099866520706196565", + ) + .unwrap(), + ] { + gamma_abc_g1.push(points); + } + + let vk = VerifyingKey { + alpha_g1, + beta_g2, + gamma_g2, + delta_g2, + gamma_abc_g1, + }; + + PreparedVerifyingKey::from(vk) +} + +fn parse_field_element(s: &str) -> Result { + s.parse::() + .map_err(|_e| CryptoMaterialError::DeserializationError) +} + +macro_rules! 
serialize { + ($obj:expr, $method:ident) => {{ + let mut buf = vec![]; + $obj.$method(&mut buf)?; + buf + }}; +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)] +pub struct G1Bytes(pub(crate) [u8; G1_PROJECTIVE_COMPRESSED_NUM_BYTES]); + +impl G1Bytes { + pub fn new_unchecked(x: &str, y: &str) -> anyhow::Result { + let g1 = ark_bn254::G1Projective::new_unchecked( + parse_field_element(x)?, + parse_field_element(y)?, + parse_field_element("1")?, + ); + + let bytes: Vec = serialize!(g1, serialize_compressed); + Self::new_from_vec(bytes) + } + + pub fn new_from_vec(vec: Vec) -> anyhow::Result { + if vec.len() == G1_PROJECTIVE_COMPRESSED_NUM_BYTES { + let mut bytes = [0; G1_PROJECTIVE_COMPRESSED_NUM_BYTES]; + bytes.copy_from_slice(&vec); + Ok(Self(bytes)) + } else { + bail!( + "Serialized BN254 G1 must have exactly {} bytes", + G1_PROJECTIVE_COMPRESSED_NUM_BYTES + ) + } + } + + pub fn deserialize_into_affine(&self) -> Result { + self.try_into() + } +} + +impl TryInto for &G1Bytes { + type Error = CryptoMaterialError; + + fn try_into(self) -> Result { + ark_bn254::G1Projective::deserialize_compressed(self.0.as_slice()) + .map_err(|_| CryptoMaterialError::DeserializationError) + } +} + +impl TryInto for &G1Bytes { + type Error = CryptoMaterialError; + + fn try_into(self) -> Result { + let g1_projective: ark_bn254::G1Projective = self.try_into()?; + Ok(g1_projective.into()) + } +} + +#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)] +pub struct G2Bytes(#[serde(with = "BigArray")] pub(crate) [u8; G2_PROJECTIVE_COMPRESSED_NUM_BYTES]); + +impl G2Bytes { + pub fn new_unchecked(x: [&str; 2], y: [&str; 2]) -> anyhow::Result { + let g2 = ark_bn254::G2Projective::new_unchecked( + Fq2::new(parse_field_element(x[0])?, parse_field_element(x[1])?), + Fq2::new(parse_field_element(y[0])?, parse_field_element(y[1])?), + Fq2::new(parse_field_element("1")?, parse_field_element("0")?), + ); + + let bytes: Vec = serialize!(g2, serialize_compressed); + Self::new_from_vec(bytes) + } + + pub fn new_from_vec(vec: Vec) -> anyhow::Result { + if vec.len() == G2_PROJECTIVE_COMPRESSED_NUM_BYTES { + let mut bytes = [0; G2_PROJECTIVE_COMPRESSED_NUM_BYTES]; + bytes.copy_from_slice(&vec); + Ok(Self(bytes)) + } else { + bail!( + "Serialized BN254 G2 must have exactly {} bytes", + G2_PROJECTIVE_COMPRESSED_NUM_BYTES + ) + } + } + + pub fn to_affine(&self) -> Result { + self.try_into() + } +} + +impl TryInto for &G2Bytes { + type Error = CryptoMaterialError; + + fn try_into(self) -> Result { + ark_bn254::G2Projective::deserialize_compressed(self.0.as_slice()) + .map_err(|_| CryptoMaterialError::DeserializationError) + } +} + +impl TryInto for &G2Bytes { + type Error = CryptoMaterialError; + + fn try_into(self) -> Result { + let g2_projective: ark_bn254::G2Projective = self.try_into()?; + Ok(g2_projective.into()) + } +} + +pub fn get_public_inputs_hash( + sig: &ZkIdSignature, + pk: &ZkIdPublicKey, + jwk: &RSA_JWK, +) -> anyhow::Result { + // Add the epk as padded and packed scalars + let mut frs = poseidon_bn254::pad_and_pack_bytes_to_scalars_with_len( + sig.ephemeral_pubkey.to_bytes().as_slice(), + MAX_EPK_BYTES, + )?; + + // Add the id_commitment as a scalar + frs.push(ark_bn254::Fr::from_le_bytes_mod_order(&pk.idc.0)); + + // Add the exp_timestamp_secs as a scalar + frs.push(ark_bn254::Fr::from(sig.exp_timestamp_secs)); + + // Add the epk lifespan as a scalar + frs.push(ark_bn254::Fr::from(MAX_EXPIRY_HORIZON_SECS)); + + // Add the hash of the iss (formatted key-value pair string). 
+    let formatted_iss = format!("\"iss\":\"{}\",", pk.iss);
+    frs.push(poseidon_bn254::pad_and_hash_string(
+        &formatted_iss,
+        MAX_ISS_BYTES,
+    )?);
+
+    // Add the hash of the jwt_header with the "." separator appended
+    let jwt_header_with_separator = format!("{}.", sig.jwt_header);
+    frs.push(poseidon_bn254::pad_and_hash_string(
+        &jwt_header_with_separator,
+        MAX_JWT_HEADER_BYTES,
+    )?);
+
+    frs.push(jwk.to_poseidon_scalar()?);
+
+    poseidon_bn254::hash_scalars(frs)
+}
+
+#[cfg(test)]
+mod test {
+    use crate::bn254_circom::{
+        G1Bytes, G2Bytes, G1_PROJECTIVE_COMPRESSED_NUM_BYTES, G2_PROJECTIVE_COMPRESSED_NUM_BYTES,
+    };
+
+    #[test]
+    pub fn test_bn254_serialized_sizes() {
+        let g1 = G1Bytes::new_unchecked(
+            "16672231080302629756836614130913173861541009360974119524782950408048375831661",
+            "1076145001163048025135533382088266750240489485046298539187659509488738517245",
+        )
+        .unwrap();
+
+        let g2 = G2Bytes::new_unchecked(
+            [
+                "1125365732643211423779651913319958385653115422366520671538751860820509133538",
+                "10055196097002324305342942912758079446356594743098794928675544207400347950287",
+            ],
+            [
+                "10879716754714953827605171295191459580695363989155343984818520267224463075503",
+                "440220374146936557739765173414663598678359360031905981547938788314460390904",
+            ],
+        )
+        .unwrap();
+
+        let g1_bytes = bcs::to_bytes(&g1).unwrap();
+        assert_eq!(g1_bytes.len(), G1_PROJECTIVE_COMPRESSED_NUM_BYTES);
+
+        let g2_bytes = bcs::to_bytes(&g2).unwrap();
+        assert_eq!(g2_bytes.len(), G2_PROJECTIVE_COMPRESSED_NUM_BYTES);
+    }
+}
diff --git a/types/src/jwks/rsa.rs b/types/src/jwks/rsa.rs
index 657b0a881790d..42337b13325b2 100644
--- a/types/src/jwks/rsa.rs
+++ b/types/src/jwks/rsa.rs
@@ -3,13 +3,17 @@
 #[cfg(test)]
 use crate::move_any::Any as MoveAny;
 use crate::{move_any::AsMoveAny, move_utils::as_move_value::AsMoveValue, zkid::Claims};
-use anyhow::{anyhow, ensure, Result};
+use anyhow::{anyhow, bail, ensure, Result};
+use aptos_crypto::poseidon_bn254;
+use base64::URL_SAFE_NO_PAD;
 use jsonwebtoken::{Algorithm, DecodingKey, TokenData, Validation};
 use move_core_types::value::{MoveStruct, MoveValue};
 use serde::{Deserialize, Serialize};
 #[cfg(test)]
 use std::str::FromStr;
 
+pub const RSA_MODULUS_BYTES: usize = 256;
+
 /// Move type `0x1::jwks::RSA_JWK` in rust.
 #[allow(non_camel_case_types)]
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
@@ -33,6 +37,25 @@ impl RSA_JWK {
         }
     }
 
+    // TODO(zkid): Move this to aptos-crypto so other services can use this
+    pub fn to_poseidon_scalar(&self) -> Result<ark_bn254::Fr> {
+        let mut modulus = base64::decode_config(&self.n, URL_SAFE_NO_PAD)?;
+        // The circuit only supports RSA256
+        if modulus.len() != RSA_MODULUS_BYTES {
+            bail!("Wrong modulus size, must be {} bytes", RSA_MODULUS_BYTES);
+        }
+        modulus.reverse(); // This is done to match the circuit, which requires the modulus in a very specific format due to how RSA verification is implemented
+        // TODO(zkid): finalize the jwk hashing scheme.
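+        // A 256-byte modulus yields ceil(256 / 24) = 11 scalars (ten 24-byte chunks plus a
+        // 16-byte tail); with the length scalar pushed below, 12 scalars go into Poseidon.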
+        let mut scalars = modulus
+            .chunks(24) // Pack 3 64-bit limbs per scalar, so chunk into 24 bytes per scalar
+            .map(|chunk| {
+                poseidon_bn254::pack_bytes_to_one_scalar(chunk).expect("chunk converts to scalar")
+            })
+            .collect::<Vec<_>>();
+        scalars.push(ark_bn254::Fr::from(RSA_MODULUS_BYTES as i32));
+        poseidon_bn254::hash_scalars(scalars)
+    }
+
     pub fn verify_signature(&self, jwt_token: &str) -> Result<TokenData<Claims>> {
         let mut validation = Validation::new(Algorithm::RS256);
         validation.validate_exp = false;
diff --git a/types/src/lib.rs b/types/src/lib.rs
index 73f5f493dab12..b443cdf32903e 100644
--- a/types/src/lib.rs
+++ b/types/src/lib.rs
@@ -61,6 +61,7 @@ pub mod account_view;
 pub mod aggregate_signature;
 pub mod aggregator;
 pub mod block_executor;
+pub mod bn254_circom;
 pub mod bytes;
 pub mod state_store;
 #[cfg(test)]
diff --git a/types/src/on_chain_config/aptos_features.rs b/types/src/on_chain_config/aptos_features.rs
index e5fe29864572e..a72027b926c04 100644
--- a/types/src/on_chain_config/aptos_features.rs
+++ b/types/src/on_chain_config/aptos_features.rs
@@ -53,8 +53,8 @@ pub enum FeatureFlag {
     BN254_STRUCTURES = 43,
     WEBAUTHN_SIGNATURE = 44,
     RECONFIGURE_WITH_DKG = 45,
-    ZK_ID_SIGNATURE = 46,
-    OPEN_ID_SIGNATURE = 47,
+    ZK_ID_SIGNATURES = 46,
+    ZK_ID_ZKLESS_SIGNATURE = 47,
     REMOVE_DETAILED_ERROR_FROM_HASH = 48,
 }
@@ -150,14 +150,20 @@ impl Features {
         self.is_enabled(FeatureFlag::RESOURCE_GROUPS_CHARGE_AS_SIZE_SUM)
     }
 
-    /// Whether the extra verification of TXN authenticators in the Move prologue is enabled.
-    /// (Necessary for fully-validating zkID transactions.)
+    /// Whether the zkID feature is enabled, specifically the ZK path with ZKP-based signatures.
+    /// The ZK-less path is controlled via a different `FeatureFlag::ZK_ID_ZKLESS_SIGNATURE` flag.
     pub fn is_zkid_enabled(&self) -> bool {
-        self.is_enabled(FeatureFlag::ZK_ID_SIGNATURE)
+        self.is_enabled(FeatureFlag::ZK_ID_SIGNATURES)
     }
 
-    pub fn is_open_id_signature_enabled(&self) -> bool {
-        self.is_enabled(FeatureFlag::OPEN_ID_SIGNATURE)
+    /// If `FeatureFlag::ZK_ID_SIGNATURES` is enabled, this feature additionally allows for a "ZK-less
+    /// path" where the blockchain can verify OpenID signatures directly. This ZK-less mode exists
+    /// for two reasons. First, it gives us a simpler way to test the feature. Second, it acts as a
+    /// safety precaution in case of emergency (e.g., if the ZK-based signatures must be temporarily
+    /// turned off due to a zero-day exploit, the ZK-less path will still allow users to transact,
+    /// but without privacy).
+    pub fn is_zkid_zkless_enabled(&self) -> bool {
+        self.is_enabled(FeatureFlag::ZK_ID_ZKLESS_SIGNATURE)
     }
 
     pub fn is_remove_detailed_error_from_hash_enabled(&self) -> bool {
diff --git a/types/src/transaction/authenticator.rs b/types/src/transaction/authenticator.rs
index 374e6f54af1bc..ca146f6650b38 100644
--- a/types/src/transaction/authenticator.rs
+++ b/types/src/transaction/authenticator.rs
@@ -1006,7 +1006,9 @@ impl AnySignature {
             (Self::WebAuthn { signature }, _) => signature.verify(message, public_key),
             (Self::ZkId { signature }, AnyPublicKey::ZkId { public_key }) => {
                 match &signature.sig {
-                    ZkpOrOpenIdSig::Groth16Zkp(_) => {},
+                    ZkpOrOpenIdSig::Groth16Zkp(proof) => {
+                        proof.verify_non_malleability(&signature.ephemeral_pubkey)?
+ }, ZkpOrOpenIdSig::OpenIdSig(oidc_sig) => oidc_sig.verify_jwt_claims( signature.exp_timestamp_secs, &signature.ephemeral_pubkey, @@ -1127,8 +1129,11 @@ impl TryFrom<&[u8]> for EphemeralPublicKey { mod tests { use super::*; use crate::{ + bn254_circom::{G1Bytes, G2Bytes}, transaction::{webauthn::AssertionSignature, SignedTransaction}, - zkid::{IdCommitment, OpenIdSig, Pepper, EPK_BLINDER_NUM_BYTES}, + zkid::{ + Groth16Zkp, IdCommitment, OpenIdSig, Pepper, SignedGroth16Zkp, EPK_BLINDER_NUM_BYTES, + }, }; use aptos_crypto::{ ed25519::Ed25519PrivateKey, @@ -1683,7 +1688,7 @@ mod tests { "iss": "{}", "{}": "{}", "aud": "{}", - "nonce": "7BgjE1MZgLKY_4NwVWoJKUKPgpBcB0espRwKYASGkgw", + "nonce": "uxxgjhTml_fhiFwyWCyExJTD3J2YK3MoVDOYdnxieiE", "exp": 1311281970, "iat": 1311280970, "name": "Jane Doe", @@ -1697,7 +1702,7 @@ mod tests { iss, uid_key, uid_val, aud ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: iss.to_owned(), @@ -1712,7 +1717,7 @@ mod tests { &jwt_payload_json, ); let verification_result = signed_txn.verify_signature(); - assert!(verification_result.is_ok()); + verification_result.unwrap(); } #[test] @@ -1733,7 +1738,7 @@ mod tests { "iss": "{}", "{}": "{}", "aud": "{}", - "nonce": "7BgjE1MZgLKY_4NwVWoJKUKPgpBcB0espRwKYASGkgw", + "nonce": "uxxgjhTml_fhiFwyWCyExJTD3J2YK3MoVDOYdnxieiE", "exp": 1311281970, "iat": 1311280970, "name": "Jane Doe", @@ -1747,7 +1752,7 @@ mod tests { iss, uid_key, uid_val, aud ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: iss.to_owned(), @@ -1781,7 +1786,7 @@ mod tests { "iss": "{}", "{}": "{}", "aud": "{}", - "nonce": "7BgjE1MZgLKY_4NwVWoJKUKPgpBcB0espRwKYASGkgw", + "nonce": "uxxgjhTml_fhiFwyWCyExJTD3J2YK3MoVDOYdnxieiE", "exp": 1311281970, "iat": 1311280970, "name": "Jane Doe", @@ -1795,7 +1800,7 @@ mod tests { iss, uid_key, uid_val, aud ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: iss.to_owned(), @@ -1830,7 +1835,7 @@ mod tests { "iss": "{}", "{}": "{}", "aud": "{}", - "nonce": "7BgjE1MZgLKY_4NwVWoJKUKPgpBcB0espRwKYASGkgw", + "nonce": "uxxgjhTml_fhiFwyWCyExJTD3J2YK3MoVDOYdnxieiE", "exp": {}, "iat": 1311280970, "name": "Jane Doe", @@ -1844,7 +1849,7 @@ mod tests { iss, uid_key, "bad_uid_val", aud, exp_timestamp_secs ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: iss.to_owned(), @@ -1893,7 +1898,7 @@ mod tests { iss, uid_key, uid_val, aud, exp_timestamp_secs ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: iss.to_owned(), @@ -1915,7 +1920,7 @@ mod tests { fn verify_zkid_open_id_single_key_auth_fails_with_bad_ephemeral_signature() { let pepper = Pepper::from_number(76); let idc = - IdCommitment::new_from_preimage("s6BhdRkqt3", 
"sub", "248289761001", &pepper).unwrap(); + IdCommitment::new_from_preimage(&pepper, "s6BhdRkqt3", "sub", "248289761001").unwrap(); let sender_zkid_public_key = ZkIdPublicKey { iss: "https://server.example.com".to_owned(), idc, @@ -2017,7 +2022,7 @@ mod tests { iss, uid_key, uid_val, aud, exp_timestamp_secs ); let idc = - IdCommitment::new_from_preimage(aud, uid_key, uid_val, &Pepper::from_number(pepper)) + IdCommitment::new_from_preimage(&Pepper::from_number(pepper), aud, uid_key, uid_val) .unwrap(); let zkid_pubkey = ZkIdPublicKey { iss: "bad_iss".to_owned(), @@ -2035,6 +2040,88 @@ mod tests { assert!(verification_result.is_err()); } + #[test] + fn test_groth16_proof_verification() { + let a = G1Bytes::new_unchecked( + "11685701338011120485255682535216931952523490513574344095859176729155974193429", + "19570000702948951151001315672614758851000529478920585316943681012227747910337", + ) + .unwrap(); + let b = G2Bytes::new_unchecked( + [ + "10039243553158378944380740968043887743081233734014916979736214569065002261361", + "4926621746570487391149084476602889692047252928870676314074045787488022393462", + ], + [ + "8151326214925440719229499872086146990795191649649968979609056373308460653969", + "12483309147304635788397060225283577172417980480151834869358925058077916828359", + ], + ) + .unwrap(); + let c = G1Bytes::new_unchecked( + "17509024307642709963307435885289611077932619305068428354097243520217914637634", + "17824783754604065652634030354434350582834434348663254057492956883323214722668", + ) + .unwrap(); + let proof = Groth16Zkp::new(a, b, c); + + let sender = Ed25519PrivateKey::generate_for_testing(); + let sender_pub = sender.public_key(); + let sender_auth_key = AuthenticationKey::ed25519(&sender_pub); + let sender_addr = sender_auth_key.account_address(); + let raw_txn = crate::test_helpers::transaction_test_helpers::get_test_signed_transaction( + sender_addr, + 0, + &sender, + sender.public_key(), + None, + 0, + 0, + None, + ) + .into_raw_transaction(); + + let sender_sig = sender.sign(&raw_txn).unwrap(); + + let epk = EphemeralPublicKey::ed25519(sender.public_key()); + let es = EphemeralSignature::ed25519(sender_sig); + + let proof_sig = sender.sign(&proof).unwrap(); + let ephem_proof_sig = EphemeralSignature::ed25519(proof_sig); + ephem_proof_sig.verify(&proof, &epk).unwrap(); + let zk_sig = ZkIdSignature { + sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { + proof: proof.clone(), + non_malleability_signature: ephem_proof_sig, + }), + jwt_header: "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_owned(), + exp_timestamp_secs: 1900255944, + ephemeral_pubkey: epk, + ephemeral_signature: es, + }; + + let pepper = Pepper::from_number(76); + let addr_seed = IdCommitment::new_from_preimage( + &pepper, + "407408718192.apps.googleusercontent.com", + "sub", + "113990307082899718775", + ) + .unwrap(); + + let zk_pk = ZkIdPublicKey { + iss: "https://accounts.google.com".to_owned(), + idc: addr_seed, + }; + + let sk_auth = + SingleKeyAuthenticator::new(AnyPublicKey::zkid(zk_pk), AnySignature::zkid(zk_sig)); + let account_auth = AccountAuthenticator::single_key(sk_auth); + let signed_txn = SignedTransaction::new_single_sender(raw_txn, account_auth); + let verification_result = signed_txn.verify_signature(); + verification_result.unwrap(); + } + fn zkid_test_setup( zkid_pubkey: ZkIdPublicKey, uid_key: &str, diff --git a/types/src/zkid.rs b/types/src/zkid.rs index 98b62bdeea8f7..180a19d6c2369 100644 --- a/types/src/zkid.rs +++ b/types/src/zkid.rs @@ -1,6 +1,8 @@ // Copyright © Aptos 
Foundation
 
 use crate::{
+    bn254_circom::{G1Bytes, G2Bytes, DEVNET_VERIFYING_KEY},
+    chain_id::ChainId,
     jwks::rsa::RSA_JWK,
     on_chain_config::CurrentTimeMicroseconds,
     transaction::{
@@ -10,9 +12,11 @@ use crate::{
         SignedTransaction,
     },
 };
-use anyhow::{anyhow, ensure, Context, Ok, Result};
+use anyhow::{bail, ensure, Context, Result};
 use aptos_crypto::{poseidon_bn254, CryptoMaterialError, ValidCryptoMaterial};
+use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher};
 use ark_bn254;
+use ark_groth16::{Groth16, Proof};
 use ark_serialize::CanonicalSerialize;
 use base64::{URL_SAFE, URL_SAFE_NO_PAD};
 use serde::{Deserialize, Serialize};
@@ -32,12 +36,16 @@ pub const IDC_NUM_BYTES: usize = 32;
 
 // TODO(ZkIdGroth16Zkp): add some static asserts here that these don't exceed the MAX poseidon input sizes
 // TODO(ZkIdGroth16Zkp): determine what our circuit will accept
-pub const MAX_EPK_BYTES: usize = 93; // Supports public key lengths of up to 93 bytes.
-pub const MAX_ISS_BYTES: usize = 248;
-pub const MAX_AUD_VAL_BYTES: usize = 248;
-pub const MAX_UID_KEY_BYTES: usize = 248;
-pub const MAX_UID_VAL_BYTES: usize = 248;
-pub const MAX_JWT_HEADER_BYTES: usize = 248;
+/// We support ephemeral public key lengths of up to 93 bytes.
+pub const MAX_EPK_BYTES: usize = 3 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
+// The values here are consistent with our public inputs hashing scheme.
+// Everything is a multiple of `poseidon_bn254::BYTES_PACKED_PER_SCALAR` to maximize the input
+// sizes that can be hashed.
+pub const MAX_ISS_BYTES: usize = 5 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
+pub const MAX_AUD_VAL_BYTES: usize = 4 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
+pub const MAX_UID_KEY_BYTES: usize = 2 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
+pub const MAX_UID_VAL_BYTES: usize = 4 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
+pub const MAX_JWT_HEADER_BYTES: usize = 8 * poseidon_bn254::BYTES_PACKED_PER_SCALAR;
 
 pub const MAX_ZK_PUBLIC_KEY_BYTES: usize = MAX_ISS_BYTES + MAX_EPK_BYTES;
 
@@ -49,7 +57,7 @@ pub const MAX_ZK_SIGNATURE_BYTES: usize = 2048;
 
 pub const MAX_ZK_ID_AUTHENTICATORS_ALLOWED: usize = 10;
 
 // How far in the future from the JWT's issued-at time (iat) the EPK expiry can be set.
-pub const MAX_EXPIRY_HORIZON_SECS: u64 = 1728000000; // 20000 days TODO(zkid): finalize this value
+pub const MAX_EXPIRY_HORIZON_SECS: u64 = 100255944; // ~1160 days TODO(zkid): finalize this value
 
 #[derive(Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
 pub struct JwkId {
@@ -113,10 +121,10 @@ impl OpenIdSig {
 
         ensure!(
             IdCommitment::new_from_preimage(
+                &self.pepper,
                 &claims.oidc_claims.aud,
                 &self.uid_key,
-                &uid_val,
-                &self.pepper
+                &uid_val
             )?
             .eq(&pk.idc),
             "Address IDC verification failed"
@@ -225,13 +233,46 @@ impl Claims {
     }
 }
 
-pub type G1 = Vec<String>;
-pub type G2 = Vec<Vec<String>>;
-#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)]
+#[derive(
+    Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize, CryptoHasher, BCSCryptoHash,
+)]
 pub struct Groth16Zkp {
-    a: G1,
-    b: G2,
-    c: G1,
+    a: G1Bytes,
+    b: G2Bytes,
+    c: G1Bytes,
+}
+
+#[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)]
+pub struct SignedGroth16Zkp {
+    pub proof: Groth16Zkp,
+    /// The signature of the proof signed by the private key of the `ephemeral_pubkey`.
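+    /// Signing the proof binds it to the ephemeral key pair, so the proof cannot be stripped
+    /// off and replayed alongside a different ephemeral signature.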
+    pub non_malleability_signature: EphemeralSignature,
+    // TODO: add training_wheels_signature: EphemeralSignature,
+}
+
+impl SignedGroth16Zkp {
+    pub fn verify_non_malleability(&self, pub_key: &EphemeralPublicKey) -> Result<()> {
+        self.non_malleability_signature.verify(&self.proof, pub_key)
+    }
+
+    pub fn verify_proof(&self, public_inputs_hash: ark_bn254::Fr, chain_id: ChainId) -> Result<()> {
+        let vk = match chain_id.is_mainnet() {
+            true => {
+                bail!("verifying key for mainnet missing")
+            },
+            false => &DEVNET_VERIFYING_KEY,
+        };
+        let proof: Proof<ark_bn254::Bn254> = Proof {
+            a: self.proof.a.deserialize_into_affine()?,
+            b: self.proof.b.to_affine()?,
+            c: self.proof.c.deserialize_into_affine()?,
+        };
+        let result = Groth16::<ark_bn254::Bn254>::verify_proof(vk, &proof, &[public_inputs_hash])?;
+        if !result {
+            bail!("groth16 proof verification failed")
+        }
+        Ok(())
+    }
 }
 
 impl TryFrom<&[u8]> for Groth16Zkp {
@@ -242,11 +283,36 @@ impl TryFrom<&[u8]> for Groth16Zkp {
     }
 }
 
+impl Groth16Zkp {
+    pub fn new(a: G1Bytes, b: G2Bytes, c: G1Bytes) -> Self {
+        Groth16Zkp { a, b, c }
+    }
+
+    pub fn verify_proof(&self, public_inputs_hash: ark_bn254::Fr, chain_id: ChainId) -> Result<()> {
+        let vk = match chain_id.is_mainnet() {
+            true => {
+                bail!("verifying key for mainnet missing")
+            },
+            false => &DEVNET_VERIFYING_KEY,
+        };
+        let proof: Proof<ark_bn254::Bn254> = Proof {
+            a: self.a.deserialize_into_affine()?,
+            b: self.b.to_affine()?,
+            c: self.c.deserialize_into_affine()?,
+        };
+        let result = Groth16::<ark_bn254::Bn254>::verify_proof(vk, &proof, &[public_inputs_hash])?;
+        if !result {
+            bail!("groth16 proof verification failed")
+        }
+        Ok(())
+    }
+}
+
 /// Allows us to support direct verification of OpenID signatures, in the rare case that we would
 /// need to turn off ZK proofs due to a bug in the circuit.
 #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Hash, Serialize)]
 pub enum ZkpOrOpenIdSig {
-    Groth16Zkp(Groth16Zkp),
+    Groth16Zkp(SignedGroth16Zkp),
     OpenIdSig(OpenIdSig),
 }
 
@@ -305,7 +371,7 @@ impl ZkIdSignature {
         let expiry_time = seconds_from_epoch(self.exp_timestamp_secs);
 
         if block_time > expiry_time {
-            Err(anyhow!("zkID Signature is expired"))
+            bail!("zkID Signature is expired");
         } else {
             Ok(())
         }
@@ -324,7 +390,7 @@ impl Pepper {
         &self.0
     }
 
-    #[cfg(test)]
+    // Used for testing. #[cfg(test)] doesn't seem to allow for use in smoke tests.
pub fn from_number(num: u128) -> Self { let big_int = num_bigint::BigUint::from(num); let bytes: Vec = big_int.to_bytes_le(); @@ -339,10 +405,10 @@ pub struct IdCommitment(pub(crate) [u8; IDC_NUM_BYTES]); impl IdCommitment { pub fn new_from_preimage( + pepper: &Pepper, aud: &str, uid_key: &str, uid_val: &str, - pepper: &Pepper, ) -> Result { let aud_val_hash = poseidon_bn254::pad_and_hash_string(aud, MAX_AUD_VAL_BYTES)?; let uid_key_hash = poseidon_bn254::pad_and_hash_string(uid_key, MAX_UID_KEY_BYTES)?; @@ -350,10 +416,10 @@ impl IdCommitment { let pepper_scalar = poseidon_bn254::pack_bytes_to_one_scalar(pepper.0.as_slice())?; let fr = poseidon_bn254::hash_scalars(vec![ + pepper_scalar, aud_val_hash, - uid_key_hash, uid_val_hash, - pepper_scalar, + uid_key_hash, ])?; let mut idc_bytes = [0u8; IDC_NUM_BYTES]; @@ -433,3 +499,105 @@ fn base64url_to_str(b64: &str) -> Result { fn seconds_from_epoch(secs: u64) -> SystemTime { UNIX_EPOCH + Duration::from_secs(secs) } + +#[cfg(test)] +mod test { + use crate::{ + bn254_circom::get_public_inputs_hash, + chain_id::ChainId, + jwks::rsa::RSA_JWK, + transaction::authenticator::{AuthenticationKey, EphemeralPublicKey, EphemeralSignature}, + zkid::{ + G1Bytes, G2Bytes, Groth16Zkp, IdCommitment, Pepper, SignedGroth16Zkp, ZkIdPublicKey, + ZkIdSignature, ZkpOrOpenIdSig, + }, + }; + use aptos_crypto::{ed25519::Ed25519PrivateKey, PrivateKey, SigningKey, Uniform}; + + #[test] + fn test_groth16_proof_verification() { + let a = G1Bytes::new_unchecked( + "11685701338011120485255682535216931952523490513574344095859176729155974193429", + "19570000702948951151001315672614758851000529478920585316943681012227747910337", + ) + .unwrap(); + let b = G2Bytes::new_unchecked( + [ + "10039243553158378944380740968043887743081233734014916979736214569065002261361", + "4926621746570487391149084476602889692047252928870676314074045787488022393462", + ], + [ + "8151326214925440719229499872086146990795191649649968979609056373308460653969", + "12483309147304635788397060225283577172417980480151834869358925058077916828359", + ], + ) + .unwrap(); + let c = G1Bytes::new_unchecked( + "17509024307642709963307435885289611077932619305068428354097243520217914637634", + "17824783754604065652634030354434350582834434348663254057492956883323214722668", + ) + .unwrap(); + let proof = Groth16Zkp::new(a, b, c); + + let sender = Ed25519PrivateKey::generate_for_testing(); + let sender_pub = sender.public_key(); + let sender_auth_key = AuthenticationKey::ed25519(&sender_pub); + let sender_addr = sender_auth_key.account_address(); + let raw_txn = crate::test_helpers::transaction_test_helpers::get_test_signed_transaction( + sender_addr, + 0, + &sender, + sender.public_key(), + None, + 0, + 0, + None, + ) + .into_raw_transaction(); + + let sender_sig = sender.sign(&raw_txn).unwrap(); + + let epk = EphemeralPublicKey::ed25519(sender.public_key()); + let es = EphemeralSignature::ed25519(sender_sig); + + let proof_sig = sender.sign(&proof).unwrap(); + let ephem_proof_sig = EphemeralSignature::ed25519(proof_sig); + let zk_sig = ZkIdSignature { + sig: ZkpOrOpenIdSig::Groth16Zkp(SignedGroth16Zkp { + proof: proof.clone(), + non_malleability_signature: ephem_proof_sig, + }), + jwt_header: "eyJhbGciOiJSUzI1NiIsImtpZCI6InRlc3RfandrIiwidHlwIjoiSldUIn0".to_owned(), + exp_timestamp_secs: 1900255944, + ephemeral_pubkey: epk, + ephemeral_signature: es, + }; + + let pepper = Pepper::from_number(76); + let addr_seed = IdCommitment::new_from_preimage( + &pepper, + "407408718192.apps.googleusercontent.com", + "sub", + 
"113990307082899718775", + ) + .unwrap(); + + let zk_pk = ZkIdPublicKey { + iss: "https://accounts.google.com".to_owned(), + idc: addr_seed, + }; + let jwk = RSA_JWK { + kid:"1".to_owned(), + kty:"RSA".to_owned(), + alg:"RS256".to_owned(), + e:"AQAB".to_owned(), + n:"6S7asUuzq5Q_3U9rbs-PkDVIdjgmtgWreG5qWPsC9xXZKiMV1AiV9LXyqQsAYpCqEDM3XbfmZqGb48yLhb_XqZaKgSYaC_h2DjM7lgrIQAp9902Rr8fUmLN2ivr5tnLxUUOnMOc2SQtr9dgzTONYW5Zu3PwyvAWk5D6ueIUhLtYzpcB-etoNdL3Ir2746KIy_VUsDwAM7dhrqSK8U2xFCGlau4ikOTtvzDownAMHMrfE7q1B6WZQDAQlBmxRQsyKln5DIsKv6xauNsHRgBAKctUxZG8M4QJIx3S6Aughd3RZC4Ca5Ae9fd8L8mlNYBCrQhOZ7dS0f4at4arlLcajtw".to_owned(), + }; + + let public_inputs_hash = get_public_inputs_hash(&zk_sig, &zk_pk, &jwk).unwrap(); + + proof + .verify_proof(public_inputs_hash, ChainId::test()) + .unwrap(); + } +} From 4e3b559eb407cd6389b5ae885110935f50381e89 Mon Sep 17 00:00:00 2001 From: George Mitenkov Date: Thu, 1 Feb 2024 00:08:29 +0000 Subject: [PATCH 31/44] [move] Remove natives for vector (#11775) Vector uses bytecode instructions, so we should not maintain that. --- third_party/move/move-stdlib/docs/vector.md | 2 +- .../move/move-stdlib/sources/vector.move | 4 +- .../move/move-stdlib/src/natives/mod.rs | 18 +- .../move/move-stdlib/src/natives/vector.rs | 319 ------------------ .../tests/builtins/vec_borrow_and_modify.exp | 1 - .../tests/builtins/vec_borrow_and_modify.mvir | 57 ---- .../tests/builtins/vec_pop.exp | 1 - .../tests/builtins/vec_pop.mvir | 58 ---- .../tests/builtins/vec_push.exp | 1 - .../tests/builtins/vec_push.mvir | 57 ---- .../tests/builtins/vec_swap.exp | 1 - .../tests/builtins/vec_swap.mvir | 62 ---- .../tests/instructions/vec_copy_nested.exp | 1 - .../tests/instructions/vec_copy_nested.mvir | 110 ------ .../tests/native_functions/vector_module.exp | 1 - .../tests/native_functions/vector_module.mvir | 74 ---- ...ector_resource_not_destroyed_at_return.exp | 10 - ...ctor_resource_not_destroyed_at_return.mvir | 14 - ...nrestricted_not_destroyed_at_return_ok.exp | 1 - ...restricted_not_destroyed_at_return_ok.mvir | 11 - .../non_existant_native_struct.exp | 10 - .../non_existant_native_struct.mvir | 6 - 22 files changed, 3 insertions(+), 816 deletions(-) delete mode 100644 third_party/move/move-stdlib/src/natives/vector.rs delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.mvir delete mode 100644 
third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.mvir delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.exp delete mode 100644 third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.mvir diff --git a/third_party/move/move-stdlib/docs/vector.md b/third_party/move/move-stdlib/docs/vector.md index d2401baabbb5e..98275c022b6c8 100644 --- a/third_party/move/move-stdlib/docs/vector.md +++ b/third_party/move/move-stdlib/docs/vector.md @@ -316,7 +316,7 @@ Reverses the order of the elements in the vector v in place.
public fun reverse<Element>(v: &mut vector<Element>) {
     let len = length(v);
-    if (len == 0) return ();
+    if (len == 0) return;
 
     let front_index = 0;
     let back_index = len -1;
diff --git a/third_party/move/move-stdlib/sources/vector.move b/third_party/move/move-stdlib/sources/vector.move
index 7a5f1d779504f..82c3387e4744e 100644
--- a/third_party/move/move-stdlib/sources/vector.move
+++ b/third_party/move/move-stdlib/sources/vector.move
@@ -57,8 +57,6 @@ module std::vector {
         v
     }
     spec singleton {
-        // TODO: when using opaque here, we get verification errors.
-        // pragma opaque;
         aborts_if false;
         ensures result == vec(e);
     }
@@ -66,7 +64,7 @@ module std::vector {
     /// Reverses the order of the elements in the vector `v` in place.
     public fun reverse<Element>(v: &mut vector<Element>) {
         let len = length(v);
-        if (len == 0) return ();
+        if (len == 0) return;
 
         let front_index = 0;
         let back_index = len -1;
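For readers less familiar with Move: `reverse` above is the standard two-pointer swap. A minimal Rust sketch of the same loop (illustrative only, not part of this patch):

    fn reverse<T>(v: &mut Vec<T>) {
        let len = v.len();
        if len == 0 {
            return;
        }
        let mut front = 0;
        let mut back = len - 1;
        while front < back {
            // Swap the outermost pair, then move both indices inward.
            v.swap(front, back);
            front += 1;
            back -= 1;
        }
    }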
diff --git a/third_party/move/move-stdlib/src/natives/mod.rs b/third_party/move/move-stdlib/src/natives/mod.rs
index 0d3b405407371..95917c886a501 100644
--- a/third_party/move/move-stdlib/src/natives/mod.rs
+++ b/third_party/move/move-stdlib/src/natives/mod.rs
@@ -6,14 +6,12 @@ pub mod bcs;
 pub mod debug;
 pub mod event;
 pub mod hash;
+mod helpers;
 pub mod signer;
 pub mod string;
 pub mod type_name;
 #[cfg(feature = "testing")]
 pub mod unit_test;
-pub mod vector;
-
-mod helpers;
 
 use move_core_types::account_address::AccountAddress;
 use move_vm_runtime::native_functions::{make_table_from_iter, NativeFunctionTable};
@@ -25,7 +23,6 @@ pub struct GasParameters {
     pub signer: signer::GasParameters,
     pub string: string::GasParameters,
     pub type_name: type_name::GasParameters,
-    pub vector: vector::GasParameters,
 
     #[cfg(feature = "testing")]
     pub unit_test: unit_test::GasParameters,
@@ -79,18 +76,6 @@ impl GasParameters {
                     per_byte_searched: 0.into(),
                 },
             },
-            vector: vector::GasParameters {
-                empty: vector::EmptyGasParameters { base: 0.into() },
-                length: vector::LengthGasParameters { base: 0.into() },
-                push_back: vector::PushBackGasParameters {
-                    base: 0.into(),
-                    legacy_per_abstract_memory_unit: 0.into(),
-                },
-                borrow: vector::BorrowGasParameters { base: 0.into() },
-                pop_back: vector::PopBackGasParameters { base: 0.into() },
-                destroy_empty: vector::DestroyEmptyGasParameters { base: 0.into() },
-                swap: vector::SwapGasParameters { base: 0.into() },
-            },
             #[cfg(feature = "testing")]
             unit_test: unit_test::GasParameters {
                 create_signers_for_testing: unit_test::CreateSignersForTestingGasParameters {
@@ -121,7 +106,6 @@ pub fn all_natives(
     add_natives!("signer", signer::make_all(gas_params.signer));
     add_natives!("string", string::make_all(gas_params.string));
     add_natives!("type_name", type_name::make_all(gas_params.type_name));
-    add_natives!("vector", vector::make_all(gas_params.vector));
     #[cfg(feature = "testing")]
     {
         add_natives!("unit_test", unit_test::make_all(gas_params.unit_test));
diff --git a/third_party/move/move-stdlib/src/natives/vector.rs b/third_party/move/move-stdlib/src/natives/vector.rs
deleted file mode 100644
index de71a7d6c1b21..0000000000000
--- a/third_party/move/move-stdlib/src/natives/vector.rs
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright (c) The Diem Core Contributors
-// Copyright (c) The Move Contributors
-// SPDX-License-Identifier: Apache-2.0
-
-use crate::natives::helpers::make_module_natives;
-use move_binary_format::errors::{PartialVMError, PartialVMResult};
-use move_core_types::{
-    gas_algebra::{InternalGas, InternalGasPerAbstractMemoryUnit},
-    vm_status::StatusCode,
-};
-use move_vm_runtime::native_functions::{NativeContext, NativeFunction};
-use move_vm_types::{
-    loaded_data::runtime_types::Type,
-    natives::function::NativeResult,
-    pop_arg,
-    values::{Value, Vector, VectorRef},
-    views::ValueView,
-};
-use std::{collections::VecDeque, sync::Arc};
-
-/***************************************************************************************************
- * native fun empty
- *
- *   gas cost: base_cost
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct EmptyGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_empty(
-    gas_params: &EmptyGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.is_empty());
-
-    NativeResult::map_partial_vm_result_one(gas_params.base, Vector::empty(&ty_args[0]))
-}
-
-pub fn make_native_empty(gas_params: EmptyGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_empty(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun length
- *
- *   gas cost: base_cost
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct LengthGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_length(
-    gas_params: &LengthGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 1);
-
-    let r = pop_arg!(args, VectorRef);
-    NativeResult::map_partial_vm_result_one(gas_params.base, r.len(&ty_args[0]))
-}
-
-pub fn make_native_length(gas_params: LengthGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_length(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun push_back
- *
- *   gas cost: base_cost + legacy_unit_cost * max(1, size_of(val))
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct PushBackGasParameters {
-    pub base: InternalGas,
-    pub legacy_per_abstract_memory_unit: InternalGasPerAbstractMemoryUnit,
-}
-
-pub fn native_push_back(
-    gas_params: &PushBackGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 2);
-
-    let e = args.pop_back().unwrap();
-    let r = pop_arg!(args, VectorRef);
-
-    let mut cost = gas_params.base;
-    if gas_params.legacy_per_abstract_memory_unit != 0.into() {
-        cost += gas_params.legacy_per_abstract_memory_unit
-            * std::cmp::max(e.legacy_abstract_memory_size(), 1.into());
-    }
-
-    NativeResult::map_partial_vm_result_empty(cost, r.push_back(e, &ty_args[0]))
-}
-
-pub fn make_native_push_back(gas_params: PushBackGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_push_back(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun borrow
- *
- *   gas cost: base_cost
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct BorrowGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_borrow(
-    gas_params: &BorrowGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 2);
-
-    let idx = pop_arg!(args, u64) as usize;
-    let r = pop_arg!(args, VectorRef);
-    NativeResult::map_partial_vm_result_one(
-        gas_params.base,
-        r.borrow_elem(idx, &ty_args[0])
-            .map_err(native_error_to_abort),
-    )
-}
-
-pub fn make_native_borrow(gas_params: BorrowGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_borrow(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun pop
- *
- *   gas cost: base_cost
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct PopBackGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_pop_back(
-    gas_params: &PopBackGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 1);
-
-    let r = pop_arg!(args, VectorRef);
-    NativeResult::map_partial_vm_result_one(
-        gas_params.base,
-        r.pop(&ty_args[0]).map_err(native_error_to_abort),
-    )
-}
-
-pub fn make_native_pop_back(gas_params: PopBackGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_pop_back(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun destroy_empty
- *
- *   gas cost: base_cost
- *
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct DestroyEmptyGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_destroy_empty(
-    gas_params: &DestroyEmptyGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 1);
-
-    let v = pop_arg!(args, Vector);
-    NativeResult::map_partial_vm_result_empty(
-        gas_params.base,
-        v.destroy_empty(&ty_args[0]).map_err(native_error_to_abort),
-    )
-}
-
-pub fn make_native_destroy_empty(gas_params: DestroyEmptyGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_destroy_empty(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-/***************************************************************************************************
- * native fun swap
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct SwapGasParameters {
-    pub base: InternalGas,
-}
-
-pub fn native_swap(
-    gas_params: &SwapGasParameters,
-    _context: &mut NativeContext,
-    ty_args: Vec<Type>,
-    mut args: VecDeque<Value>,
-) -> PartialVMResult<NativeResult> {
-    debug_assert!(ty_args.len() == 1);
-    debug_assert!(args.len() == 3);
-
-    let idx2 = pop_arg!(args, u64) as usize;
-    let idx1 = pop_arg!(args, u64) as usize;
-    let r = pop_arg!(args, VectorRef);
-    NativeResult::map_partial_vm_result_empty(
-        gas_params.base,
-        r.swap(idx1, idx2, &ty_args[0])
-            .map_err(native_error_to_abort),
-    )
-}
-
-pub fn make_native_swap(gas_params: SwapGasParameters) -> NativeFunction {
-    Arc::new(
-        move |context, ty_args, args| -> PartialVMResult<NativeResult> {
-            native_swap(&gas_params, context, ty_args, args)
-        },
-    )
-}
-
-fn native_error_to_abort(err: PartialVMError) -> PartialVMError {
-    let (major_status, sub_status_opt, message_opt, exec_state_opt, indices, offsets) =
-        err.all_data();
-    let new_err = match major_status {
-        StatusCode::VECTOR_OPERATION_ERROR => PartialVMError::new(StatusCode::ABORTED),
-        _ => PartialVMError::new(major_status),
-    };
-    let new_err = match sub_status_opt {
-        None => new_err,
-        Some(code) => new_err.with_sub_status(code),
-    };
-    let new_err = match message_opt {
-        None => new_err,
-        Some(message) => new_err.with_message(message),
-    };
-    let new_err = match exec_state_opt {
-        None => new_err,
-        Some(stacktrace) => new_err.with_exec_state(stacktrace),
-    };
-    new_err.at_indices(indices).at_code_offsets(offsets)
-}
-
-/***************************************************************************************************
- * module
- **************************************************************************************************/
-#[derive(Debug, Clone)]
-pub struct GasParameters {
-    pub empty: EmptyGasParameters,
-    pub length: LengthGasParameters,
-    pub push_back: PushBackGasParameters,
-    pub borrow: BorrowGasParameters,
-    pub pop_back: PopBackGasParameters,
-    pub destroy_empty: DestroyEmptyGasParameters,
-    pub swap: SwapGasParameters,
-}
-
-pub fn make_all(gas_params: GasParameters) -> impl Iterator<Item = (String, NativeFunction)> {
-    let natives = [
-        ("empty", make_native_empty(gas_params.empty)),
-        ("length", make_native_length(gas_params.length)),
-        ("push_back", make_native_push_back(gas_params.push_back)),
-        ("borrow", make_native_borrow(gas_params.borrow.clone())),
-        ("borrow_mut", make_native_borrow(gas_params.borrow)),
-        ("pop_back", make_native_pop_back(gas_params.pop_back)),
-        (
-            "destroy_empty",
-            make_native_destroy_empty(gas_params.destroy_empty),
-        ),
-        ("swap", make_native_swap(gas_params.swap)),
-    ];
-
-    make_module_natives(natives)
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.exp b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.exp
deleted file mode 100644
index 457ace9c4acb6..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 4 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.mvir b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.mvir
deleted file mode 100644
index 527c837143861..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_borrow_and_modify.mvir
+++ /dev/null
@@ -1,57 +0,0 @@
-//# publish
-module 0x1.M {
-    import 0x1.vector;
-
-    struct R has key { v: vector<u64> }
-
-    public publish(s: &signer) {
-        let v: vector<u64>;
-    label b0:
-        v = vector.empty<u64>();
-        vector.push_back<u64>(&mut v, 100);
-        vector.push_back<u64>(&mut v, 200);
-        move_to<R>(move(s), R { v: move(v) });
-        return;
-    }
-
-    public borrow_and_modify(addr: address) acquires R {
-    label b0:
-        *vector.borrow_mut<u64>(&mut borrow_global_mut<R>(move(addr)).R::v, 0) = 300;
-        return;
-    }
-
-    public verify_effects(addr: address) acquires R {
-    label b0:
-        assert(*vector.borrow<u64>(&borrow_global<R>(move(addr)).R::v, 0) == 300, 1000);
-        return;
-    }
-}
-
-//# run --signers 0x1
-import 0x1.M;
-
-main(s: signer) {
-label b0:
-    M.publish(&s);
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.borrow_and_modify(signer.address_of(&s));
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.verify_effects(signer.address_of(&s));
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.exp b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.exp
deleted file mode 100644
index 457ace9c4acb6..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 4 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.mvir b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.mvir
deleted file mode 100644
index 9e2314919f6e8..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_pop.mvir
+++ /dev/null
@@ -1,58 +0,0 @@
-//# publish
-module 0x1.M {
-    import 0x1.vector;
-
-    struct R has key { v: vector<u64> }
-
-    public publish(s: &signer) {
-        let v: vector<u64>;
-    label b0:
-        v = vector.empty<u64>();
-        vector.push_back<u64>(&mut v, 100);
-        vector.push_back<u64>(&mut v, 200);
-        move_to<R>(move(s), R { v: move(v) });
-        return;
-    }
-
-    public borrow_and_pop(addr: address) acquires R {
-    label b0:
-        assert(vector.pop_back<u64>(&mut borrow_global_mut<R>(move(addr)).R::v) == 200, 1000);
-        return;
-    }
-
-    public verify_effects(addr: address) acquires R {
-    label b0:
-        assert(vector.length<u64>(&borrow_global<R>(move(addr)).R::v) == 1, 1001);
-        return;
-    }
-}
-
-//# run --signers 0x1
-import 0x1.M;
-
-main(s: signer) {
-label b0:
-    M.publish(&s);
-    return;
-}
-
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.borrow_and_pop(signer.address_of(&s));
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.verify_effects(signer.address_of(&s));
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.exp b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.exp
deleted file mode 100644
index 457ace9c4acb6..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 4 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.mvir b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.mvir
deleted file mode 100644
index 8e0e07193f4b8..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_push.mvir
+++ /dev/null
@@ -1,57 +0,0 @@
-//# publish
-module 0x1.M {
-    import 0x1.vector;
-
-    struct R has key { v: vector<u64> }
-
-    public publish(s: &signer) {
-    label b0:
-        move_to<R>(move(s), R { v: vector.empty<u64>() });
-        return;
-    }
-
-    public borrow_and_push(addr: address) acquires R {
-        let r: &mut Self.R;
-    label b0:
-        r = borrow_global_mut<R>(move(addr));
-        vector.push_back<u64>(&mut move(r).R::v, 42);
-        return;
-    }
-
-    public verify_effects(addr: address) acquires R {
-        let r: &Self.R;
-    label b0:
-        r = borrow_global<R>(move(addr));
-        assert(vector.length<u64>(& move(r).R::v) == 1, 1000);
-        return;
-    }
-}
-
-//# run --signers 0x1
-import 0x1.M;
-
-main(s: signer) {
-label b0:
-    M.publish(&s);
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.borrow_and_push(signer.address_of(&s));
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.verify_effects(signer.address_of(&s));
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.exp b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.exp
deleted file mode 100644
index 457ace9c4acb6..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 4 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.mvir b/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.mvir
deleted file mode 100644
index 124da1df94a81..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/builtins/vec_swap.mvir
+++ /dev/null
@@ -1,62 +0,0 @@
-//# publish
-module 0x1.M {
-    import 0x1.vector;
-
-    struct R has key { v: vector<u64> }
-
-    public publish(s: &signer) {
-        let v: vector<u64>;
-    label b0:
-        v = vector.empty<u64>();
-        vector.push_back<u64>(&mut v, 100);
-        vector.push_back<u64>(&mut v, 200);
-        move_to<R>(move(s), R { v: move(v) });
-        return;
-    }
-
-    public borrow_and_swap(addr: address) acquires R {
-        let r: &mut Self.R;
-    label b0:
-        r = borrow_global_mut<R>(move(addr));
-        vector.swap<u64>(&mut move(r).R::v, 0, 1);
-        return;
-    }
-
-    public verify_effects(addr: address) acquires R {
-        let v: &vector<u64>;
-    label b0:
-        v = & borrow_global<R>(move(addr)).R::v;
-        assert(*vector.borrow<u64>(copy(v), 0) == 200, 1000);
-        assert(*vector.borrow<u64>(move(v), 1) == 100, 1001);
-        return;
-    }
-}
-
-//# run --signers 0x1
-import 0x1.M;
-
-main(s: signer) {
-label b0:
-    M.publish(&s);
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.borrow_and_swap(signer.address_of(&s));
-    return;
-}
-
-//# run --signers 0x1
-import 0x1.M;
-import 0x1.signer;
-
-main(s: signer) {
-label b0:
-    M.verify_effects(signer.address_of(&s));
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.exp b/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.exp
deleted file mode 100644
index 5d92c423f3fd8..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 2 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.mvir b/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.mvir
deleted file mode 100644
index b8423d9bfd5f7..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/instructions/vec_copy_nested.mvir
+++ /dev/null
@@ -1,110 +0,0 @@
-//# publish
-module 0x42.DeepCopy {
-    import 0x1.vector;
-
-    struct Config has copy, drop, store { i: u64 }
-    struct Nested has copy, drop, store { c: Self.Config }
-
-    public test_struct_shallow() {
-        let c1: Self.Config;
-        let c2: Self.Config;
-    label b0:
-        c1 = Config { i: 0 };
-        c2 = copy(c1);
-
-        // mutate c1.i to 1
-        *(&mut (&mut c1).Config::i) = 1;
-        // c2.i should still be 0
-        assert(*&(&c2).Config::i == 0, 77);
-
-        // mutate c2.i to 2
-        *(&mut (&mut c2).Config::i) = 2;
-        // c1.i should still be 1
-        assert(*&(&c1).Config::i == 1, 78);
-
-        return;
-    }
-
-    public test_struct_deep() {
-        let n1: Self.Nested;
-        let n2: Self.Nested;
-    label b0:
-        n1 = Nested { c: Config { i: 0 } };
-        n2 = copy(n1);
-
-        // mutate n1.c.i to 1
-        *(&mut (&mut (&mut n1).Nested::c).Config::i) = 1;
-        // n2.c.i should still be 0
-        assert(*&(&(&n2).Nested::c).Config::i == 0, 79);
-        // n1.c.i is 1
-        assert(*&(&(&n1).Nested::c).Config::i == 1, 80);
-
-        // mutate n2.c.i to 2
-        *(&mut (&mut (&mut n2).Nested::c).Config::i) = 2;
-        // n1.c.i should still be 1
-        assert(*&(&(&n1).Nested::c).Config::i == 1, 81);
-        // n2.c.i is 2
-        assert(*&(&(&n2).Nested::c).Config::i == 2, 82);
-
-        return;
-    }
-
-    public test_vector() {
-        let v1: vector<u64>;
-        let v2: vector<u64>;
-    label b0:
-        v1 = vector.empty<u64>();
-        vector.push_back<u64>(&mut v1, 0);
-        v2 = copy(v1);
-        *vector.borrow_mut<u64>(&mut v1, 0) = 1;
-        assert(*vector.borrow<u64>(&v2, 0) == 0, 83);
-
-        *vector.borrow_mut<u64>(&mut v2, 0) = 2;
-        assert(*vector.borrow<u64>(&v1, 0) == 1, 84);
-
-        return;
-    }
-
-    public test_vector_to_struct() {
-        let v1: vector<Self.Config>;
-        let v2: vector<Self.Config>;
-        let c1: Self.Config;
-        let c2: Self.Config;
-        let r1: &mut Self.Config;
-        let r2: &mut Self.Config;
-        let ri1: &mut u64;
-    label b0:
-        c1 = Config { i: 0 };
-        c2 = copy(c1);
-        *(&mut (&mut c2).Config::i) = 1;
-
-        v1 = vector.empty<Self.Config>();
-        vector.push_back<Self.Config>(&mut v1, move(c1));
-        vector.push_back<Self.Config>(&mut v1, move(c2));
-        v2 = copy(v1);
-
-        r1 = vector.borrow_mut<Self.Config>(&mut v1, 0);
-        r2 = vector.borrow_mut<Self.Config>(&mut v2, 0);
-        assert(*&copy(r1).Config::i == 0, 90);
-        assert(*&copy(r1).Config::i == *&move(r2).Config::i, 91);
-        r2 = vector.borrow_mut<Self.Config>(&mut v2, 1);
-        assert(*&copy(r2).Config::i == 1, 91);
-        assert(*&copy(r1).Config::i != *&copy(r2).Config::i, 91);
-        *&mut copy(r1).Config::i = 1;
-        assert(*&copy(r1).Config::i == *&copy(r2).Config::i, 92);
-
-        return;
-    }
-}
-
-//# run
-import 0x42.DeepCopy;
-
-main() {
-label b0:
-    DeepCopy.test_struct_shallow();
-    DeepCopy.test_struct_deep();
-    DeepCopy.test_vector();
-    DeepCopy.test_vector_to_struct();
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.exp b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.exp
deleted file mode 100644
index fc5a4436b29d4..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 3 tasks
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.mvir b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.mvir
deleted file mode 100644
index 8b1184e55eb14..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_module.mvir
+++ /dev/null
@@ -1,74 +0,0 @@
-//# publish
-module 0x1.Coin {
-    struct Coin has store { value: u64 }
-    public value(c: &Self.Coin): u64 {
-    label b0:
-        return *&move(c).Coin::value;
-    }
-    public zero(): Self.Coin {
-    label b0:
-        return Coin { value: 0 };
-    }
-}
-
-//# publish
-module 0x42.M {
-    import 0x1.Coin;
-    import 0x1.vector;
-    import 0x1.signer;
-    struct Coins has key { f: vector<Coin.Coin> }
-
-    public new(account: &signer) {
-        let coin_vec: vector<Coin.Coin>;
-        let coins: Self.Coins;
-    label b0:
-        coin_vec = vector.empty<Coin.Coin>();
-        coins = Coins { f: move(coin_vec)};
-
-        move_to<Coins>(move(account), move(coins));
-        return;
-    }
-
-    public put_coin(account: &signer, coin: Coin.Coin) acquires Coins {
-        let coins_ref: &mut Self.Coins;
-        let v_ref: &mut vector<Coin.Coin>;
-    label b0:
-        coins_ref = borrow_global_mut<Coins>(signer.address_of(move(account)));
-        v_ref = &mut move(coins_ref).Coins::f;
-        vector.push_back<Coin.Coin>(move(v_ref), move(coin));
-        return;
-    }
-
-    public get_value(account: &signer, i: u64): u64 acquires Coins {
-        let coins_ref: &Self.Coins;
-        let v_ref: &vector<Coin.Coin>;
-        let coin_ref: &Coin.Coin;
-    label b0:
-        coins_ref = borrow_global<Coins>(signer.address_of(move(account)));
-        v_ref = &move(coins_ref).Coins::f;
-        coin_ref = vector.borrow<Coin.Coin>(move(v_ref), move(i));
-
-        return Coin.value(move(coin_ref));
-    }
-
-    public pop(account: &signer): Coin.Coin acquires Coins {
-        let coins_ref: &mut Self.Coins;
-        let v_ref: &mut vector<Coin.Coin>;
-    label b0:
-        coins_ref = borrow_global_mut<Coins>(signer.address_of(move(account)));
-        v_ref = &mut move(coins_ref).Coins::f;
-        return vector.pop_back<Coin.Coin>(move(v_ref));
-    }
-}
-
-//# run --signers 0x1
-import 0x42.M;
-import 0x1.Coin;
-
-main(account: signer) {
-label b0:
-    M.new(&account);
-    M.put_coin(&account, Coin.zero());
-
-    return;
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.exp b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.exp
deleted file mode 100644
index c7ff68dbedb5b..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.exp
+++ /dev/null
@@ -1,10 +0,0 @@
-processed 1 task
-
-task 0 'publish'. lines 1-14:
-Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. Got VMError: {
-    major_status: UNSAFE_RET_UNUSED_VALUES_WITHOUT_DROP,
-    sub_status: None,
-    location: 0x42::M,
-    indices: [(FunctionDefinition, 0)],
-    offsets: [(FunctionDefinitionIndex(0), 2)],
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.mvir b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.mvir
deleted file mode 100644
index 24461b545fb6e..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_resource_not_destroyed_at_return.mvir
+++ /dev/null
@@ -1,14 +0,0 @@
-//# publish
-module 0x42.M {
-    import 0x1.vector;
-
-    struct R has store { b: bool}
-
-    f() {
-        let v: vector<Self.R>;
-    label b0:
-        v = vector.empty<Self.R>();
-        // error even though vector is empty, doesn't have drop
-        return;
-    }
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.exp b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.exp
deleted file mode 100644
index 6cd67db3f6472..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.exp
+++ /dev/null
@@ -1 +0,0 @@
-processed 1 task
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.mvir b/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.mvir
deleted file mode 100644
index 15e0a92a258e9..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_functions/vector_unrestricted_not_destroyed_at_return_ok.mvir
+++ /dev/null
@@ -1,11 +0,0 @@
-//# publish
-module 0x42.M {
-    import 0x1.vector;
-
-    f() {
-        let v: vector<u64>;
-    label b0:
-        v = vector.empty<u64>();
-        return;
-    }
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.exp b/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.exp
deleted file mode 100644
index 966d133be2800..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.exp
+++ /dev/null
@@ -1,10 +0,0 @@
-processed 1 task
-
-task 0 'publish'. lines 1-6:
-Error: Unable to publish module '0000000000000000000000000000000000000000000000000000000000000042::M'. Got VMError: {
-    major_status: MISSING_DEPENDENCY,
-    sub_status: None,
-    location: 0x42::M,
-    indices: [(FunctionHandle, 0)],
-    offsets: [],
-}
diff --git a/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.mvir b/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.mvir
deleted file mode 100644
index 9d12bf2d5063e..0000000000000
--- a/third_party/move/move-vm/transactional-tests/tests/native_structs/non_existant_native_struct.mvir
+++ /dev/null
@@ -1,6 +0,0 @@
-//# publish
-module 0x42.M {
-    // error, missing dep
-    native struct T;
-    native struct T2;
-}

From 50136209c970169ddfbb912e77deac4ae6f12aa0 Mon Sep 17 00:00:00 2001
From: Satya Vusirikala 
Date: Thu, 1 Feb 2024 06:21:40 +0530
Subject: [PATCH 32/44] [aggregator] Do layout matching in change_set only
 sporadically (#11204)

* Randomly check layout matches in change_set.rs

* Addressing PR comments

* Invoking randomly_check_layout_matches function in executor.rs
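
With the 1-in-100 sampling rate used here (gen_range(0, 100) == 1), the
expensive recursive layout comparison is skipped on roughly 99% of calls,
while a persistent mismatch that survives n squashes is still caught with
probability 1 - 0.99^n: about 63% after 100 squashes and well above 99.99%
after 1000. When the sampled check does fire on a mismatch, it raises the
same invariant error as the old unconditional check.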
---
 Cargo.lock                                    |  1 +
 aptos-move/aptos-vm-types/Cargo.toml          |  1 +
 aptos-move/aptos-vm-types/src/change_set.rs   | 48 +++++++++++++------
 .../src/move_vm_ext/respawned_session.rs      | 31 +-----------
 aptos-move/block-executor/src/executor.rs     |  8 +++-
 5 files changed, 43 insertions(+), 46 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index f5160a8fcec30..b67a858e37730 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4393,6 +4393,7 @@ dependencies = [
  "either",
  "move-binary-format",
  "move-core-types",
+ "rand 0.7.3",
  "serde",
  "test-case",
 ]
diff --git a/aptos-move/aptos-vm-types/Cargo.toml b/aptos-move/aptos-vm-types/Cargo.toml
index 21108c9f0a5bb..e5ad3bcc2ab82 100644
--- a/aptos-move/aptos-vm-types/Cargo.toml
+++ b/aptos-move/aptos-vm-types/Cargo.toml
@@ -24,6 +24,7 @@ claims = { workspace = true }
 either = { workspace = true }
 move-binary-format = { workspace = true }
 move-core-types = { workspace = true }
+rand = { workspace = true }
 serde = { workspace = true }
 
 [dev-dependencies]
diff --git a/aptos-move/aptos-vm-types/src/change_set.rs b/aptos-move/aptos-vm-types/src/change_set.rs
index 75088e2ea7b04..c619cf953edd4 100644
--- a/aptos-move/aptos-vm-types/src/change_set.rs
+++ b/aptos-move/aptos-vm-types/src/change_set.rs
@@ -33,6 +33,7 @@ use move_core_types::{
     value::MoveTypeLayout,
     vm_status::StatusCode,
 };
+use rand::Rng;
 use std::{
     collections::{
         btree_map::Entry::{Occupied, Vacant},
@@ -42,6 +43,34 @@ use std::{
     sync::Arc,
 };
 
+/// Sporadically checks if the given two input type layouts match.
+pub fn randomly_check_layout_matches(
+    layout_1: Option<&MoveTypeLayout>,
+    layout_2: Option<&MoveTypeLayout>,
+) -> Result<(), PanicError> {
+    if layout_1.is_some() != layout_2.is_some() {
+        return Err(code_invariant_error(format!(
+            "Layouts don't match when they are expected to: {:?} and {:?}",
+            layout_1, layout_2
+        )));
+    }
+    if layout_1.is_some() {
+        // Checking if 2 layouts are equal is a recursive operation and is expensive.
+        // We generally call this `randomly_check_layout_matches` function when we know
+        // that the layouts are supposed to match. As an optimization, we only randomly
+        // check if the layouts are matching.
+        let mut rng = rand::thread_rng();
+        let random_number: u32 = rng.gen_range(0, 100);
+        if random_number == 1 && layout_1 != layout_2 {
+            return Err(code_invariant_error(format!(
+                "Layouts don't match when they are expected to: {:?} and {:?}",
+                layout_1, layout_2
+            )));
+        }
+    }
+    Ok(())
+}
+
 /// A change set produced by the VM.
 ///
 /// **WARNING**: Just like VMOutput, this type should only be used inside the
@@ -634,13 +663,10 @@ impl VMChangeSet {
                     // Squash entry and additional entries if type layouts match.
                     let (additional_write_op, additional_type_layout) = additional_entry;
                     let (write_op, type_layout) = entry.get_mut();
-                    if *type_layout != additional_type_layout {
-                        return Err(code_invariant_error(format!(
-                            "Cannot squash two writes with different type layouts.
-                            key: {:?}, type_layout: {:?}, additional_type_layout: {:?}",
-                            key, type_layout, additional_type_layout
-                        )));
-                    }
+                    randomly_check_layout_matches(
+                        type_layout.as_deref(),
+                        additional_type_layout.as_deref(),
+                    )?;
                     let noop = !WriteOp::squash(write_op, additional_write_op).map_err(|e| {
                         code_invariant_error(format!("Error while squashing two write ops: {}.", e))
                     })?;
@@ -691,13 +717,7 @@ impl VMChangeSet {
                                 materialized_size: additional_materialized_size,
                             }),
                         ) => {
-                            if layout != additional_layout {
-                                return Err(code_invariant_error(format!(
-                                    "Cannot squash two writes with different type layouts.
-                                    key: {:?}, type_layout: {:?}, additional_type_layout: {:?}",
-                                    key, layout, additional_layout
-                                )));
-                            }
+                            randomly_check_layout_matches(Some(layout), Some(additional_layout))?;
                             let to_delete = !WriteOp::squash(write_op, additional_write_op.clone())
                                 .map_err(|e| {
                                     code_invariant_error(format!(
diff --git a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs
index c90df6d68588c..3c0c5170a34e5 100644
--- a/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs
+++ b/aptos-move/aptos-vm/src/move_vm_ext/respawned_session.rs
@@ -30,7 +30,7 @@ use aptos_types::{
 };
 use aptos_vm_types::{
     abstract_write_op::{AbstractResourceWriteOp, WriteWithDelayedFieldsOp},
-    change_set::VMChangeSet,
+    change_set::{randomly_check_layout_matches, VMChangeSet},
     resolver::{
         ExecutorView, ResourceGroupSize, ResourceGroupView, StateStorageView, TModuleView,
         TResourceGroupView, TResourceView,
@@ -44,7 +44,6 @@ use move_core_types::{
     value::MoveTypeLayout,
     vm_status::{err_msg, StatusCode, VMStatus},
 };
-use rand::Rng;
 use std::{
     collections::{BTreeMap, HashMap, HashSet},
     sync::Arc,
@@ -148,34 +147,6 @@ impl<'r, 'l> RespawnedSession<'r, 'l> {
     }
 }
 
-// Sporadically checks if the given two input type layouts match
-pub fn randomly_check_layout_matches(
-    layout_1: Option<&MoveTypeLayout>,
-    layout_2: Option<&MoveTypeLayout>,
-) -> Result<(), PanicError> {
-    if layout_1.is_some() != layout_2.is_some() {
-        return Err(code_invariant_error(format!(
-            "Layouts don't match when they are expected to: {:?} and {:?}",
-            layout_1, layout_2
-        )));
-    }
-    if layout_1.is_some() {
-        // Checking if 2 layouts are equal is a recursive operation and is expensive.
-        // We generally call this `randomly_check_layout_matches` function when we know
-        // that the layouts are supposed to match. As an optimization, we only randomly
-        // check if the layouts are matching.
-        let mut rng = rand::thread_rng();
-        let random_number: u32 = rng.gen_range(0, 100);
-        if random_number == 1 && layout_1 != layout_2 {
-            return Err(code_invariant_error(format!(
-                "Layouts don't match when they are expected to: {:?} and {:?}",
-                layout_1, layout_2
-            )));
-        }
-    }
-    Ok(())
-}
-
 /// Adapter to allow resolving the calls to `ExecutorView` via change set.
 pub struct ExecutorViewWithChangeSet<'r> {
     base_executor_view: &'r dyn ExecutorView,
diff --git a/aptos-move/block-executor/src/executor.rs b/aptos-move/block-executor/src/executor.rs
index 0c67be88bab0c..06a34b415ef92 100644
--- a/aptos-move/block-executor/src/executor.rs
+++ b/aptos-move/block-executor/src/executor.rs
@@ -42,6 +42,7 @@ use aptos_types::{
     write_set::{TransactionWrite, WriteOp},
 };
 use aptos_vm_logging::{alert, clear_speculative_txn_logs, init_speculative_logs, prelude::*};
+use aptos_vm_types::change_set::randomly_check_layout_matches;
 use bytes::Bytes;
 use claims::assert_none;
 use core::panic;
@@ -848,10 +849,13 @@ where
             for (key, layout) in reads_needing_delayed_field_exchange.into_iter() {
                 if let Ok(MVDataOutput::Versioned(
                     _,
-                    ValueWithLayout::Exchanged(value, _existing_layout),
+                    ValueWithLayout::Exchanged(value, existing_layout),
                 )) = versioned_cache.data().fetch_data(&key, txn_idx)
                 {
-                    // TODO[agg_v2](fix) add randomly_check_layout_matches(Some(_existing_layout), layout);
+                    randomly_check_layout_matches(
+                        existing_layout.as_deref(),
+                        Some(layout.as_ref()),
+                    )?;
                     patched_resource_write_set.insert(
                         key,
                         Self::replace_ids_with_values(&value, layout.as_ref(), &latest_view),

From e875621b29a2dc5374d3954b97797a96637bac29 Mon Sep 17 00:00:00 2001
From: Teng Zhang 
Date: Wed, 31 Jan 2024 17:02:42 -0800
Subject: [PATCH 33/44] [Compiler-v2] Fix type checking issue due to name
 conflicts between built-in functions and local variables (#11739)

* fix type checking issue due to name conflicts

* Update third_party/move/move-model/src/builder/exp_builder.rs

Co-authored-by: Vineeth Kashyap 

---------

Co-authored-by: Vineeth Kashyap 
---
 .../move-packages/DPN/sources/DiemSystem.move |  4 ++--
 .../core/sources/configs/DiemSystem.move      |  4 ++--
 .../experimental/sources/MultiToken.move      |  2 +-
 .../sources/MultiTokenBalance.move            |  2 +-
 .../tests/checking/specs/len_ok.exp           | 19 +++++++++++++++++++
 .../tests/checking/specs/len_ok.move          |  9 +++++++++
 .../checking/specs/len_same_fun_name_err.exp  |  7 +++++++
 .../checking/specs/len_same_fun_name_err.move | 14 ++++++++++++++
 .../tests/move_check/typing/len_err.exp       |  8 ++++++++
 .../tests/move_check/typing/len_err.move      | 10 ++++++++++
 .../move-model/src/builder/exp_builder.rs     |  9 +++++++++
 11 files changed, 82 insertions(+), 6 deletions(-)
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
 create mode 100644 third_party/move/move-compiler/tests/move_check/typing/len_err.exp
 create mode 100644 third_party/move/move-compiler/tests/move_check/typing/len_err.move

diff --git a/third_party/move/documentation/examples/diem-framework/move-packages/DPN/sources/DiemSystem.move b/third_party/move/documentation/examples/diem-framework/move-packages/DPN/sources/DiemSystem.move
index 4a16fccceead0..fe5bba8c472c1 100644
--- a/third_party/move/documentation/examples/diem-framework/move-packages/DPN/sources/DiemSystem.move
+++ b/third_party/move/documentation/examples/diem-framework/move-packages/DPN/sources/DiemSystem.move
@@ -522,10 +522,10 @@ module DiemFramework::DiemSystem {
         };
         let new_validator_config = ValidatorConfig::get_config(validator_info.addr);
         // check if information is the same
-        let config_ref = &mut validator_info.config;
-        if (config_ref == &new_validator_config) {
+        if (&validator_info.config == &new_validator_config) {
             return false
         };
+        let config_ref = &mut validator_info.config;
         *config_ref = new_validator_config;
         true
     }
diff --git a/third_party/move/documentation/examples/diem-framework/move-packages/core/sources/configs/DiemSystem.move b/third_party/move/documentation/examples/diem-framework/move-packages/core/sources/configs/DiemSystem.move
index ca944a3e3db0a..5304e686da609 100644
--- a/third_party/move/documentation/examples/diem-framework/move-packages/core/sources/configs/DiemSystem.move
+++ b/third_party/move/documentation/examples/diem-framework/move-packages/core/sources/configs/DiemSystem.move
@@ -282,10 +282,10 @@ module CoreFramework::DiemSystem {
         };
         let new_validator_config = ValidatorConfig::get_config(validator_info.addr);
         // check if information is the same
-        let config_ref = &mut validator_info.config;
-        if (config_ref == &new_validator_config) {
+        if (&validator_info.config == &new_validator_config) {
             return false
         };
+        let config_ref = &mut validator_info.config;
         *config_ref = new_validator_config;
         true
     }
diff --git a/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiToken.move b/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiToken.move
index 494f2105c43e4..4106a49d8f712 100644
--- a/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiToken.move
+++ b/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiToken.move
@@ -160,7 +160,7 @@ module ExperimentalFramework::MultiToken {
         let len = vector::length(gallery);
         while ({spec {
             invariant i >= 0;
-            invariant i <= len(gallery);
+            invariant i <= ::len(gallery);
             invariant forall k in 0..i: gallery[k].token_id.id != id;
         };(i < len)}) {
             if (guid::eq_id(&vector::borrow(gallery, i).token_id, id)) {
diff --git a/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiTokenBalance.move b/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiTokenBalance.move
index f87809d8a977b..e08acd8164338 100644
--- a/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiTokenBalance.move
+++ b/third_party/move/documentation/examples/diem-framework/move-packages/experimental/sources/MultiTokenBalance.move
@@ -100,7 +100,7 @@ module ExperimentalFramework::MultiTokenBalance {
         let len = vector::length(gallery);
         while ({spec {
             invariant i >= 0;
-            invariant i <= len(gallery);
+            invariant i <= ::len(gallery);
             invariant forall k in 0..i: gallery[k].id != id;
         };(i < len)}) {
             if (MultiToken::id(vector::borrow(gallery, i)) == *id) {
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
new file mode 100644
index 0000000000000..1ef2cd90f4a7c
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
@@ -0,0 +1,19 @@
+// ---- Model Dump
+module 0x42::m {
+    private fun f(gallery: &vector<u64>) {
+        {
+          let len: u64 = 5;
+          spec {
+            assert Ge(Len($t0), 0);
+          }
+          ;
+          Tuple()
+        }
+    }
+    spec fun $f(gallery: vector<u64>) {
+        {
+          let len: u256 = 5;
+          Tuple()
+        }
+    }
+} // end 0x42::m
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
new file mode 100644
index 0000000000000..a082f4b344719
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
@@ -0,0 +1,9 @@
+module 0x42::m {
+
+    fun f(gallery: &vector<u64>) {
+        let len = 5;
+        spec {
+            assert len(gallery) >= 0;
+        };
+    }
+}
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
new file mode 100644
index 0000000000000..9cf694dbbac09
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
@@ -0,0 +1,7 @@
+
+Diagnostics:
+error: invalid call of `m::len`: argument count mismatch (expected 0 but found 1)
+   ┌─ tests/checking/specs/len_same_fun_name_err.move:10:20
+   │
+10 │             assert len(gallery) >= 0; // err is raised here because the built-in one is shadowed.
+   │                    ^^^^^^^^^^^^
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
new file mode 100644
index 0000000000000..48c56deb9d945
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
@@ -0,0 +1,14 @@
+module 0x42::m {
+
+    fun len(): bool {
+        true
+    }
+
+    fun f(gallery: &vector<u64>) {
+        let len = 5;
+        spec {
+            assert len(gallery) >= 0; // err is raised here because the built-in one is shadowed.
+            assert len();
+        };
+    }
+}
diff --git a/third_party/move/move-compiler/tests/move_check/typing/len_err.exp b/third_party/move/move-compiler/tests/move_check/typing/len_err.exp
new file mode 100644
index 0000000000000..514580232fb50
--- /dev/null
+++ b/third_party/move/move-compiler/tests/move_check/typing/len_err.exp
@@ -0,0 +1,8 @@
+error[E02010]: invalid name
+  ┌─ tests/move_check/typing/len_err.move:5:9
+  │  
+5 │ ╭         spec {
+6 │ │             assert len(gallery) >= len;
+7 │ │         };
+  │ ╰─────────^ Conflicting name 'len' is used as both a variable and a function pointer (including built-in functions) in spec
+
diff --git a/third_party/move/move-compiler/tests/move_check/typing/len_err.move b/third_party/move/move-compiler/tests/move_check/typing/len_err.move
new file mode 100644
index 0000000000000..73485764cbc63
--- /dev/null
+++ b/third_party/move/move-compiler/tests/move_check/typing/len_err.move
@@ -0,0 +1,10 @@
+module 0x42::m {
+
+    fun f_err(gallery: &vector<u64>) {
+        let len = 5;
+        spec {
+            assert len(gallery) >= len;
+        };
+    }
+
+}
diff --git a/third_party/move/move-model/src/builder/exp_builder.rs b/third_party/move/move-model/src/builder/exp_builder.rs
index 89e307669ea4b..c7026dae64a84 100644
--- a/third_party/move/move-model/src/builder/exp_builder.rs
+++ b/third_party/move/move-model/src/builder/exp_builder.rs
@@ -1943,6 +1943,15 @@ impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'mo
                 if n.value.as_str() == "update_field" {
                     return Some(self.translate_update_field(expected_type, loc, generics, args));
                 }
+                let builtin_module = self.parent.parent.builtin_module();
+                let full_name = QualifiedSymbol {
+                    module_name: builtin_module,
+                    symbol: self.symbol_pool().make(&n.value),
+                };
+                // For other built-in functions, type check is performed in translate_call
+                if self.parent.parent.spec_fun_table.get(&full_name).is_some() {
+                    return None;
+                }
             }
         }
         if let EA::ModuleAccess_::Name(n) = &maccess.value {

From 8d7d6f0d6787107a4ecdeca947f23444bad7fa3e Mon Sep 17 00:00:00 2001
From: Justin Chang <37165464+just-in-chang@users.noreply.github.com>
Date: Wed, 31 Jan 2024 19:57:11 -0800
Subject: [PATCH 34/44] [NFT Metadata Crawler] Add column to mark URIs to not
 be parsed (#11846)

---
 .../down.sql                                  |  1 +
 .../up.sql                                    |  1 +
 .../src/models/nft_metadata_crawler_uris.rs   | 10 ++++++
 .../models/nft_metadata_crawler_uris_query.rs | 19 ++++++++++
 .../nft-metadata-crawler-parser/src/schema.rs |  1 +
 .../src/utils/counters.rs                     |  9 +++++
 .../src/utils/database.rs                     |  5 +--
 .../src/utils/gcs.rs                          |  2 +-
 .../nft-metadata-crawler-parser/src/worker.rs | 36 ++++++++++++++-----
 9 files changed, 72 insertions(+), 12 deletions(-)
 create mode 100644 ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
 create mode 100644 ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql

diff --git a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
new file mode 100644
index 0000000000000..cdf49e8755b12
--- /dev/null
+++ b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
@@ -0,0 +1 @@
+ALTER TABLE nft_metadata_crawler.parsed_asset_uris DROP COLUMN do_not_parse;
diff --git a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql
new file mode 100644
index 0000000000000..1e480c0d5d43f
--- /dev/null
+++ b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql
@@ -0,0 +1 @@
+ALTER TABLE nft_metadata_crawler.parsed_asset_uris ADD COLUMN do_not_parse BOOLEAN NOT NULL DEFAULT FALSE;
diff --git a/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris.rs b/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris.rs
index 5fd76d09f3536..4316b1f6b4797 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris.rs
@@ -19,6 +19,7 @@ pub struct NFTMetadataCrawlerURIs {
     json_parser_retry_count: i32,
     image_optimizer_retry_count: i32,
     animation_optimizer_retry_count: i32,
+    do_not_parse: bool,
 }
 
 impl NFTMetadataCrawlerURIs {
@@ -33,6 +34,7 @@ impl NFTMetadataCrawlerURIs {
             json_parser_retry_count: 0,
             image_optimizer_retry_count: 0,
             animation_optimizer_retry_count: 0,
+            do_not_parse: false,
         }
     }
 
@@ -142,4 +144,12 @@ impl NFTMetadataCrawlerURIs {
     pub fn increment_animation_optimizer_retry_count(&mut self) {
         self.animation_optimizer_retry_count += 1;
     }
+
+    pub fn get_do_not_parse(&self) -> bool {
+        self.do_not_parse
+    }
+
+    pub fn set_do_not_parse(&mut self, do_not_parse: bool) {
+        self.do_not_parse = do_not_parse;
+    }
 }
diff --git a/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris_query.rs b/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris_query.rs
index 5d030d6ec1c2a..11bb7b0ab99f2 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris_query.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/models/nft_metadata_crawler_uris_query.rs
@@ -26,6 +26,7 @@ pub struct NFTMetadataCrawlerURIsQuery {
     pub image_optimizer_retry_count: i32,
     pub animation_optimizer_retry_count: i32,
     pub inserted_at: chrono::NaiveDateTime,
+    pub do_not_parse: bool,
 }
 
 impl NFTMetadataCrawlerURIsQuery {
@@ -102,3 +103,21 @@ impl NFTMetadataCrawlerURIsQuery {
         })
     }
 }
+
+impl Default for NFTMetadataCrawlerURIsQuery {
+    fn default() -> Self {
+        Self {
+            asset_uri: "".to_string(),
+            raw_image_uri: None,
+            raw_animation_uri: None,
+            cdn_json_uri: None,
+            cdn_image_uri: None,
+            cdn_animation_uri: None,
+            json_parser_retry_count: 0,
+            image_optimizer_retry_count: 0,
+            animation_optimizer_retry_count: 0,
+            inserted_at: chrono::NaiveDateTime::default(),
+            do_not_parse: false,
+        }
+    }
+}
diff --git a/ecosystem/nft-metadata-crawler-parser/src/schema.rs b/ecosystem/nft-metadata-crawler-parser/src/schema.rs
index a26e10c680497..8c1de4b3e2471 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/schema.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/schema.rs
@@ -21,6 +21,7 @@ pub mod nft_metadata_crawler {
             image_optimizer_retry_count -> Int4,
             animation_optimizer_retry_count -> Int4,
             inserted_at -> Timestamp,
+            do_not_parse -> Bool,
         }
     }
 
diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/counters.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/counters.rs
index 18e0bf1bee066..3eaa98ca19ab5 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/counters.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/counters.rs
 pub static PARSER_FAIL_COUNT: Lazy<IntCounter> = Lazy::new(|| {
     .unwrap()
 });
 
+/// Number of times the NFT Metadata Crawler Parser has received a URI marked as not to parse
+pub static DO_NOT_PARSE_COUNT: Lazy<IntCounter> = Lazy::new(|| {
+    register_int_counter!(
+        "nft_metadata_crawler_parser_do_not_parse_count",
+        "Number of times the parser received a URI marked as not to parse",
+    )
+    .unwrap()
+});
+
 // PUBSUB METRICS
 
 /// Number of times a PubSub message has successfully been ACK'd
diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/database.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/database.rs
index cd608ee007da3..57fe8ff2cbcb8 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/database.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/database.rs
@@ -34,12 +34,12 @@ pub fn run_migrations(pool: &Pool<ConnectionManager<PgConnection>>) {
 /// Upserts URIs into database
 pub fn upsert_uris(
     conn: &mut PooledConnection<ConnectionManager<PgConnection>>,
-    entry: NFTMetadataCrawlerURIs,
+    entry: &NFTMetadataCrawlerURIs,
 ) -> anyhow::Result<usize> {
     use schema::nft_metadata_crawler::parsed_asset_uris::dsl::*;
 
     let query = diesel::insert_into(schema::nft_metadata_crawler::parsed_asset_uris::table)
-        .values(&entry)
+        .values(entry)
         .on_conflict(asset_uri)
         .do_update()
         .set((
@@ -51,6 +51,7 @@ pub fn upsert_uris(
             image_optimizer_retry_count.eq(excluded(image_optimizer_retry_count)),
             json_parser_retry_count.eq(excluded(json_parser_retry_count)),
             animation_optimizer_retry_count.eq(excluded(animation_optimizer_retry_count)),
+            do_not_parse.eq(excluded(do_not_parse)),
         ));
 
     let debug_query = diesel::debug_query::<diesel::pg::Pg, _>(&query).to_string();
diff --git a/ecosystem/nft-metadata-crawler-parser/src/utils/gcs.rs b/ecosystem/nft-metadata-crawler-parser/src/utils/gcs.rs
index 72f49e5586f73..2c605e49c572f 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/utils/gcs.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/utils/gcs.rs
@@ -22,7 +22,7 @@ use std::time::Duration;
 pub async fn write_json_to_gcs(
     bucket: String,
     id: String,
-    json: Value,
+    json: &Value,
     client: &Client,
 ) -> anyhow::Result<String> {
     GCS_UPLOAD_INVOCATION_COUNT.inc();
diff --git a/ecosystem/nft-metadata-crawler-parser/src/worker.rs b/ecosystem/nft-metadata-crawler-parser/src/worker.rs
index 4e77169d83b75..7b2631468b5b0 100644
--- a/ecosystem/nft-metadata-crawler-parser/src/worker.rs
+++ b/ecosystem/nft-metadata-crawler-parser/src/worker.rs
@@ -306,15 +306,19 @@ impl Worker {
     pub async fn parse(&mut self) -> anyhow::Result<()> {
         // Deduplicate asset_uri
         // Exit if not force or if asset_uri has already been parsed
-        if !self.force
-            && NFTMetadataCrawlerURIsQuery::get_by_asset_uri(self.asset_uri.clone(), &mut self.conn)
-                .is_some()
-        {
+        let prev_model =
+            NFTMetadataCrawlerURIsQuery::get_by_asset_uri(self.asset_uri.clone(), &mut self.conn);
+        if !self.force && prev_model.is_some() {
             self.log_info("Duplicate asset_uri found, skipping parse");
             DUPLICATE_ASSET_URI_COUNT.inc();
             return Ok(());
         }
 
+        if prev_model.unwrap_or_default().do_not_parse {
+            self.log_info("do_not_parse is true, skipping parse");
+            return Ok(());
+        }
+
         // Skip if asset_uri contains any of the uris in URI_SKIP_LIST
         if let Some(blacklist) = self.config.uri_blacklist.clone() {
             if blacklist.iter().any(|uri| self.asset_uri.contains(uri)) {
@@ -326,8 +330,12 @@ impl Worker {
 
         // Skip if asset_uri is not a valid URI
         if Url::parse(&self.asset_uri).is_err() {
-            self.log_info("URI is invalid, skipping parse");
+            self.log_info("URI is invalid, skipping parse, marking as do_not_parse");
+            self.model.set_do_not_parse(true);
             SKIP_URI_COUNT.with_label_values(&["invalid"]).inc();
+            if let Err(e) = upsert_uris(&mut self.conn, &self.model) {
+                self.log_error("Commit to Postgres failed", &e);
+            }
             return Ok(());
         }
 
@@ -369,7 +377,7 @@ impl Worker {
             let cdn_json_uri_result = write_json_to_gcs(
                 self.config.bucket.clone(),
                 self.asset_data_id.clone(),
-                json,
+                &json,
                 &self.gcs_client,
             )
             .await;
@@ -389,7 +397,7 @@ impl Worker {
 
         // Commit model to Postgres
         self.log_info("Committing JSON to Postgres");
-        if let Err(e) = upsert_uris(&mut self.conn, self.model.clone()) {
+        if let Err(e) = upsert_uris(&mut self.conn, &self.model) {
             self.log_error("Commit to Postgres failed", &e);
         }
 
@@ -453,6 +461,16 @@ impl Worker {
                 (vec![], ImageFormat::Png)
             });
 
+            if image.is_empty() && json == Value::Null {
+                self.log_info("Image and JSON are empty, skipping parse, marking as do_not_parse");
+                self.model.set_do_not_parse(true);
+                SKIP_URI_COUNT.with_label_values(&["empty"]).inc();
+                if let Err(e) = upsert_uris(&mut self.conn, &self.model) {
+                    self.log_error("Commit to Postgres failed", &e);
+                }
+                return Ok(());
+            }
+
             // Save resized and optimized image to GCS
             if !image.is_empty() {
                 self.log_info("Writing image to GCS");
@@ -481,7 +499,7 @@ impl Worker {
 
         // Commit model to Postgres
         self.log_info("Committing image to Postgres");
-        if let Err(e) = upsert_uris(&mut self.conn, self.model.clone()) {
+        if let Err(e) = upsert_uris(&mut self.conn, &self.model) {
             self.log_error("Commit to Postgres failed", &e);
         }
 
@@ -570,7 +588,7 @@ impl Worker {
 
         // Commit model to Postgres
         self.log_info("Committing animation to Postgres");
-        if let Err(e) = upsert_uris(&mut self.conn, self.model.clone()) {
+        if let Err(e) = upsert_uris(&mut self.conn, &self.model) {
             self.log_error("Commit to Postgres failed", &e);
         }
 

From 47d1e81eb72fb1120db39ae717642bfc99aa0f37 Mon Sep 17 00:00:00 2001
From: Teng Zhang 
Date: Wed, 31 Jan 2024 22:57:46 -0800
Subject: [PATCH 35/44] add constraints (#11834)
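
A cast whose operand type is still an unconstrained inference variable (for
example the lambda parameter addend in
vector::fold(v, (0 as u64), |sum, addend| sum + (addend as u64))) is no
longer rejected as a non-number. Instead, the checker adds a
Constraint::SomeNumber constraint over all integer types to that variable,
so the cast type-checks once the variable unifies with an integer type and
still errors otherwise, as exercised by the new lambda_cast tests.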

---
 .../tests/checking/inlining/lambda_cast.exp   | 74 +++++++++++++++++++
 .../tests/checking/inlining/lambda_cast.move  | 16 ++++
 .../checking/inlining/lambda_cast_err.exp     |  7 ++
 .../checking/inlining/lambda_cast_err.move    |  9 +++
 .../tests/checking/specs/len_ok.exp           |  8 ++
 .../move-model/src/builder/exp_builder.rs     | 32 +++++---
 6 files changed, 137 insertions(+), 9 deletions(-)
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.move
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.exp
 create mode 100644 third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.move

diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.exp b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.exp
new file mode 100644
index 0000000000000..6462e35c10d98
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.exp
@@ -0,0 +1,74 @@
+// ---- Model Dump
+module 0x12391283::M {
+    use std::vector;
+    private fun test_1(): u64 {
+        {
+          let gas_schedule_blob: vector<u8> = Vector(115, 115, 95, 112, 97, 99, 107, 101, 100, 32, 0, 0, 0, 0, 0, 0, 0);
+          {
+            let (v: vector<u8>, init: u64) = Tuple(gas_schedule_blob, Cast(0));
+            {
+              let accu: u64 = init;
+              {
+                let (v: vector<u8>) = Tuple(v);
+                vector::reverse(Borrow(Mutable)(v));
+                loop {
+                  if Not(vector::is_empty(Borrow(Immutable)(v))) {
+                    {
+                      let e: u8 = vector::pop_back(Borrow(Mutable)(v));
+                      {
+                        let (elem: u8) = Tuple(e);
+                        accu: u64 = {
+                          let (sum: u64, addend: u8) = Tuple(accu, elem);
+                          Add(sum, Cast(addend))
+                        }
+                      };
+                      Tuple()
+                    }
+                  } else {
+                    break
+                  }
+                };
+                Tuple()
+              };
+              accu
+            }
+          }
+        }
+    }
+    private fun test_2(): u64 {
+        {
+          let gas_schedule_blob: vector<u8> = Vector(115, 115, 95, 112, 97, 99, 107, 101, 100, 32, 0, 0, 0, 0, 0, 0, 0);
+          {
+            let (v: vector<u8>, init: u64) = Tuple(gas_schedule_blob, Cast(0));
+            {
+              let accu: u64 = init;
+              {
+                let (v: vector<u8>) = Tuple(v);
+                vector::reverse(Borrow(Mutable)(v));
+                loop {
+                  if Not(vector::is_empty(Borrow(Immutable)(v))) {
+                    {
+                      let e: u8 = vector::pop_back(Borrow(Mutable)(v));
+                      {
+                        let (elem: u8) = Tuple(e);
+                        accu: u64 = {
+                          let (sum: u64, addend: u8) = Tuple(accu, elem);
+                          Add(sum, Cast(addend))
+                        }
+                      };
+                      Tuple()
+                    }
+                  } else {
+                    break
+                  }
+                };
+                Tuple()
+              };
+              accu
+            }
+          }
+        }
+    }
+    spec fun $test_1(): u64;
+    spec fun $test_2(): u64;
+} // end 0x12391283::M
diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.move b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.move
new file mode 100644
index 0000000000000..302dccb072cac
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast.move
@@ -0,0 +1,16 @@
+module 0x12391283::M {
+    use std::vector;
+    fun test_1() : u64 {
+        let gas_schedule_blob: vector<u8> = vector[
+            115, 115, 95, 112, 97, 99, 107, 101, 100, 32, 0, 0, 0, 0, 0, 0, 0,
+        ];
+        vector::fold(gas_schedule_blob, (0 as u64), |sum, addend| sum + (addend as u64))
+    }
+
+    fun test_2() : u64 {
+        let gas_schedule_blob: vector<u8> = vector[
+            115, 115, 95, 112, 97, 99, 107, 101, 100, 32, 0, 0, 0, 0, 0, 0, 0,
+        ];
+        vector::fold(gas_schedule_blob, (0 as u64), |sum, addend| sum + (addend as u64))
+    }
+}
diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.exp b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.exp
new file mode 100644
index 0000000000000..451aeb2513576
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.exp
@@ -0,0 +1,7 @@
+
+Diagnostics:
+error: invalid call of `vector::fold`: expected `integer` but found `vector<u8>` for argument 3
+  ┌─ tests/checking/inlining/lambda_cast_err.move:7:53
+  │
+7 │         vector::fold(gas_schedule_blob, (0 as u64), |sum, addend| sum + (addend as u64))
+  │                                                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.move b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.move
new file mode 100644
index 0000000000000..8c52cc09a751c
--- /dev/null
+++ b/third_party/move/move-compiler-v2/tests/checking/inlining/lambda_cast_err.move
@@ -0,0 +1,9 @@
+module 0x12391283::M {
+    use std::vector;
+    fun test(gas_schedule_blob: vector<u8>) : u64 {
+        let gas_schedule_blob: vector<vector<u8>> = vector[
+            vector[115], vector[115], vector[95],
+        ];
+        vector::fold(gas_schedule_blob, (0 as u64), |sum, addend| sum + (addend as u64))
+    }
+}
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
index 1ef2cd90f4a7c..b6f5e04220537 100644
--- a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
+++ b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
@@ -1,3 +1,11 @@
+
+Diagnostics:
+warning: Unused local variable `len`. Consider removing or prefixing with an underscore: `_len`
+  ┌─ tests/checking/specs/len_ok.move:4:13
+  │
+4 │         let len = 5;
+  │             ^^^
+
 // ---- Model Dump
 module 0x42::m {
     private fun f(gallery: &vector) {
diff --git a/third_party/move/move-model/src/builder/exp_builder.rs b/third_party/move/move-model/src/builder/exp_builder.rs
index c7026dae64a84..37c68250ab6ef 100644
--- a/third_party/move/move-model/src/builder/exp_builder.rs
+++ b/third_party/move/move-model/src/builder/exp_builder.rs
@@ -1406,17 +1406,31 @@ impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'mo
                 let (exp_ty, exp) = self.translate_exp_free(exp);
                 if !ty.is_number() {
                     self.error(&loc, "cast target type must be a number");
-                    self.new_error_exp()
-                } else if !self.subs.is_some_number(&exp_ty) {
+                    return self.new_error_exp();
+                } else if !self.subs.is_some_number(&exp_ty)
+                    && !self.subs.is_free_var_without_constraints(&exp_ty)
+                {
                     self.error(&loc, "operand of cast must be a number");
-                    self.new_error_exp()
-                } else {
-                    ExpData::Call(
-                        self.new_node_id_with_type_loc(&ty, &loc),
-                        Operation::Cast,
-                        vec![exp.into_exp()],
-                    )
+                    return self.new_error_exp();
+                } else if let Type::Var(idx) = exp_ty {
+                    let all_ints = PrimitiveType::all_int_types()
+                        .into_iter()
+                        .collect::<BTreeSet<_>>();
+                    self.subs
+                        .add_constraint(
+                            &self.unification_context,
+                            idx,
+                            loc.clone(),
+                            WideningOrder::LeftToRight,
+                            Constraint::SomeNumber(all_ints),
+                        )
+                        .expect("success on var");
                 }
+                ExpData::Call(
+                    self.new_node_id_with_type_loc(&ty, &loc),
+                    Operation::Cast,
+                    vec![exp.into_exp()],
+                )
             },
             EA::Exp_::Annotate(exp, typ) => {
                 let ty = self.translate_type(typ);

From 629539e032de52c209086766133e0d02b286da99 Mon Sep 17 00:00:00 2001
From: Zekun Wang <41706692+fEst1ck@users.noreply.github.com>
Date: Thu, 1 Feb 2024 02:41:07 -0500
Subject: [PATCH 36/44] [Compiler V2] Derive abstract domain (#11674)

Implements a procedural macro to automatically derive AbstractDomain for structs. The derived join method pair-wise joins the selected fields of a struct (or all fields, for structs with anonymous fields) and returns the combined join result. The joined fields must implement AbstractDomain.

Refactors several trivial existing implementations of AbstractDomain using this derive macro.
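
For illustration, a minimal sketch of what the derive produces, using a hypothetical
analysis state (State, live, and cached are invented names, not part of this patch);
the commented expansion mirrors gen_join_field in the macro below:

    use abstract_domain_derive::AbstractDomain;
    use move_stackless_bytecode::dataflow_domains::{AbstractDomain, JoinResult, SetDomain};

    // Hypothetical dataflow state, for illustration only.
    #[derive(AbstractDomain)]
    struct State {
        // pair-wise joined with `other.live`
        live: SetDomain<usize>,
        // excluded from the derived `join`
        #[no_join]
        cached: usize,
    }

    // The derive expands to roughly:
    //
    // impl AbstractDomain for State {
    //     fn join(&mut self, other: &Self) -> JoinResult {
    //         let mut join_result = JoinResult::Unchanged;
    //         join_result = JoinResult::combine(join_result, self.live.join(&other.live));
    //         join_result
    //     }
    // }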
---
 Cargo.lock                                    |  13 ++
 third_party/move/move-compiler-v2/Cargo.toml  |   1 +
 .../pipeline/livevar_analysis_processor.rs    |   9 +-
 .../pipeline/reference_safety_processor.rs    |  12 +-
 .../move/move-model/bytecode/Cargo.toml       |   1 +
 .../abstract_domain_derive/Cargo.toml         |  17 +++
 .../abstract_domain_derive/src/lib.rs         | 136 ++++++++++++++++++
 .../abstract_domain_derive/tests/test.rs      |  80 +++++++++++
 .../bytecode/src/borrow_analysis.rs           |  12 +-
 .../bytecode/src/dataflow_domains.rs          |   1 -
 .../move-model/bytecode/src/usage_analysis.rs |  39 +----
 .../move-prover/bytecode-pipeline/Cargo.toml  |   1 +
 .../src/packed_types_analysis.rs              |  16 +--
 13 files changed, 262 insertions(+), 76 deletions(-)
 create mode 100644 third_party/move/move-model/bytecode/abstract_domain_derive/Cargo.toml
 create mode 100644 third_party/move/move-model/bytecode/abstract_domain_derive/src/lib.rs
 create mode 100644 third_party/move/move-model/bytecode/abstract_domain_derive/tests/test.rs

diff --git a/Cargo.lock b/Cargo.lock
index b67a858e37730..a38195b3653b5 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8,6 +8,16 @@ version = "0.11.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3"
 
+[[package]]
+name = "abstract-domain-derive"
+version = "0.1.0"
+dependencies = [
+ "move-stackless-bytecode",
+ "proc-macro2 1.0.75",
+ "quote 1.0.35",
+ "syn 2.0.47",
+]
+
 [[package]]
 name = "addr2line"
 version = "0.21.0"
@@ -10606,6 +10616,7 @@ dependencies = [
 name = "move-compiler-v2"
 version = "0.1.0"
 dependencies = [
+ "abstract-domain-derive",
  "anyhow",
  "bcs 0.1.4",
  "clap 4.4.12",
@@ -10976,6 +10987,7 @@ dependencies = [
 name = "move-prover-bytecode-pipeline"
 version = "0.1.0"
 dependencies = [
+ "abstract-domain-derive",
  "anyhow",
  "async-trait",
  "atty",
@@ -11034,6 +11046,7 @@ dependencies = [
 name = "move-stackless-bytecode"
 version = "0.1.0"
 dependencies = [
+ "abstract-domain-derive",
  "anyhow",
  "codespan",
  "codespan-reporting",
diff --git a/third_party/move/move-compiler-v2/Cargo.toml b/third_party/move/move-compiler-v2/Cargo.toml
index 0d688ea2e58a6..c9e04b143c012 100644
--- a/third_party/move/move-compiler-v2/Cargo.toml
+++ b/third_party/move/move-compiler-v2/Cargo.toml
@@ -10,6 +10,7 @@ publish = false
 edition = "2021"
 
 [dependencies]
+abstract-domain-derive = { path = "../move-model/bytecode/abstract_domain_derive" }
 anyhow = "1.0.62"
 move-binary-format = { path = "../move-binary-format" }
 move-bytecode-source-map = { path = "../move-ir-compiler/move-bytecode-source-map" }
diff --git a/third_party/move/move-compiler-v2/src/pipeline/livevar_analysis_processor.rs b/third_party/move/move-compiler-v2/src/pipeline/livevar_analysis_processor.rs
index a1673c6ff5bf3..cac2e160ba1a3 100644
--- a/third_party/move/move-compiler-v2/src/pipeline/livevar_analysis_processor.rs
+++ b/third_party/move/move-compiler-v2/src/pipeline/livevar_analysis_processor.rs
@@ -12,6 +12,7 @@
 
 use super::ability_checker::check_copy;
 use crate::pipeline::ability_checker::has_ability;
+use abstract_domain_derive::AbstractDomain;
 use codespan_reporting::diagnostic::Severity;
 use itertools::Itertools;
 use move_binary_format::file_format::{Ability, CodeOffset};
@@ -218,17 +219,11 @@ impl LiveVarAnalysisProcessor {
 // Dataflow Analysis
 
 /// State of the livevar analysis,
-#[derive(Debug, Clone, Eq, PartialEq, PartialOrd)]
+#[derive(AbstractDomain, Debug, Clone, Eq, PartialEq, PartialOrd)]
 struct LiveVarState {
     livevars: MapDomain<TempIndex, LiveVarInfo>,
 }
 
-impl AbstractDomain for LiveVarState {
-    fn join(&mut self, other: &Self) -> JoinResult {
-        self.livevars.join(&other.livevars)
-    }
-}
-
 impl AbstractDomain for LiveVarInfo {
     fn join(&mut self, other: &Self) -> JoinResult {
         let count = self.usages.len();
diff --git a/third_party/move/move-compiler-v2/src/pipeline/reference_safety_processor.rs b/third_party/move/move-compiler-v2/src/pipeline/reference_safety_processor.rs
index 86a70cc5abc79..76ab95613da6a 100644
--- a/third_party/move/move-compiler-v2/src/pipeline/reference_safety_processor.rs
+++ b/third_party/move/move-compiler-v2/src/pipeline/reference_safety_processor.rs
@@ -40,6 +40,7 @@ use crate::{
     pipeline::livevar_analysis_processor::{LiveVarAnnotation, LiveVarInfoAtCodeOffset},
     Experiment, Options,
 };
+use abstract_domain_derive::AbstractDomain;
 use codespan_reporting::diagnostic::Severity;
 use im::ordmap::Entry;
 use itertools::Itertools;
@@ -73,9 +74,10 @@ use std::{
 /// `MemoryLocation`, as well as the children of the node, given by list of `BorrowEdge`s.
 /// The node also has backlinks to its parents, given by a set of `LifetimeLabel`, for
 /// more flexible navigation through the (acyclic) graph.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(AbstractDomain, Clone, Debug, PartialEq, Eq)]
 struct LifetimeNode {
     /// Memory location associated with this node.
+    #[no_join]
     location: MemoryLocation,
     /// Outgoing edges to children.
     children: SetDomain<BorrowEdge>,
@@ -192,14 +194,6 @@ pub struct LifetimeState {
     moved: SetDomain<TempIndex>,
 }
 
-impl AbstractDomain for LifetimeNode {
-    fn join(&mut self, other: &Self) -> JoinResult {
-        self.children
-            .join(&other.children)
-            .combine(self.parents.join(&other.parents))
-    }
-}
-
 impl AbstractDomain for LifetimeState {
     /// The join operator of the dataflow analysis domain. This calls into `join_label_map`
     /// which does the work of graph gluing.
diff --git a/third_party/move/move-model/bytecode/Cargo.toml b/third_party/move/move-model/bytecode/Cargo.toml
index f1c6e367485c5..2d4d6114a3e2a 100644
--- a/third_party/move/move-model/bytecode/Cargo.toml
+++ b/third_party/move/move-model/bytecode/Cargo.toml
@@ -10,6 +10,7 @@ publish = false
 edition = "2021"
 
 [dependencies]
+abstract-domain-derive = { path = "./abstract_domain_derive" }
 move-binary-format = { path = "../../move-binary-format" }
 move-borrow-graph = { path = "../../move-borrow-graph" }
 move-bytecode-verifier = { path = "../../move-bytecode-verifier" }
diff --git a/third_party/move/move-model/bytecode/abstract_domain_derive/Cargo.toml b/third_party/move/move-model/bytecode/abstract_domain_derive/Cargo.toml
new file mode 100644
index 0000000000000..5ecc1842031b8
--- /dev/null
+++ b/third_party/move/move-model/bytecode/abstract_domain_derive/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "abstract-domain-derive"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[lib]
+proc-macro = true
+
+[dependencies]
+syn = "2.0"
+quote = "1.0"
+proc-macro2 = "1.0"
+
+[dev-dependencies]
+move-stackless-bytecode = { path = ".." }
diff --git a/third_party/move/move-model/bytecode/abstract_domain_derive/src/lib.rs b/third_party/move/move-model/bytecode/abstract_domain_derive/src/lib.rs
new file mode 100644
index 0000000000000..b32e5efe48a8c
--- /dev/null
+++ b/third_party/move/move-model/bytecode/abstract_domain_derive/src/lib.rs
@@ -0,0 +1,136 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+//! Derive macro for `AbstractDomain`
+//!
+//! Currently we can only derive for structs.
+//! For tuple structs, the derived join pair-wise joins each field;
+//! for structs with named fields, the derived join pair-wise joins each field without the #[no_join] attribute.
+
+use proc_macro::TokenStream;
+use quote::{quote, ToTokens};
+use syn::{self, parse_macro_input, DeriveInput, Fields};
+
+/// Given a field name, generates TokenStream of
+/// `join_result = JoinResult::combine(join_result, self.field_name.join(&other.field_name));`
+fn gen_join_field(field: proc_macro2::TokenStream) -> proc_macro2::TokenStream {
+    quote! {
+        join_result = JoinResult::combine(join_result, self.#field.join(&other.#field));
+    }
+}
+
+#[proc_macro_derive(AbstractDomain, attributes(no_join))]
+/// Derives `AbstractDomain` for structs. The derived `join` method pair-wise joins selected fields of a struct,
+/// or all fields for structs with anonymous fields, and returns the combined join result.
+/// The joined fields must implement `AbstractDomain`.
+/// # Usage
+///
+/// Add `#[derive(AbstractDomain)]` attribute on the struct definition,
+/// and `#[no_join]` on the fields not to be pair-wise joined.
+/// All fields without `#[no_join]` will be pair-wise joined.
+/// For example,
+/// ```
+/// use move_stackless_bytecode::{
+///     dataflow_domains::{AbstractDomain, JoinResult, MapDomain, SetDomain},
+///     stackless_bytecode::{BorrowEdge, BorrowNode}
+/// };
+/// use abstract_domain_derive::AbstractDomain;
+/// pub struct BorrowInfo {
+///     live_nodes: SetDomain<BorrowNode>,
+
+///     borrowed_by: MapDomain<BorrowNode, SetDomain<BorrowEdge>>,
+///     /// Backward borrow information. This field is not used during analysis, but computed once
+///     /// analysis is done.
+///     borrows_from: MapDomain<BorrowNode, SetDomain<BorrowEdge>>,
+/// }
+///
+/// impl AbstractDomain for BorrowInfo {
+///     fn join(&mut self, other: &Self) -> JoinResult {
+///         let live_changed = self.live_nodes.join(&other.live_nodes);
+///         let borrowed_changed = self.borrowed_by.join(&other.borrowed_by);
+///         borrowed_changed.combine(live_changed)
+///     }
+/// }
+/// ```
+/// Can be derived with
+/// ```
+/// use move_stackless_bytecode::{
+///     dataflow_domains::{AbstractDomain, JoinResult, MapDomain, SetDomain},
+///     stackless_bytecode::{BorrowEdge, BorrowNode}
+/// };
+/// use abstract_domain_derive::AbstractDomain;
+/// #[derive(AbstractDomain)]
+/// pub struct BorrowInfo {
+///     live_nodes: SetDomain<BorrowNode>,
+///     borrowed_by: MapDomain<BorrowNode, SetDomain<BorrowEdge>>,
+///     // this field is not joined
+///     #[no_join]
+///     borrows_from: MapDomain<BorrowNode, SetDomain<BorrowEdge>>,
+/// }
+/// ```
+/// For structs with unnamed fields, the derived `join` method joins *every* field. For example,
+/// ```
+/// use move_stackless_bytecode::dataflow_domains::{AbstractDomain, JoinResult, SetDomain};
+/// use abstract_domain_derive::AbstractDomain;
+/// type TempIndex = usize;
+/// #[derive(AbstractDomain)]
+/// struct LiveVars(SetDomain<TempIndex>);
+/// ```
+/// derives a `join` that joins the wrapped field.
+///
+/// This also works for unit structs. For example,
+/// ```
+/// use abstract_domain_derive::AbstractDomain;
+/// use move_stackless_bytecode::dataflow_domains::{AbstractDomain, JoinResult};
+/// #[derive(AbstractDomain)]
+/// struct Unit;
+/// ```
+/// derives a `join` that does nothing and always returns `Unchanged` since `Unit` has no fields.
+pub fn abstract_domain_derive(input: TokenStream) -> TokenStream {
+    let input = parse_macro_input!(input as DeriveInput);
+    let name = &input.ident;
+    // statements for joining fields
+    let join_fields: Vec<_> = if let syn::Data::Struct(data_struct) = &input.data {
+        match &data_struct.fields {
+            Fields::Named(fields_named) => fields_named
+                .named
+                .iter()
+                .filter_map(|field| {
+                    if field
+                        .attrs
+                        .iter()
+                        .any(|attr| attr.path().is_ident("no_join"))
+                    {
+                        None
+                    } else {
+                        let field_name =
+                            field.ident.as_ref().expect("field name").to_token_stream();
+                        Some(gen_join_field(field_name))
+                    }
+                })
+                .collect(),
+            Fields::Unnamed(fields_unnamed) => fields_unnamed
+                .unnamed
+                .iter()
+                .enumerate()
+                .map(|(idx, _)| {
+                    let field_index = syn::Index::from(idx).to_token_stream();
+                    gen_join_field(field_index)
+                })
+                .collect(),
+            Fields::Unit => Vec::new(),
+        }
+    } else {
+        panic!("AbstractDomain is only implemented for structs");
+    };
+    let expanded = quote! {
+        impl AbstractDomain for #name {
+            fn join(&mut self, other: &Self) -> JoinResult {
+                let mut join_result = JoinResult::Unchanged;
+                #(#join_fields)*
+                join_result
+            }
+        }
+    };
+    expanded.into()
+}
diff --git a/third_party/move/move-model/bytecode/abstract_domain_derive/tests/test.rs b/third_party/move/move-model/bytecode/abstract_domain_derive/tests/test.rs
new file mode 100644
index 0000000000000..b1b2bb4a5600b
--- /dev/null
+++ b/third_party/move/move-model/bytecode/abstract_domain_derive/tests/test.rs
@@ -0,0 +1,80 @@
+use abstract_domain_derive::AbstractDomain;
+use move_stackless_bytecode::dataflow_domains::{AbstractDomain, JoinResult};
+
+#[derive(AbstractDomain)]
+struct Unit;
+
+/// Add a formal top and bottom element to type `T`
+#[derive(PartialEq, Eq, Clone)]
+pub enum Plus2<T> {
+    Top,
+    Mid(T),
+    Bot,
+}
+
+impl<T: Clone + Eq> AbstractDomain for Plus2<T> {
+    fn join(&mut self, other: &Self) -> JoinResult {
+        match (&self, other) {
+            (Plus2::Top, _) | (_, Plus2::Bot) => JoinResult::Unchanged,
+            (Plus2::Mid(x), Plus2::Mid(y)) if x == y => JoinResult::Unchanged,
+            (Plus2::Mid(_), _mid_or_top) => {
+                *self = Plus2::Top;
+                JoinResult::Changed
+            },
+            (Plus2::Bot, _mid_or_top) => {
+                *self = other.clone();
+                JoinResult::Changed
+            },
+        }
+    }
+}
+
+type Three = Plus2<()>;
+
+#[derive(Eq, PartialEq, Clone, AbstractDomain)]
+struct Foo(Three, Three);
+
+#[derive(AbstractDomain)]
+struct Bar {
+    x: Three,
+    #[no_join]
+    y: Three,
+}
+
+#[test]
+fn test_unit() {
+    let mut x = Unit;
+    let y = Unit;
+    assert!(x.join(&y) == JoinResult::Unchanged);
+}
+
+#[test]
+fn test_plus2() {
+    let mut top: Three = Plus2::Top;
+    let mut bot: Three = Plus2::Bot;
+    assert!(top.join(&bot) == JoinResult::Unchanged);
+    assert!(bot.join(&top) == JoinResult::Changed);
+    assert!(bot == Plus2::Top);
+}
+
+#[test]
+fn test_named_tuple_derive() {
+    let mut x = Foo(Plus2::Bot, Plus2::Bot);
+    let y = Foo(Plus2::Top, Plus2::Mid(()));
+    assert!(x.join(&y) == JoinResult::Changed);
+    assert!(x == Foo(Plus2::Top, Plus2::Mid(())));
+}
+
+#[test]
+fn test_struct_derive() {
+    let mut x = Bar {
+        x: Plus2::Bot,
+        y: Plus2::Bot,
+    };
+    let y = Bar {
+        x: Plus2::Top,
+        y: Plus2::Top,
+    };
+    assert!(x.join(&y) == JoinResult::Changed);
+    assert!(x.x == Plus2::Top && x.y == Plus2::Bot);
+}
diff --git a/third_party/move/move-model/bytecode/src/borrow_analysis.rs b/third_party/move/move-model/bytecode/src/borrow_analysis.rs
index dc6ae36f03d16..bb44eea1fe26f 100644
--- a/third_party/move/move-model/bytecode/src/borrow_analysis.rs
+++ b/third_party/move/move-model/bytecode/src/borrow_analysis.rs
@@ -13,6 +13,7 @@ use crate::{
     stackless_bytecode::{AssignKind, BorrowEdge, BorrowNode, Bytecode, IndexEdgeKind, Operation},
     stackless_control_flow_graph::StacklessControlFlowGraph,
 };
+use abstract_domain_derive::AbstractDomain;
 use itertools::Itertools;
 use move_binary_format::file_format::CodeOffset;
 use move_model::{
@@ -24,7 +25,7 @@ use move_model::{
 };
 use std::{borrow::BorrowMut, collections::BTreeMap, fmt};
 
-#[derive(Debug, Clone, Eq, Ord, PartialEq, PartialOrd, Default)]
+#[derive(AbstractDomain, Debug, Clone, Eq, Ord, PartialEq, PartialOrd, Default)]
 pub struct BorrowInfo {
     /// Contains the nodes which are alive. This excludes nodes which are alive because
     /// other nodes which are alive borrow from them.
@@ -35,6 +36,7 @@ pub struct BorrowInfo {
 
     /// Backward borrow information. This field is not used during analysis, but computed once
     /// analysis is done.
+    #[no_join]
     borrows_from: MapDomain<BorrowNode, SetDomain<BorrowEdge>>,
 }
 
@@ -742,14 +744,6 @@ impl<'a> TransferFunctions for BorrowAnalysis<'a> {
 
 impl<'a> DataflowAnalysis for BorrowAnalysis<'a> {}
 
-impl AbstractDomain for BorrowInfo {
-    fn join(&mut self, other: &Self) -> JoinResult {
-        let live_changed = self.live_nodes.join(&other.live_nodes);
-        let borrowed_changed = self.borrowed_by.join(&other.borrowed_by);
-        borrowed_changed.combine(live_changed)
-    }
-}
-
 // =================================================================================================
 // Formatting
 
diff --git a/third_party/move/move-model/bytecode/src/dataflow_domains.rs b/third_party/move/move-model/bytecode/src/dataflow_domains.rs
index f919252790f60..513e6f3889ca0 100644
--- a/third_party/move/move-model/bytecode/src/dataflow_domains.rs
+++ b/third_party/move/move-model/bytecode/src/dataflow_domains.rs
@@ -41,7 +41,6 @@ impl JoinResult {
 
 /// A trait to be implemented by domains which support a join.
 pub trait AbstractDomain {
-    // TODO: would be cool to add a derive(Join) macro for this
     fn join(&mut self, other: &Self) -> JoinResult;
 }
 
diff --git a/third_party/move/move-model/bytecode/src/usage_analysis.rs b/third_party/move/move-model/bytecode/src/usage_analysis.rs
index 07eb2771cf0b0..208c7e1e86eba 100644
--- a/third_party/move/move-model/bytecode/src/usage_analysis.rs
+++ b/third_party/move/move-model/bytecode/src/usage_analysis.rs
@@ -10,6 +10,7 @@ use crate::{
     function_target_pipeline::{FunctionTargetProcessor, FunctionTargetsHolder, FunctionVariant},
     stackless_bytecode::{BorrowNode, Bytecode, Operation, PropKind},
 };
+use abstract_domain_derive::AbstractDomain;
 use itertools::Itertools;
 use move_binary_format::file_format::CodeOffset;
 use move_model::{
@@ -28,7 +29,7 @@ pub fn get_memory_usage<'env>(target: &FunctionTarget<'env>) -> &'env UsageState
 }
 
 /// A summary of the memory accessed / modified per function, both directly and transitively.
-#[derive(Default, Clone)]
+#[derive(AbstractDomain, Default, Clone)]
 pub struct MemoryUsage {
     // The memory directly used in the function.
     pub direct: SetDomain<QualifiedInstId<StructId>>,
@@ -38,7 +39,7 @@ pub struct MemoryUsage {
     pub all: SetDomain<QualifiedInstId<StructId>>,
 }
 
-#[derive(Default, Clone)]
+#[derive(AbstractDomain, Default, Clone)]
 pub struct UsageState {
     /// The memory accessed by this function. This is the union of the three individual fields
     /// below.
@@ -117,21 +118,6 @@ impl MemoryUsage {
     }
 }
 
-impl AbstractDomain for MemoryUsage {
-    fn join(&mut self, other: &Self) -> JoinResult {
-        match (
-            self.direct.join(&other.direct),
-            self.transitive.join(&other.transitive),
-            self.all.join(&other.all),
-        ) {
-            (JoinResult::Unchanged, JoinResult::Unchanged, JoinResult::Unchanged) => {
-                JoinResult::Unchanged
-            },
-            _ => JoinResult::Changed,
-        }
-    }
-}
-
 macro_rules! generate_inserter {
     ($field:ident, $method:ident) => {
         paste! {
@@ -183,25 +169,6 @@ impl UsageState {
     }
 }
 
-impl AbstractDomain for UsageState {
-    fn join(&mut self, other: &Self) -> JoinResult {
-        match (
-            self.accessed.join(&other.accessed),
-            self.modified.join(&other.modified),
-            self.assumed.join(&other.assumed),
-            self.asserted.join(&other.asserted),
-        ) {
-            (
-                JoinResult::Unchanged,
-                JoinResult::Unchanged,
-                JoinResult::Unchanged,
-                JoinResult::Unchanged,
-            ) => JoinResult::Unchanged,
-            _ => JoinResult::Changed,
-        }
-    }
-}
-
 struct MemoryUsageAnalysis<'a> {
     cache: SummaryCache<'a>,
 }
diff --git a/third_party/move/move-prover/bytecode-pipeline/Cargo.toml b/third_party/move/move-prover/bytecode-pipeline/Cargo.toml
index 3845e728e550a..243de643f470f 100644
--- a/third_party/move/move-prover/bytecode-pipeline/Cargo.toml
+++ b/third_party/move/move-prover/bytecode-pipeline/Cargo.toml
@@ -7,6 +7,7 @@ edition = "2021"
 license = "Apache-2.0"
 
 [dependencies]
+abstract-domain-derive = { path = "../../move-model/bytecode/abstract_domain_derive" }
 anyhow = "1.0.52"
 move-binary-format = { path = "../../move-binary-format" }
 move-core-types = { path = "../../move-core/types" }
diff --git a/third_party/move/move-prover/bytecode-pipeline/src/packed_types_analysis.rs b/third_party/move/move-prover/bytecode-pipeline/src/packed_types_analysis.rs
index 26a062f2e5e26..494973803cfca 100644
--- a/third_party/move/move-prover/bytecode-pipeline/src/packed_types_analysis.rs
+++ b/third_party/move/move-prover/bytecode-pipeline/src/packed_types_analysis.rs
@@ -2,6 +2,7 @@
 // Copyright (c) The Move Contributors
 // SPDX-License-Identifier: Apache-2.0
 
+use abstract_domain_derive::AbstractDomain;
 use move_binary_format::file_format::CodeOffset;
 use move_core_types::language_storage::{StructTag, TypeTag};
 use move_model::{
@@ -74,7 +75,7 @@ pub fn get_packed_types(
     packed_types
 }
 
-#[derive(Debug, Clone, Default, Eq, PartialOrd, PartialEq)]
+#[derive(AbstractDomain, Debug, Clone, Default, Eq, PartialOrd, PartialEq)]
 struct PackedTypesState {
     // Closed types (i.e., with no free type variables) that may be directly or transitively packed by this function.
     closed_types: SetDomain<StructTag>,
@@ -82,19 +83,6 @@ struct PackedTypesState {
     open_types: SetDomain<Type>,
 }
 
-impl AbstractDomain for PackedTypesState {
-    // TODO: would be cool to add a derive(Join) macro for this
-    fn join(&mut self, other: &Self) -> JoinResult {
-        match (
-            self.closed_types.join(&other.closed_types),
-            self.open_types.join(&other.open_types),
-        ) {
-            (JoinResult::Unchanged, JoinResult::Unchanged) => JoinResult::Unchanged,
-            _ => JoinResult::Changed,
-        }
-    }
-}
-
 struct PackedTypesAnalysis<'a> {
     cache: SummaryCache<'a>,
 }

From cf9fefd65b27a48e8c1da6d385fb5001a689d2e4 Mon Sep 17 00:00:00 2001
From: sionescu 
Date: Wed, 31 Jan 2024 02:11:16 +0000
Subject: [PATCH 37/44] Sync Terraform & Helm changes

GitOrigin-RevId: 8c34ef870907ecabefccb89970587e48f7f5562d
---
 terraform/.tflint.hcl                         |  17 -
 .../aws/.terraform.lock.hcl                   | 157 ++++++++
 terraform/aptos-node-testnet/aws/addons.tf    |  49 +--
 terraform/aptos-node-testnet/aws/dns.tf       |   4 +-
 terraform/aptos-node-testnet/aws/forge.tf     |  13 +-
 terraform/aptos-node-testnet/aws/main.tf      |  20 +-
 terraform/aptos-node-testnet/aws/variables.tf |  70 ++--
 .../gcp/.terraform.lock.hcl                   | 156 ++++++++
 terraform/aptos-node-testnet/gcp/addons.tf    |  35 +-
 terraform/aptos-node-testnet/gcp/main.tf      |  34 +-
 terraform/aptos-node-testnet/gcp/security.tf  |   1 +
 terraform/aptos-node-testnet/gcp/variables.tf | 155 ++++++--
 terraform/aptos-node/aws/cluster.tf           |   2 +-
 terraform/aptos-node/aws/kubernetes.tf        | 102 +----
 terraform/aptos-node/aws/network.tf           |   4 +-
 terraform/aptos-node/aws/security.tf          |  37 --
 terraform/aptos-node/aws/variables.tf         |  80 ++--
 terraform/aptos-node/aws/versions.tf          |   5 +-
 terraform/aptos-node/azure/cluster.tf         |   1 -
 terraform/aptos-node/azure/kubernetes.tf      |  65 ----
 terraform/aptos-node/azure/variables.tf       |  37 +-
 terraform/aptos-node/azure/versions.tf        |   2 +-
 terraform/aptos-node/gcp/cluster.tf           | 243 +++++++++---
 terraform/aptos-node/gcp/kubernetes.tf        | 104 +----
 terraform/aptos-node/gcp/main.tf              |   1 -
 terraform/aptos-node/gcp/variables.tf         | 210 +++++-----
 terraform/aptos-node/gcp/versions.tf          |   6 +-
 terraform/fullnode/aws/addons.tf              |  38 ++
 terraform/fullnode/aws/kubernetes.tf          | 109 ------
 terraform/fullnode/aws/network.tf             |   4 +-
 terraform/fullnode/aws/security.tf            |  24 --
 terraform/fullnode/aws/variables.tf           |  73 ++--
 terraform/fullnode/aws/versions.tf            |   5 +-
 terraform/fullnode/digital_ocean/variables.tf |  18 +-
 terraform/fullnode/gcp/addons.tf              |  43 +-
 terraform/fullnode/gcp/auth.tf                |   4 -
 terraform/fullnode/gcp/cluster.tf             | 276 +++++++++++--
 terraform/fullnode/gcp/kubernetes.tf          | 111 ++----
 terraform/fullnode/gcp/main.tf                |   4 +-
 terraform/fullnode/gcp/network.tf             |   6 +-
 terraform/fullnode/gcp/security.tf            |  24 --
 terraform/fullnode/gcp/variables.tf           | 275 +++++++++----
 terraform/fullnode/gcp/versions.tf            |   8 +-
 terraform/fullnode/vultr/variables.tf         |  19 +-
 terraform/helm/aptos-node/README.md           |   7 +-
 terraform/helm/aptos-node/files/haproxy.cfg   |  30 ++
 .../helm/aptos-node/templates/fullnode.yaml   |  50 ++-
 .../helm/aptos-node/templates/haproxy.yaml    |  28 +-
 .../helm/aptos-node/templates/validator.yaml  |  17 +-
 terraform/helm/aptos-node/values.yaml         |  38 +-
 terraform/helm/autoscaling/templates/dns.yaml |   2 +-
 terraform/helm/autoscaling/values.yaml        |   4 +-
 terraform/helm/fullnode/files/backup/gcs.yaml |  10 +-
 .../helm/fullnode/templates/fullnode.yaml     |  89 ++++-
 .../helm/fullnode/templates/restore.yaml      | 107 -----
 .../helm/fullnode/templates/service.yaml      |  10 +
 terraform/helm/fullnode/values.yaml           |  48 ++-
 terraform/helm/genesis/files/genesis.sh       | 126 +++---
 terraform/helm/genesis/templates/genesis.yaml |   6 -
 terraform/helm/genesis/values.yaml            |   4 -
 terraform/helm/logger/.helmignore             |  23 --
 terraform/helm/logger/Chart.yaml              |   3 -
 terraform/helm/logger/README.md               |  37 --
 terraform/helm/logger/files/vector.toml       |  70 ----
 terraform/helm/logger/templates/NOTES.txt     |   1 -
 terraform/helm/logger/templates/_helpers.tpl  |  63 ---
 terraform/helm/logger/templates/logging.yaml  | 139 -------
 .../helm/logger/templates/serviceaccount.yaml |   8 -
 terraform/helm/logger/values.yaml             |  38 --
 terraform/helm/logger/values/mainnet.yaml     |   9 -
 terraform/helm/logger/values/premainnet.yaml  |   9 -
 terraform/helm/monitoring/Chart.lock          |   9 -
 terraform/helm/monitoring/Chart.yaml          |  13 -
 .../charts/kube-state-metrics-4.16.0.tgz      | Bin 8795 -> 0 bytes
 .../charts/prometheus-node-exporter-4.0.0.tgz | Bin 7383 -> 0 bytes
 .../helm/monitoring/files/alertmanager.yml    |  33 --
 terraform/helm/monitoring/files/dashboards    |   1 -
 terraform/helm/monitoring/files/grafana.ini   |  34 --
 .../helm/monitoring/files/prometheus.yml      | 225 -----------
 .../helm/monitoring/files/rules/alerts.yml    | 166 --------
 .../helm/monitoring/templates/_helpers.tpl    |  63 ---
 .../helm/monitoring/templates/monitoring.yaml | 368 ------------------
 .../monitoring/templates/serviceaccount.yaml  |   8 -
 terraform/helm/monitoring/values.yaml         | 118 ------
 .../helm/node-health-checker/.helmignore      |  22 --
 terraform/helm/node-health-checker/Chart.yaml |   8 -
 terraform/helm/node-health-checker/README.md  |  34 --
 .../files/nhc_baseline_fullnode.yaml          |  59 ---
 .../templates/_helpers.tpl                    |  63 ---
 .../templates/configmap.yaml                  |   9 -
 .../templates/deployment.yaml                 |  70 ----
 .../templates/service.yaml                    |  15 -
 .../helm/node-health-checker/values.yaml      |  31 --
 terraform/helm/pfn-addons/README.md           |  54 +++
 .../helm/pfn-addons/templates/ingress.yaml    |   2 +-
 .../templates/loadtest.yaml                   |  12 +-
 .../helm/pfn-addons/templates/service.yaml    |  36 ++
 .../templates/serviceaccount.yaml             |   4 +-
 terraform/helm/pfn-addons/values.yaml         |  59 +++
 terraform/helm/testnet-addons/Chart.yaml      |   4 +-
 terraform/helm/testnet-addons/README.md       |  21 +-
 .../testnet-addons/templates/ingress.yaml     |  17 +-
 .../testnet-addons/templates/service.yaml     |   4 +
 .../testnet-addons/templates/waypoint.yaml    |   4 +
 terraform/helm/testnet-addons/values.yaml     |  50 +--
 terraform/helm/vector-log-agent/Chart.yaml    |   2 +-
 .../vector-log-agent/files/vector-config.yaml |   3 +-
 .../files/vector-transforms.yaml              |   3 +-
 .../helm/vector-log-agent/testing/test1.json  |   3 +-
 terraform/helm/vector-log-agent/values.yaml   |   2 +-
 terraform/modules/eks/cluster.tf              |  16 +-
 terraform/modules/eks/kubernetes.tf           |  59 +--
 terraform/modules/eks/network.tf              |   4 +-
 terraform/modules/eks/variables.tf            |  19 +-
 terraform/modules/eks/versions.tf             |   2 +-
 terraform/modules/resources/instance.tf       | 159 ++++++++
 .../scripts/migrate_cluster_psp_to_pss.sh     | 195 +++++-----
 117 files changed, 2308 insertions(+), 3402 deletions(-)
 delete mode 100644 terraform/.tflint.hcl
 create mode 100644 terraform/aptos-node-testnet/aws/.terraform.lock.hcl
 create mode 100644 terraform/aptos-node-testnet/gcp/.terraform.lock.hcl
 delete mode 100644 terraform/helm/fullnode/templates/restore.yaml
 delete mode 100644 terraform/helm/logger/.helmignore
 delete mode 100644 terraform/helm/logger/Chart.yaml
 delete mode 100644 terraform/helm/logger/README.md
 delete mode 100644 terraform/helm/logger/files/vector.toml
 delete mode 100644 terraform/helm/logger/templates/NOTES.txt
 delete mode 100644 terraform/helm/logger/templates/_helpers.tpl
 delete mode 100644 terraform/helm/logger/templates/logging.yaml
 delete mode 100644 terraform/helm/logger/templates/serviceaccount.yaml
 delete mode 100644 terraform/helm/logger/values.yaml
 delete mode 100644 terraform/helm/logger/values/mainnet.yaml
 delete mode 100644 terraform/helm/logger/values/premainnet.yaml
 delete mode 100644 terraform/helm/monitoring/Chart.lock
 delete mode 100644 terraform/helm/monitoring/Chart.yaml
 delete mode 100644 terraform/helm/monitoring/charts/kube-state-metrics-4.16.0.tgz
 delete mode 100644 terraform/helm/monitoring/charts/prometheus-node-exporter-4.0.0.tgz
 delete mode 100644 terraform/helm/monitoring/files/alertmanager.yml
 delete mode 120000 terraform/helm/monitoring/files/dashboards
 delete mode 100644 terraform/helm/monitoring/files/grafana.ini
 delete mode 100644 terraform/helm/monitoring/files/prometheus.yml
 delete mode 100644 terraform/helm/monitoring/files/rules/alerts.yml
 delete mode 100644 terraform/helm/monitoring/templates/_helpers.tpl
 delete mode 100644 terraform/helm/monitoring/templates/monitoring.yaml
 delete mode 100644 terraform/helm/monitoring/templates/serviceaccount.yaml
 delete mode 100644 terraform/helm/monitoring/values.yaml
 delete mode 100644 terraform/helm/node-health-checker/.helmignore
 delete mode 100644 terraform/helm/node-health-checker/Chart.yaml
 delete mode 100644 terraform/helm/node-health-checker/README.md
 delete mode 100644 terraform/helm/node-health-checker/files/nhc_baseline_fullnode.yaml
 delete mode 100644 terraform/helm/node-health-checker/templates/_helpers.tpl
 delete mode 100644 terraform/helm/node-health-checker/templates/configmap.yaml
 delete mode 100644 terraform/helm/node-health-checker/templates/deployment.yaml
 delete mode 100644 terraform/helm/node-health-checker/templates/service.yaml
 delete mode 100644 terraform/helm/node-health-checker/values.yaml
 create mode 100644 terraform/helm/pfn-addons/README.md
 rename terraform/helm/{testnet-addons => pfn-addons}/templates/loadtest.yaml (91%)
 rename terraform/helm/{node-health-checker => pfn-addons}/templates/serviceaccount.yaml (60%)
 create mode 100644 terraform/modules/resources/instance.tf

diff --git a/terraform/.tflint.hcl b/terraform/.tflint.hcl
deleted file mode 100644
index e469dfc05aa65..0000000000000
--- a/terraform/.tflint.hcl
+++ /dev/null
@@ -1,17 +0,0 @@
-plugin "aws" {
-  enabled = true
-  version = "0.16.1"
-  source  = "github.com/terraform-linters/tflint-ruleset-aws"
-}
-
-plugin "azurerm" {
-  enabled = true
-  version = "0.17.1"
-  source  = "github.com/terraform-linters/tflint-ruleset-azurerm"
-}
-
-plugin "google" {
-  enabled = true
-  version = "0.19.0"
-  source  = "github.com/terraform-linters/tflint-ruleset-google"
-}
diff --git a/terraform/aptos-node-testnet/aws/.terraform.lock.hcl b/terraform/aptos-node-testnet/aws/.terraform.lock.hcl
new file mode 100644
index 0000000000000..f00d4ecc67ee2
--- /dev/null
+++ b/terraform/aptos-node-testnet/aws/.terraform.lock.hcl
@@ -0,0 +1,157 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/aws" {
+  version = "5.14.0"
+  hashes = [
+    "h1:MkK5wbWd3g9MAR+LqyWhqlGXolkrryf6LWkOAtOdG9k=",
+    "zh:03b80869b97dfca4ce6ee94a005e15ccec4d98af0876084a963963b05c9ab743",
+    "zh:11d148800fe028fcd10590f0473c5df306e220776e359aa838c2f07e5a89187e",
+    "zh:15d696cf583dc2917b257891e4a33afe7c3e8f20b63183f510267d709baaaf3d",
+    "zh:34c41e44534fbbf95a5f89b38404ee52b41c6c70af68f7e63a423b276fbcf797",
+    "zh:4211d0fd4753f7ba202f3e4a8afb2e03d12112dd4db4f9267c472bd597dc71ca",
+    "zh:47b6017d0cdd2f62b9e46137de38cd618441f658f8570a8e2687cce7643bf953",
+    "zh:51785b942d6f588825f4bfa86e05502be8721194b289c474121072e49acff6c3",
+    "zh:565f76885d41ecfea192b8a2e2f3d4b3dd278790d1d82b204706ae3582d51cf6",
+    "zh:703d670e1d73360d2533b02dbe9e2e055bf6f36a478cd4d66f2349861575c2ed",
+    "zh:7e4701f38590c22066da90b75dd92d81a685225d2d222d22425b7ccb26e92b4a",
+    "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
+    "zh:ca3449252d70df14ad713d5b95fa0610da8087f12c9deb87beffe788f518d06d",
+    "zh:e2ed3d6d8c12d3fe56fb03fe272779270a92f6157ade8c3db1c987b83b62e68c",
+    "zh:f0b07b84a43d1afc3a9790ca699771970525c132fa8551e7b326d1f263414dd1",
+    "zh:f1d83b3e5a29bae471f9841a4e0153eac5bccedbdece369e2f6186e9044db64e",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/helm" {
+  version = "2.11.0"
+  hashes = [
+    "h1:AOp9vXIM4uT1c/PVwsWTPiLVGlO2SSYrfiirV5rjCMQ=",
+    "zh:013857c88f3e19a4b162344e21dc51891c4ac8b600da8391f7fb2b6d234961e1",
+    "zh:044fffa233a93cdcf8384afbe9e1ab6c9d0b5b176cbae56ff465eb9611302975",
+    "zh:208b7cdd4fa3a1b25ae817dc00a9198ef98be0ddc3a577b5b72bc0f006afb997",
+    "zh:3e8b33f56cfe387277572a92037a1ca1cbe4e3aa6b5c19a8c2431193b07f7865",
+    "zh:7dd663d5619bd71676899b05b19d36f585189fdabc6b0b03c23579524a8fd9bf",
+    "zh:ae5329cb3e5bf0b86b02e823aac3ef3bd0d4b1618ff013cd0076dca0be8322e4",
+    "zh:ba6201695b55d51bedacdb017cb8d03d7a8ada51d0168ac44fef3fa791a85ab4",
+    "zh:c61285c8b1ba10f50cf94c9dcf98f2f3b720f14906a18be71b9b422279b5d806",
+    "zh:d522d388246f38b9f329c511ec579b516d212670b954f9dab64efb27e51862af",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+    "zh:f92546e26b670da61437ae2cbd038427c9374ce5f7a78df52193397da90bd997",
+    "zh:f9ad1407e5c0d5e3474094491025bf100828e8c1a01acdf9591d7dd1eb59f961",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/kubernetes" {
+  version = "2.23.0"
+  hashes = [
+    "h1:arTzD0XG/DswGCAx9JEttkSKe9RyyFW9W7UWcXF13dU=",
+    "zh:10488a12525ed674359585f83e3ee5e74818b5c98e033798351678b21b2f7d89",
+    "zh:1102ba5ca1a595f880e67102bbf999cc8b60203272a078a5b1e896d173f3f34b",
+    "zh:1347cf958ed3f3f80b3c7b3e23ddda3d6c6573a81847a8ee92b7df231c238bf6",
+    "zh:2cb18e9f5156bc1b1ee6bc580a709f7c2737d142722948f4a6c3c8efe757fa8d",
+    "zh:5506aa6f28dcca2a265ccf8e34478b5ec2cb43b867fe6d93b0158f01590fdadd",
+    "zh:6217a20686b631b1dcb448ee4bc795747ebc61b56fbe97a1ad51f375ebb0d996",
+    "zh:8accf916c00579c22806cb771e8909b349ffb7eb29d9c5468d0a3f3166c7a84a",
+    "zh:9379b0b54a0fa030b19c7b9356708ec8489e194c3b5e978df2d31368563308e5",
+    "zh:aa99c580890691036c2931841e88e7ee80d59ae52289c8c2c28ea0ac23e31520",
+    "zh:c57376d169875990ac68664d227fb69cd0037b92d0eba6921d757c3fd1879080",
+    "zh:e6068e3f94f6943b5586557b73f109debe19d1a75ca9273a681d22d1ce066579",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/local" {
+  version = "2.4.0"
+  hashes = [
+    "h1:ZUEYUmm2t4vxwzxy1BvN1wL6SDWrDxfH7pxtzX8c6d0=",
+    "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9",
+    "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf",
+    "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35",
+    "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04",
+    "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406",
+    "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6",
+    "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7",
+    "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2",
+    "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc",
+    "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/null" {
+  version = "3.2.1"
+  hashes = [
+    "h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=",
+    "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840",
+    "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb",
+    "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5",
+    "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238",
+    "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc",
+    "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970",
+    "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2",
+    "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5",
+    "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f",
+    "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/random" {
+  version = "3.5.1"
+  hashes = [
+    "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
+    "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
+    "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
+    "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
+    "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
+    "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
+    "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
+    "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
+    "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
+    "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
+    "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/time" {
+  version = "0.9.1"
+  hashes = [
+    "h1:VxyoYYOCaJGDmLz4TruZQTSfQhvwEcMxvcKclWdnpbs=",
+    "zh:00a1476ecf18c735cc08e27bfa835c33f8ac8fa6fa746b01cd3bcbad8ca84f7f",
+    "zh:3007f8fc4a4f8614c43e8ef1d4b0c773a5de1dcac50e701d8abc9fdc8fcb6bf5",
+    "zh:5f79d0730fdec8cb148b277de3f00485eff3e9cf1ff47fb715b1c969e5bbd9d4",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:8c8094689a2bed4bb597d24a418bbbf846e15507f08be447d0a5acea67c2265a",
+    "zh:a6d9206e95d5681229429b406bc7a9ba4b2d9b67470bda7df88fa161508ace57",
+    "zh:aa299ec058f23ebe68976c7581017de50da6204883950de228ed9246f309e7f1",
+    "zh:b129f00f45fba1991db0aa954a6ba48d90f64a738629119bfb8e9a844b66e80b",
+    "zh:ef6cecf5f50cda971c1b215847938ced4cb4a30a18095509c068643b14030b00",
+    "zh:f1f46a4f6c65886d2dd27b66d92632232adc64f92145bf8403fe64d5ffa5caea",
+    "zh:f79d6155cda7d559c60d74883a24879a01c4d5f6fd7e8d1e3250f3cd215fb904",
+    "zh:fd59fa73074805c3575f08cd627eef7acda14ab6dac2c135a66e7a38d262201c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+  version = "4.0.4"
+  hashes = [
+    "h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
+    "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
+    "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
+    "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
+    "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
+    "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
+    "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
+    "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
+    "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
+    "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
+    "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
+    "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
diff --git a/terraform/aptos-node-testnet/aws/addons.tf b/terraform/aptos-node-testnet/aws/addons.tf
index 29e61afe391dc..2437969df2642 100644
--- a/terraform/aptos-node-testnet/aws/addons.tf
+++ b/terraform/aptos-node-testnet/aws/addons.tf
@@ -1,8 +1,7 @@
 locals {
-  autoscaling_helm_chart_path         = "${path.module}/../../helm/autoscaling"
-  chaos_mesh_helm_chart_path          = "${path.module}/../../helm/chaos"
-  testnet_addons_helm_chart_path      = "${path.module}/../../helm/testnet-addons"
-  node_health_checker_helm_chart_path = "${path.module}/../../helm/node-health-checker"
+  autoscaling_helm_chart_path    = "${path.module}/../../helm/autoscaling"
+  chaos_mesh_helm_chart_path     = "${path.module}/../../helm/chaos"
+  testnet_addons_helm_chart_path = "${path.module}/../../helm/testnet-addons"
 }
 
 resource "helm_release" "autoscaling" {
@@ -31,10 +30,6 @@ resource "helm_release" "autoscaling" {
       autoscaler = {
         enabled     = true
         clusterName = module.validator.aws_eks_cluster.name
-        image = {
-          # EKS does not report patch version
-          tag = "v${module.validator.aws_eks_cluster.version}.0"
-        }
         serviceAccount = {
           annotations = {
             "eks.amazonaws.com/role-arn" = aws_iam_role.cluster-autoscaler.arn
@@ -127,18 +122,19 @@ resource "aws_iam_role_policy" "cluster-autoscaler" {
 }
 
 resource "kubernetes_namespace" "chaos-mesh" {
+  count = var.enable_forge ? 1 : 0
   metadata {
     annotations = {
       name = "chaos-mesh"
     }
-
     name = "chaos-mesh"
   }
 }
 
 resource "helm_release" "chaos-mesh" {
+  count     = var.enable_forge ? 1 : 0
   name      = "chaos-mesh"
-  namespace = kubernetes_namespace.chaos-mesh.metadata[0].name
+  namespace = kubernetes_namespace.chaos-mesh[0].metadata[0].name
 
   chart       = local.chaos_mesh_helm_chart_path
   max_history = 5
@@ -255,12 +251,6 @@ resource "helm_release" "testnet-addons" {
         acm_certificate          = length(aws_acm_certificate.ingress) > 0 ? aws_acm_certificate.ingress[0].arn : null
         loadBalancerSourceRanges = var.client_sources_ipv4
       }
-      load_test = {
-        fullnodeGroups = try(var.aptos_node_helm_values.fullnode.groups, [])
-        config = {
-          numFullnodeGroups = var.num_fullnode_groups
-        }
-      }
     }),
     jsonencode(var.testnet_addons_helm_values)
   ]
@@ -273,30 +263,3 @@ resource "helm_release" "testnet-addons" {
     }
   }
 }
-
-resource "helm_release" "node-health-checker" {
-  count       = var.enable_node_health_checker ? 1 : 0
-  name        = "node-health-checker"
-  chart       = local.node_health_checker_helm_chart_path
-  max_history = 5
-  wait        = false
-
-  values = [
-    jsonencode({
-      imageTag = var.image_tag
-      # borrow the serviceaccount for the rest of the testnet addon components
-      # TODO: just create a service account for the node-health-checker
-      serviceAccount = {
-        create = false
-        name   = "testnet-addons"
-      }
-    }),
-    jsonencode(var.node_health_checker_helm_values)
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.node_health_checker_helm_chart_path, "**") : filesha1("${local.node_health_checker_helm_chart_path}/${f}")]))
-  }
-}
diff --git a/terraform/aptos-node-testnet/aws/dns.tf b/terraform/aptos-node-testnet/aws/dns.tf
index 028cd3dbe7577..688f9af7f79d7 100644
--- a/terraform/aptos-node-testnet/aws/dns.tf
+++ b/terraform/aptos-node-testnet/aws/dns.tf
@@ -26,7 +26,7 @@ resource "aws_acm_certificate" "ingress" {
 }
 
 resource "aws_route53_record" "ingress-acm-validation" {
-  for_each = var.zone_id == "" ? {} : { for dvo in aws_acm_certificate.ingress[0].domain_validation_options : dvo.domain_name => dvo }
+  for_each = length(aws_acm_certificate.ingress) == 0 ? {} : { for dvo in aws_acm_certificate.ingress[0].domain_validation_options : dvo.domain_name => dvo }
 
   zone_id         = var.zone_id
   allow_overwrite = true
@@ -37,7 +37,7 @@ resource "aws_route53_record" "ingress-acm-validation" {
 }
 
 resource "aws_acm_certificate_validation" "ingress" {
-  count = var.zone_id != "" ? 1 : 0
+  count = length(aws_acm_certificate.ingress) > 0 ? 1 : 0
 
   certificate_arn         = aws_acm_certificate.ingress[0].arn
   validation_record_fqdns = [for dvo in aws_acm_certificate.ingress[0].domain_validation_options : dvo.resource_record_name]
diff --git a/terraform/aptos-node-testnet/aws/forge.tf b/terraform/aptos-node-testnet/aws/forge.tf
index db7f537a45d87..d566c833f9f7c 100644
--- a/terraform/aptos-node-testnet/aws/forge.tf
+++ b/terraform/aptos-node-testnet/aws/forge.tf
@@ -17,7 +17,7 @@ resource "helm_release" "forge" {
       }
       serviceAccount = {
         annotations = {
-          "eks.amazonaws.com/role-arn" = aws_iam_role.forge.arn
+          "eks.amazonaws.com/role-arn" = aws_iam_role.forge[0].arn
         }
       }
     }),
@@ -32,6 +32,7 @@ resource "helm_release" "forge" {
 }
 
 data "aws_iam_policy_document" "forge-assume-role" {
+  count = var.enable_forge ? 1 : 0
   statement {
     actions = ["sts:AssumeRoleWithWebIdentity"]
 
@@ -58,6 +59,7 @@ data "aws_iam_policy_document" "forge-assume-role" {
 }
 
 data "aws_iam_policy_document" "forge" {
+  count = var.enable_forge ? 1 : 0
   statement {
     sid = "AllowS3"
     actions = [
@@ -71,15 +73,16 @@ data "aws_iam_policy_document" "forge" {
 }
 
 resource "aws_iam_role" "forge" {
+  count                = var.enable_forge ? 1 : 0
   name                 = "aptos-node-testnet-${local.workspace_name}-forge"
   path                 = var.iam_path
   permissions_boundary = var.permissions_boundary_policy
-  assume_role_policy   = data.aws_iam_policy_document.forge-assume-role.json
+  assume_role_policy   = data.aws_iam_policy_document.forge-assume-role[0].json
 }
 
 resource "aws_iam_role_policy" "forge" {
+  count  = var.enable_forge ? 1 : 0
   name   = "Helm"
-  role   = aws_iam_role.forge.name
-  policy = data.aws_iam_policy_document.forge.json
+  role   = aws_iam_role.forge[0].name
+  policy = data.aws_iam_policy_document.forge[0].json
 }
-
diff --git a/terraform/aptos-node-testnet/aws/main.tf b/terraform/aptos-node-testnet/aws/main.tf
index fe322e7800af6..2cb77e43cc6b5 100644
--- a/terraform/aptos-node-testnet/aws/main.tf
+++ b/terraform/aptos-node-testnet/aws/main.tf
@@ -36,8 +36,6 @@ module "validator" {
 
   # if forge enabled, standardize the helm release name for ease of operations
   helm_release_name_override = var.enable_forge ? "aptos-node" : ""
-  # Forge testing does not require calico for validator NetworkPolicies
-  enable_calico = !var.enable_forge
 
   k8s_api_sources = var.admin_sources_ipv4
   k8s_admin_roles = var.k8s_admin_roles
@@ -57,21 +55,15 @@ module "validator" {
   helm_values         = var.aptos_node_helm_values
 
   # allow all nodegroups to surge to 2x their size by default, in case of total nodes replacement
-  validator_instance_num     = var.num_validator_instance > 0 ? 2 * var.num_validator_instance : var.num_validators
-  validator_instance_max_num = var.validator_instance_max_num
+  validator_instance_num          = var.num_validator_instance > 0 ? 2 * var.num_validator_instance : var.num_validators
+  validator_instance_max_num      = var.validator_instance_max_num
+  validator_instance_enable_taint = true
   # create one utility instance per validator, since HAProxy requires resources 1.5 CPU, 2Gi memory for now
   utility_instance_num     = var.num_utility_instance > 0 ? var.num_utility_instance : var.num_validators
   utility_instance_max_num = var.utility_instance_max_num
 
   utility_instance_type   = var.utility_instance_type
   validator_instance_type = var.validator_instance_type
-
-  # addons
-  enable_monitoring               = var.enable_monitoring
-  enable_prometheus_node_exporter = var.enable_prometheus_node_exporter
-  enable_kube_state_metrics       = var.enable_kube_state_metrics
-  monitoring_helm_values          = var.monitoring_helm_values
-  logger_helm_values              = var.logger_helm_values
 }
 
 locals {
@@ -81,14 +73,14 @@ locals {
 provider "helm" {
   kubernetes {
     host                   = module.validator.aws_eks_cluster.endpoint
-    cluster_ca_certificate = base64decode(module.validator.aws_eks_cluster.certificate_authority.0.data)
+    cluster_ca_certificate = base64decode(module.validator.aws_eks_cluster.certificate_authority[0].data)
     token                  = module.validator.aws_eks_cluster_auth_token
   }
 }
 
 provider "kubernetes" {
   host                   = module.validator.aws_eks_cluster.endpoint
-  cluster_ca_certificate = base64decode(module.validator.aws_eks_cluster.certificate_authority.0.data)
+  cluster_ca_certificate = base64decode(module.validator.aws_eks_cluster.certificate_authority[0].data)
   token                  = module.validator.aws_eks_cluster_auth_token
 }
 
@@ -96,8 +88,8 @@ locals {
   genesis_helm_chart_path = "${path.module}/../../helm/genesis"
 }
 
-
 resource "helm_release" "genesis" {
+  count       = var.enable_genesis ? 1 : 0
   name        = "genesis"
   chart       = local.genesis_helm_chart_path
   max_history = 5
diff --git a/terraform/aptos-node-testnet/aws/variables.tf b/terraform/aptos-node-testnet/aws/variables.tf
index d1257333dca9a..9ce02d893d623 100644
--- a/terraform/aptos-node-testnet/aws/variables.tf
+++ b/terraform/aptos-node-testnet/aws/variables.tf
@@ -2,20 +2,24 @@
 
 variable "region" {
   description = "AWS region"
+  type        = string
 }
 
 variable "maximize_single_az_capacity" {
   description = "TEST ONLY: Whether to maximize the capacity of the cluster by allocating a large CIDR block to the first AZ"
+  type        = bool
   default     = false
 }
 
 variable "zone_id" {
   description = "Route53 Zone ID to create records in"
+  type        = string
   default     = ""
 }
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
@@ -27,17 +31,19 @@ variable "tls_sans" {
 
 variable "workspace_dns" {
   description = "Include Terraform workspace name in DNS records"
+  type        = bool
   default     = true
 }
 
 variable "iam_path" {
-  default     = "/"
   description = "Path to use when naming IAM objects"
+  type        = string
+  default     = "/"
 }
 
 variable "permissions_boundary_policy" {
-  default     = ""
   description = "ARN of IAM policy to set as permissions boundary on created roles"
+  type        = string
 }
 
 variable "admin_sources_ipv4" {
@@ -68,26 +74,31 @@ variable "k8s_admins" {
 
 variable "chain_id" {
   description = "Aptos chain ID. If var.enable_forge set, defaults to 4"
+  type        = number
   default     = 4
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 15
 }
 
 variable "chain_name" {
   description = "Aptos chain name. If unset, defaults to using the workspace name"
+  type        = string
   default     = ""
 }
 
 variable "image_tag" {
   description = "Docker image tag for all Aptos workloads, including validators, fullnodes, backup, restore, genesis, and other tooling"
+  type        = string
   default     = "devnet"
 }
 
 variable "validator_image_tag" {
   description = "Docker image tag for validators and fullnodes. If set, overrides var.image_tag for those nodes"
+  type        = string
   default     = ""
 }
 
@@ -105,31 +116,10 @@ variable "genesis_helm_values" {
   default     = {}
 }
 
-variable "logger_helm_values" {
-  description = "Map of values to pass to logger helm chart"
-  type        = any
-  default     = {}
-}
-
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring helm chart"
-  type        = any
-  default     = {}
-}
-
-variable "enable_prometheus_node_exporter" {
-  description = "Enable prometheus-node-exporter within monitoring helm chart"
-  default     = false
-}
-
-variable "enable_kube_state_metrics" {
-  description = "Enable kube-state-metrics within monitoring helm chart"
-  default     = false
+variable "enable_genesis" {
+  description = "Perform genesis automatically"
+  type        = bool
+  default     = true
 }
 
 variable "testnet_addons_helm_values" {
@@ -138,68 +128,67 @@ variable "testnet_addons_helm_values" {
   default     = {}
 }
 
-variable "enable_node_health_checker" {
-  description = "Enable node-health-checker"
-  default     = false
-}
-
-variable "node_health_checker_helm_values" {
-  description = "Map of values to pass to node-health-checker helm chart"
-  type        = any
-  default     = {}
-}
-
 ### EKS nodegroups
 
 variable "num_validators" {
   description = "The number of validator nodes to create"
+  type        = number
   default     = 4
 }
 
 variable "num_fullnode_groups" {
   description = "The number of fullnode groups to create"
+  type        = number
   default     = 1
 }
 
 variable "num_utility_instance" {
   description = "Number of instances for utilities node pool, when it's 0, it will be set to var.num_validators"
+  type        = number
   default     = 0
 }
 
 variable "num_validator_instance" {
   description = "Number of instances for validator node pool, when it's 0, it will be set to 2 * var.num_validators"
+  type        = number
   default     = 0
 }
 
 variable "utility_instance_max_num" {
   description = "Maximum number of instances for utilities. If left 0, defaults to 2 * var.num_validators"
+  type        = number
   default     = 0
 }
 
 variable "validator_instance_max_num" {
   description = "Maximum number of instances for utilities. If left 0, defaults to 2 * var.num_validators"
+  type        = number
   default     = 0
 }
 
 variable "utility_instance_type" {
   description = "Instance type used for utilities"
+  type        = string
   default     = "t3.2xlarge"
 }
 
 variable "validator_instance_type" {
   description = "Instance type used for validator and fullnodes"
-  default     = "c6i.4xlarge"
+  type        = string
+  default     = "c6i.8xlarge"
 }
 
 ### Forge
 
 variable "enable_forge" {
   description = "Enable Forge test framework, also creating an internal helm repo"
+  type        = bool
   default     = false
 }
 
 variable "forge_config_s3_bucket" {
   description = "S3 bucket in which Forge config is stored"
+  type        = string
   default     = "forge-wrapper-config"
 }
 
@@ -211,6 +200,7 @@ variable "forge_helm_values" {
 
 variable "validator_storage_class" {
   description = "Which storage class to use for the validator and fullnode"
+  type        = string
   default     = "io1"
   validation {
     condition     = contains(["gp3", "io1", "io2"], var.validator_storage_class)
@@ -220,6 +210,7 @@ variable "validator_storage_class" {
 
 variable "fullnode_storage_class" {
   description = "Which storage class to use for the validator and fullnode"
+  type        = string
   default     = "io1"
   validation {
     condition     = contains(["gp3", "io1", "io2"], var.fullnode_storage_class)
@@ -229,5 +220,6 @@ variable "fullnode_storage_class" {
 
 variable "manage_via_tf" {
   description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
   default     = true
 }
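
Note that permissions_boundary_policy gains a type but loses its empty-string default, which makes it a required input: root modules that previously omitted it must now set it explicitly. A minimal sketch of the distinction, assuming only standard variable semantics:

# A default keeps the input optional:
variable "iam_path" {
  type    = string
  default = "/"
}

# No default makes the input required; supply it via -var or a .tfvars file:
variable "permissions_boundary_policy" {
  type = string
}

Callers that want the old behavior can pass an empty string to preserve the no-boundary case.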
diff --git a/terraform/aptos-node-testnet/gcp/.terraform.lock.hcl b/terraform/aptos-node-testnet/gcp/.terraform.lock.hcl
new file mode 100644
index 0000000000000..c3e2a86fa13e3
--- /dev/null
+++ b/terraform/aptos-node-testnet/gcp/.terraform.lock.hcl
@@ -0,0 +1,156 @@
+# This file is maintained automatically by "terraform init".
+# Manual edits may be lost in future updates.
+
+provider "registry.terraform.io/hashicorp/google" {
+  version     = "4.54.0"
+  constraints = "~> 4.54.0"
+  hashes = [
+    "h1:1cWQdF2IRqCUMG3wGygzqDIPq6SYtaubbQ+vXGRw25k=",
+    "zh:31f8f881703ba166b3ce4579dcba4b4c3192360642bd34ff633815d635c39003",
+    "zh:40ac861876276001c6483c52232072a4640491c36ebfba0e1e4baa5873e8183f",
+    "zh:4a1dfb601e7426e2aee1cd9cbab721638a330186398b587400a275981adf9e43",
+    "zh:71ef5b767fe25f4f03535fe006986cd9829a68788185914098dfe9d7cdb8f0de",
+    "zh:92ce2d5b8cbf2b0641f9c954959cfd8e2975f3912642b14a89dc7376c8edc8b9",
+    "zh:9c817bbe912e986f62f036fac18c25de8b225c065343f8c39362feffb25f9b37",
+    "zh:a21b8cfa15a56a7c332769d14a9fd1b321393cba1547f3155ff70aa7cb0bf0b2",
+    "zh:b42e883e3272c3aeba2cdc53d07a2058321e8e68d68238d08a73a804274e29d0",
+    "zh:bc25f7f9a1b8fee60a853c87f3762c5860598daf0a0a3c3e67563632f67b1c45",
+    "zh:bfd60ab7cf42380dc7dab87e50c62f6ad5c1af8d55d854a890a3f6dfb778aba5",
+    "zh:c79ad29ebff06da641c8d67b2104b72749df56f919d48bd1ca6ce31057d86b9b",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/google-beta" {
+  version     = "4.54.0"
+  constraints = "~> 4.54.0"
+  hashes = [
+    "h1:W5uwH+CNWJijGTCQtMfo5wqQyiMMmcxDPaeptIdzUsI=",
+    "zh:0ae198224a7d116f4f9e7a38b2b6cb2e281050929e22d38a98060eecf2eeccf8",
+    "zh:2962d48ac3789c32ef2b8e61f9c14335398c64603af98a308d21512008728dfc",
+    "zh:5f40b3655c71b081a67f06fb9ecc08174946d80e6411a90d4cce6265802515f8",
+    "zh:72c111e0c49b3d7091155f719c224209bbb7a011b215382eeaf9506c8067b60e",
+    "zh:73d86b00181803e9c30bfd92611a8475900acb31b5abcd02e7d36bdcb0de9a35",
+    "zh:7c4af6ca624e7454db679e3adf32b1f8288babc95e05a1220388e3cb53e16ab8",
+    "zh:7fe723ce93803fe5596d706f52be46094273ee07e858f3808f1bafdab65bb6ed",
+    "zh:bbec2859a5dc9f736e2d75a486d9a0200a6e5a6f712bd4595e24b8d8e9a4f19b",
+    "zh:cc90a6021a55df0d836f5b22014a76c8dd7b55dfc5bdf60c7ce3264c1a07e3ff",
+    "zh:eaa842ab7c8aba3fe104c975d0bc0dc4412098814cd1a374136379b6d107eafe",
+    "zh:f0457715d8f12d42d4ed128ea3e5b74e2a1f7a60fa5ff8a9971dfff31ebf2f27",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/helm" {
+  version = "2.11.0"
+  hashes = [
+    "h1:AOp9vXIM4uT1c/PVwsWTPiLVGlO2SSYrfiirV5rjCMQ=",
+    "zh:013857c88f3e19a4b162344e21dc51891c4ac8b600da8391f7fb2b6d234961e1",
+    "zh:044fffa233a93cdcf8384afbe9e1ab6c9d0b5b176cbae56ff465eb9611302975",
+    "zh:208b7cdd4fa3a1b25ae817dc00a9198ef98be0ddc3a577b5b72bc0f006afb997",
+    "zh:3e8b33f56cfe387277572a92037a1ca1cbe4e3aa6b5c19a8c2431193b07f7865",
+    "zh:7dd663d5619bd71676899b05b19d36f585189fdabc6b0b03c23579524a8fd9bf",
+    "zh:ae5329cb3e5bf0b86b02e823aac3ef3bd0d4b1618ff013cd0076dca0be8322e4",
+    "zh:ba6201695b55d51bedacdb017cb8d03d7a8ada51d0168ac44fef3fa791a85ab4",
+    "zh:c61285c8b1ba10f50cf94c9dcf98f2f3b720f14906a18be71b9b422279b5d806",
+    "zh:d522d388246f38b9f329c511ec579b516d212670b954f9dab64efb27e51862af",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+    "zh:f92546e26b670da61437ae2cbd038427c9374ce5f7a78df52193397da90bd997",
+    "zh:f9ad1407e5c0d5e3474094491025bf100828e8c1a01acdf9591d7dd1eb59f961",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/kubernetes" {
+  version = "2.23.0"
+  hashes = [
+    "h1:arTzD0XG/DswGCAx9JEttkSKe9RyyFW9W7UWcXF13dU=",
+    "zh:10488a12525ed674359585f83e3ee5e74818b5c98e033798351678b21b2f7d89",
+    "zh:1102ba5ca1a595f880e67102bbf999cc8b60203272a078a5b1e896d173f3f34b",
+    "zh:1347cf958ed3f3f80b3c7b3e23ddda3d6c6573a81847a8ee92b7df231c238bf6",
+    "zh:2cb18e9f5156bc1b1ee6bc580a709f7c2737d142722948f4a6c3c8efe757fa8d",
+    "zh:5506aa6f28dcca2a265ccf8e34478b5ec2cb43b867fe6d93b0158f01590fdadd",
+    "zh:6217a20686b631b1dcb448ee4bc795747ebc61b56fbe97a1ad51f375ebb0d996",
+    "zh:8accf916c00579c22806cb771e8909b349ffb7eb29d9c5468d0a3f3166c7a84a",
+    "zh:9379b0b54a0fa030b19c7b9356708ec8489e194c3b5e978df2d31368563308e5",
+    "zh:aa99c580890691036c2931841e88e7ee80d59ae52289c8c2c28ea0ac23e31520",
+    "zh:c57376d169875990ac68664d227fb69cd0037b92d0eba6921d757c3fd1879080",
+    "zh:e6068e3f94f6943b5586557b73f109debe19d1a75ca9273a681d22d1ce066579",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/local" {
+  version = "2.4.0"
+  hashes = [
+    "h1:ZUEYUmm2t4vxwzxy1BvN1wL6SDWrDxfH7pxtzX8c6d0=",
+    "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9",
+    "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf",
+    "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35",
+    "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04",
+    "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406",
+    "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6",
+    "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7",
+    "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2",
+    "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc",
+    "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/random" {
+  version = "3.5.1"
+  hashes = [
+    "h1:IL9mSatmwov+e0+++YX2V6uel+dV6bn+fC/cnGDK3Ck=",
+    "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64",
+    "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d",
+    "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831",
+    "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3",
+    "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b",
+    "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2",
+    "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865",
+    "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03",
+    "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602",
+    "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/time" {
+  version = "0.9.1"
+  hashes = [
+    "h1:VxyoYYOCaJGDmLz4TruZQTSfQhvwEcMxvcKclWdnpbs=",
+    "zh:00a1476ecf18c735cc08e27bfa835c33f8ac8fa6fa746b01cd3bcbad8ca84f7f",
+    "zh:3007f8fc4a4f8614c43e8ef1d4b0c773a5de1dcac50e701d8abc9fdc8fcb6bf5",
+    "zh:5f79d0730fdec8cb148b277de3f00485eff3e9cf1ff47fb715b1c969e5bbd9d4",
+    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
+    "zh:8c8094689a2bed4bb597d24a418bbbf846e15507f08be447d0a5acea67c2265a",
+    "zh:a6d9206e95d5681229429b406bc7a9ba4b2d9b67470bda7df88fa161508ace57",
+    "zh:aa299ec058f23ebe68976c7581017de50da6204883950de228ed9246f309e7f1",
+    "zh:b129f00f45fba1991db0aa954a6ba48d90f64a738629119bfb8e9a844b66e80b",
+    "zh:ef6cecf5f50cda971c1b215847938ced4cb4a30a18095509c068643b14030b00",
+    "zh:f1f46a4f6c65886d2dd27b66d92632232adc64f92145bf8403fe64d5ffa5caea",
+    "zh:f79d6155cda7d559c60d74883a24879a01c4d5f6fd7e8d1e3250f3cd215fb904",
+    "zh:fd59fa73074805c3575f08cd627eef7acda14ab6dac2c135a66e7a38d262201c",
+  ]
+}
+
+provider "registry.terraform.io/hashicorp/tls" {
+  version = "4.0.4"
+  hashes = [
+    "h1:GZcFizg5ZT2VrpwvxGBHQ/hO9r6g0vYdQqx3bFD3anY=",
+    "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55",
+    "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848",
+    "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be",
+    "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5",
+    "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe",
+    "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e",
+    "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48",
+    "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8",
+    "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60",
+    "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e",
+    "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316",
+    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
+  ]
+}
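
The lock file records, for each provider, the selected version, the constraint it satisfied (where one exists), and content hashes: h1: entries hash the unpacked package for the platforms used locally, while zh: entries hash each official release archive. It corresponds to constraints along these lines in the module (a sketch; only the pinned providers carry explicit constraints):

terraform {
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "~> 4.54.0"
    }
    google-beta = {
      source  = "hashicorp/google-beta"
      version = "~> 4.54.0"
    }
  }
}

When the stack is driven from several OS/architecture combinations, hashes for additional platforms can be pre-seeded with, for example, terraform providers lock -platform=linux_amd64 -platform=darwin_arm64 (the platform list here is an assumption).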
diff --git a/terraform/aptos-node-testnet/gcp/addons.tf b/terraform/aptos-node-testnet/gcp/addons.tf
index 01dee100f4b5e..d09ca63336cf0 100644
--- a/terraform/aptos-node-testnet/gcp/addons.tf
+++ b/terraform/aptos-node-testnet/gcp/addons.tf
@@ -4,6 +4,7 @@ locals {
 }
 
 resource "kubernetes_namespace" "chaos-mesh" {
+  count = var.enable_forge ? 1 : 0
   metadata {
     annotations = {
       name = "chaos-mesh"
@@ -14,8 +15,9 @@ resource "kubernetes_namespace" "chaos-mesh" {
 }
 
 resource "helm_release" "chaos-mesh" {
+  count     = var.enable_forge ? 1 : 0
   name      = "chaos-mesh"
-  namespace = kubernetes_namespace.chaos-mesh.metadata[0].name
+  namespace = kubernetes_namespace.chaos-mesh[0].metadata[0].name
 
   chart       = local.chaos_mesh_helm_chart_path
   max_history = 5
@@ -24,29 +26,14 @@ resource "helm_release" "chaos-mesh" {
   values = [
     jsonencode({
       chaos-mesh = {
+        images = {
+          registry = "us-docker.pkg.dev/aptos-registry/docker/ghcr.io"
+          tag      = "aptos-patch" // Same as the patched chart in helm/chaos
+        },
         chaosDaemon = {
           runtime    = "containerd"
           socketPath = "/run/containerd/containerd.sock"
-          image = {
-            repository = "aptos-internal/chaos-daemon"
-            tag        = "latest"
-          }
-        },
-        controllerManager = {
-          image = {
-            repository = "aptos-internal/chaos-mesh"
-            tag        = "latest"
-          }
         },
-        dashboard = {
-          image = {
-            repository = "aptos-internal/chaos-dashboard"
-            tag        = "latest"
-          }
-        }
-        images = {
-          registry = "us-west1-docker.pkg.dev/aptos-global"
-        }
       }
     })
   ]
@@ -153,13 +140,7 @@ resource "helm_release" "testnet-addons" {
       }
       ingress = {
         gce_static_ip           = "aptos-${local.workspace_name}-testnet-addons-ingress"
-        gce_managed_certificate = "aptos-${local.workspace_name}-${var.zone_name}-testnet-addons"
-      }
-      load_test = {
-        fullnodeGroups = try(var.aptos_node_helm_values.fullnode.groups, [])
-        config = {
-          numFullnodeGroups = var.num_fullnode_groups
-        }
+        gce_managed_certificate = var.create_google_managed_ssl_certificate ? "aptos-${local.workspace_name}-${var.zone_name}-testnet-addons" : null
       }
     }),
     jsonencode(var.testnet_addons_helm_values)
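
Both chaos-mesh resources now follow the standard conditional-resource pattern: count is 0 or 1, and every downstream reference must index instance [0], as the namespace reference above does. A generic sketch of the pattern (resource and variable names are illustrative, not from this module):

variable "enable_feature" {
  type    = bool
  default = false
}

resource "kubernetes_namespace" "feature" {
  count = var.enable_feature ? 1 : 0
  metadata {
    name = "feature"
  }
}

resource "helm_release" "feature" {
  count = var.enable_feature ? 1 : 0
  name  = "feature"
  chart = "./charts/feature"
  # an unindexed kubernetes_namespace.feature.metadata reference would
  # fail to evaluate once count is set on the namespace:
  namespace = kubernetes_namespace.feature[0].metadata[0].name
}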
diff --git a/terraform/aptos-node-testnet/gcp/main.tf b/terraform/aptos-node-testnet/gcp/main.tf
index d9b7193b0457c..4386d7d4c1756 100644
--- a/terraform/aptos-node-testnet/gcp/main.tf
+++ b/terraform/aptos-node-testnet/gcp/main.tf
@@ -17,13 +17,13 @@ provider "helm" {
 module "validator" {
   source = "../../aptos-node/gcp"
 
-  cluster_bootstrap = var.cluster_bootstrap
-  manage_via_tf     = var.manage_via_tf
+  manage_via_tf = var.manage_via_tf
 
   # Project config
-  project = var.project
-  zone    = var.zone
-  region  = var.region
+  project        = var.project
+  zone           = var.zone
+  region         = var.region
+  node_locations = var.node_locations
 
   # DNS
   zone_name     = var.zone_name # keep empty if you don't want a DNS name
@@ -51,7 +51,7 @@ module "validator" {
   gke_enable_node_autoprovisioning     = var.gke_enable_node_autoprovisioning
   gke_node_autoprovisioning_max_cpu    = var.gke_node_autoprovisioning_max_cpu
   gke_node_autoprovisioning_max_memory = var.gke_node_autoprovisioning_max_memory
-  gke_enable_autoscaling               = var.gke_enable_autoscaling
+  gke_autoscaling_profile              = var.gke_autoscaling_profile
   gke_autoscaling_max_node_count       = var.gke_autoscaling_max_node_count
 
   # Testnet config
@@ -63,14 +63,18 @@ module "validator" {
   num_fullnode_groups        = var.num_fullnode_groups
 
   # Instance config
-  utility_instance_type   = var.utility_instance_type
-  validator_instance_type = var.validator_instance_type
-
-  # addons
-  enable_monitoring      = var.enable_monitoring
-  enable_node_exporter   = var.enable_prometheus_node_exporter
-  monitoring_helm_values = var.monitoring_helm_values
-
+  default_disk_size_gb            = var.default_disk_size_gb
+  default_disk_type               = var.default_disk_type
+  create_nodepools                = var.create_nodepools
+  nodepool_sysctls                = var.nodepool_sysctls
+  core_instance_type              = var.core_instance_type
+  utility_instance_type           = var.utility_instance_type
+  validator_instance_type         = var.validator_instance_type
+  utility_instance_enable_taint   = var.utility_instance_enable_taint
+  validator_instance_enable_taint = var.validator_instance_enable_taint
+
+  enable_clouddns        = var.enable_clouddns
+  enable_image_streaming = var.enable_image_streaming
   gke_maintenance_policy = var.gke_maintenance_policy
 }
 
@@ -86,8 +90,8 @@ locals {
   aptos_node_helm_prefix = var.enable_forge ? "aptos-node" : "${module.validator.helm_release_name}-aptos-node"
 }
 
-
 resource "helm_release" "genesis" {
+  count       = var.enable_genesis ? 1 : 0
   name        = "genesis"
   chart       = local.genesis_helm_chart_path
   max_history = 5
diff --git a/terraform/aptos-node-testnet/gcp/security.tf b/terraform/aptos-node-testnet/gcp/security.tf
index d492bdc963f57..42734bb6771b6 100644
--- a/terraform/aptos-node-testnet/gcp/security.tf
+++ b/terraform/aptos-node-testnet/gcp/security.tf
@@ -17,6 +17,7 @@ locals {
 }
 
 resource "kubernetes_labels" "pss-chaos-mesh" {
+  count       = var.enable_forge ? 1 : 0
   api_version = "v1"
   kind        = "Namespace"
   metadata {
diff --git a/terraform/aptos-node-testnet/gcp/variables.tf b/terraform/aptos-node-testnet/gcp/variables.tf
index f76ea1231f4c5..b56355fae99b7 100644
--- a/terraform/aptos-node-testnet/gcp/variables.tf
+++ b/terraform/aptos-node-testnet/gcp/variables.tf
@@ -1,11 +1,5 @@
 ### Project config
 
-variable "cluster_bootstrap" {
-  description = "Set when bootstrapping a new cluster"
-  type        = bool
-  default     = false
-}
-
 variable "project" {
   description = "GCP project"
   type        = string
@@ -19,10 +13,18 @@ variable "region" {
 variable "zone" {
   description = "GCP zone suffix"
   type        = string
+  default     = ""
+}
+
+variable "node_locations" {
+  description = "List of node locations"
+  type        = list(string)
+  default     = [] # if empty, let GCP choose
 }
 
 variable "manage_via_tf" {
   description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
   default     = true
 }
 
@@ -30,21 +32,25 @@ variable "manage_via_tf" {
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
   description = "Aptos chain ID"
+  type        = string
   default     = "TESTING"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "testnet"
 }
 
 variable "image_tag" {
   description = "Docker image tag for Aptos node"
+  type        = string
   default     = "devnet"
 }
 
@@ -52,36 +58,49 @@ variable "image_tag" {
 
 variable "workspace_dns" {
   description = "Include Terraform workspace name in DNS records"
+  type        = bool
   default     = true
 }
 
 variable "dns_prefix_name" {
   description = "DNS prefix for fullnode url"
+  type        = string
   default     = "fullnode"
 }
 
 variable "zone_name" {
   description = "Zone name of GCP Cloud DNS zone to create records in"
+  type        = string
   default     = ""
 }
 
 variable "zone_project" {
   description = "GCP project which the DNS zone is in (if different)"
+  type        = string
   default     = ""
 }
 
+variable "create_google_managed_ssl_certificate" {
+  description = "Whether to create a Google Managed SSL Certificate for the GCE Ingress"
+  type        = bool
+  default     = false
+}
+
 variable "record_name" {
   description = "DNS record name to use ( is replaced with the TF workspace name)"
+  type        = string
   default     = ".aptos"
 }
 
 variable "create_dns_records" {
   description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means"
+  type        = bool
   default     = true
 }
 
 variable "dns_ttl" {
   description = "Time-to-Live for the Validator and Fullnode DNS records"
+  type        = number
   default     = 300
 }
 
@@ -89,11 +108,13 @@ variable "dns_ttl" {
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
 variable "helm_release_name_override" {
   description = "If set, overrides the name of the aptos-node helm chart"
+  type        = string
   default     = ""
 }
 
@@ -117,11 +138,13 @@ variable "forge_helm_values" {
 
 variable "num_validators" {
   description = "The number of validator nodes to create"
+  type        = number
   default     = 1
 }
 
 variable "num_fullnode_groups" {
   description = "The number of fullnode groups to create"
+  type        = number
   default     = 1
 }
 
@@ -130,84 +153,136 @@ variable "num_fullnode_groups" {
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
-### Instance config
-
-variable "utility_instance_type" {
-  description = "Instance type used for utilities"
-  default     = "n2-standard-8"
-}
-
-variable "validator_instance_type" {
-  description = "Instance type used for validator and fullnodes"
-  default     = "n2-standard-32"
-}
-
 ### Addons
 
 variable "enable_forge" {
   description = "Enable Forge"
+  type        = bool
   default     = false
 }
 
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
+variable "enable_genesis" {
+  description = "Perform genesis automatically"
+  type        = bool
+  default     = true
 }
 
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
+variable "testnet_addons_helm_values" {
+  description = "Map of values to pass to testnet-addons helm chart"
   type        = any
   default     = {}
 }
 
-variable "enable_prometheus_node_exporter" {
-  description = "Enable prometheus-node-exporter within monitoring helm chart"
-  default     = false
+### Node pools and Autoscaling
+
+variable "default_disk_size_gb" {
+  description = "Default disk size for nodes"
+  type        = number
+  default     = 200
 }
 
-variable "testnet_addons_helm_values" {
-  description = "Map of values to pass to testnet-addons helm chart"
-  type        = any
+variable "default_disk_type" {
+  description = "Default disk type for nodes"
+  type        = string
+  default     = "pd-standard"
+}
+
+variable "create_nodepools" {
+  description = "Create managed nodepools"
+  type        = bool
+  default     = true
+}
+
+variable "nodepool_sysctls" {
+  description = "Sysctls to set on nodepools"
+  type        = map(string)
   default     = {}
 }
 
-### Autoscaling
+variable "core_instance_type" {
+  description = "Instance type used for core pods"
+  type        = string
+  default     = "e2-medium"
+}
+
+variable "utility_instance_type" {
+  description = "Instance type used for utility pods"
+  type        = string
+  default     = "e2-standard-8"
+}
+
+variable "validator_instance_type" {
+  description = "Instance type used for validator and fullnodes"
+  type        = string
+  default     = "t2d-standard-16"
+}
+
+variable "utility_instance_enable_taint" {
+  description = "Whether to taint instances in the utilities nodegroup"
+  type        = bool
+  default     = true
+}
+
+variable "validator_instance_enable_taint" {
+  description = "Whether to taint instances in the validator nodegroup"
+  type        = bool
+  default     = true
+}
 
 variable "gke_enable_node_autoprovisioning" {
-  description = "Enable node autoprovisioning for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning"
-  default     = false
+  description = "Enable GKE node autoprovisioning"
+  type        = bool
+  default     = true
 }
 
 variable "gke_node_autoprovisioning_max_cpu" {
-  description = "Maximum CPU utilization for GKE node_autoprovisioning"
-  default     = 10
+  description = "Maximum CPU allocation for GKE node_autoprovisioning"
+  type        = number
+  default     = 500
 }
 
 variable "gke_node_autoprovisioning_max_memory" {
-  description = "Maximum memory utilization for GKE node_autoprovisioning"
-  default     = 100
+  description = "Maximum memory allocation for GKE node_autoprovisioning"
+  type        = number
+  default     = 2000
 }
 
-variable "gke_enable_autoscaling" {
-  description = "Enable autoscaling for the nodepools in the GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler"
-  default     = true
+variable "gke_autoscaling_profile" {
+  description = "Autoscaling profile for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles"
+  type        = string
+  default     = "OPTIMIZE_UTILIZATION"
 }
 
 variable "gke_autoscaling_max_node_count" {
   description = "Maximum number of nodes for GKE nodepool autoscaling"
-  default     = 10
+  type        = number
+  default     = 250
 }
 
 ### GKE cluster config
 
 variable "cluster_ipv4_cidr_block" {
   description = "The IP address range of the container pods in this cluster, in CIDR notation. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#cluster_ipv4_cidr_block"
+  type        = string
   default     = ""
 }
 
+variable "enable_clouddns" {
+  description = "Enable CloudDNS (Google-managed cluster DNS)"
+  type        = bool
+  default     = false
+}
+
+variable "enable_image_streaming" {
+  description = "Enable image streaming (GCFS)"
+  type        = bool
+  default     = false
+}
+
 variable "gke_maintenance_policy" {
   description = "The maintenance policy to use for the cluster. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#maintenance_policy"
   type = object({
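
GKE accepts only BALANCED and OPTIMIZE_UTILIZATION as autoscaling profiles, so the new gke_autoscaling_profile input could be guarded with the same validation pattern the AWS counterpart already uses for its storage-class variables. A possible hardening sketch, not part of this patch:

variable "gke_autoscaling_profile" {
  description = "Autoscaling profile for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles"
  type        = string
  default     = "OPTIMIZE_UTILIZATION"
  validation {
    condition     = contains(["BALANCED", "OPTIMIZE_UTILIZATION"], var.gke_autoscaling_profile)
    error_message = "gke_autoscaling_profile must be BALANCED or OPTIMIZE_UTILIZATION."
  }
}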
diff --git a/terraform/aptos-node/aws/cluster.tf b/terraform/aptos-node/aws/cluster.tf
index b0103425c0afe..b5a99a185bb5a 100644
--- a/terraform/aptos-node/aws/cluster.tf
+++ b/terraform/aptos-node/aws/cluster.tf
@@ -12,7 +12,7 @@ resource "aws_eks_cluster" "aptos" {
   tags                      = local.default_tags
 
   vpc_config {
-    subnet_ids              = concat(aws_subnet.public.*.id, aws_subnet.private.*.id)
+    subnet_ids              = concat(aws_subnet.public[*].id, aws_subnet.private[*].id)
     public_access_cidrs     = var.k8s_api_sources
     endpoint_private_access = true
     security_group_ids      = [aws_security_group.cluster.id]
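
The subnet_ids change swaps the legacy attribute-only splat (resource.*.attr) for the full splat operator introduced in Terraform 0.12; the two forms are equivalent here, and the bracketed spelling is the documented one. The same substitution is applied to the route table associations in network.tf below:

# legacy attribute splat, still accepted:
subnet_ids = concat(aws_subnet.public.*.id, aws_subnet.private.*.id)

# full splat, as used after this change:
subnet_ids = concat(aws_subnet.public[*].id, aws_subnet.private[*].id)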
diff --git a/terraform/aptos-node/aws/kubernetes.tf b/terraform/aptos-node/aws/kubernetes.tf
index 92b8219c48c7c..358b4e04de2e4 100644
--- a/terraform/aptos-node/aws/kubernetes.tf
+++ b/terraform/aptos-node/aws/kubernetes.tf
@@ -1,13 +1,13 @@
 provider "kubernetes" {
   host                   = aws_eks_cluster.aptos.endpoint
-  cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority.0.data)
+  cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority[0].data)
   token                  = data.aws_eks_cluster_auth.aptos.token
 }
 
 provider "helm" {
   kubernetes {
     host                   = aws_eks_cluster.aptos.endpoint
-    cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority.0.data)
+    cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority[0].data)
     token                  = data.aws_eks_cluster_auth.aptos.token
   }
 }
@@ -16,8 +16,6 @@ locals {
   kubeconfig = "/tmp/kube.config.${md5(timestamp())}"
 
   # helm chart paths
-  monitoring_helm_chart_path = "${path.module}/../../helm/monitoring"
-  logger_helm_chart_path     = "${path.module}/../../helm/logger"
   aptos_node_helm_chart_path = var.helm_chart != "" ? var.helm_chart : "${path.module}/../../helm/aptos-node"
 }
 
@@ -43,7 +41,7 @@ resource "kubernetes_storage_class" "gp3" {
     type = "gp3"
   }
 
-  depends_on = [null_resource.delete-gp2, aws_eks_addon.aws-ebs-csi-driver]
+  depends_on = [null_resource.delete-gp2]
 }
 
 resource "kubernetes_storage_class" "io1" {
@@ -70,25 +68,6 @@ resource "kubernetes_storage_class" "io2" {
   }
 }
 
-resource "kubernetes_namespace" "tigera-operator" {
-  metadata {
-    annotations = {
-      name = "tigera-operator"
-    }
-
-    name = "tigera-operator"
-  }
-}
-
-resource "helm_release" "calico" {
-  count      = var.enable_calico ? 1 : 0
-  name       = "calico"
-  repository = "https://docs.tigera.io/calico/charts"
-  chart      = "tigera-operator"
-  version    = "3.26.0"
-  namespace  = "tigera-operator"
-}
-
 locals {
   helm_values = jsonencode({
     numValidators     = var.num_validators
@@ -113,7 +92,6 @@ locals {
         value  = "validators"
         effect = "NoExecute"
       }]
-      remoteLogAddress = var.enable_logger ? "${helm_release.logger[0].name}-aptos-logger.${helm_release.logger[0].namespace}.svc:5044" : null
     }
     fullnode = {
       storage = {
@@ -171,80 +149,6 @@ resource "helm_release" "validator" {
   }
 }
 
-resource "helm_release" "logger" {
-  count       = var.enable_logger ? 1 : 0
-  name        = "${local.helm_release_name}-log"
-  chart       = local.logger_helm_chart_path
-  max_history = 5
-  wait        = false
-
-  values = [
-    jsonencode({
-      logger = {
-        name = "aptos-logger"
-      }
-      chain = {
-        name = var.chain_name
-      }
-      serviceAccount = {
-        create = false
-        # this name must match the serviceaccount created by the aptos-node helm chart
-        name = local.helm_release_name == "aptos-node" ? "aptos-node-validator" : "${local.helm_release_name}-aptos-node-validator"
-      }
-    }),
-    jsonencode(var.logger_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.logger_helm_chart_path, "**") : filesha1("${local.logger_helm_chart_path}/${f}")]))
-  }
-}
-
-
-resource "helm_release" "monitoring" {
-  count       = var.enable_monitoring ? 1 : 0
-  name        = "${local.helm_release_name}-mon"
-  chart       = local.monitoring_helm_chart_path
-  max_history = 5
-  wait        = false
-
-  values = [
-    jsonencode({
-      chain = {
-        name = var.chain_name
-      }
-      validator = {
-        name = var.validator_name
-      }
-      service = {
-        domain = local.domain
-      }
-      monitoring = {
-        prometheus = {
-          storage = {
-            class = kubernetes_storage_class.gp3.metadata[0].name
-          }
-        }
-      }
-      kube-state-metrics = {
-        enabled = var.enable_kube_state_metrics
-      }
-      prometheus-node-exporter = {
-        enabled = var.enable_prometheus_node_exporter
-      }
-    }),
-    jsonencode(var.monitoring_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.monitoring_helm_chart_path, "**") : filesha1("${local.monitoring_helm_chart_path}/${f}")]))
-  }
-}
-
 resource "kubernetes_cluster_role" "debug" {
   metadata {
     name = "debug"
diff --git a/terraform/aptos-node/aws/network.tf b/terraform/aptos-node/aws/network.tf
index e1f28d501c683..d690356631c74 100644
--- a/terraform/aptos-node/aws/network.tf
+++ b/terraform/aptos-node/aws/network.tf
@@ -68,7 +68,7 @@ resource "aws_route_table" "public" {
 
 resource "aws_route_table_association" "public" {
   count          = local.num_azs
-  subnet_id      = element(aws_subnet.public.*.id, count.index)
+  subnet_id      = element(aws_subnet.public[*].id, count.index)
   route_table_id = aws_route_table.public.id
 }
 
@@ -114,7 +114,7 @@ resource "aws_route_table" "private" {
 
 resource "aws_route_table_association" "private" {
   count          = local.num_azs
-  subnet_id      = element(aws_subnet.private.*.id, count.index)
+  subnet_id      = element(aws_subnet.private[*].id, count.index)
   route_table_id = aws_route_table.private.id
 }
 
diff --git a/terraform/aptos-node/aws/security.tf b/terraform/aptos-node/aws/security.tf
index f037dec881ad3..4addace8ff219 100644
--- a/terraform/aptos-node/aws/security.tf
+++ b/terraform/aptos-node/aws/security.tf
@@ -1,9 +1,6 @@
 # Security-related resources
 
-data "kubernetes_all_namespaces" "all" {}
-
 locals {
-  kubernetes_master_version = substr(aws_eks_cluster.aptos.version, 0, 4)
   baseline_pss_labels = {
     "pod-security.kubernetes.io/audit"   = "baseline"
     "pod-security.kubernetes.io/warn"    = "baseline"
@@ -11,40 +8,6 @@ locals {
   }
 }
 
-# FIXME: Remove when migrating to K8s 1.25
-resource "kubernetes_role_binding" "disable-psp" {
-  for_each = toset(local.kubernetes_master_version <= "1.24" ? data.kubernetes_all_namespaces.all.namespaces : [])
-  metadata {
-    name      = "privileged-psp"
-    namespace = each.value
-  }
-
-  role_ref {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "ClusterRole"
-    name      = "eks:podsecuritypolicy:privileged"
-  }
-
-  subject {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "Group"
-    name      = "system:serviceaccounts:${each.value}"
-  }
-}
-
-# FIXME: Remove when migrating to K8s 1.25
-resource "null_resource" "delete-psp-authenticated" {
-  count = local.kubernetes_master_version <= "1.24" ? 1 : 0
-  provisioner "local-exec" {
-    command = <<-EOT
-      aws --region ${var.region} eks update-kubeconfig --name ${aws_eks_cluster.aptos.name} --kubeconfig ${local.kubeconfig} &&
-      kubectl --kubeconfig ${local.kubeconfig} delete --ignore-not-found clusterrolebinding eks:podsecuritypolicy:authenticated
-    EOT
-  }
-
-  depends_on = [kubernetes_role_binding.disable-psp]
-}
-
 resource "kubernetes_labels" "pss-default" {
   api_version = "v1"
   kind        = "Namespace"
diff --git a/terraform/aptos-node/aws/variables.tf b/terraform/aptos-node/aws/variables.tf
index 117f4df19404a..38b48580f1657 100644
--- a/terraform/aptos-node/aws/variables.tf
+++ b/terraform/aptos-node/aws/variables.tf
@@ -5,41 +5,49 @@ variable "region" {
 
 variable "num_azs" {
   description = "Number of availability zones"
+  type        = number
   default     = 3
 }
 
 variable "kubernetes_version" {
   description = "Version of Kubernetes to use for EKS cluster"
-  default     = "1.24"
+  type        = string
+  default     = "1.26"
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
 variable "num_validators" {
   description = "The number of validator nodes to create"
+  type        = number
   default     = 1
 }
 
 variable "num_fullnode_groups" {
   description = "The number of fullnode groups to create"
+  type        = number
   default     = 1
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
   description = "Aptos chain ID"
+  type        = string
   default     = "TESTING"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "testnet"
 }
 
@@ -50,31 +58,37 @@ variable "validator_name" {
 
 variable "image_tag" {
   description = "Docker image tag for Aptos node"
+  type        = string
   default     = "devnet"
 }
 
 variable "zone_id" {
   description = "Zone ID of Route 53 domain to create records in"
+  type        = string
   default     = ""
 }
 
 variable "workspace_dns" {
   description = "Include Terraform workspace name in DNS records"
+  type        = bool
   default     = true
 }
 
 variable "record_name" {
   description = "DNS record name to use ( is replaced with the TF workspace name)"
+  type        = string
   default     = ".aptos"
 }
 
 variable "create_records" {
   description = "Creates DNS records in var.zone_id that point to k8s service, as opposed to using external-dns or other means"
+  type        = bool
   default     = true
 }
 
 variable "helm_chart" {
   description = "Path to aptos-validator Helm chart file"
+  type        = string
   default     = ""
 }
 
@@ -86,6 +100,7 @@ variable "helm_values" {
 
 variable "helm_values_file" {
   description = "Path to file containing values for Helm chart"
+  type        = string
   default     = ""
 }
 
@@ -126,130 +141,109 @@ variable "k8s_debugger_roles" {
 }
 
 variable "iam_path" {
-  default     = "/"
   description = "Path to use when naming IAM objects"
+  type        = string
+  default     = "/"
 }
 
 variable "permissions_boundary_policy" {
-  default     = ""
   description = "ARN of IAM policy to set as permissions boundary on created roles"
+  type        = string
 }
 
 variable "vpc_cidr_block" {
-  default     = "192.168.0.0/16"
   description = "VPC CIDR Block"
+  type        = string
+  default     = "192.168.0.0/16"
 }
 
 variable "maximize_single_az_capacity" {
   description = "Whether to maximize the capacity of the cluster by allocating more IPs to the first AZ"
+  type        = bool
   default     = false
 }
 
 variable "helm_enable_validator" {
   description = "Enable deployment of the validator Helm chart"
+  type        = bool
   default     = true
 }
 
 variable "utility_instance_type" {
   description = "Instance type used for utilities"
+  type        = string
   default     = "t3.2xlarge"
 }
 
 variable "utility_instance_num" {
   description = "Number of instances for utilities"
+  type        = number
   default     = 1
 }
 
 variable "utility_instance_min_num" {
   description = "Minimum number of instances for utilities"
+  type        = number
   default     = 1
 }
 
 variable "utility_instance_max_num" {
   description = "Maximum number of instances for utilities. If left 0, defaults to 2 * var.utility_instance_num"
+  type        = number
   default     = 0
 }
 
 variable "utility_instance_enable_taint" {
   description = "Whether to taint the instances in the utility nodegroup"
+  type        = bool
   default     = false
 }
 
 variable "validator_instance_type" {
   description = "Instance type used for validator and fullnodes"
+  type        = string
   default     = "c6i.8xlarge"
 }
 
 variable "validator_instance_num" {
   description = "Number of instances used for validator and fullnodes"
+  type        = number
   default     = 2
 }
 
 variable "validator_instance_min_num" {
   description = "Minimum number of instances for validators"
+  type        = number
   default     = 1
 }
 
 variable "validator_instance_max_num" {
   description = "Maximum number of instances for utilities. If left 0, defaults to 2 * var.validator_instance_num"
+  type        = number
   default     = 0
 }
 
 variable "validator_instance_enable_taint" {
   description = "Whether to taint instances in the validator nodegroup"
+  type        = bool
   default     = false
 }
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
-variable "enable_calico" {
-  description = "Enable Calico networking for NetworkPolicy"
-  default     = true
-}
-
-variable "enable_logger" {
-  description = "Enable logger helm chart"
-  default     = false
-}
-
-variable "logger_helm_values" {
-  description = "Map of values to pass to logger Helm"
-  type        = any
-  default     = {}
-}
-
-
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
-  type        = any
-  default     = {}
-}
-
-variable "enable_prometheus_node_exporter" {
-  description = "Enable prometheus-node-exporter within monitoring helm chart"
-  default     = false
-}
-
-variable "enable_kube_state_metrics" {
-  description = "Enable kube-state-metrics within monitoring helm chart"
-  default     = false
-}
-
 variable "helm_release_name_override" {
   description = "If set, overrides the name of the aptos-node helm chart"
+  type        = string
   default     = ""
 }
 
 variable "validator_storage_class" {
   description = "Which storage class to use for the validator and fullnode"
+  type        = string
   default     = "io1"
   validation {
     condition     = contains(["gp3", "io1", "io2"], var.validator_storage_class)
@@ -259,6 +253,7 @@ variable "validator_storage_class" {
 
 variable "fullnode_storage_class" {
   description = "Which storage class to use for the validator and fullnode"
+  type        = string
   default     = "io1"
   validation {
     condition     = contains(["gp3", "io1", "io2"], var.fullnode_storage_class)
@@ -268,5 +263,6 @@ variable "fullnode_storage_class" {
 
 variable "manage_via_tf" {
   description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
   default     = true
 }
diff --git a/terraform/aptos-node/aws/versions.tf b/terraform/aptos-node/aws/versions.tf
index a2b7631af994b..1403cae46c48a 100644
--- a/terraform/aptos-node/aws/versions.tf
+++ b/terraform/aptos-node/aws/versions.tf
@@ -1,8 +1,9 @@
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     aws = {
-      source = "hashicorp/aws"
+      source  = "hashicorp/aws"
+      version = "~> 4.35.0"
     }
     helm = {
       source = "hashicorp/helm"
diff --git a/terraform/aptos-node/azure/cluster.tf b/terraform/aptos-node/azure/cluster.tf
index 48d156b68d8b0..69f23ecbc5d85 100644
--- a/terraform/aptos-node/azure/cluster.tf
+++ b/terraform/aptos-node/azure/cluster.tf
@@ -12,7 +12,6 @@ resource "azurerm_kubernetes_cluster" "aptos" {
 
   network_profile {
     network_plugin    = "kubenet"
-    network_policy    = "calico"
     load_balancer_sku = "standard"
   }
 
diff --git a/terraform/aptos-node/azure/kubernetes.tf b/terraform/aptos-node/azure/kubernetes.tf
index cb704f20dbbaa..14864dc62d6c9 100644
--- a/terraform/aptos-node/azure/kubernetes.tf
+++ b/terraform/aptos-node/azure/kubernetes.tf
@@ -16,8 +16,6 @@ provider "helm" {
 
 locals {
   # helm chart paths
-  monitoring_helm_chart_path = "${path.module}/../../helm/monitoring"
-  logger_helm_chart_path     = "${path.module}/../../helm/logger"
   aptos_node_helm_chart_path = var.helm_chart != "" ? var.helm_chart : "${path.module}/../../helm/aptos-node"
 }
 
@@ -73,66 +71,3 @@ resource "helm_release" "validator" {
     value = sha1(join("", [for f in fileset(local.aptos_node_helm_chart_path, "**") : filesha1("${local.aptos_node_helm_chart_path}/${f}")]))
   }
 }
-
-resource "helm_release" "logger" {
-  count       = var.enable_logger ? 1 : 0
-  name        = "${terraform.workspace}-log"
-  chart       = local.logger_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      logger = {
-        name = "aptos-logger"
-      }
-      chain = {
-        name = var.chain_name
-      }
-      serviceAccount = {
-        create = false
-        name   = "${terraform.workspace}-aptos-node-validator"
-      }
-    }),
-    jsonencode(var.logger_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.logger_helm_chart_path, "**") : filesha1("${local.logger_helm_chart_path}/${f}")]))
-  }
-}
-
-resource "helm_release" "monitoring" {
-  count       = var.enable_monitoring ? 1 : 0
-  name        = "${terraform.workspace}-mon"
-  chart       = local.monitoring_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      chain = {
-        name = var.chain_name
-      }
-      validator = {
-        name = var.validator_name
-      }
-      monitoring = {
-        prometheus = {
-          storage = {
-            class = "default"
-          }
-        }
-      }
-    }),
-    jsonencode(var.monitoring_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.monitoring_helm_chart_path, "**") : filesha1("${local.monitoring_helm_chart_path}/${f}")]))
-  }
-}
diff --git a/terraform/aptos-node/azure/variables.tf b/terraform/aptos-node/azure/variables.tf
index e8e73e5e1cb12..f8ad067280f62 100644
--- a/terraform/aptos-node/azure/variables.tf
+++ b/terraform/aptos-node/azure/variables.tf
@@ -5,16 +5,19 @@ variable "region" {
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
   description = "Aptos chain ID"
+  type        = string
   default     = "TESTING"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "testnet"
 }
 
@@ -25,26 +28,31 @@ variable "validator_name" {
 
 variable "image_tag" {
   description = "Docker image tag for Aptos node"
+  type        = string
   default     = "devnet"
 }
 
 variable "zone_name" {
   description = "Zone name of Azure DNS domain to create records in"
+  type        = string
   default     = ""
 }
 
 variable "zone_resource_group" {
   description = "Azure resource group name of the DNS zone"
+  type        = string
   default     = ""
 }
 
 variable "record_name" {
   description = "DNS record name to use ( is replaced with the TF workspace name)"
+  type        = string
   default     = ".aptos"
 }
 
 variable "helm_chart" {
   description = "Path to aptos-validator Helm chart file"
+  type        = string
   default     = ""
 }
 
@@ -56,11 +64,13 @@ variable "helm_values" {
 
 variable "helm_values_file" {
   description = "Path to file containing values for Helm chart"
+  type        = string
   default     = ""
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
@@ -84,47 +94,30 @@ variable "k8s_debugger_groups" {
 
 variable "utility_instance_type" {
   description = "Instance type used for utilities"
+  type        = string
   default     = "Standard_B8ms"
 }
 
 variable "utility_instance_num" {
   description = "Number of instances for utilities"
+  type        = number
   default     = 1
 }
 
 variable "validator_instance_type" {
   description = "Instance type used for validator and fullnodes"
+  type        = string
   default     = "Standard_F4s_v2"
 }
 
 variable "validator_instance_num" {
   description = "Number of instances used for validator and fullnodes"
+  type        = number
   default     = 2
 }
 
 variable "validator_instance_enable_taint" {
   description = "Whether to taint the instances in the validator nodegroup"
+  type        = bool
   default     = false
 }
-
-variable "enable_logger" {
-  description = "Enable logger helm chart"
-  default     = false
-}
-
-variable "logger_helm_values" {
-  description = "Map of values to pass to logger Helm"
-  type        = any
-  default     = {}
-}
-
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
-  type        = any
-  default     = {}
-}
diff --git a/terraform/aptos-node/azure/versions.tf b/terraform/aptos-node/azure/versions.tf
index 487dac291ae73..c18e160e7df9a 100644
--- a/terraform/aptos-node/azure/versions.tf
+++ b/terraform/aptos-node/azure/versions.tf
@@ -1,7 +1,7 @@
 provider "azuread" {}
 
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     azuread = {
       source  = "hashicorp/azuread"
diff --git a/terraform/aptos-node/gcp/cluster.tf b/terraform/aptos-node/gcp/cluster.tf
index dae2708215e8a..2508bef0643b7 100644
--- a/terraform/aptos-node/gcp/cluster.tf
+++ b/terraform/aptos-node/gcp/cluster.tf
@@ -1,20 +1,23 @@
+locals {
+  location = var.zone == "" ? var.region : "${var.region}-${var.zone}"
+}
+
 resource "google_container_cluster" "aptos" {
-  provider = google-beta
-  name     = "aptos-${local.workspace_name}"
-  location = local.zone
-  network  = google_compute_network.aptos.id
+  provider       = google-beta
+  name           = "aptos-${local.workspace_name}"
+  location       = local.location
+  node_locations = var.node_locations
+  network        = google_compute_network.aptos.id
 
   remove_default_node_pool = true
   initial_node_count       = 1
-  logging_service          = "logging.googleapis.com/kubernetes"
-  monitoring_service       = "monitoring.googleapis.com/kubernetes"
 
-  release_channel {
-    channel = "REGULAR"
+  cost_management_config {
+    enabled = true
   }
 
-  pod_security_policy_config {
-    enabled = false
+  release_channel {
+    channel = "STABLE"
   }
 
   master_auth {
@@ -48,7 +51,7 @@ resource "google_container_cluster" "aptos" {
 
   addons_config {
     network_policy_config {
-      disabled = false
+      disabled = true
     }
   }
 
@@ -56,18 +59,76 @@ resource "google_container_cluster" "aptos" {
     enabled = false
   }
 
-  cluster_autoscaling {
-    enabled = var.gke_enable_node_autoprovisioning
+  pod_security_policy_config {
+    enabled = false
+  }
+
+  dynamic "dns_config" {
+    for_each = var.enable_clouddns ? ["clouddns"] : []
+    content {
+      cluster_dns       = "CLOUD_DNS"
+      cluster_dns_scope = "CLUSTER_SCOPE"
+    }
+  }
 
-    dynamic "resource_limits" {
-      for_each = var.gke_enable_node_autoprovisioning ? {
-        "cpu"    = var.gke_node_autoprovisioning_max_cpu
-        "memory" = var.gke_node_autoprovisioning_max_memory
-      } : {}
-      content {
-        resource_type = resource_limits.key
-        minimum       = 1
-        maximum       = resource_limits.value
+  monitoring_config {
+    managed_prometheus {
+      enabled = true
+    }
+    # Enable all components.
+    enable_components = [
+      "APISERVER",
+      "CONTROLLER_MANAGER",
+      "DAEMONSET",
+      "DEPLOYMENT",
+      "HPA",
+      "POD",
+      "SCHEDULER",
+      "STATEFULSET",
+      "STORAGE",
+      "SYSTEM_COMPONENTS",
+    ]
+  }
+
+  dynamic "cluster_autoscaling" {
+    for_each = var.gke_enable_node_autoprovisioning ? [1] : []
+    content {
+      enabled             = var.gke_enable_node_autoprovisioning
+      autoscaling_profile = var.gke_autoscaling_profile
+
+      dynamic "resource_limits" {
+        for_each = {
+          "cpu"    = var.gke_node_autoprovisioning_max_cpu
+          "memory" = var.gke_node_autoprovisioning_max_memory
+        }
+        content {
+          resource_type = resource_limits.key
+          minimum       = 1
+          maximum       = resource_limits.value
+        }
+      }
+
+      auto_provisioning_defaults {
+        service_account = google_service_account.gke.email
+        oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+        disk_size       = var.default_disk_size_gb
+        disk_type       = var.default_disk_type
+        management {
+          auto_upgrade = true
+          auto_repair  = true
+        }
+        shielded_instance_config {
+          enable_integrity_monitoring = true
+          enable_secure_boot          = true
+        }
+      }
+    }
+  }
+
+  node_pool_defaults {
+    node_config_defaults {
+      gcfs_config {
+        enabled = var.enable_image_streaming
       }
     }
   }
@@ -82,36 +143,99 @@ resource "google_container_cluster" "aptos" {
       }
     }
   }
+
+  lifecycle {
+    ignore_changes = [
+      private_cluster_config,
+    ]
+  }
+  deletion_protection = false
+}
+
+resource "google_container_node_pool" "core" {
+  count      = var.create_nodepools ? 1 : 0
+  provider   = google-beta
+  name       = "core"
+  location   = local.location
+  cluster    = google_container_cluster.aptos.name
+  node_count = lookup(var.node_pool_sizes, "core", null)
+
+  node_config {
+    machine_type    = var.core_instance_type
+    image_type      = "COS_CONTAINERD"
+    disk_size_gb    = lookup(var.instance_disk_sizes, "core", var.default_disk_size_gb)
+    service_account = google_service_account.gke.email
+    tags            = ["core"]
+    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
+
+    shielded_instance_config {
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
+    }
+
+    # The core machine type is too small (<16G) to support image streaming.
+    gcfs_config {
+      enabled = false
+    }
+
+    gvnic {
+      enabled = true
+    }
+
+    kubelet_config {
+      cpu_manager_policy = "none"
+    }
+  }
+
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
+  }
 }
 
 resource "google_container_node_pool" "utilities" {
-  provider = google-beta
-  name     = "utilities"
-  location = local.zone
-  cluster  = google_container_cluster.aptos.name
-  # If cluster autoscaling is enabled, node_count should not be set
-  # If node auto-provisioning is enabled, node_count should be set to 0 as this nodepool is most likely ignored
-  node_count = var.gke_enable_autoscaling ? null : (var.gke_enable_node_autoprovisioning ? 0 : lookup(var.node_pool_sizes, "utilities", var.utility_instance_num))
+  count      = var.create_nodepools ? 1 : 0
+  provider   = google-beta
+  name       = "utilities"
+  location   = local.location
+  cluster    = google_container_cluster.aptos.name
+  node_count = lookup(var.node_pool_sizes, "utilities", null)
 
   node_config {
     machine_type    = var.utility_instance_type
     image_type      = "COS_CONTAINERD"
-    disk_size_gb    = var.utility_instance_disk_size_gb
+    disk_size_gb    = lookup(var.instance_disk_sizes, "utilities", var.default_disk_size_gb)
     service_account = google_service_account.gke.email
     tags            = ["utilities"]
     oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
 
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
+
     shielded_instance_config {
-      enable_secure_boot = true
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
     }
 
-    workload_metadata_config {
-      mode = "GKE_METADATA"
+    gvnic {
+      enabled = true
+    }
+
+    kubelet_config {
+      cpu_manager_policy = "none"
+    }
+    linux_node_config {
+      sysctls = var.nodepool_sysctls
     }
 
     # if the NodeGroup should be tainted, then create the below dynamic block
     dynamic "taint" {
       for_each = var.utility_instance_enable_taint ? ["utilities"] : []
       content {
         key    = "aptos.org/nodepool"
         value  = taint.value
@@ -120,38 +244,46 @@ resource "google_container_node_pool" "utilities" {
     }
   }
 
-  dynamic "autoscaling" {
-    for_each = var.gke_enable_autoscaling ? [1] : []
-    content {
-      min_node_count = 1
-      max_node_count = var.gke_autoscaling_max_node_count
-    }
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
   }
 }
 
 resource "google_container_node_pool" "validators" {
-  provider = google-beta
-  name     = "validators"
-  location = local.zone
-  cluster  = google_container_cluster.aptos.name
-  # If cluster autoscaling is enabled, node_count should not be set
-  # If node auto-provisioning is enabled, node_count should be set to 0 as this nodepool is most likely ignored
-  node_count = var.gke_enable_autoscaling ? null : (var.gke_enable_node_autoprovisioning ? 0 : lookup(var.node_pool_sizes, "validators", var.validator_instance_num))
+  count      = var.create_nodepools ? 1 : 0
+  provider   = google-beta
+  name       = "validators"
+  location   = local.location
+  cluster    = google_container_cluster.aptos.name
+  node_count = lookup(var.node_pool_sizes, "validators", null)
 
   node_config {
     machine_type    = var.validator_instance_type
     image_type      = "COS_CONTAINERD"
-    disk_size_gb    = var.validator_instance_disk_size_gb
+    disk_size_gb    = lookup(var.instance_disk_sizes, "validators", var.default_disk_size_gb)
     service_account = google_service_account.gke.email
     tags            = ["validators"]
     oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
 
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
+
     shielded_instance_config {
-      enable_secure_boot = true
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
     }
 
-    workload_metadata_config {
-      mode = "GKE_METADATA"
+    gvnic {
+      enabled = true
+    }
+
+    kubelet_config {
+      cpu_manager_policy = "static"
+    }
+    linux_node_config {
+      sysctls = var.nodepool_sysctls
     }
 
     # if the NodeGroup should be tainted, then create the below dynamic block
@@ -165,11 +297,8 @@ resource "google_container_node_pool" "validators" {
     }
   }
 
-  dynamic "autoscaling" {
-    for_each = var.gke_enable_autoscaling ? [1] : []
-    content {
-      min_node_count = 1
-      max_node_count = var.gke_autoscaling_max_node_count
-    }
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
   }
 }
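
For reference, pool sizing in the hunks above is now driven entirely by the two override maps: lookup() returns its third argument when a key is absent, so an unlisted pool keeps node_count = null (leaving the count to the autoscaler) and falls back to default_disk_size_gb for its disks. A minimal tfvars sketch with illustrative values, not defaults from this patch:

    # terraform.tfvars (illustrative values only)
    node_pool_sizes = {
      utilities  = 2   # pin the utilities pool to 2 nodes
      validators = 4   # pin the validators pool to 4 nodes
      # "core" omitted: node_count stays null and the autoscaler decides
    }
    instance_disk_sizes = {
      validators = 500 # GB; unlisted pools use default_disk_size_gb (100)
    }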
diff --git a/terraform/aptos-node/gcp/kubernetes.tf b/terraform/aptos-node/gcp/kubernetes.tf
index 970b4487b94c8..1ff60429cd2e0 100644
--- a/terraform/aptos-node/gcp/kubernetes.tf
+++ b/terraform/aptos-node/gcp/kubernetes.tf
@@ -25,8 +25,6 @@ provider "helm" {
 
 locals {
   # helm chart paths
-  monitoring_helm_chart_path = "${path.module}/../../helm/monitoring"
-  logger_helm_chart_path     = "${path.module}/../../helm/logger"
   aptos_node_helm_chart_path = var.helm_chart != "" ? var.helm_chart : "${path.module}/../../helm/aptos-node"
 
   # override the helm release name if an override exists, otherwise adopt the workspace name
@@ -55,9 +53,9 @@ resource "helm_release" "validator" {
         storage = {
           class = kubernetes_storage_class.ssd.metadata[0].name
         }
-        nodeSelector = var.gke_enable_node_autoprovisioning ? {} : {
-          "cloud.google.com/gke-nodepool" = google_container_node_pool.validators.name
-        }
+        nodeSelector = var.validator_instance_enable_taint ? {
+          "cloud.google.com/gke-nodepool" = "validators"
+        } : {}
         tolerations = [{
           key    = "aptos.org/nodepool"
           value  = "validators"
@@ -68,9 +66,9 @@ resource "helm_release" "validator" {
         storage = {
           class = kubernetes_storage_class.ssd.metadata[0].name
         }
-        nodeSelector = var.gke_enable_node_autoprovisioning ? {} : {
-          "cloud.google.com/gke-nodepool" = google_container_node_pool.validators.name
-        }
+        nodeSelector = var.validator_instance_enable_taint ? {
+          "cloud.google.com/gke-nodepool" = "validators"
+        } : {}
         tolerations = [{
           key    = "aptos.org/nodepool"
           value  = "validators"
@@ -78,9 +76,14 @@ resource "helm_release" "validator" {
         }]
       }
       haproxy = {
-        nodeSelector = var.gke_enable_node_autoprovisioning ? {} : {
-          "cloud.google.com/gke-nodepool" = google_container_node_pool.utilities.name
-        }
+        nodeSelector = var.utility_instance_enable_taint ? {
+          "cloud.google.com/gke-nodepool" = "utilities"
+        } : {}
+        tolerations = [{
+          key    = "aptos.org/nodepool"
+          value  = "utilities"
+          effect = "NoExecute"
+        }]
       }
       service = {
         domain = local.domain
@@ -99,82 +102,3 @@ resource "helm_release" "validator" {
     }
   }
 }
-
-resource "helm_release" "logger" {
-  count       = var.enable_logger ? 1 : 0
-  name        = "${local.helm_release_name}-log"
-  chart       = local.logger_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      logger = {
-        name = "aptos-logger"
-      }
-      chain = {
-        name = var.chain_name
-      }
-      serviceAccount = {
-        create = false
-        # this name must match the serviceaccount created by the aptos-node helm chart
-      name = local.helm_release_name == "aptos-node" ? "aptos-node-validator" : "${local.helm_release_name}-aptos-node-validator" }
-    }),
-    jsonencode(var.logger_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.logger_helm_chart_path, "**") : filesha1("${local.logger_helm_chart_path}/${f}")]))
-  }
-}
-
-resource "helm_release" "monitoring" {
-  count       = var.enable_monitoring ? 1 : 0
-  name        = "${local.helm_release_name}-mon"
-  chart       = local.monitoring_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      chain = {
-        name = var.chain_name
-      }
-      validator = {
-        name = var.validator_name
-      }
-      monitoring = {
-        prometheus = {
-          storage = {
-            class = kubernetes_storage_class.ssd.metadata[0].name
-          }
-        }
-      }
-    }),
-    jsonencode(var.monitoring_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.monitoring_helm_chart_path, "**") : filesha1("${local.monitoring_helm_chart_path}/${f}")]))
-  }
-}
-
-resource "helm_release" "node_exporter" {
-  count       = var.enable_node_exporter ? 1 : 0
-  name        = "prometheus-node-exporter"
-  repository  = "https://prometheus-community.github.io/helm-charts"
-  chart       = "prometheus-node-exporter"
-  version     = "4.0.0"
-  namespace   = "kube-system"
-  max_history = 5
-  wait        = false
-
-  values = [
-    jsonencode({}),
-    jsonencode(var.node_exporter_helm_values),
-  ]
-}
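
A note on the scheduling contract above: a pool tainted aptos.org/nodepool=<pool>:NO_EXECUTE evicts pods that lack a matching toleration, while the nodeSelector is what actually pins pods onto the pool, so the two halves travel together behind the *_instance_enable_taint flags. A sketch of the pod-side values for the tainted utilities pool, mirroring the Helm values above (GKE's NO_EXECUTE taint effect surfaces in Kubernetes as NoExecute):

    # pod scheduling for a tainted "utilities" pool (illustrative)
    nodeSelector = {
      "cloud.google.com/gke-nodepool" = "utilities" # pin to the pool
    }
    tolerations = [{
      key    = "aptos.org/nodepool" # must match the taint key on the pool
      value  = "utilities"
      effect = "NoExecute"
    }]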
diff --git a/terraform/aptos-node/gcp/main.tf b/terraform/aptos-node/gcp/main.tf
index 7229840f6c49b..5a56adef7c8a9 100644
--- a/terraform/aptos-node/gcp/main.tf
+++ b/terraform/aptos-node/gcp/main.tf
@@ -11,7 +11,6 @@ provider "google-beta" {
 data "google_client_config" "provider" {}
 
 locals {
-  zone           = "${var.region}-${var.zone}"
   workspace_name = var.workspace_name_override == "" ? terraform.workspace : var.workspace_name_override
 }
 
diff --git a/terraform/aptos-node/gcp/variables.tf b/terraform/aptos-node/gcp/variables.tf
index c5dd42daf4f58..04a25dd872d99 100644
--- a/terraform/aptos-node/gcp/variables.tf
+++ b/terraform/aptos-node/gcp/variables.tf
@@ -1,11 +1,5 @@
 ### Project config
 
-variable "cluster_bootstrap" {
-  description = "Set when bootstrapping a new cluster"
-  type        = bool
-  default     = false
-}
-
 variable "project" {
   description = "GCP project"
   type        = string
@@ -19,20 +13,30 @@ variable "region" {
 variable "zone" {
   description = "GCP zone suffix"
   type        = string
+  default     = "" # if empty, it's a regional cluster
+}
+
+variable "node_locations" {
+  description = "List of node locations"
+  type        = list(string)
+  default     = [] # if empty, let GCP choose
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
   description = "Aptos chain ID"
+  type        = string
   default     = "TESTING"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "testnet"
 }
 
@@ -43,11 +47,13 @@ variable "validator_name" {
 
 variable "image_tag" {
   description = "Docker image tag for Aptos node"
+  type        = string
   default     = "devnet"
 }
 
 variable "helm_chart" {
   description = "Path to aptos-validator Helm chart file"
+  type        = string
   default     = ""
 }
 
@@ -59,166 +65,169 @@ variable "helm_values" {
 
 variable "helm_values_file" {
   description = "Path to file containing values for Helm chart"
+  type        = string
   default     = ""
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
-variable "node_pool_sizes" {
-  type        = map(number)
-  default     = {}
-  description = "Override the number of nodes in the specified pool"
-}
-
-variable "utility_instance_type" {
-  description = "Instance type used for utilities"
-  default     = "n2-standard-8"
+variable "manage_via_tf" {
+  description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
+  default     = true
 }
 
-variable "utility_instance_num" {
-  description = "Number of instances for utilities"
-  default     = 1
-}
+### DNS
 
-variable "utility_instance_enable_taint" {
-  description = "Whether to taint the instances in the utility nodegroup"
-  default     = false
+variable "zone_name" {
+  description = "Zone name of GCP Cloud DNS zone to create records in"
+  type        = string
+  default     = ""
 }
 
-variable "utility_instance_disk_size_gb" {
-  description = "Disk size for utility instances"
-  default     = 20
+variable "zone_project" {
+  description = "GCP project which the DNS zone is in (if different)"
+  type        = string
+  default     = ""
 }
 
-variable "validator_instance_type" {
-  description = "Instance type used for validator and fullnodes"
-  default     = "n2-standard-32"
+variable "workspace_dns" {
+  description = "Include Terraform workspace name in DNS records"
+  type        = bool
+  default     = true
 }
 
-variable "validator_instance_num" {
-  description = "Number of instances used for validator and fullnodes"
-  default     = 2
+variable "record_name" {
+  description = "DNS record name to use ( is replaced with the TF workspace name)"
+  type        = string
+  default     = ".aptos"
 }
 
-variable "validator_instance_enable_taint" {
-  description = "Whether to taint instances in the validator nodegroup"
-  default     = false
+variable "create_dns_records" {
+  description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means"
+  type        = bool
+  default     = true
 }
 
-variable "validator_instance_disk_size_gb" {
-  description = "Disk size for validator instances"
-  default     = 20
+variable "dns_ttl" {
+  description = "Time-to-Live for the Validator and Fullnode DNS records"
+  type        = number
+  default     = 300
 }
 
-variable "enable_logger" {
-  description = "Enable logger helm chart"
-  default     = false
-}
+### Node pools and Autoscaling
 
-variable "logger_helm_values" {
-  description = "Map of values to pass to logger Helm"
-  type        = any
+variable "node_pool_sizes" {
+  type        = map(number)
   default     = {}
+  description = "Override the number of nodes in the specified pool"
 }
 
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
-  type        = any
+variable "instance_disk_sizes" {
+  type        = map(number)
   default     = {}
+  description = "Override the disk size in the specified pool"
 }
 
-variable "enable_node_exporter" {
-  description = "Enable Prometheus node exporter helm chart"
-  default     = false
+variable "default_disk_size_gb" {
+  description = "Default disk size for nodes"
+  type        = number
+  default     = 100
 }
 
-variable "node_exporter_helm_values" {
-  description = "Map of values to pass to node exporter Helm"
-  type        = any
-  default     = {}
+variable "default_disk_type" {
+  description = "Default disk type for nodes"
+  type        = string
+  default     = "pd-standard"
 }
 
-variable "manage_via_tf" {
-  description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+variable "create_nodepools" {
+  description = "Create managed nodepools"
+  type        = bool
   default     = true
 }
 
-### DNS
-
-variable "zone_name" {
-  description = "Zone name of GCP Cloud DNS zone to create records in"
-  default     = ""
+variable "nodepool_sysctls" {
+  description = "Sysctls to set on nodepools"
+  type        = map(string)
+  default     = {}
 }
 
-variable "zone_project" {
-  description = "GCP project which the DNS zone is in (if different)"
-  default     = ""
+variable "core_instance_type" {
+  description = "Instance type used for core pods"
+  type        = string
+  default     = "e2-medium"
 }
 
-variable "workspace_dns" {
-  description = "Include Terraform workspace name in DNS records"
-  default     = true
+variable "utility_instance_type" {
+  description = "Instance type used for utility pods"
+  type        = string
+  default     = "e2-standard-8"
 }
 
-variable "record_name" {
-  description = "DNS record name to use ( is replaced with the TF workspace name)"
-  default     = ".aptos"
+variable "validator_instance_type" {
+  description = "Instance type used for validator and fullnodes"
+  type        = string
+  default     = "t2d-standard-16"
 }
 
-variable "create_dns_records" {
-  description = "Creates DNS records in var.zone_name that point to k8s service, as opposed to using external-dns or other means"
-  default     = true
+variable "utility_instance_enable_taint" {
+  description = "Whether to taint instances in the utilities nodegroup"
+  type        = bool
+  default     = false
 }
 
-variable "dns_ttl" {
-  description = "Time-to-Live for the Validator and Fullnode DNS records"
-  default     = 300
+variable "validator_instance_enable_taint" {
+  description = "Whether to taint instances in the validator nodegroup"
+  type        = bool
+  default     = false
 }
 
-### Autoscaling
-
 variable "gke_enable_node_autoprovisioning" {
-  description = "Enable node autoprovisioning for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning"
-  default     = false
+  description = "Enable GKE node autoprovisioning"
+  type        = bool
+  default     = true
 }
 
 variable "gke_node_autoprovisioning_max_cpu" {
-  description = "Maximum CPU utilization for GKE node_autoprovisioning"
-  default     = 10
+  description = "Maximum CPU allocation for GKE node autoprovisioning"
+  type        = number
+  default     = 500
 }
 
 variable "gke_node_autoprovisioning_max_memory" {
-  description = "Maximum memory utilization for GKE node_autoprovisioning"
-  default     = 100
+  description = "Maximum memory allocation for GKE node autoprovisioning"
+  type        = number
+  default     = 2000
 }
 
-variable "gke_enable_autoscaling" {
-  description = "Enable autoscaling for the nodepools in the GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler"
-  default     = true
+variable "gke_autoscaling_profile" {
+  description = "Autoscaling profile for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles"
+  type        = string
+  default     = "OPTIMIZE_UTILIZATION"
 }
 
 variable "gke_autoscaling_max_node_count" {
   description = "Maximum number of nodes for GKE nodepool autoscaling"
-  default     = 10
+  type        = number
+  default     = 250
 }
 
 ### Naming overrides
 
 variable "helm_release_name_override" {
   description = "If set, overrides the name of the aptos-node helm chart"
+  type        = string
   default     = ""
 }
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
@@ -226,18 +235,33 @@ variable "workspace_name_override" {
 
 variable "cluster_ipv4_cidr_block" {
   description = "The IP address range of the container pods in this cluster, in CIDR notation. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#cluster_ipv4_cidr_block"
+  type        = string
   default     = ""
 }
 
+variable "enable_clouddns" {
+  description = "Enable CloudDNS (Google-managed cluster DNS)"
+  type        = bool
+  default     = false
+}
+
+variable "enable_image_streaming" {
+  description = "Enable image streaming (GCFS)"
+  type        = bool
+  default     = false
+}
+
 ### Helm
 
 variable "num_validators" {
   description = "The number of validator nodes to create"
+  type        = number
   default     = 1
 }
 
 variable "num_fullnode_groups" {
   description = "The number of fullnode groups to create"
+  type        = number
   default     = 1
 }
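
The empty default for zone above is what toggles regional versus zonal clusters: the module derives the GKE location from the region plus the optional zone suffix (the expression lives in cluster.tf), and node_locations can pin which zones a regional cluster schedules nodes into. A quick sketch of the resolution with illustrative values:

    locals {
      # region = "us-west1", zone = ""  -> location = "us-west1"   (regional)
      # region = "us-west1", zone = "a" -> location = "us-west1-a" (zonal)
      location = var.zone == "" ? var.region : "${var.region}-${var.zone}"
    }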
 
diff --git a/terraform/aptos-node/gcp/versions.tf b/terraform/aptos-node/gcp/versions.tf
index 2b8786efb55aa..b74536e8f4cc8 100644
--- a/terraform/aptos-node/gcp/versions.tf
+++ b/terraform/aptos-node/gcp/versions.tf
@@ -1,13 +1,13 @@
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     google = {
       source  = "hashicorp/google"
-      version = "~> 4.54.0"
+      version = "~> 5.0.0"
     }
     google-beta = {
       source  = "hashicorp/google-beta"
-      version = "~> 4.54.0"
+      version = "~> 5.0.0"
     }
     helm = {
       source = "hashicorp/helm"
diff --git a/terraform/fullnode/aws/addons.tf b/terraform/fullnode/aws/addons.tf
index 0f301173bd899..52bb764c77565 100644
--- a/terraform/fullnode/aws/addons.tf
+++ b/terraform/fullnode/aws/addons.tf
@@ -54,4 +54,42 @@ resource "helm_release" "external-dns" {
   ]
 }
 
+resource "helm_release" "pfn-addons" {
+  depends_on = [
+    helm_release.fullnode
+  ]
+  name        = "pfn-addons"
+  chart       = local.pfn_addons_helm_chart_path
+  max_history = 10
+  wait        = false
 
+  values = [
+    jsonencode({
+      service = {
+        domain   = local.domain
+        aws_tags = local.aws_tags
+        fullnode = {
+          numFullnodes             = var.num_fullnodes
+          loadBalancerSourceRanges = var.client_sources_ipv4
+        }
+      }
+      ingress = {
+        class                    = "alb"
+        acm_certificate          = var.zone_id != "" ? aws_acm_certificate.ingress[0].arn : null
+        loadBalancerSourceRanges = var.client_sources_ipv4
+      }
+      load_test = {
+        config = {
+          numFullnodeGroups = var.num_fullnodes
+        }
+      }
+    }),
+    jsonencode(var.pfn_helm_values),
+  ]
+
+  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
+  set {
+    name  = "chart_sha1"
+    value = sha1(join("", [for f in fileset(local.pfn_addons_helm_chart_path, "**") : filesha1("${local.pfn_addons_helm_chart_path}/${f}")]))
+  }
+}
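
The chart_sha1 value is the redeployment trigger the comment refers to: fileset() enumerates every file under the chart path, filesha1() hashes each one, and sha1(join("", ...)) collapses the list into a single fingerprint, so editing any chart file changes the set value and rolls a new Helm release revision even with wait = false. Written out as a standalone sketch:

    # one fingerprint over the whole chart; any file edit changes it
    output "chart_fingerprint" {
      value = sha1(join("", [
        for f in fileset(local.pfn_addons_helm_chart_path, "**") :
        filesha1("${local.pfn_addons_helm_chart_path}/${f}")
      ]))
    }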
diff --git a/terraform/fullnode/aws/kubernetes.tf b/terraform/fullnode/aws/kubernetes.tf
index 3be8bea180f11..263106320e6f4 100644
--- a/terraform/fullnode/aws/kubernetes.tf
+++ b/terraform/fullnode/aws/kubernetes.tf
@@ -1,43 +1,6 @@
 locals {
   pfn_addons_helm_chart_path = "${path.module}/../../helm/pfn-addons"
-  pfn_logger_helm_chart_path = "${path.module}/../../helm/logger"
   fullnode_helm_chart_path   = "${path.module}/../../helm/fullnode"
-  monitoring_helm_chart_path = "${path.module}/../../helm/monitoring"
-}
-
-resource "helm_release" "pfn-addons" {
-  depends_on = [
-    helm_release.fullnode
-  ]
-  name        = "pfn-addons"
-  chart       = local.pfn_addons_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      service = {
-        domain   = local.domain
-        aws_tags = local.aws_tags
-        fullnode = {
-          numFullnodes             = var.num_fullnodes
-          loadBalancerSourceRanges = var.client_sources_ipv4
-        }
-      }
-      ingress = {
-        class                    = "alb"
-        acm_certificate          = var.zone_id != "" ? aws_acm_certificate.ingress[0].arn : null
-        loadBalancerSourceRanges = var.client_sources_ipv4
-      }
-    }),
-    jsonencode(var.pfn_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.pfn_addons_helm_chart_path, "**") : filesha1("${local.pfn_addons_helm_chart_path}/${f}")]))
-  }
 }
 
 resource "helm_release" "fullnode" {
@@ -60,9 +23,6 @@ resource "helm_release" "fullnode" {
       image = {
         tag = local.image_tag
       }
-      logging = {
-        address = var.enable_pfn_logger ? "fullnode-pfn-aptos-logger:5044" : ""
-      }
       nodeSelector = {
         "eks.amazonaws.com/nodegroup" = "fullnode"
       }
@@ -113,72 +73,3 @@ resource "helm_release" "fullnode" {
     }
   }
 }
-
-
-resource "helm_release" "pfn-logger" {
-  count       = var.enable_pfn_logger ? 1 : 0
-  name        = "pfn-logger"
-  chart       = local.pfn_logger_helm_chart_path
-  max_history = 10
-  wait        = false
-
-  values = [
-    jsonencode({
-      logger = {
-        name = "pfn"
-      }
-      chain = {
-        name = "aptos-${local.workspace_name}"
-      }
-    }),
-    jsonencode(var.pfn_logger_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.pfn_logger_helm_chart_path, "**") : filesha1("${local.pfn_logger_helm_chart_path}/${f}")]))
-  }
-}
-
-resource "helm_release" "monitoring" {
-  count       = var.enable_monitoring ? 1 : 0
-  name        = "aptos-monitoring"
-  chart       = local.monitoring_helm_chart_path
-  max_history = 5
-  wait        = false
-
-  values = [
-    jsonencode({
-      chain = {
-        name = var.chain_name
-      }
-      fullnode = {
-        name = var.fullnode_name
-      }
-      service = {
-        domain = local.domain
-      }
-      kube-state-metrics = {
-        enabled = var.enable_kube_state_metrics
-      }
-      prometheus-node-exporter = {
-        enabled = var.enable_prometheus_node_exporter
-      }
-      monitoring = {
-        prometheus = {
-          storage = {
-            class = "gp3"
-          }
-        }
-      }
-    }),
-    jsonencode(var.monitoring_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.monitoring_helm_chart_path, "**") : filesha1("${local.monitoring_helm_chart_path}/${f}")]))
-  }
-}
diff --git a/terraform/fullnode/aws/network.tf b/terraform/fullnode/aws/network.tf
index 9ac9bad604596..9f0e636f8d221 100644
--- a/terraform/fullnode/aws/network.tf
+++ b/terraform/fullnode/aws/network.tf
@@ -5,14 +5,14 @@ data "aws_route53_zone" "pfn" {
 
 locals {
   dns_prefix = var.workspace_dns ? "${local.workspace_name}.${var.dns_prefix_name}." : "${var.dns_prefix_name}."
-  domain     = var.zone_id != "" ? "${local.dns_prefix}${data.aws_route53_zone.pfn[0].name}" : null
+  domain     = var.zone_id != "" ? "${local.dns_prefix}${data.aws_route53_zone.pfn[0].name}" : terraform.workspace
 }
 
 resource "aws_acm_certificate" "ingress" {
   count = var.zone_id != "" ? 1 : 0
 
   domain_name               = local.domain
-  subject_alternative_names = concat(["*.${local.domain}"], var.tls_sans)
+  subject_alternative_names = distinct(concat(["*.${local.domain}"], var.tls_sans))
   validation_method         = "DNS"
 
   lifecycle {
diff --git a/terraform/fullnode/aws/security.tf b/terraform/fullnode/aws/security.tf
index 6dbf2c6a4933c..4addace8ff219 100644
--- a/terraform/fullnode/aws/security.tf
+++ b/terraform/fullnode/aws/security.tf
@@ -1,9 +1,6 @@
 # Security-related resources
 
-data "kubernetes_all_namespaces" "all" {}
-
 locals {
-  kubernetes_master_version = substr(data.aws_eks_cluster.aptos.version, 0, 4)
   baseline_pss_labels = {
     "pod-security.kubernetes.io/audit"   = "baseline"
     "pod-security.kubernetes.io/warn"    = "baseline"
@@ -11,27 +8,6 @@ locals {
   }
 }
 
-# FIXME: Remove when migrating to K8s 1.25
-resource "kubernetes_role_binding" "disable-psp" {
-  for_each = toset(local.kubernetes_master_version <= "1.24" ? data.kubernetes_all_namespaces.all.namespaces : [])
-  metadata {
-    name      = "privileged-psp"
-    namespace = each.value
-  }
-
-  role_ref {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "ClusterRole"
-    name      = "eks:podsecuritypolicy:privileged"
-  }
-
-  subject {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "Group"
-    name      = "system:serviceaccounts:${each.value}"
-  }
-}
-
 resource "kubernetes_labels" "pss-default" {
   api_version = "v1"
   kind        = "Namespace"
diff --git a/terraform/fullnode/aws/variables.tf b/terraform/fullnode/aws/variables.tf
index af70ce8a0402b..98fc577581b82 100644
--- a/terraform/fullnode/aws/variables.tf
+++ b/terraform/fullnode/aws/variables.tf
@@ -1,20 +1,23 @@
 variable "region" {
   description = "AWS region"
+  type        = string
 }
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
 variable "iam_path" {
-  default     = "/"
   description = "Path to use when naming IAM objects"
+  type        = string
+  default     = "/"
 }
 
 variable "permissions_boundary_policy" {
-  default     = ""
   description = "ARN of IAM policy to set as permissions boundary on created roles"
+  type        = string
 }
 
 variable "admin_sources_ipv4" {
@@ -40,31 +43,38 @@ variable "k8s_admins" {
 }
 
 variable "num_fullnodes" {
-  default = 1
+  description = "Number of fullnodes."
+  type        = number
+  default     = 1
 }
 
 variable "image_tag" {
   description = "Docker image tag for aptos components. Overrides ecr_repo method."
+  type        = string
   default     = ""
 }
 
 variable "ecr_repo" {
   description = "Name of an ECR repo to resolve 'stable' tag to a specific revision"
+  type        = string
   default     = ""
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 15
 }
 
 variable "chain_id" {
-  description = "aptos chain ID"
+  description = "Aptos chain ID"
+  type        = string
   default     = "DEVNET"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "devnet"
 }
 
@@ -93,6 +103,7 @@ variable "fullnode_helm_values_list" {
 
 variable "zone_id" {
   description = "Route53 Zone ID to create records in"
+  type        = string
   default     = ""
 }
 
@@ -104,86 +115,64 @@ variable "tls_sans" {
 
 variable "workspace_dns" {
   description = "Include Terraform workspace name in DNS records"
+  type        = bool
   default     = true
 }
 
 variable "dns_prefix_name" {
   description = "DNS prefix for fullnode url"
+  type        = string
   default     = "fullnode"
 }
 
-variable "enable_pfn_logger" {
-  description = "Enable separate public fullnode logger pod"
-  default     = false
-}
-
-variable "pfn_logger_helm_values" {
-  description = "Map of values to pass to public fullnode logger Helm"
-  type        = any
-  default     = {}
-}
-
 variable "utility_instance_type" {
   description = "Instance type used for utilities"
+  type        = string
   default     = "t3.medium"
 }
 
 variable "fullnode_instance_type" {
   description = "Instance type used for validator and fullnodes"
+  type        = string
   default     = "c6i.8xlarge"
 }
 
 variable "num_extra_instance" {
-  default     = 0
   description = "Number of extra instances to add into node pool"
+  type        = number
+  default     = 0
 }
 
 variable "enable_backup" {
-  description = "enable data backup from fullnode"
+  description = "Enable data backup from fullnode"
+  type        = bool
   default     = false
 }
 
 variable "enable_public_backup" {
-  description = "provide data backups to the public"
+  description = "Provide data backups to the public"
+  type        = bool
   default     = false
 }
 
 variable "backup_fullnode_index" {
-  description = "index of fullnode to backup data from"
+  description = "Index of fullnode to backup data from"
+  type        = number
   default     = 0
 }
 
 variable "fullnode_storage_class" {
   description = "Which storage class to use for the validator and fullnode"
+  type        = string
   default     = "io1"
   validation {
-    condition     = contains(["gp3", "gp2", "io1", "io2"], var.fullnode_storage_class)
-    error_message = "Supported storage classes are gp3, io1, io2"
+    condition     = contains(["gp2", "gp3", "io1", "io2"], var.fullnode_storage_class)
+    error_message = "Supported storage classes are gp2, gp3, io1, io2"
   }
 }
 
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
-  type        = any
-  default     = {}
-}
-
-variable "enable_prometheus_node_exporter" {
-  description = "Enable prometheus-node-exporter within monitoring helm chart"
-  default     = false
-}
-
-variable "enable_kube_state_metrics" {
-  description = "Enable kube-state-metrics within monitoring helm chart"
-  default     = false
-}
-
 variable "manage_via_tf" {
   description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
   default     = true
 }
diff --git a/terraform/fullnode/aws/versions.tf b/terraform/fullnode/aws/versions.tf
index 9e00b537a668e..134ae20d5c34d 100644
--- a/terraform/fullnode/aws/versions.tf
+++ b/terraform/fullnode/aws/versions.tf
@@ -1,8 +1,9 @@
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     aws = {
-      source = "hashicorp/aws"
+      source  = "hashicorp/aws"
+      version = "~> 4.35.0"
     }
     helm = {
       source = "hashicorp/helm"
diff --git a/terraform/fullnode/digital_ocean/variables.tf b/terraform/fullnode/digital_ocean/variables.tf
index 89a0b5eca62bb..a6ed5dd0ad6fa 100644
--- a/terraform/fullnode/digital_ocean/variables.tf
+++ b/terraform/fullnode/digital_ocean/variables.tf
@@ -11,8 +11,8 @@ variable "fullnode_helm_values" {
 }
 
 variable "do_token" {
-  type        = string
   description = "Digital Notion API token"
+  type        = string
 }
 
 variable "region" {
@@ -27,41 +27,49 @@ variable "fullnode_helm_values_list" {
 }
 
 variable "k8s_namespace" {
-  default     = "aptos"
   description = "Kubernetes namespace that the fullnode will be deployed into"
+  type        = string
+  default     = "aptos"
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
 variable "num_fullnodes" {
-  default     = 1
   description = "Number of fullnodes"
+  type        = number
+  default     = 1
 }
 
 variable "image_tag" {
-  default     = "devnet"
   description = "Docker image tag to use for the fullnode"
+  type        = string
+  default     = "devnet"
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
-  description = "aptos chain ID"
+  description = "Aptos chain ID"
+  type        = string
   default     = "DEVNET"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "devnet"
 }
 
 variable "machine_type" {
   description = "Machine type for running fullnode"
+  type        = string
   default     = "s-16vcpu-32gb"
 }
diff --git a/terraform/fullnode/gcp/addons.tf b/terraform/fullnode/gcp/addons.tf
index 8985415254817..de354a72a76be 100644
--- a/terraform/fullnode/gcp/addons.tf
+++ b/terraform/fullnode/gcp/addons.tf
@@ -31,8 +31,9 @@ data "google_dns_managed_zone" "pfn" {
 }
 
 locals {
-  dns_prefix = var.workspace_dns ? "${local.workspace_name}.${var.dns_prefix_name}." : "${var.dns_prefix_name}."
-  domain     = var.zone_name != "" ? trimsuffix("${local.dns_prefix}${data.google_dns_managed_zone.pfn[0].dns_name}", ".") : null
+  zone_project = var.zone_project != "" ? var.zone_project : var.project
+  dns_prefix   = var.workspace_dns ? "${local.workspace_name}.${var.dns_prefix_name}." : "${var.dns_prefix_name}."
+  domain       = var.zone_name != "" ? trimsuffix("${local.dns_prefix}${data.google_dns_managed_zone.pfn[0].dns_name}", ".") : null
 }
 
 resource "helm_release" "external-dns" {
@@ -61,3 +62,41 @@ resource "helm_release" "external-dns" {
     })
   ]
 }
+
+resource "helm_release" "pfn-addons" {
+  depends_on = [
+    helm_release.fullnode
+  ]
+  name        = "pfn-addons"
+  chart       = local.pfn_addons_helm_chart_path
+  max_history = 10
+  wait        = false
+  namespace   = var.k8s_namespace
+
+  values = [
+    jsonencode({
+      service = {
+        domain = local.domain
+      }
+      ingress = {
+        class                           = "gce"
+        backend_http2                   = var.backend_http2
+        gce_managed_certificate         = var.create_google_managed_ssl_certificate ? "aptos-${local.workspace_name}-ingress" : null
+        gce_managed_certificate_domains = var.create_google_managed_ssl_certificate ? join(",", distinct(concat([local.domain], var.tls_sans))) : ""
+        # loadBalancerSourceRanges = var.client_sources_ipv4 # not supported yet
+      }
+      load_test = {
+        config = {
+          numFullnodeGroups = var.num_fullnodes
+        }
+      }
+    }),
+    jsonencode(var.pfn_helm_values),
+  ]
+
+  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
+  set {
+    name  = "chart_sha1"
+    value = sha1(join("", [for f in fileset(local.pfn_addons_helm_chart_path, "**") : filesha1("${local.pfn_addons_helm_chart_path}/${f}")]))
+  }
+}
diff --git a/terraform/fullnode/gcp/auth.tf b/terraform/fullnode/gcp/auth.tf
index c098b1b962382..4af1fe37bfd83 100644
--- a/terraform/fullnode/gcp/auth.tf
+++ b/terraform/fullnode/gcp/auth.tf
@@ -1,7 +1,3 @@
-locals {
-  zone_project = var.zone_project != "" ? var.zone_project : var.project
-}
-
 resource "google_service_account" "gke" {
   account_id = "aptos-${terraform.workspace}-gke"
 }
diff --git a/terraform/fullnode/gcp/cluster.tf b/terraform/fullnode/gcp/cluster.tf
index 2b74252fbb336..ae573e650dd20 100644
--- a/terraform/fullnode/gcp/cluster.tf
+++ b/terraform/fullnode/gcp/cluster.tf
@@ -1,24 +1,23 @@
-resource "google_container_cluster" "aptos" {
-  provider = google-beta
-  name     = "aptos-${terraform.workspace}"
-  location = local.zone
-  network  = google_compute_network.aptos.id
+locals {
+  location = var.zone == "" ? var.region : "${var.region}-${var.zone}"
+}
 
-  lifecycle {
-    ignore_changes = [
-      private_cluster_config,
-      cluster_autoscaling[0].auto_provisioning_defaults[0].shielded_instance_config
-    ]
-    prevent_destroy = true
-  }
+resource "google_container_cluster" "aptos" {
+  provider       = google-beta
+  name           = "aptos-${terraform.workspace}"
+  location       = local.location
+  node_locations = var.node_locations
+  network        = google_compute_network.aptos.id
 
   remove_default_node_pool = true
   initial_node_count       = 1
-  logging_service          = "logging.googleapis.com/kubernetes"
-  monitoring_service       = "monitoring.googleapis.com/kubernetes"
+
+  cost_management_config {
+    enabled = true
+  }
 
   release_channel {
-    channel = "REGULAR"
+    channel = "STABLE"
   }
 
   master_auth {
@@ -37,7 +36,7 @@ resource "google_container_cluster" "aptos" {
   }
 
   private_cluster_config {
-    enable_private_nodes    = var.gke_enable_private_nodes
+    enable_private_nodes    = true
     enable_private_endpoint = false
     master_ipv4_cidr_block  = "172.16.0.0/28"
   }
@@ -52,7 +51,7 @@ resource "google_container_cluster" "aptos" {
 
   addons_config {
     network_policy_config {
-      disabled = false
+      disabled = true
     }
   }
 
@@ -60,55 +59,246 @@ resource "google_container_cluster" "aptos" {
     enabled = false
   }
 
-  cluster_autoscaling {
-    enabled = var.gke_enable_node_autoprovisioning
+  pod_security_policy_config {
+    enabled = false
+  }
+
+  dynamic "dns_config" {
+    for_each = var.enable_clouddns ? ["clouddns"] : []
+    content {
+      cluster_dns       = "CLOUD_DNS"
+      cluster_dns_scope = "CLUSTER_SCOPE"
+    }
+  }
+
+  monitoring_config {
+    managed_prometheus {
+      enabled = true
+    }
+    # Enable all components.
+    enable_components = [
+      "APISERVER",
+      "CONTROLLER_MANAGER",
+      "DAEMONSET",
+      "DEPLOYMENT",
+      "HPA",
+      "POD",
+      "SCHEDULER",
+      "STATEFULSET",
+      "STORAGE",
+      "SYSTEM_COMPONENTS",
+    ]
+  }
+
+  dynamic "cluster_autoscaling" {
+    for_each = var.gke_enable_node_autoprovisioning ? [1] : []
+    content {
+      enabled             = var.gke_enable_node_autoprovisioning
+      autoscaling_profile = var.gke_autoscaling_profile
+
+      dynamic "resource_limits" {
+        for_each = {
+          "cpu"    = var.gke_node_autoprovisioning_max_cpu
+          "memory" = var.gke_node_autoprovisioning_max_memory
+        }
+        content {
+          resource_type = resource_limits.key
+          minimum       = 1
+          maximum       = resource_limits.value
+        }
+      }
+
+      auto_provisioning_defaults {
+        service_account = google_service_account.gke.email
+        oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+        disk_size       = var.default_disk_size_gb
+        disk_type       = var.default_disk_type
+        management {
+          auto_upgrade = true
+          auto_repair  = true
+        }
+        shielded_instance_config {
+          enable_integrity_monitoring = true
+          enable_secure_boot          = true
+        }
+      }
+    }
+  }
+
+  node_pool_defaults {
+    node_config_defaults {
+      gcfs_config {
+        enabled = var.enable_image_streaming
+      }
+    }
+  }
 
-    dynamic "resource_limits" {
-      for_each = var.gke_enable_node_autoprovisioning ? {
-        "cpu"    = var.gke_node_autoprovisioning_max_cpu
-        "memory" = var.gke_node_autoprovisioning_max_memory
-      } : {}
+  maintenance_policy {
+    dynamic "recurring_window" {
+      for_each = var.gke_maintenance_policy.recurring_window != null ? [1] : []
       content {
-        resource_type = resource_limits.key
-        minimum       = 1
-        maximum       = resource_limits.value
+        start_time = var.gke_maintenance_policy.recurring_window.start_time
+        end_time   = var.gke_maintenance_policy.recurring_window.end_time
+        recurrence = var.gke_maintenance_policy.recurring_window.recurrence
       }
     }
-    auto_provisioning_defaults {
-      oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
-      service_account = google_service_account.gke.email
+  }
+
+  lifecycle {
+    ignore_changes = [
+      private_cluster_config,
+    ]
+  }
+  deletion_protection = false
+}
+
+resource "google_container_node_pool" "core" {
+  count      = var.create_nodepools ? 1 : 0
+  provider   = google-beta
+  name       = "core"
+  location   = local.location
+  cluster    = google_container_cluster.aptos.name
+  node_count = lookup(var.node_pool_sizes, "core", null)
+
+  node_config {
+    machine_type    = var.core_instance_type
+    image_type      = "COS_CONTAINERD"
+    disk_size_gb    = lookup(var.instance_disk_sizes, "core", var.default_disk_size_gb)
+    service_account = google_service_account.gke.email
+    tags            = ["core"]
+    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
+
+    shielded_instance_config {
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
+    }
+
+    # The core machine type is too small (<16G) to support image streaming.
+    gcfs_config {
+      enabled = false
+    }
+
+    gvnic {
+      enabled = true
+    }
+
+    kubelet_config {
+      cpu_manager_policy = "none"
     }
   }
+
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
+  }
+}
+
+resource "google_container_node_pool" "utilities" {
+  count      = var.create_nodepools ? 1 : 0
+  provider   = google-beta
+  name       = "utilities"
+  location   = local.location
+  cluster    = google_container_cluster.aptos.name
+  node_count = lookup(var.node_pool_sizes, "utilities", null)
+
+  node_config {
+    machine_type    = var.utility_instance_type
+    image_type      = "COS_CONTAINERD"
+    disk_size_gb    = lookup(var.instance_disk_sizes, "utilities", var.default_disk_size_gb)
+    service_account = google_service_account.gke.email
+    tags            = ["utilities"]
+    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
+
+    shielded_instance_config {
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
+    }
+
+    gvnic {
+      enabled = true
+    }
+
+    kubelet_config {
+      cpu_manager_policy = "none"
+    }
+    linux_node_config {
+      sysctls = var.nodepool_sysctls
+    }
+
+    # if the NodeGroup should be tainted, then create the below dynamic block
+    dynamic "taint" {
+      for_each = var.utility_instance_enable_taint ? ["utilities"] : []
+      content {
+        key    = "aptos.org/nodepool"
+        value  = taint.value
+        effect = "NO_EXECUTE"
+      }
+    }
+  }
+
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
+  }
 }
 
 resource "google_container_node_pool" "fullnodes" {
+  count      = var.create_nodepools ? 1 : 0
   provider   = google-beta
   name       = "fullnodes"
-  location   = local.zone
+  location   = local.location
   cluster    = google_container_cluster.aptos.name
-  node_count = var.gke_enable_autoscaling ? null : var.num_fullnodes + var.num_extra_instance
+  node_count = lookup(var.node_pool_sizes, "fullnodes", null)
 
   node_config {
-    machine_type    = var.machine_type
+    machine_type    = var.fullnode_instance_type
     image_type      = "COS_CONTAINERD"
-    disk_size_gb    = var.instance_disk_size_gb
+    disk_size_gb    = lookup(var.instance_disk_sizes, "fullnodes", var.default_disk_size_gb)
     service_account = google_service_account.gke.email
     tags            = ["fullnodes"]
+    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
+
+    workload_metadata_config {
+      mode = "GKE_METADATA"
+    }
 
     shielded_instance_config {
-      enable_secure_boot = true
+      enable_integrity_monitoring = true
+      enable_secure_boot          = true
     }
 
-    workload_metadata_config {
-      mode = "GKE_METADATA"
+    gvnic {
+      enabled = true
     }
-  }
 
-  dynamic "autoscaling" {
-    for_each = var.gke_enable_autoscaling ? [1] : []
-    content {
-      min_node_count = 1
-      max_node_count = var.gke_autoscaling_max_node_count
+    kubelet_config {
+      cpu_manager_policy = "static"
+    }
+    linux_node_config {
+      sysctls = var.nodepool_sysctls
     }
+
+    # if the NodeGroup should be tainted, then create the below dynamic block
+    dynamic "taint" {
+      for_each = var.fullnode_instance_enable_taint ? ["fullnodes"] : []
+      content {
+        key    = "aptos.org/nodepool"
+        value  = taint.value
+        effect = "NO_EXECUTE"
+      }
+    }
+  }
+
+  autoscaling {
+    min_node_count = 0
+    max_node_count = var.gke_autoscaling_max_node_count
   }
 }
diff --git a/terraform/fullnode/gcp/kubernetes.tf b/terraform/fullnode/gcp/kubernetes.tf
index 0acd8c7338473..b9975f7e90b3f 100644
--- a/terraform/fullnode/gcp/kubernetes.tf
+++ b/terraform/fullnode/gcp/kubernetes.tf
@@ -32,40 +32,15 @@ provider "helm" {
 locals {
   fullnode_helm_chart_path   = "${path.module}/../../helm/fullnode"
   pfn_addons_helm_chart_path = "${path.module}/../../helm/pfn-addons"
-  monitoring_helm_chart_path = "${path.module}/../../helm/monitoring"
-}
-
-
-resource "helm_release" "pfn-addons" {
-  depends_on = [
-    helm_release.fullnode
-  ]
-  name        = "pfn-addons"
-  chart       = local.pfn_addons_helm_chart_path
-  max_history = 10
-  wait        = false
-  namespace   = var.k8s_namespace
 
-  values = [
-    jsonencode({
-      service = {
-        domain = local.domain
-      }
-      ingress = {
-        class                           = "gce"
-        gce_managed_certificate         = var.create_google_managed_ssl_certificate ? "aptos-${local.workspace_name}-ingress" : null
-        gce_managed_certificate_domains = var.create_google_managed_ssl_certificate ? join(",", concat([for x in range(var.num_fullnodes) : "pfn${x}.${local.domain}"], [local.domain], var.tls_sans)) : ""
-        # loadBalancerSourceRanges = var.client_sources_ipv4 # not supported yet
-      }
-    }),
-    jsonencode(var.pfn_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.pfn_addons_helm_chart_path, "**") : filesha1("${local.pfn_addons_helm_chart_path}/${f}")]))
-  }
+  utility_nodeSelector = var.utility_instance_enable_taint ? {
+    "cloud.google.com/gke-nodepool" = "utilities"
+  } : {}
+  utility_tolerations = [{
+    key    = "aptos.org/nodepool"
+    value  = "utilities"
+    effect = "NoExecute"
+  }]
 }
 
 resource "helm_release" "fullnode" {
@@ -88,10 +63,14 @@ resource "helm_release" "fullnode" {
       image = {
         tag = var.image_tag
       }
-      nodeSelector = var.gke_enable_node_autoprovisioning ? {} : {
-        "cloud.google.com/gke-nodepool"          = "fullnodes"
-        "iam.gke.io/gke-metadata-server-enabled" = "true"
-      }
+      nodeSelector = var.fullnode_instance_enable_taint ? {
+        "cloud.google.com/gke-nodepool" = "fullnodes"
+      } : {}
+      tolerations = [{
+        key    = "aptos.org/nodepool"
+        value  = "fullnodes"
+        effect = "NoExecute"
+      }]
       storage = {
         class = kubernetes_storage_class.ssd.metadata[0].name
       }
@@ -103,7 +82,9 @@ resource "helm_release" "fullnode" {
       }
       backup = {
         # only enable backup for fullnode 0
-        enable = count.index == var.backup_fullnode_index ? var.enable_backup : false
+        enable       = count.index == var.backup_fullnode_index ? var.enable_backup : false
+        nodeSelector = local.utility_nodeSelector
+        tolerations  = local.utility_tolerations
         config = {
           location = "gcs"
           gcs = {
@@ -111,6 +92,14 @@ resource "helm_release" "fullnode" {
           }
         }
       }
+      backup_verify = {
+        nodeSelector = local.utility_nodeSelector
+        tolerations  = local.utility_tolerations
+      }
+      backup_compaction = {
+        nodeSelector = local.utility_nodeSelector
+        tolerations  = local.utility_tolerations
+      }
       restore = {
         config = {
           location = "gcs"
@@ -139,49 +128,3 @@ resource "helm_release" "fullnode" {
     }
   }
 }
-
-
-
-resource "helm_release" "monitoring" {
-  count       = var.enable_monitoring ? 1 : 0
-  name        = "aptos-monitoring"
-  chart       = local.monitoring_helm_chart_path
-  max_history = 5
-  wait        = false
-  namespace   = var.k8s_namespace
-
-
-  values = [
-    jsonencode({
-      chain = {
-        name = var.chain_name
-      }
-      fullnode = {
-        name = var.fullnode_name
-      }
-      service = {
-        domain = var.zone_name != "" ? trimsuffix(local.domain, ".") : ""
-      }
-      kube-state-metrics = {
-        enabled = var.enable_kube_state_metrics
-      }
-      prometheus-node-exporter = {
-        enabled = var.enable_prometheus_node_exporter
-      }
-      monitoring = {
-        prometheus = {
-          storage = {
-            class = "standard"
-          }
-        }
-      }
-    }),
-    jsonencode(var.monitoring_helm_values),
-  ]
-
-  # inspired by https://stackoverflow.com/a/66501021 to trigger redeployment whenever any of the charts file contents change.
-  set {
-    name  = "chart_sha1"
-    value = sha1(join("", [for f in fileset(local.monitoring_helm_chart_path, "**") : filesha1("${local.monitoring_helm_chart_path}/${f}")]))
-  }
-}
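
On the autoscaling side, the limits fed to node autoprovisioning in cluster.tf are cluster-wide ceilings rather than per-node shapes: the dynamic resource_limits block expands the cpu/memory map into one block per resource, so the new defaults let NAP create nodes totaling up to 500 vCPUs and 2000 GB of memory across all auto-provisioned pools. Expanded by hand, the two generated blocks are:

    resource_limits {
      resource_type = "cpu"
      minimum       = 1
      maximum       = 500  # var.gke_node_autoprovisioning_max_cpu
    }
    resource_limits {
      resource_type = "memory"
      minimum       = 1
      maximum       = 2000 # var.gke_node_autoprovisioning_max_memory
    }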
diff --git a/terraform/fullnode/gcp/main.tf b/terraform/fullnode/gcp/main.tf
index 478577495fd7b..5a56adef7c8a9 100644
--- a/terraform/fullnode/gcp/main.tf
+++ b/terraform/fullnode/gcp/main.tf
@@ -11,19 +11,21 @@ provider "google-beta" {
 data "google_client_config" "provider" {}
 
 locals {
-  zone           = "${var.region}-${var.zone}"
   workspace_name = var.workspace_name_override == "" ? terraform.workspace : var.workspace_name_override
 }
 
 resource "google_project_service" "services" {
   for_each = {
     "clouderrorreporting.googleapis.com"  = true
+    "cloudkms.googleapis.com"             = true
     "cloudresourcemanager.googleapis.com" = true
     "compute.googleapis.com"              = true
     "container.googleapis.com"            = true
     "iam.googleapis.com"                  = true
     "logging.googleapis.com"              = true
     "monitoring.googleapis.com"           = true
+    "secretmanager.googleapis.com"        = true
+    "spanner.googleapis.com"              = true
   }
   service            = each.key
   disable_on_destroy = false
diff --git a/terraform/fullnode/gcp/network.tf b/terraform/fullnode/gcp/network.tf
index 41f24d0f4fafa..839e4a01474e0 100644
--- a/terraform/fullnode/gcp/network.tf
+++ b/terraform/fullnode/gcp/network.tf
@@ -22,15 +22,13 @@ resource "google_compute_router" "nat" {
 }
 
 resource "google_compute_address" "nat" {
-  count = var.gke_enable_private_nodes ? 1 : 0
-  name  = "aptos-${terraform.workspace}-nat"
+  name = "aptos-${terraform.workspace}-nat"
 }
 
 resource "google_compute_router_nat" "nat" {
-  count                              = var.gke_enable_private_nodes ? 1 : 0
   name                               = "aptos-${terraform.workspace}-nat"
   router                             = google_compute_router.nat.name
   nat_ip_allocate_option             = "MANUAL_ONLY"
-  nat_ips                            = [google_compute_address.nat[0].self_link]
+  nat_ips                            = [google_compute_address.nat.self_link]
   source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES"
 }
diff --git a/terraform/fullnode/gcp/security.tf b/terraform/fullnode/gcp/security.tf
index 206c41b54e3d3..4addace8ff219 100644
--- a/terraform/fullnode/gcp/security.tf
+++ b/terraform/fullnode/gcp/security.tf
@@ -1,9 +1,6 @@
 # Security-related resources
 
-data "kubernetes_all_namespaces" "all" {}
-
 locals {
-  kubernetes_master_version = substr(google_container_cluster.aptos.master_version, 0, 4)
   baseline_pss_labels = {
     "pod-security.kubernetes.io/audit"   = "baseline"
     "pod-security.kubernetes.io/warn"    = "baseline"
@@ -11,27 +8,6 @@ locals {
   }
 }
 
-# FIXME: Remove when migrating to K8s 1.25
-resource "kubernetes_role_binding" "disable-psp" {
-  for_each = toset(local.kubernetes_master_version <= "1.24" ? data.kubernetes_all_namespaces.all.namespaces : [])
-  metadata {
-    name      = "privileged-psp"
-    namespace = each.value
-  }
-
-  role_ref {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "ClusterRole"
-    name      = "gce:podsecuritypolicy:privileged"
-  }
-
-  subject {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "Group"
-    name      = "system:serviceaccounts:${each.value}"
-  }
-}
-
 resource "kubernetes_labels" "pss-default" {
   api_version = "v1"
   kind        = "Namespace"
diff --git a/terraform/fullnode/gcp/variables.tf b/terraform/fullnode/gcp/variables.tf
index 8dfc0195ef1cc..8025e9571c4f0 100644
--- a/terraform/fullnode/gcp/variables.tf
+++ b/terraform/fullnode/gcp/variables.tf
@@ -1,3 +1,5 @@
+### Project config
+
 variable "project" {
   description = "GCP project"
   type        = string
@@ -11,44 +13,205 @@ variable "region" {
 variable "zone" {
   description = "GCP zone suffix"
   type        = string
+  default     = "" # if empty, it's a regional cluster
 }
 
-variable "workspace_name_override" {
-  description = "If specified, overrides the usage of Terraform workspace for naming purposes"
-  default     = ""
+variable "node_locations" {
+  description = "List of node locations"
+  type        = list(string)
+  default     = [] # if empty, let GCP choose
 }
 
-variable "tls_sans" {
-  description = "List of Subject Alternate Names to include in TLS certificate"
-  type        = list(string)
-  default     = []
+variable "manage_via_tf" {
+  description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
+  type        = bool
+  default     = true
 }
 
+### DNS
+
 variable "workspace_dns" {
   description = "Include Terraform workspace name in DNS records"
+  type        = bool
   default     = true
 }
 
 variable "dns_prefix_name" {
   description = "DNS prefix for fullnode url"
+  type        = string
   default     = "fullnode"
 }
 
 variable "zone_name" {
   description = "Zone name of GCP Cloud DNS zone to create records in"
+  type        = string
   default     = ""
 }
 
 variable "zone_project" {
   description = "GCP project which the DNS zone is in (if different)"
+  type        = string
   default     = ""
 }
 
 variable "create_google_managed_ssl_certificate" {
   description = "Whether to create a Google Managed SSL Certificate for the GCE Ingress"
+  type        = bool
+  default     = false
+}
+
+variable "backend_http2" {
+  description = "Whether to enable HTTP/2 between Ingress and backends"
+  type        = bool
+  default     = false
+}
+
+### Node pools and Autoscaling
+
+variable "node_pool_sizes" {
+  type        = map(number)
+  default     = {}
+  description = "Override the number of nodes in the specified pool"
+}
+
+variable "instance_disk_sizes" {
+  type        = map(number)
+  default     = {}
+  description = "Override the disk size in the specified pool"
+}
+
+variable "default_disk_size_gb" {
+  description = "Default disk size for nodes"
+  type        = number
+  default     = 100
+}
+
+variable "default_disk_type" {
+  description = "Default disk type for nodes"
+  type        = string
+  default     = "pd-standard"
+}
+
+variable "create_nodepools" {
+  description = "Create managed nodepools"
+  type        = bool
+  default     = true
+}
+
+variable "nodepool_sysctls" {
+  description = "Sysctls to set on nodepools"
+  type        = map(string)
+  default     = {}
+}
+
+variable "core_instance_type" {
+  description = "Instance type used for core pods"
+  type        = string
+  default     = "e2-medium"
+}
+
+variable "utility_instance_type" {
+  description = "Instance type used for utility pods"
+  type        = string
+  default     = "e2-standard-8"
+}
+
+variable "fullnode_instance_type" {
+  description = "Instance type used for validator and fullnodes"
+  type        = string
+  default     = "t2d-standard-16"
+}
+
+variable "utility_instance_enable_taint" {
+  description = "Whether to taint instances in the utilities nodegroup"
+  type        = bool
   default     = false
 }
 
+variable "fullnode_instance_enable_taint" {
+  description = "Whether to taint instances in the validator nodegroup"
+  type        = bool
+  default     = true
+}
+
+variable "gke_enable_node_autoprovisioning" {
+  description = "Enable GKE node autoprovisioning"
+  type        = bool
+  default     = true
+}
+
+variable "gke_node_autoprovisioning_max_cpu" {
+  description = "Maximum CPU allocation for GKE node autoprovisioning"
+  type        = number
+  default     = 500
+}
+
+variable "gke_node_autoprovisioning_max_memory" {
+  description = "Maximum memory allocation for GKE node autoprovisioning"
+  type        = number
+  default     = 2000
+}
+
+variable "gke_autoscaling_profile" {
+  description = "Autoscaling profile for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler#autoscaling_profiles"
+  type        = string
+  default     = "OPTIMIZE_UTILIZATION"
+}
+
+variable "gke_autoscaling_max_node_count" {
+  description = "Maximum number of nodes for GKE nodepool autoscaling"
+  type        = number
+  default     = 250
+}
+
+### Naming overrides
+
+variable "helm_release_name_override" {
+  description = "If set, overrides the name of the aptos-node helm chart"
+  type        = string
+  default     = ""
+}
+
+variable "workspace_name_override" {
+  description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
+  default     = ""
+}
+
+### GKE cluster config
+
+variable "enable_clouddns" {
+  description = "Enable CloudDNS (Google-managed cluster DNS)"
+  type        = bool
+  default     = false
+}
+
+variable "enable_image_streaming" {
+  description = "Enable image streaming (GCFS)"
+  type        = bool
+  default     = false
+}
+
+variable "gke_maintenance_policy" {
+  description = "The maintenance policy to use for the cluster. See https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#maintenance_policy"
+  type = object({
+    recurring_window = object({
+      start_time = string
+      end_time   = string
+      recurrence = string
+    })
+  })
+  default = {
+    recurring_window = {
+      start_time = "2023-06-15T00:00:00Z"
+      end_time   = "2023-06-15T23:59:00Z"
+      recurrence = "FREQ=DAILY"
+    }
+  }
+}
+
+### Helm
+
 variable "helm_values" {
   description = "Map of values to pass to Helm"
   type        = any
@@ -74,47 +237,44 @@ variable "fullnode_helm_values_list" {
 }
 
 variable "k8s_namespace" {
-  default     = "aptos"
   description = "Kubernetes namespace that the fullnode will be deployed into"
+  type        = string
+  default     = "aptos"
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
 variable "num_fullnodes" {
-  default     = 1
   description = "Number of fullnodes"
-}
-
-variable "num_extra_instance" {
-  default     = 0
-  description = "Number of extra instances to add into node pool"
-}
-
-variable "instance_disk_size_gb" {
-  default     = 100
-  description = "Disk size for fullnode instance"
+  type        = number
+  default     = 1
 }
 
 variable "image_tag" {
-  default     = "devnet"
   description = "Docker image tag to use for the fullnode"
+  type        = string
+  default     = "devnet"
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
-  description = "aptos chain ID"
+  description = "Aptos chain ID"
+  type        = string
   default     = "DEVNET"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "devnet"
 }
 
@@ -123,79 +283,28 @@ variable "fullnode_name" {
   type        = string
 }
 
-variable "machine_type" {
-  description = "Machine type for running fullnode"
-  default     = "n2-standard-32"
-}
+### Addons
 
 variable "enable_backup" {
-  description = "enable data backup from fullnode"
+  description = "Enable data backup from fullnode"
+  type        = bool
   default     = false
 }
 
 variable "enable_public_backup" {
-  description = "provide data backups to the public"
+  description = "Provide data backups to the public"
+  type        = bool
   default     = false
 }
 
-
 variable "backup_fullnode_index" {
-  description = "index of fullnode to backup data from"
+  description = "Index of fullnode to backup data from"
+  type        = number
   default     = 0
 }
 
-variable "enable_monitoring" {
-  description = "Enable monitoring helm chart"
-  default     = false
-}
-
-variable "monitoring_helm_values" {
-  description = "Map of values to pass to monitoring Helm"
-  type        = any
-  default     = {}
-}
-
-variable "enable_prometheus_node_exporter" {
-  description = "Enable prometheus-node-exporter within monitoring helm chart"
-  default     = false
-}
-
-variable "enable_kube_state_metrics" {
-  description = "Enable kube-state-metrics within monitoring helm chart"
-  default     = false
-}
-
-variable "gke_enable_private_nodes" {
-  description = "Enable private nodes for GKE cluster"
-  default     = true
-}
-
-variable "gke_enable_node_autoprovisioning" {
-  description = "Enable node autoprovisioning for GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning"
-  default     = false
-}
-
-variable "gke_node_autoprovisioning_max_cpu" {
-  description = "Maximum CPU utilization for GKE node_autoprovisioning"
-  default     = 10
-}
-
-variable "gke_node_autoprovisioning_max_memory" {
-  description = "Maximum memory utilization for GKE node_autoprovisioning"
-  default     = 100
-}
-
-variable "gke_enable_autoscaling" {
-  description = "Enable autoscaling for the nodepools in the GKE cluster. See https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler"
-  default     = true
-}
-
-variable "gke_autoscaling_max_node_count" {
-  description = "Maximum number of nodes for GKE nodepool autoscaling"
-  default     = 10
-}
-
-variable "manage_via_tf" {
-  description = "Whether to manage the aptos-node k8s workload via Terraform. If set to false, the helm_release resource will still be created and updated when values change, but it may not be updated on every apply"
-  default     = true
+variable "tls_sans" {
+  description = "List of Subject Alternate Names to include in TLS certificate"
+  type        = list(string)
+  default     = []
 }
diff --git a/terraform/fullnode/gcp/versions.tf b/terraform/fullnode/gcp/versions.tf
index f88de6cbc2a7e..b74536e8f4cc8 100644
--- a/terraform/fullnode/gcp/versions.tf
+++ b/terraform/fullnode/gcp/versions.tf
@@ -1,11 +1,13 @@
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     google = {
-      source = "hashicorp/google"
+      source  = "hashicorp/google"
+      version = "~> 5.0.0"
     }
     google-beta = {
-      source = "hashicorp/google-beta"
+      source  = "hashicorp/google-beta"
+      version = "~> 5.0.0"
     }
     helm = {
       source = "hashicorp/helm"
diff --git a/terraform/fullnode/vultr/variables.tf b/terraform/fullnode/vultr/variables.tf
index 7bc3d6e8bb3ad..cf98dda3900bb 100644
--- a/terraform/fullnode/vultr/variables.tf
+++ b/terraform/fullnode/vultr/variables.tf
@@ -17,57 +17,68 @@ variable "fullnode_helm_values_list" {
 }
 
 variable "k8s_namespace" {
-  default     = "aptos"
   description = "Kubernetes namespace that the fullnode will be deployed into"
+  type        = string
+  default     = "aptos"
 }
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
 variable "num_fullnodes" {
-  default     = 1
   description = "Number of fullnodes"
+  type        = number
+  default     = 1
 }
 
 variable "image_tag" {
-  default     = "devnet"
   description = "Docker image tag to use for the fullnode"
+  type        = string
+  default     = "devnet"
 }
 
 variable "era" {
   description = "Chain era, used to start a clean chain"
+  type        = number
   default     = 1
 }
 
 variable "chain_id" {
-  description = "aptos chain ID"
+  description = "Aptos chain ID"
+  type        = string
   default     = "DEVNET"
 }
 
 variable "chain_name" {
   description = "Aptos chain name"
+  type        = string
   default     = "devnet"
 }
 
 variable "machine_type" {
   description = "Machine type for running fullnode. All configurations can be obtained at https://www.vultr.com/api/#tag/plans"
+  type        = string
   default     = "vc2-16c-32gb"
 }
 
 variable "api_key" {
   description = "API Key, can be obtained at https://my.vultr.com/settings/#settingsapi"
+  type        = string
   default     = ""
 }
 
 variable "fullnode_region" {
   description = "Geographical region for the node location. All 25 regions can be obtained at https://api.vultr.com/v2/regions"
+  type        = string
   default     = "fra"
 }
 
 
 variable "block_storage_class" {
   description = "Either vultr-block-storage for high_perf/ssd, vultr-block-storage-hdd for storage_opt/hdd. high_perf is not available in all regions!"
+  type        = string
   default     = "vultr-block-storage"
 }
diff --git a/terraform/helm/aptos-node/README.md b/terraform/helm/aptos-node/README.md
index 94f45f063f02c..c60abcae4cea9 100644
--- a/terraform/helm/aptos-node/README.md
+++ b/terraform/helm/aptos-node/README.md
@@ -53,19 +53,19 @@ Aptos blockchain node deployment
 | labels | string | `nil` |  |
 | loadTestGenesis | bool | `false` | Load test-data for starting a test network |
 | manageImages | bool | `true` | If true, helm will always override the deployed image with what is configured in the helm values. If not, helm will take the latest image from the currently running workloads, which is useful if you have a separate procedure to update images (e.g. rollout) |
-| multicluster | object | `{"enabled":false,"targetClusters":["cluster1","cluster2","cluster3"]}` | Options for multicluster mode. This is *experimental only*. |
+| multicluster | object | `{"enabled":false,"targetClusters":["forge-multiregion-1","forge-multiregion-2","forge-multiregion-3"]}` | Options for multicluster mode. This is *experimental only*. |
 | numFullnodeGroups | int | `1` | Total number of fullnode groups to deploy |
 | numValidators | int | `1` | Number of validators to deploy |
 | overrideNodeConfig | bool | `false` | Specify validator and fullnode NodeConfigs via named ConfigMaps, rather than the generated ones from this chart. |
 | service.domain | string | `nil` | If set, the base domain name to use for External DNS |
-| service.fullnode.enableMetricsPort | bool | `true` | Enable the metrics port on fullnodes |
+| service.fullnode.enableMetricsPort | bool | `false` | Enable the metrics port on fullnodes |
 | service.fullnode.enableRestApi | bool | `true` | Enable the REST API on fullnodes |
 | service.fullnode.external.type | string | `"LoadBalancer"` | The Kubernetes ServiceType to use for fullnodes' HAProxy |
 | service.fullnode.externalTrafficPolicy | string | `"Local"` | The externalTrafficPolicy for the fullnode service |
 | service.fullnode.internal.headless | bool | `false` |  |
 | service.fullnode.internal.type | string | `"ClusterIP"` | The Kubernetes ServiceType to use for fullnodes |
 | service.fullnode.loadBalancerSourceRanges | string | `nil` | If set and if the ServiceType is LoadBalancer, allow traffic to fullnodes from these CIDRs |
-| service.validator.enableMetricsPort | bool | `true` | Enable the metrics port on the validator |
+| service.validator.enableMetricsPort | bool | `false` | Enable the metrics port on the validator |
 | service.validator.enableRestApi | bool | `true` | Enable the REST API on the validator |
 | service.validator.external.type | string | `"LoadBalancer"` | The Kubernetes ServiceType to use for validator's HAProxy |
 | service.validator.externalTrafficPolicy | string | `"Local"` | The externalTrafficPolicy for the validator service |
@@ -83,7 +83,6 @@ Aptos blockchain node deployment
 | validator.image.tag | string | `nil` | Image tag to use for validator images. If set, overrides `imageTag` |
 | validator.name | string | `nil` | Internal: name of your validator for use in labels |
 | validator.nodeSelector | object | `{}` |  |
-| validator.remoteLogAddress | string | `nil` | Address for remote logging. See `logger` helm chart |
 | validator.resources.limits.cpu | float | `15.5` |  |
 | validator.resources.limits.memory | string | `"26Gi"` |  |
 | validator.resources.requests.cpu | int | `15` |  |
diff --git a/terraform/helm/aptos-node/files/haproxy.cfg b/terraform/helm/aptos-node/files/haproxy.cfg
index b44601844c7af..268cc783df810 100644
--- a/terraform/helm/aptos-node/files/haproxy.cfg
+++ b/terraform/helm/aptos-node/files/haproxy.cfg
@@ -144,6 +144,21 @@ backend validator-metrics
     default-server maxconn 16
     server {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-validator {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-validator:9101
 
+frontend validator-admin
+    mode http
+    option httplog
+    bind :9202
+    default_backend validator-admin
+
+    # Deny requests from blocked IPs
+    tcp-request connection reject if { src -n -f /usr/local/etc/haproxy/blocked.ips }
+    http-request add-header Forwarded "for=%ci"
+
+backend validator-admin
+    mode http
+    default-server maxconn 16
+    server {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-validator {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-validator:9102
+
 # Exposes the validator's own REST API
 {{- if $.Values.service.validator.enableRestApi }}
 frontend validator-api
@@ -235,6 +250,21 @@ backend {{ $config.name }}-metrics
     default-server maxconn 16
     server {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-{{ $config.name }} {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-{{ $config.name }}:9101
 
+frontend {{ $config.name }}-admin
+    mode http
+    option httplog
+    bind :{{ add 9203 $index }}
+    default_backend {{ $config.name }}-admin
+
+    # Deny requests from blocked IPs
+    tcp-request connection reject if { src -n -f /usr/local/etc/haproxy/blocked.ips }
+    http-request add-header Forwarded "for=%ci"
+
+backend {{ $config.name }}-admin
+    mode http
+    default-server maxconn 16
+    server {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-{{ $config.name }} {{ include "aptos-validator.fullname" $ }}-{{ $.Values.i }}-{{ $config.name }}:9102
+
 {{- end }}
 {{- end }}
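These stanzas mirror the existing metrics plumbing: HAProxy listens on 9202 for the validator admin frontend and 9203+N for each fullnode group, forwarding to the node's AdminService on port 9102, with the same blocked-IP filter applied. A sketch for poking the new frontend from a workstation; the deployment name and URL path are assumptions:

    kubectl port-forward deploy/aptos-node-0-haproxy 9202:9202 &
    curl -v http://localhost:9202/   # answered by the validator AdminService via HAProxy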
 
diff --git a/terraform/helm/aptos-node/templates/fullnode.yaml b/terraform/helm/aptos-node/templates/fullnode.yaml
index 717ae39d561d1..3e917059169e1 100644
--- a/terraform/helm/aptos-node/templates/fullnode.yaml
+++ b/terraform/helm/aptos-node/templates/fullnode.yaml
@@ -24,9 +24,33 @@ spec:
     port: 6182
   - name: metrics
     port: 9101
+  - name: admin
+    port: 9102
   - name: api
     port: 8080
 
+{{- if $.Values.migrations.enable_vfn_explicit_pvc }}
+---
+
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "aptos-validator.fullname" $ }}-{{$i}}-{{ .name }}-e{{ $.Values.chain.era }}
+  labels:
+    {{- include "aptos-validator.labels" $ | nindent 4 }}
+spec:
+  accessModes:
+  - ReadWriteOnce
+  storageClassName: {{ $.Values.fullnode.storage.class }}
+  resources:
+    requests:
+      storage: {{ $.Values.fullnode.storage.size }}
+  {{- if $.Values.fullnode.storage.labels }}
+  selector:
+    matchLabels:
+      {{- toYaml $.Values.fullnode.storage.labels | nindent 6}}
+  {{- end }}
+{{- end }}
 ---
 {{ $fullnode_statefulset := lookup "apps/v1" "StatefulSet" $.Release.Namespace (printf "%s-%d-%s-e%s" (include "aptos-validator.fullname" $) $i .name (toYaml $.Values.chain.era)) }}
 apiVersion: apps/v1
@@ -49,6 +73,7 @@ spec:
       app.kubernetes.io/name: fullnode
       app.kubernetes.io/instance: fullnode-{{$i}}
       group: {{ .name }}
+  {{- if not $.Values.migrations.enable_vfn_explicit_pvc }}
   volumeClaimTemplates:
   - metadata:
       name: fn
@@ -64,6 +89,7 @@ spec:
         matchLabels:
           {{- toYaml $.Values.fullnode.storage.labels | nindent 10}}
       {{- end }}
+    {{- end }}
   template:
     metadata:
       labels:
@@ -86,7 +112,18 @@ spec:
         image: {{ $.Values.validator.image.repo }}:{{ $.Values.validator.image.tag | default $.Values.imageTag }}
         {{- end }}
         imagePullPolicy: {{ $.Values.validator.image.pullPolicy }}
-        command: ["/usr/local/bin/aptos-node", "-f", "/opt/aptos/etc/fullnode.yaml"]
+        command:
+          - /bin/bash
+          - -c
+          - |-
+            set -euxo pipefail
+            if [[ -f /opt/aptos/data/wipe-db ]]; then
+              # Wipe DB
+              rm -rf /opt/aptos/data/db
+              # Delete the command file so we only wipe the DB once
+              rm -vf /opt/aptos/data/wipe-db
+            fi
+            /usr/local/bin/aptos-node -f /opt/aptos/etc/fullnode.yaml
       {{- with $.Values.fullnode }}
         resources:
           {{- toYaml .resources | nindent 10 }}
@@ -113,7 +150,11 @@ spec:
           mountPath: /opt/aptos/etc
         - name: genesis-config
           mountPath: /opt/aptos/genesis
+        {{- if $.Values.migrations.enable_vfn_explicit_pvc }}
+        - name: aptos-data
+        {{- else }}
         - name: fn
+        {{- end }}
           mountPath: /opt/aptos/data
         ports:
         - containerPort: 6181
@@ -122,6 +163,8 @@ spec:
           name: api
         - containerPort: 9101
           name: metrics
+        - containerPort: 9102
+          name: admin
         securityContext:
           {{- if $.Values.enablePrivilegedMode }}
           runAsUser: 0
@@ -165,6 +208,11 @@ spec:
       - name: genesis-config
         secret:
           secretName: {{ include "aptos-validator.fullname" $ }}-{{$i}}-genesis-e{{ $.Values.chain.era }}
+      {{- if $.Values.migrations.enable_vfn_explicit_pvc }}
+      - name: aptos-data
+        persistentVolumeClaim:
+          claimName: {{ include "aptos-validator.fullname" $ }}-{{$i}}-{{ .name }}-e{{ $.Values.chain.era }}
+      {{- end }}
       serviceAccountName: {{ include "aptos-validator.fullname" $ }}-fullnode
       {{- if $.Values.imagePullSecret }}
       imagePullSecrets:
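The wrapper script makes a DB wipe a declarative, one-shot operation: drop a marker file into the data volume and restart the pod. A sketch of the operator workflow; the pod name is an assumption based on the chart's naming scheme:

    # request a one-time DB wipe on the next restart
    kubectl exec aptos-node-0-fullnode-e1-0 -- touch /opt/aptos/data/wipe-db
    # on restart the startup script removes /opt/aptos/data/db and the marker itself
    kubectl delete pod aptos-node-0-fullnode-e1-0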
diff --git a/terraform/helm/aptos-node/templates/haproxy.yaml b/terraform/helm/aptos-node/templates/haproxy.yaml
index ff93bee17a356..ef2ce118cb7e2 100644
--- a/terraform/helm/aptos-node/templates/haproxy.yaml
+++ b/terraform/helm/aptos-node/templates/haproxy.yaml
@@ -26,7 +26,7 @@ metadata:
     service.beta.kubernetes.io/aws-load-balancer-type: nlb
     service.beta.kubernetes.io/oci-load-balancer-security-list-management-mode: All
     {{- if $.Values.service.domain }}
-    external-dns.alpha.kubernetes.io/hostname: val{{$i}}.{{ $.Values.service.domain }}
+    external-dns.alpha.kubernetes.io/hostname: vn{{$i}}.{{ $.Values.service.domain }},val{{$i}}.{{ $.Values.service.domain }}
     {{- end }}
 spec:
   selector:
@@ -41,6 +41,11 @@ spec:
     port: 9101
     targetPort: 9102
   {{- end }}
+  {{- if $.Values.service.validator.enableAdminPort }}
+  - name: admin
+    port: 9102
+    targetPort: 9202
+  {{- end }}
   {{- if $.Values.service.validator.enableRestApi }}
   - name: api
     port: 80
@@ -69,7 +74,7 @@ metadata:
     service.beta.kubernetes.io/aws-load-balancer-type: nlb
     service.beta.kubernetes.io/oci-load-balancer-security-list-management-mode: All
     {{- if $.Values.service.domain }}
-    external-dns.alpha.kubernetes.io/hostname: {{ $config.name }}{{$i}}.{{ $.Values.service.domain }}
+    external-dns.alpha.kubernetes.io/hostname: {{ $config.dns_name }}{{$i}}.{{ $.Values.service.domain }},{{ $config.name }}{{$i}}.{{ $.Values.service.domain }}
     {{- end }}
 spec:
   selector:
@@ -85,6 +90,11 @@ spec:
     port: 9101
     targetPort: {{ add 9103 $index }}
   {{- end }}
+  {{- if $.Values.service.fullnode.enableAdminPort }}
+  - name: admin
+    port: 9102
+    targetPort: {{ add 9203 $index }}
+  {{- end }}
   {{- if $.Values.service.fullnode.enableRestApi }}
   - name: api
     port: 80
@@ -146,14 +156,24 @@ spec:
         imagePullPolicy: {{ .image.pullPolicy }}
         resources:
           {{- toYaml .resources | nindent 10 }}
+        # These ports are exposed by HAProxy. See haproxy.cfg for more details
+        # Fullnode ports are dynamically assigned based on the number of fullnode groups
         ports:
-        - containerPort: 6180
-        - containerPort: 6182
+        # Aptosnet
+        - containerPort: 6180 # validator
+        - containerPort: 6182 # fullnode
+        # Fullnode API
         - containerPort: 8080
+        # Validator API
         - containerPort: 8180
+        # HAProxy metrics port
         - containerPort: 9101
+        # Node ports
         - containerPort: 9102
         - containerPort: 9103
+        # AdminService ports
+        - containerPort: 9202 # validator admin
+        - containerPort: 9203 # fullnode admin
         volumeMounts:
         - name: haproxy-config
           mountPath: /usr/local/etc/haproxy
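Since the admin ports are gated behind new values, rendering the chart locally is the cheapest way to confirm the Service wiring before deploying; a sketch, assuming the chart is rendered from the repo checkout:

    helm template terraform/helm/aptos-node \
      --set service.validator.enableAdminPort=true \
      --set service.fullnode.enableAdminPort=true \
      | grep -n -B1 -A2 'name: admin'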
diff --git a/terraform/helm/aptos-node/templates/validator.yaml b/terraform/helm/aptos-node/templates/validator.yaml
index 9b6514e81c46f..7d42270632819 100644
--- a/terraform/helm/aptos-node/templates/validator.yaml
+++ b/terraform/helm/aptos-node/templates/validator.yaml
@@ -23,6 +23,8 @@ spec:
     port: 6181
   - name: metrics
     port: 9101
+  - name: admin
+    port: 9102
   {{- if $.Values.service.validator.enableRestApi }}
   - name: api
     port: 8080
@@ -91,7 +93,18 @@ spec:
         {{- end }}
       {{- with $.Values.validator }}
         imagePullPolicy: {{ .image.pullPolicy }}
-        command: ["/usr/local/bin/aptos-node", "-f", "/opt/aptos/etc/validator.yaml"]
+        command:
+          - /bin/bash
+          - -c
+          - |-
+            set -euxo pipefail
+            if [[ -f /opt/aptos/data/wipe-db ]]; then
+              # Wipe DB
+              rm -rf /opt/aptos/data/db
+              # Delete the command file so we only wipe the DB once
+              rm -vf /opt/aptos/data/wipe-db
+            fi
+            /usr/local/bin/aptos-node -f /opt/aptos/etc/validator.yaml
         resources:
           {{- toYaml .resources | nindent 10 }}
         env:
@@ -123,6 +136,8 @@ spec:
           name: api
         - containerPort: 9101
           name: metrics
+        - containerPort: 9102
+          name: admin
         securityContext:
           {{- if $.Values.enablePrivilegedMode }}
           runAsUser: 0
diff --git a/terraform/helm/aptos-node/values.yaml b/terraform/helm/aptos-node/values.yaml
index 11e92886690d5..5ae8433a9e977 100644
--- a/terraform/helm/aptos-node/values.yaml
+++ b/terraform/helm/aptos-node/values.yaml
@@ -39,11 +39,11 @@ haproxy:
     pullPolicy: IfNotPresent
   resources:
     limits:
-      cpu: 4
-      memory: 8Gi
+      cpu: 3
+      memory: 6Gi
     requests:
-      cpu: 4
-      memory: 8Gi
+      cpu: 3
+      memory: 6Gi
   nodeSelector: {}
   tolerations: []
   affinity: {}
@@ -74,11 +74,11 @@ validator:
     pullPolicy: IfNotPresent
   resources:
     limits:
-      cpu: 15.5
-      memory: 26Gi
+      cpu: 14
+      memory: 56Gi
     requests:
-      cpu: 15
-      memory: 26Gi
+      cpu: 14
+      memory: 56Gi
   storage:
     # -- Kubernetes storage class to use for validator persistent storage
     class:
@@ -86,8 +86,6 @@ validator:
     size: 2048Gi
   # -- Log level for the validator
   rust_log: info
-  # -- Address for remote logging. See `logger` helm chart
-  remoteLogAddress:
   # -- Flag to force enable telemetry service (useful for forge tests)
   force_enable_telemetry: false
   nodeSelector: {}
@@ -103,14 +101,15 @@ fullnode:
   # -- Specify fullnode groups by `name` and number of `replicas`
   groups:
     - name: fullnode
+      dns_name: vfn
       replicas: 1
   resources:
     limits:
-      cpu: 15.5
-      memory: 26Gi
+      cpu: 14
+      memory: 56Gi
     requests:
-      cpu: 15
-      memory: 26Gi
+      cpu: 14
+      memory: 56Gi
   storage:
     # -- Kubernetes storage class to use for fullnode persistent storage
     class:
@@ -131,6 +130,7 @@ fullnode:
     full_node_networks:
       # The first item in the array `full_node_networks` must always refer to the public fullnode network
       - network_id: "public"
+        seeds: {}
 
 service:
   # -- If set, the base domain name to use for External DNS
@@ -151,6 +151,8 @@ service:
     enableRestApi: true
     # -- Enable the metrics port on the validator
     enableMetricsPort: false
+    # -- Enable the admin port on the validator
+    enableAdminPort: false
   fullnode:
     external:
       # -- The Kubernetes ServiceType to use for fullnodes' HAProxy
@@ -167,6 +169,8 @@ service:
     enableRestApi: true
     # -- Enable the metrics port on fullnodes
     enableMetricsPort: false
+    # -- Enable the admin port on fullnodes
+    enableAdminPort: false
 
 serviceAccount:
   # -- Specifies whether a service account should be created
@@ -182,3 +186,9 @@ enablePrivilegedMode: false
 
 # Additional labels
 labels:
+
+# Infra migrations
+migrations:
+  # -- Explicitly define a PVC for VFNs.
+  # See templates/fullnode.yaml
+  enable_vfn_explicit_pvc: false
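The migration flag is intended to be flipped per cluster. Because volumeClaimTemplates on a StatefulSet are immutable, switching a VFN from the `fn` template to the explicit PVC generally requires recreating the StatefulSet object; a hedged sketch of one way to roll it out (release and object names are assumptions):

    # drop only the StatefulSet object, orphaning pods and PVCs in place
    kubectl delete statefulset aptos-node-0-fullnode-e1 --cascade=orphan
    helm upgrade aptos-node terraform/helm/aptos-node \
      --set migrations.enable_vfn_explicit_pvc=true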
diff --git a/terraform/helm/autoscaling/templates/dns.yaml b/terraform/helm/autoscaling/templates/dns.yaml
index 13da61e39c714..eb4a0fb5a8b1c 100644
--- a/terraform/helm/autoscaling/templates/dns.yaml
+++ b/terraform/helm/autoscaling/templates/dns.yaml
@@ -1,4 +1,4 @@
-apiVersion: autoscaling/v2beta2
+apiVersion: autoscaling/v2
 kind: HorizontalPodAutoscaler
 metadata:
   name: hpa-coredns
diff --git a/terraform/helm/autoscaling/values.yaml b/terraform/helm/autoscaling/values.yaml
index 89c2175891dd3..bc2b6cd32c667 100644
--- a/terraform/helm/autoscaling/values.yaml
+++ b/terraform/helm/autoscaling/values.yaml
@@ -16,8 +16,8 @@ autoscaler:
   # How long after scale up that scale down evaluation resumes
   scaleDownDelayAfterAdd: 5m
   image:
-    repo: k8s.gcr.io/autoscaling/cluster-autoscaler
-    tag: v1.25.2
+    repo: registry.k8s.io/autoscaling/cluster-autoscaler
+    tag: v1.25.1
   resources:
     requests:
       cpu: 1
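k8s.gcr.io is frozen upstream in favor of registry.k8s.io, and autoscaling/v2beta2 was removed in Kubernetes 1.26, so both edits track deprecations (note the image tag also steps back to v1.25.1). A quick sanity check against a target cluster (sketch):

    kubectl api-versions | grep '^autoscaling/'   # should include autoscaling/v2
    docker pull registry.k8s.io/autoscaling/cluster-autoscaler:v1.25.1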
diff --git a/terraform/helm/fullnode/files/backup/gcs.yaml b/terraform/helm/fullnode/files/backup/gcs.yaml
index b4a41de012e3f..561a00384c64a 100644
--- a/terraform/helm/fullnode/files/backup/gcs.yaml
+++ b/terraform/helm/fullnode/files/backup/gcs.yaml
@@ -5,12 +5,12 @@ commands:
     FILE_HANDLE="$BACKUP_HANDLE/$FILE_NAME"
     echo "$FILE_HANDLE"
     exec 1>&-  # close stdout
-    gzip -c | gcloud storage cp - "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" > /dev/null
-  open_for_read: 'gcloud storage cp "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" - | gzip -cd'
+    gzip -c | gsutil -q cp - "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" > /dev/null
+  open_for_read: 'gsutil -q cp "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" - | gzip -cd'
   save_metadata_line: | 
     FILE_HANDLE="metadata/$FILE_NAME"
     echo "$FILE_HANDLE"
     exec 1>&-
-    gzip -c | gcloud storage cp - "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" > /dev/null
-  list_metadata_files: '(gcloud storage ls gs://$BUCKET/$SUB_DIR/metadata/ ||:) | sed -ne "s#gs://.*/metadata/#metadata/#p"'
-  backup_metadata_file: 'gcloud storage mv gs://$BUCKET/$SUB_DIR/metadata/$FILE_NAME gs://$BUCKET/$SUB_DIR/metadata_backup/$FILE_NAME'
+    gzip -c | gsutil -q cp - "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" > /dev/null
+  list_metadata_files: '(gsutil -q ls gs://$BUCKET/$SUB_DIR/metadata/ ||:) | sed -ne "s#gs://.*/metadata/#metadata/#p"'
+  backup_metadata_file: 'gsutil mv gs://$BUCKET/$SUB_DIR/metadata/$FILE_NAME gs://$BUCKET/$SUB_DIR/metadata_backup/$FILE_NAME'
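The backup CLI stitches these snippets together with $BUCKET, $SUB_DIR, and $FILE_HANDLE filled in, so the gsutil round trip can be smoke-tested in isolation against a scratch bucket you own (sketch):

    BUCKET=my-scratch-bucket SUB_DIR=smoke FILE_HANDLE=hello.gz
    echo hello | gzip -c | gsutil -q cp - "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE"
    gsutil -q cp "gs://$BUCKET/$SUB_DIR/$FILE_HANDLE" - | gzip -cd   # prints: hello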
diff --git a/terraform/helm/fullnode/templates/fullnode.yaml b/terraform/helm/fullnode/templates/fullnode.yaml
index 3f0d08f22f90c..93e4abeb71d9a 100644
--- a/terraform/helm/fullnode/templates/fullnode.yaml
+++ b/terraform/helm/fullnode/templates/fullnode.yaml
@@ -25,6 +25,80 @@ spec:
         prometheus.io/port: "9101"
     spec:
       terminationGracePeriodSeconds: 0
+      initContainers:
+      {{- with .Values.restore }}
+      {{- if .enabled }}
+      - name: restore
+        image: {{ .image.repo }}:{{ .image.tag | default $.Values.imageTag }}
+        imagePullPolicy: {{ .image.pullPolicy }}
+        resources:
+          {{- toYaml .resources | nindent 10 }}
+        command:
+        - /bin/bash
+        - -c
+        - |-
+          set -euxo pipefail
+          # cleanup aptosdb
+          if [ -f /opt/aptos/data/restore-failed ] || \
+              [ ! -f /opt/aptos/data/restore-uid ] || \
+              [ "$(cat /opt/aptos/data/restore-uid)" != "{{ .config.restore_epoch }}" ]; then
+            rm -rf /opt/aptos/data/db /opt/aptos/data/restore-{complete,failed}
+            echo "{{ .config.restore_epoch }}" > /opt/aptos/data/restore-uid
+          fi
+
+          [ -f /opt/aptos/data/restore-complete ] && exit 0
+          # start restore process
+          /usr/local/bin/aptos-debugger aptos-db restore bootstrap-db \
+            --concurrent-downloads {{ .config.concurrent_downloads }} \
+            {{ range .config.trusted_waypoints }} --trust-waypoint {{ . }}{{ end }} \
+            --target-db-dir /opt/aptos/data/db \
+            --metadata-cache-dir /opt/aptos/data/aptos-restore-metadata \
+            --ledger-history-start-version {{ .config.start_version }} \
+            {{ if .config.target_version }}--target-version {{ .config.target_version }} {{ end }}\
+            --command-adapter-config /opt/aptos/etc/{{ .config.location }}.yaml || RESTORE_RC=$?
+
+          if [ "${RESTORE_RC:-0}" -gt 0 ]; then
+            # mark restore as failed
+            touch /opt/aptos/data/restore-failed
+            exit 1
+          else
+            # success, remove the marker
+            rm -f /opt/aptos/data/restore-failed
+            touch /opt/aptos/data/restore-complete
+          fi
+        env:
+        - name: RUST_LOG
+          value: "debug"
+        - name: RUST_BACKTRACE
+          value: "full"
+        {{- if (include "backup.pushMetricsEndpoint" $) }}
+        - name: KUBERNETES_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: PUSH_METRICS_ENDPOINT
+          value: "{{- include "backup.pushMetricsEndpoint" $ }}/api/v1/import/prometheus?extra_label=role={{- .jobName | default "db_restore" }}&extra_label=kubernetes_pod_name=$(KUBERNETES_POD_NAME)"
+        {{- end }}
+        - name: CONTROLLER_UID
+          valueFrom:
+            fieldRef:
+              fieldPath: "metadata.labels['controller-uid']"
+        {{- include "backup.backupEnvironment" (dict "config" .config "era" (default $.Values.chain.era .config.restore_era)) | nindent 8 }}
+        volumeMounts:
+        - name: backup-config
+          mountPath: /opt/aptos/etc
+        - name: aptos-data
+          mountPath: /opt/aptos/data
+        - name: tmp
+          mountPath: /tmp
+        securityContext:
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - ALL
+      {{- end }} # if .enabled
+      {{- end }} # with .Values.restore
       containers:
       - name: fullnode
         {{- if and $fullnode_statefulset (not $.Values.manageImages) }} # if the statefulset already exists and we do not want helm to simply overwrite the image, use the existing image
@@ -34,10 +108,16 @@ spec:
         {{- end }}
         imagePullPolicy: {{ .Values.image.pullPolicy }}
         command:
-        - /bin/sh
+        - /bin/bash
         - -c
         - |-
-          set -e
+          set -euxo pipefail
+          if [[ -f /opt/aptos/data/wipe-db ]]; then
+            # Wipe DB
+            rm -rf /opt/aptos/data/db
+            # Delete the command file so we only wipe the DB once
+            rm -vf /opt/aptos/data/wipe-db
+          fi
           {{- if and (not .Values.chain.genesisConfigmap) (not .Values.chain.genesisSecret) }}
           # Download genesis and waypoint if necessary
           curl -o /opt/aptos/genesis/waypoint.txt {{ (get .Values.aptos_chains .Values.chain.name).waypoint_txt_url }}
@@ -73,6 +153,8 @@ spec:
           name: api
         - containerPort: 9101
           name: metrics
+        - containerPort: 9102
+          name: admin
         # NOTE: these require the API to be enabled, which is not always the case
         livenessProbe: # restart the pod if the REST API is ever unresponsive
           httpGet:
@@ -123,6 +205,9 @@ spec:
       - name: aptos-data
         persistentVolumeClaim:
           claimName: {{ include "aptos-fullnode.fullname" . }}-e{{ .Values.chain.era }}
+      - name: backup-config
+        configMap:
+          name: {{ include "backup.fullname" . }}-backup
       - name: tmp
         emptyDir: {}
       serviceAccountName: {{ include "aptos-fullnode.serviceAccountName" . }}
diff --git a/terraform/helm/fullnode/templates/restore.yaml b/terraform/helm/fullnode/templates/restore.yaml
deleted file mode 100644
index 575167ac3530c..0000000000000
--- a/terraform/helm/fullnode/templates/restore.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-{{ $restore_job_suffix := randAlpha 4 | lower }}
-{{ $backup_restore_job := lookup "batch/v1" "Job" $.Release.Namespace (print (include "backup.fullname" .) "-restore-" $restore_job_suffix) }}
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: {{ include "backup.fullname" . }}-restore-{{ $restore_job_suffix }}
-  labels:
-    {{- include "backup.labels" . | nindent 4 }}
-    app.kubernetes.io/name: restore
-spec:
-  completions: 0
-  template:
-    metadata:
-      labels:
-        {{- include "backup.selectorLabels" . | nindent 8 }}
-        app.kubernetes.io/name: restore
-      annotations:
-        seccomp.security.alpha.kubernetes.io/pod: runtime/default
-    spec:
-      restartPolicy: Never
-      terminationGracePeriodSeconds: 0
-      {{- with .Values.restore }}
-      containers:
-      - name: restore
-        {{- if and $backup_restore_job (not $.Values.manageImages) }} # if the statefulset already exists and we do not want helm to simply overwrite the image, use the existing image
-        image: {{ (first $backup_restore_job.spec.template.spec.containers).image }}
-        {{- else }}
-        image: {{ .image.repo }}:{{ .image.tag | default $.Values.imageTag }}
-        {{- end }}
-        imagePullPolicy: {{ .image.pullPolicy }}
-        resources:
-          {{- toYaml .resources | nindent 10 }}
-        command:
-        - sh
-        - -c
-        - |-
-          set -ex
-          # cleanup aptosdb
-          if [ ! -f /opt/aptos/data/restore-uid ] || [ "$(cat /opt/aptos/data/restore-uid)" != "$CONTROLLER_UID" ]; then
-              rm -rf /opt/aptos/data/db
-              echo "$CONTROLLER_UID" > /opt/aptos/data/restore-uid
-          fi
-          # start restore process
-          /usr/local/bin/aptos-debugger aptos-db restore bootstrap-db --concurrent-downloads {{ .config.concurrent_downloads }}{{ range .config.trusted_waypoints }} --trust-waypoint {{ . }}{{ end }} --target-db-dir /opt/aptos/data/db --metadata-cache-dir /tmp/aptos-restore-metadata --command-adapter-config /opt/aptos/etc/{{ .config.location }}.yaml
-        env:
-        - name: RUST_LOG
-          value: "debug"
-        - name: RUST_BACKTRACE
-          value: "1"
-        {{- if (include "backup.pushMetricsEndpoint" $) }}
-        - name: KUBERNETES_POD_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: metadata.name
-        - name: PUSH_METRICS_ENDPOINT
-          value: "{{- include "backup.pushMetricsEndpoint" $ }}/api/v1/import/prometheus?extra_label=role={{- .jobName | default "db_restore" }}&extra_label=kubernetes_pod_name=$(KUBERNETES_POD_NAME)"
-        {{- end }}
-        - name: CONTROLLER_UID
-          valueFrom:
-            fieldRef:
-              fieldPath: "metadata.labels['controller-uid']"
-        {{- include "backup.backupEnvironment" (dict "config" .config "era" (default $.Values.chain.era .config.restore_era)) | nindent 8 }}
-        volumeMounts:
-        - name: backup-config
-          mountPath: /opt/aptos/etc
-        - name: aptos-data
-          mountPath: /opt/aptos/data
-        - name: tmp
-          mountPath: /tmp
-        securityContext:
-          readOnlyRootFilesystem: true
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-      securityContext:
-        runAsNonRoot: true
-        runAsUser: 6180
-        runAsGroup: 6180
-        fsGroup: 6180
-      {{- with .nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- end }}
-      volumes:
-      - name: backup-config
-        configMap:
-          name: {{ include "backup.fullname" . }}-backup
-      - name: tmp
-        emptyDir: {}
-      - name: aptos-data
-        persistentVolumeClaim:
-          claimName: {{ include "backup.persistentVolumeClaim" . }}
-      serviceAccountName: {{ include "backup.serviceAccount" . }}
-      {{- if .Values.imagePullSecret }}
-      imagePullSecrets:
-      - name: {{.Values.imagePullSecret}}
-      {{- end }}
diff --git a/terraform/helm/fullnode/templates/service.yaml b/terraform/helm/fullnode/templates/service.yaml
index 24ac09886a734..42cbba29fa79a 100644
--- a/terraform/helm/fullnode/templates/service.yaml
+++ b/terraform/helm/fullnode/templates/service.yaml
@@ -18,6 +18,16 @@ spec:
     port: 80
     targetPort: 8080
   {{- end }}
+  {{- if .Values.service.exposeMetrics }}
+  - name: metrics
+    port: 9101
+    targetPort: 9101
+  {{- end }}
+  {{- if .Values.service.exposeAdmin }}
+  - name: admin
+    port: 9102
+    targetPort: 9102
+  {{- end }}
   - name: aptosnet
     port: 6182
   {{- with .Values.service }}
diff --git a/terraform/helm/fullnode/values.yaml b/terraform/helm/fullnode/values.yaml
index 59619c62b85d9..86f0f487f1d7e 100644
--- a/terraform/helm/fullnode/values.yaml
+++ b/terraform/helm/fullnode/values.yaml
@@ -32,6 +32,7 @@ fullnode:
     full_node_networks:
       # The first item in the array `full_node_networks` must always refer to the public fullnode network
       - network_id: "public"
+        seeds: {}
         identity: {}
         inbound_rate_limit_config:
         outbound_rate_limit_config:
@@ -50,10 +51,10 @@ image:
 resources:
   limits:
     cpu: 14
-    memory: 26Gi
+    memory: 56Gi
   requests:
     cpu: 14
-    memory: 26Gi
+    memory: 56Gi
 
 nodeSelector: {}
 tolerations: []
@@ -70,6 +71,10 @@ service:
   type: ClusterIP
   # -- Whether to expose the node REST API
   exposeApi: true
+  # -- Whether to expose the metrics port on fullnodes
+  exposeMetrics: false
+  # -- Whether to expose the admin port on fullnodes
+  exposeAdmin: false
   # -- The externalTrafficPolicy for the fullnode service
   externalTrafficPolicy:
   # -- If set and if the ServiceType is LoadBalancer, allow traffic to fullnode from these CIDRs
@@ -106,11 +111,11 @@ backup:
     pullPolicy: IfNotPresent
   resources:
     limits:
-      cpu: 1
-      memory: 1Gi
+      cpu: 4
+      memory: 8Gi
     requests:
-      cpu: 1
-      memory: 1Gi
+      cpu: 4
+      memory: 8Gi
   nodeSelector: {}
   tolerations: []
   affinity: {}
@@ -137,11 +142,11 @@ backup_verify:
   schedule: "@daily"
   resources:
     limits:
-      cpu: 4
-      memory: 8Gi
+      cpu: 8
+      memory: 32Gi
     requests:
       cpu: 4
-      memory: 8Gi
+      memory: 16Gi
   nodeSelector: {}
   tolerations: []
   affinity: {}
@@ -151,11 +156,11 @@ backup_compaction:
   schedule: "@daily"
   resources:
     limits:
-      cpu: 1
-      memory: 1Gi
+      cpu: 8
+      memory: 32Gi
     requests:
-      cpu: 1
-      memory: 1Gi
+      cpu: 4
+      memory: 16Gi
   nodeSelector: {}
   tolerations: []
   affinity: {}
@@ -170,14 +175,15 @@ restore:
     pullPolicy: IfNotPresent
   resources:
     limits:
-      cpu: 6
-      memory: 15Gi
+      cpu: 16
+      memory: 120Gi
     requests:
-      cpu: 6
-      memory: 15Gi
+      cpu: 16
+      memory: 120Gi
   nodeSelector: {}
   tolerations: []
   affinity: {}
+  enabled: false
   config:
     # -- Which of the below backup configurations to use
     location:
@@ -192,6 +198,12 @@ restore:
     # -- List of trusted waypoints for restore
     trusted_waypoints: []
     # -- Number of concurrent downloads for restore
-    concurrent_downloads: 2
+    concurrent_downloads: 16
     # -- If set, specifies a different era to restore other than the default era set in chain.era
     restore_era:
+    # -- Change this value to trigger a restore from scratch, wiping the DB.
+    restore_epoch: 0
+    # -- Version to start the restored ledger history from. 0 starts from genesis.
+    start_version: 0
+    # -- Version to restore up to. Leave unset to restore to the latest version.
+    target_version:
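With restore folded into an initContainer, re-running a restore is driven entirely by values: any change to restore_epoch invalidates the stored restore-uid marker, so the init script wipes the DB and restores again. A sketch (release name and backup location are assumptions):

    helm upgrade pfn terraform/helm/fullnode \
      --set restore.enabled=true \
      --set restore.config.location=gcs \
      --set restore.config.restore_epoch=1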
diff --git a/terraform/helm/genesis/files/genesis.sh b/terraform/helm/genesis/files/genesis.sh
index 7fb045f02bd3a..8e7d150f3b71f 100644
--- a/terraform/helm/genesis/files/genesis.sh
+++ b/terraform/helm/genesis/files/genesis.sh
@@ -32,26 +32,26 @@ echo $MULTICLUSTER_DOMAIN_SUFFIXES_STRING
 IFS=',' read -r -a MULTICLUSTER_DOMAIN_SUFFIXES <<< "${MULTICLUSTER_DOMAIN_SUFFIXES_STRING}"
 
 if ! [[ $(declare -p MULTICLUSTER_DOMAIN_SUFFIXES) =~ "declare -a" ]]; then
-    echo "MULTICLUSTER_DOMAIN_SUFFIXES must be an array"
-    exit 1
+  echo "MULTICLUSTER_DOMAIN_SUFFIXES must be an array"
+  exit 1
 fi
 
 if [[ "${ENABLE_MULTICLUSTER_DOMAIN_SUFFIX}" == "true" ]]; then
-    if [ -z ${NAMESPACE} ]; then
-        echo "NAMESPACE must be set"
-        exit 1
-    fi
+  if [ -z ${NAMESPACE} ]; then
+    echo "NAMESPACE must be set"
+    exit 1
+  fi
 fi
 
 if [ -z ${ERA} ] || [ -z ${NUM_VALIDATORS} ]; then
-    echo "ERA (${ERA:-null}) and NUM_VALIDATORS (${NUM_VALIDATORS:-null}) must be set"
-    exit 1
+  echo "ERA (${ERA:-null}) and NUM_VALIDATORS (${NUM_VALIDATORS:-null}) must be set"
+  exit 1
 fi
 
-if [ "${FULLNODE_ENABLE_ONCHAIN_DISCOVERY}" = "true" ] && [ -z ${DOMAIN} ] ||
-    [ "${VALIDATOR_ENABLE_ONCHAIN_DISCOVERY}" = "true" ] && [ -z ${DOMAIN} ]; then
-    echo "If FULLNODE_ENABLE_ONCHAIN_DISCOVERY or VALIDATOR_ENABLE_ONCHAIN_DISCOVERY is set, DOMAIN must be set"
-    exit 1
+if [ "${FULLNODE_ENABLE_ONCHAIN_DISCOVERY}" = "true" ] && [ -z ${DOMAIN} ] \
+  || [ "${VALIDATOR_ENABLE_ONCHAIN_DISCOVERY}" = "true" ] && [ -z ${DOMAIN} ]; then
+  echo "If FULLNODE_ENABLE_ONCHAIN_DISCOVERY or VALIDATOR_ENABLE_ONCHAIN_DISCOVERY is set, DOMAIN must be set"
+  exit 1
 fi
 
 echo "NUM_VALIDATORS=${NUM_VALIDATORS}"
@@ -68,53 +68,53 @@ echo "RANDOM_SEED=${RANDOM_SEED}"
 RANDOM_SEED_IN_DECIMAL=$(printf "%d" 0x${RANDOM_SEED})
 
 # generate all validator configurations
-for i in $(seq 0 $(($NUM_VALIDATORS-1))); do
-    username="${USERNAME_PREFIX}-${i}"
-    user_dir="${WORKSPACE}/${username}"
-
-    mkdir $user_dir
-
-    if [[ "${FULLNODE_ENABLE_ONCHAIN_DISCOVERY}" = "true" ]]; then
-        fullnode_host="fullnode${i}.${DOMAIN}:6182"
-    elif [[ "${ENABLE_MULTICLUSTER_DOMAIN_SUFFIX}" = "true" ]]; then
-        index=$(($i % ${#MULTICLUSTER_DOMAIN_SUFFIXES[@]}))
-        cluster=${MULTICLUSTER_DOMAIN_SUFFIXES[${index}]}
-        fullnode_host="${username}-${FULLNODE_INTERNAL_HOST_SUFFIX}.${NAMESPACE}.svc.${cluster}:6182"
-    else 
-        fullnode_host="${username}-${FULLNODE_INTERNAL_HOST_SUFFIX}:6182"
-    fi
-
-    if [[ "${VALIDATOR_ENABLE_ONCHAIN_DISCOVERY}" = "true" ]]; then
-        validator_host="val${i}.${DOMAIN}:6180"
-    elif [[ "${ENABLE_MULTICLUSTER_DOMAIN_SUFFIX}" = "true" ]]; then
-        index=$(($i % ${#MULTICLUSTER_DOMAIN_SUFFIXES[@]}))
-        cluster=${MULTICLUSTER_DOMAIN_SUFFIXES[${index}]}
-        validator_host="${username}-${VALIDATOR_INTERNAL_HOST_SUFFIX}.${NAMESPACE}.svc.${cluster}:6180"
-    else
-        validator_host="${username}-${VALIDATOR_INTERNAL_HOST_SUFFIX}:6180"
-    fi
-
-    if [ $i -lt $NUM_VALIDATORS_WITH_LARGER_STAKE ]; then
-        CUR_STAKE_AMOUNT=$LARGER_STAKE_AMOUNT
-    else
-        CUR_STAKE_AMOUNT=$STAKE_AMOUNT
-    fi
-
-    echo "CUR_STAKE_AMOUNT=${CUR_STAKE_AMOUNT} for ${i} validator"
-
-    if [[ -z "${RANDOM_SEED}" ]]; then
-      aptos genesis generate-keys --output-dir $user_dir
-    else
-      seed=$(printf "%064x" "$((${RANDOM_SEED_IN_DECIMAL}+i))")
-      echo "seed=$seed for ${i}th validator"
-      aptos genesis generate-keys --random-seed $seed --output-dir $user_dir
-    fi
-
-    aptos genesis set-validator-configuration --owner-public-identity-file $user_dir/public-keys.yaml --local-repository-dir $WORKSPACE \
-        --username $username \
-        --validator-host $validator_host \
-        --full-node-host $fullnode_host \
-        --stake-amount $CUR_STAKE_AMOUNT
+for i in $(seq 0 $(($NUM_VALIDATORS - 1))); do
+  username="${USERNAME_PREFIX}-${i}"
+  user_dir="${WORKSPACE}/${username}"
+
+  mkdir $user_dir
+
+  if [[ "${FULLNODE_ENABLE_ONCHAIN_DISCOVERY}" = "true" ]]; then
+    fullnode_host="fullnode${i}.${DOMAIN}:6182"
+  elif [[ "${ENABLE_MULTICLUSTER_DOMAIN_SUFFIX}" = "true" ]]; then
+    index=$(($i % ${#MULTICLUSTER_DOMAIN_SUFFIXES[@]}))
+    cluster=${MULTICLUSTER_DOMAIN_SUFFIXES[${index}]}
+    fullnode_host="${username}-${FULLNODE_INTERNAL_HOST_SUFFIX}.${NAMESPACE}.svc.${cluster}:6182"
+  else
+    fullnode_host="${username}-${FULLNODE_INTERNAL_HOST_SUFFIX}:6182"
+  fi
+
+  if [[ "${VALIDATOR_ENABLE_ONCHAIN_DISCOVERY}" = "true" ]]; then
+    validator_host="val${i}.${DOMAIN}:6180"
+  elif [[ "${ENABLE_MULTICLUSTER_DOMAIN_SUFFIX}" = "true" ]]; then
+    index=$(($i % ${#MULTICLUSTER_DOMAIN_SUFFIXES[@]}))
+    cluster=${MULTICLUSTER_DOMAIN_SUFFIXES[${index}]}
+    validator_host="${username}-${VALIDATOR_INTERNAL_HOST_SUFFIX}.${NAMESPACE}.svc.${cluster}:6180"
+  else
+    validator_host="${username}-${VALIDATOR_INTERNAL_HOST_SUFFIX}:6180"
+  fi
+
+  if [ $i -lt $NUM_VALIDATORS_WITH_LARGER_STAKE ]; then
+    CUR_STAKE_AMOUNT=$LARGER_STAKE_AMOUNT
+  else
+    CUR_STAKE_AMOUNT=$STAKE_AMOUNT
+  fi
+
+  echo "CUR_STAKE_AMOUNT=${CUR_STAKE_AMOUNT} for ${i} validator"
+
+  if [[ -z "${RANDOM_SEED}" ]]; then
+    aptos genesis generate-keys --output-dir $user_dir
+  else
+    seed=$(printf "%064x" "$((${RANDOM_SEED_IN_DECIMAL} + i))")
+    echo "seed=$seed for ${i}th validator"
+    aptos genesis generate-keys --random-seed $seed --output-dir $user_dir
+  fi
+
+  aptos genesis set-validator-configuration --owner-public-identity-file $user_dir/public-keys.yaml --local-repository-dir $WORKSPACE \
+    --username $username \
+    --validator-host $validator_host \
+    --full-node-host $fullnode_host \
+    --stake-amount $CUR_STAKE_AMOUNT
 done
 
 # get the framework
@@ -130,10 +130,10 @@ kubectl get pvc -o name | grep /fn- | grep -v "e${ERA}-" | xargs -r kubectl dele
 kubectl get secret -o name | grep "genesis-e" | grep -v "e${ERA}-" | xargs -r kubectl delete
 
 # create genesis secrets for validators to startup
-for i in $(seq 0 $(($NUM_VALIDATORS-1))); do
-username="${USERNAME_PREFIX}-${i}"
-user_dir="${WORKSPACE}/${username}"
-kubectl create secret generic "${username}-genesis-e${ERA}" \
+for i in $(seq 0 $(($NUM_VALIDATORS - 1))); do
+  username="${USERNAME_PREFIX}-${i}"
+  user_dir="${WORKSPACE}/${username}"
+  kubectl create secret generic "${username}-genesis-e${ERA}" \
     --from-file=genesis.blob=${WORKSPACE}/genesis.blob \
     --from-file=waypoint.txt=${WORKSPACE}/waypoint.txt \
     --from-file=validator-identity.yaml=${user_dir}/validator-identity.yaml \
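The seeded branch of this script derives one deterministic 64-hex-digit seed per validator by adding the validator index to the decimal form of RANDOM_SEED, which can be checked in isolation (sketch):

    RANDOM_SEED=0a
    RANDOM_SEED_IN_DECIMAL=$(printf "%d" 0x${RANDOM_SEED})    # 10
    for i in 0 1; do
      printf "%064x\n" "$((${RANDOM_SEED_IN_DECIMAL} + i))"   # ...00a, then ...00b
    done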
diff --git a/terraform/helm/genesis/templates/genesis.yaml b/terraform/helm/genesis/templates/genesis.yaml
index 1298d39676d2c..6d5716a25e6fe 100644
--- a/terraform/helm/genesis/templates/genesis.yaml
+++ b/terraform/helm/genesis/templates/genesis.yaml
@@ -25,12 +25,6 @@ data:
     rewards_apy_percentage: {{ .Values.chain.rewards_apy_percentage | int }}
     voting_duration_secs: {{ .Values.chain.voting_duration_secs | int }}
     voting_power_increase_limit: {{ .Values.chain.voting_power_increase_limit | int }}
-    {{- with .Values.chain.on_chain_consensus_config}}
-    on_chain_consensus_config: {{ . | toJson }}
-    {{- end}}
-    {{- with .Values.chain.on_chain_execution_config}}
-    on_chain_execution_config: {{ . | toJson }}
-    {{- end}}
 
 ---
 
diff --git a/terraform/helm/genesis/values.yaml b/terraform/helm/genesis/values.yaml
index 50bb124cda2d9..4a8c859e61859 100644
--- a/terraform/helm/genesis/values.yaml
+++ b/terraform/helm/genesis/values.yaml
@@ -31,10 +31,6 @@ chain:
   rewards_apy_percentage: 10
   # -- Minimum price per gas unit
   min_price_per_gas_unit: 1
-  # -- Onchain Consensus Config
-  on_chain_consensus_config:
-  # -- Onchain Execution Config
-  on_chain_execution_config:
 
 # -- Default image tag to use for all tools images
 imageTag: testnet
diff --git a/terraform/helm/logger/.helmignore b/terraform/helm/logger/.helmignore
deleted file mode 100644
index 0e8a0eb36f4ca..0000000000000
--- a/terraform/helm/logger/.helmignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/terraform/helm/logger/Chart.yaml b/terraform/helm/logger/Chart.yaml
deleted file mode 100644
index e2bdba437686c..0000000000000
--- a/terraform/helm/logger/Chart.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-apiVersion: v2
-name: aptos-logger
-version: 0.2.0
diff --git a/terraform/helm/logger/README.md b/terraform/helm/logger/README.md
deleted file mode 100644
index 09b96eeeffdbe..0000000000000
--- a/terraform/helm/logger/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-Aptos Logger Deployment
-================================
-
-This Helm chart deploys a central logger that aggregates logs from aptos nodes
-using [Vector][]. The logger can be used to output logs to our central logging
-system using mutual TLS, to file for debugging purposes, and any other outputs
-possible with Vector output configuration.
-
-Note to partners: please don't point this logger towards our premainnet or mainnet
-central logging stack. We'd like to keep that for validators and key Association-run
-public fullnodes.
-
-Configuration
--------------
-
-See [values.yaml][] for the full list of options you can configure.
-
-* `logging.vector.logToFile`: logs to /tmp/logs for debugging purposes
-* `logging.vector.outputs`: your own custom vector outputs
-* `loggingClientCert`, `loggingClientKey`, `loggingCA`, `loggingCentralHost`: for mutual TLS with a central loging system
-
-There exist template helm values files in the `values` directory, for premainnet and mainnet.
-
-Deployment
-----------
-
-1. Install Helm v3: https://helm.sh/docs/intro/install/
-2. Configure `kubectl` with the Kubernetes cluster you wish to use.
-3. Set the value `logger.name` to `-`, e.g. `novi-pfn`
-4. Set the value `serviceAccount.name` to an existing fullnode or validator service account, or do a role binding, e.g. with `aptos-validator-psp`.
-5. Configure any of the other helm values if applicable. An example to connect to `mainnet` is included in the `values` directory. If unset, the fullnode will connect to premainnet by default.
-6. Install the release, setting any options:
-
-       $ helm install fullnode-logger --set logging.vector.logToFile=true .
-
-[Vector]: https://vector.dev/
-[values.yaml]: values.yaml
diff --git a/terraform/helm/logger/files/vector.toml b/terraform/helm/logger/files/vector.toml
deleted file mode 100644
index 3da31d3c469c5..0000000000000
--- a/terraform/helm/logger/files/vector.toml
+++ /dev/null
@@ -1,70 +0,0 @@
-[sources.tcp_input]
-  type = "socket"
-  address = "0.0.0.0:5044"
-  max_length = 1024000
-  mode = "tcp"
-
-[sources.syslog]
-  type = "syslog"
-  mode = "udp"
-  address = "0.0.0.0:1514"
-  max_length = 10240
-
-[transforms.parse_json]
-  type = "json_parser"
-  inputs = ["tcp_input"]
-  drop_invalid = false
-  field = "message"
-
-[transforms.add_fields]
-  type = "add_fields"
-  inputs = ["parse_json", "syslog"]
-  overwrite = true
-
-  # Fields
-  fields.owner = "{{ required "logger.name must be set" .Values.logger.name }}"
-  fields.chain_name = "{{ required "chain.name must be set" .Values.chain.name }}"
-
-{{- if .Values.loggingCentralHost }}
-[sinks.http_output]
-  # General
-  type = "http" # required
-  inputs = ["add_fields"] # required
-  compression = "none" # optional, default
-  healthcheck = true # optional, default
-  uri = "https://{{.Values.loggingCentralHost}}:9000" # required
-
-  # Batch
-  batch.max_events = 1000 # optional, no default, events
-
-  # Buffer
-  buffer.max_events = 50000 # optional
-  buffer.type = "memory" # optional, default
-  buffer.when_full = "drop_newest" # optional
-
-  # Encoding
-  encoding.codec = "ndjson" # required
-
-  # TLS
-  tls.enabled = true
-  tls.ca_file = "/etc/vector/cert/ca.crt"
-  tls.crt_file = "/etc/vector/cert/tls.crt"
-  tls.key_file = "/etc/vector/cert/tls.key"
-  tls.verify_certificate = {{ .Values.logging.vector.verifyServer | default true }}
-  tls.verify_hostname = {{ .Values.logging.vector.verifyServer | default true }}
-{{- end }}
-
-{{- if .Values.logging.vector.logToFile }}
-[sinks.file]
-  inputs = ["add_fields"]
-  type = "file"
-  path = "/tmp/logs/vector-%Y-%m-%d.log"
-  encoding.codec = "ndjson"
-{{- end }}
-
-{{- range .Values.logging.vector.outputs }}
-[sinks.{{ .output_id }}]
-    {{- range $k, $v := .config }}
-    {{ $k }} = {{ toJson $v }}
-    {{- end }}
-{{- end }}
diff --git a/terraform/helm/logger/templates/NOTES.txt b/terraform/helm/logger/templates/NOTES.txt
deleted file mode 100644
index 902fb066b17ff..0000000000000
--- a/terraform/helm/logger/templates/NOTES.txt
+++ /dev/null
@@ -1 +0,0 @@
-Your {{ .Chart.Name }} deployment named {{ .Release.Name }} is now deployed.
diff --git a/terraform/helm/logger/templates/_helpers.tpl b/terraform/helm/logger/templates/_helpers.tpl
deleted file mode 100644
index 759d584dc4899..0000000000000
--- a/terraform/helm/logger/templates/_helpers.tpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "aptos-logger.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "aptos-logger.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "aptos-logger.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "aptos-logger.labels" -}}
-helm.sh/chart: {{ include "aptos-logger.chart" . }}
-{{ include "aptos-logger.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "aptos-logger.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "aptos-logger.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "aptos-logger.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "aptos-logger.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
diff --git a/terraform/helm/logger/templates/logging.yaml b/terraform/helm/logger/templates/logging.yaml
deleted file mode 100644
index c138983431c1f..0000000000000
--- a/terraform/helm/logger/templates/logging.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "aptos-logger.fullname" . }}-vector
-  labels:
-    {{- include "aptos-logger.labels" . | nindent 4 }}
-data:
-  vector.toml: |-
-{{ (tpl (.Files.Get "files/vector.toml") .) | indent 4 }}
-
----
-{{- if .Values.loggingCentralHost }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "aptos-logger.fullname" . }}-vector
-  labels:
-    {{- include "aptos-logger.labels" . | nindent 4 }}
-type: kubernetes.io/tls
-data:
-  tls.crt: {{.Values.loggingClientCert}}
-  tls.key: {{.Values.loggingClientKey}}
-  ca.crt: {{.Values.loggingCA}}
----
-{{- end }}
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "aptos-logger.fullname" . }}
-  labels:
-    {{- include "aptos-logger.labels" . | nindent 4 }}
-spec:
-  selector:
-    {{- include "aptos-logger.selectorLabels" . | nindent 4 }}
-    app.kubernetes.io/name: logging
-  ports:
-  - name: json
-    port: 5044
-  - name: syslog
-    protocol: UDP
-    port: 1514
-
----
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: {{ include "aptos-logger.fullname" . }}
-  labels:
-    {{- include "aptos-logger.labels" . | nindent 4 }}
-    app.kubernetes.io/name: logging
-spec:
-  serviceName: {{ include "aptos-logger.fullname" . }}
-  replicas: 1
-  podManagementPolicy: Parallel
-  selector:
-    matchLabels:
-      {{- include "aptos-logger.selectorLabels" . | nindent 6 }}
-      app.kubernetes.io/name: logging
-  template:
-    metadata:
-      labels:
-        {{- include "aptos-logger.selectorLabels" . | nindent 8 }}
-        app.kubernetes.io/name: logging
-      annotations:
-        seccomp.security.alpha.kubernetes.io/pod: runtime/default
-        checksum/vector.toml: {{ tpl (.Files.Get "files/vector.toml") . | sha256sum }}
-    spec:
-      volumes:
-      {{- if .Values.logging.vector.logToFile }}
-      - name: vector-logs
-        emptyDir: {}
-      {{- end }}
-      - name: vector-config
-        configMap:
-          name: {{ include "aptos-logger.fullname" . }}-vector
-      {{- if .Values.loggingCentralHost }}
-      - name: vector-secret
-        secret:
-          secretName: {{ include "aptos-logger.fullname" . }}-vector
-      {{- end }}
-      {{- with .Values.logging }}
-      containers:
-      - name: vector
-        image: {{ .vector.image.repo }}:{{ .vector.image.tag }}
-        args:
-        - "--watch-config=true"
-        resources:
-          {{- toYaml .vector.resources | nindent 10 }}
-      {{- end }}
-        securityContext:
-          readOnlyRootFilesystem: true
-          allowPrivilegeEscalation: false
-          runAsUser: 65534
-          runAsGroup: 65534
-          capabilities:
-            drop:
-            - ALL
-        ports:
-        - containerPort: 5044
-        - containerPort: 1514
-        livenessProbe:
-          tcpSocket:
-            port: 5044
-          initialDelaySeconds: 10
-        readinessProbe:
-          tcpSocket:
-            port: 5044
-        volumeMounts:
-        - name: vector-config
-          mountPath: /etc/vector
-          readOnly: true
-        {{- if .Values.loggingCentralHost }}
-        - name: vector-secret
-          mountPath: /etc/vector/cert
-        {{- end }}
-      {{- with .Values.logging }}
-        {{- if .vector.logToFile }}
-        - name: vector-logs
-          mountPath: /tmp/logs
-        {{- end }}
-      {{- with .nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      securityContext:
-        runAsNonRoot: true
-        fsGroup: 65534
-      {{- end }}
-      serviceAccountName: {{ include "aptos-logger.serviceAccountName" . }}
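
The StatefulSet above is driven entirely by chart values: `loggingCentralHost` gates the TLS Secret and its cert mounts, while `logging.vector.logToFile` adds the emptyDir behind the file sink. A minimal sketch of an override enabling both paths (the host and the base64 blobs are placeholders):

```yaml
logging:
  vector:
    logToFile: true                         # mounts an emptyDir at /tmp/logs
loggingCentralHost: "logging.example.com"   # creates the kubernetes.io/tls Secret
loggingClientCert: "<base64-encoded client certificate>"
loggingClientKey: "<base64-encoded client key>"
loggingCA: "<base64-encoded CA certificate>"
```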
diff --git a/terraform/helm/logger/templates/serviceaccount.yaml b/terraform/helm/logger/templates/serviceaccount.yaml
deleted file mode 100644
index 0f9ad0635574d..0000000000000
--- a/terraform/helm/logger/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "aptos-logger.serviceAccountName" . }}
-  labels:
-    {{ include "aptos-logger.labels" . | nindent 4 }}
-{{- end -}}
diff --git a/terraform/helm/logger/values.yaml b/terraform/helm/logger/values.yaml
deleted file mode 100644
index 552fcd3c5e8c3..0000000000000
--- a/terraform/helm/logger/values.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-# These are used for aggregation purposes in central logging
-chain:
-  name:
-logger:
-  name:
-
-logging:
-  vector:
-    verifyServer: # default is true
-    logToFile: false
-    image:
-      repo: timberio/vector
-      tag: 0.20.0-alpine@sha256:1b6a76585ccb0a764b6374fe448825f1f46d40c3a05473337dad7c2e1f7322b5
-      pullPolicy: IfNotPresent
-    resources:
-      limits:
-        cpu: 1.5
-        memory: 2Gi
-      requests:
-        cpu: 1
-        memory: 1.5Gi
-    outputs: []
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name:
-
-# please do not send to pre/mainnet central logging
-loggingClientCert: ""
-loggingClientKey: ""
-loggingCA: ""
-loggingCentralHost: ""
diff --git a/terraform/helm/logger/values/mainnet.yaml b/terraform/helm/logger/values/mainnet.yaml
deleted file mode 100644
index d76a3d3c0530a..0000000000000
--- a/terraform/helm/logger/values/mainnet.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# These are used for aggregation purposes in central logging
-chain:
-  name: mainnet
-
-# sync from validator helm chart
-loggingClientCert: 
-loggingClientKey: 
-loggingCA: 
-loggingCentralHost: 
diff --git a/terraform/helm/logger/values/premainnet.yaml b/terraform/helm/logger/values/premainnet.yaml
deleted file mode 100644
index da7bacc1570d8..0000000000000
--- a/terraform/helm/logger/values/premainnet.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-# These are used for aggregation purposes in central logging
-chain:
-  name: premainnet
-
-# sync from validator helm chart
-loggingClientCert: 
-loggingClientKey: 
-loggingCA: 
-loggingCentralHost: 
diff --git a/terraform/helm/monitoring/Chart.lock b/terraform/helm/monitoring/Chart.lock
deleted file mode 100644
index 335f818158950..0000000000000
--- a/terraform/helm/monitoring/Chart.lock
+++ /dev/null
@@ -1,9 +0,0 @@
-dependencies:
-- name: prometheus-node-exporter
-  repository: https://prometheus-community.github.io/helm-charts
-  version: 4.0.0
-- name: kube-state-metrics
-  repository: https://prometheus-community.github.io/helm-charts
-  version: 4.16.0
-digest: sha256:a5f034385599a788bf58d04acc029c014317d5df0efbebdc5ae034a731d4aaa7
-generated: "2022-09-07T17:04:07.275506-07:00"
diff --git a/terraform/helm/monitoring/Chart.yaml b/terraform/helm/monitoring/Chart.yaml
deleted file mode 100644
index 732cdd97823a4..0000000000000
--- a/terraform/helm/monitoring/Chart.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v2
-name: aptos-monitoring
-version: 0.2.0
-
-dependencies:
-  - name: prometheus-node-exporter
-    condition: prometheus-node-exporter.enabled
-    version: 4.0.0
-    repository: "https://prometheus-community.github.io/helm-charts"
-  - name: kube-state-metrics
-    condition: kube-state-metrics.enabled
-    version: 4.16.0
-    repository: "https://prometheus-community.github.io/helm-charts"
diff --git a/terraform/helm/monitoring/charts/kube-state-metrics-4.16.0.tgz b/terraform/helm/monitoring/charts/kube-state-metrics-4.16.0.tgz
deleted file mode 100644
index 4ef32f978317cbe84d755a71b3f6c2a895be589a..0000000000000000000000000000000000000000
GIT binary patch
(binary patch data for the deleted chart tarball omitted)

diff --git a/terraform/helm/monitoring/charts/prometheus-node-exporter-4.0.0.tgz b/terraform/helm/monitoring/charts/prometheus-node-exporter-4.0.0.tgz
deleted file mode 100644
index d81ac648e175e26e476217db064f05f08b9593e0..0000000000000000000000000000000000000000
GIT binary patch
(binary patch data for the deleted chart tarball omitted)

diff --git a/terraform/helm/monitoring/files/alertmanager.yml b/terraform/helm/monitoring/files/alertmanager.yml
deleted file mode 100644
index 13e9d3f81f645..0000000000000
--- a/terraform/helm/monitoring/files/alertmanager.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-# Severities: info, warning, [error, critical]
-# Last 2 items are high urgency
-
-global:
-
-route:
-  group_by: ["instance", "kubernetes_pod_name", "role"]
-
-  # When a new group of alerts is created by an incoming alert, wait at
-  # least 'group_wait' to send the initial notification.
-  # This ensures that multiple alerts for the same group that start firing
-  # shortly after one another are batched together into the first
-  # notification.
-  group_wait: 30s
-
-  # Once the first notification has been sent, wait 'group_interval' before
-  # sending a batch of new alerts that started firing for that group.
-  group_interval: 5m
-
-  # If an alert has successfully been sent, wait 'repeat_interval' before
-  # resending it.
-  repeat_interval: 10m
-
-  # A default receiver
-  receiver: "default"
-
-  # The child route trees.
-  # https://prometheus.io/docs/alerting/latest/configuration/#route
-  routes: {{ .Values.monitoring.alertmanager.alertRouteTrees | toJson }}
-
-# A list of notification receivers
-# https://prometheus.io/docs/alerting/latest/configuration/#receiver
-receivers: {{ .Values.monitoring.alertmanager.alertReceivers | toJson }}
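
Both templated fields above are filled straight from chart values. A sketch of what they might hold, mirroring the default route trees in monitoring/values.yaml and adding a hypothetical Slack receiver (the webhook URL and channel are placeholders):

```yaml
monitoring:
  alertmanager:
    alertRouteTrees:
      - match:
          severity: critical
        receiver: "critical"
    alertReceivers:
      - name: "default"
      - name: "critical"
        slack_configs:                  # standard Alertmanager receiver config
          - api_url: "https://hooks.slack.com/services/T000/B000/XXXX"
            channel: "#alerts-critical"
```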
diff --git a/terraform/helm/monitoring/files/dashboards b/terraform/helm/monitoring/files/dashboards
deleted file mode 120000
index 9791cdc4da5a6..0000000000000
--- a/terraform/helm/monitoring/files/dashboards
+++ /dev/null
@@ -1 +0,0 @@
-../../../../dashboards
\ No newline at end of file
diff --git a/terraform/helm/monitoring/files/grafana.ini b/terraform/helm/monitoring/files/grafana.ini
deleted file mode 100644
index c5f998f8b3b54..0000000000000
--- a/terraform/helm/monitoring/files/grafana.ini
+++ /dev/null
@@ -1,34 +0,0 @@
-{{- if .Values.monitoring.grafana.googleAuth }}
-
-[auth]
-# Set to true to disable (hide) the login form, useful if you use OAuth
-disable_login_form = true
-
-{{- with .Values.monitoring.grafana.config }}
-[auth.google]
-enabled = true
-client_id = {{ .client_id }}
-client_secret = {{ .client_secret }}
-scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
-auth_url = https://accounts.google.com/o/oauth2/auth
-token_url = https://accounts.google.com/o/oauth2/token
-allowed_domains = {{ .allowed_domains }}
-allow_sign_up = true
-{{- end }}
-
-[users]
-auto_assign_org_role = Editor
-
-[server]
-protocol = http
-root_url = http://mon.{{ .Values.service.domain }}/grafana
-serve_from_sub_path = true
-
-{{- else }}
-[auth.anonymous]
-enabled = true
-
-# Role for unauthenticated users, other valid values are `Editor` and `Admin`
-org_role = Editor
-
-{{- end }}
\ No newline at end of file
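
The Google OAuth branch above only renders when `monitoring.grafana.googleAuth` is set, with the credentials read from `monitoring.grafana.config`. A sketch of the values that would enable it (the client ID, secret, and domain are placeholders):

```yaml
monitoring:
  grafana:
    googleAuth: true
    config:
      client_id: "1234567890-example.apps.googleusercontent.com"
      client_secret: "<oauth client secret>"
      allowed_domains: "example.com"
```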
diff --git a/terraform/helm/monitoring/files/prometheus.yml b/terraform/helm/monitoring/files/prometheus.yml
deleted file mode 100644
index 274eebaccbe93..0000000000000
--- a/terraform/helm/monitoring/files/prometheus.yml
+++ /dev/null
@@ -1,225 +0,0 @@
-global:
-  scrape_interval: 15s
-  evaluation_interval: 15s
-  external_labels:
-    chain_name: {{ .Values.chain.name }}
-    {{- if .Values.validator.name }}
-    owner: {{ .Values.validator.name }}
-    {{- else if .Values.fullnode.name  }}
-    owner: {{ .Values.fullnode.name }}
-    {{- else }}
-    owner: release:{{ .Release.Name }}
-    {{- end }}
-
-# Alertmanager configuration
-alerting:
-  alertmanagers:
-  - static_configs:
-    - targets:
-      - localhost:9093
-
-# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
-rule_files:
-{{- range $path, $_ := .Files.Glob "files/rules/*.yml" }}
-  - {{ base $path }}
-{{- end }}
-
-scrape_configs:
-{{ if .Values.monitoring.prometheus.fullKubernetesScrape }}
-- job_name: 'kubernetes-apiservers'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: endpoints
-
-  # Keep only the default/kubernetes service endpoints for the https port. This
-  # will add targets for each API server which Kubernetes adds an endpoint to
-  # the default/kubernetes service.
-  metric_relabel_configs:
-  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: default;kubernetes;https
-  - source_labels: [__name__]
-    action: drop
-    regex: '(.+)_request_duration_seconds_bucket'
-  - target_label: owner
-    {{- if .Values.validator.name }}
-    replacement: {{ .Values.validator.name }}
-    {{- else if .Values.fullnode.name  }}
-    replacement: {{ .Values.fullnode.name }}
-    {{- else }}
-    replacement: {{ .Release.Name }}
-    {{- end }}
-{{ end }}
-
-- job_name: 'kubernetes-nodes'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: node
-
-  {{ if not .Values.monitoring.prometheus.fullKubernetesScrape }}
-  metric_relabel_configs:
-  - source_labels: [namespace]
-    action: keep
-    regex: "{{ .Release.Namespace }}"
-  # Explicitly drop spammy metrics
-  - source_labels: [__name__]
-    regex: 'storage_operation_duration_seconds_bucket'
-    action: drop
-  {{ end }}
-
-  relabel_configs:
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-  - target_label: __address__
-    replacement: kubernetes.default.svc:443
-  - source_labels: [__meta_kubernetes_node_name]
-    regex: (.+)
-    target_label: __metrics_path__
-    replacement: /api/v1/nodes/${1}/proxy/metrics
-
-- job_name: 'kubernetes-cadvisor'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: node
-
-  relabel_configs:
-  - target_label: __address__
-    replacement: kubernetes.default.svc:443
-  - source_labels: [__meta_kubernetes_node_name]
-    regex: (.+)
-    target_label: __metrics_path__
-    replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
-
-  {{ if not .Values.monitoring.prometheus.fullKubernetesScrape }}
-  # Only keep container task state for key containers
-  metric_relabel_configs:
-  - source_labels: [__name__, container]
-    action: drop
-    regex: container_tasks_state;!validator|!fullnode
-  - source_labels: [container]
-    action: drop
-    regex: calico.*|csi.*|ebs.*|chaos.*|aws-node|node-driver-registrar
-  {{ end }}
-
-# Scrape config for service endpoints.
-#
-# The relabeling allows the actual service scrape endpoint to be configured
-# via the following annotations:
-#
-# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
-# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
-# to set this to `https` & most likely set the `tls_config` of the scrape config.
-# * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
-# * `prometheus.io/port`: If the metrics are exposed on a different port to the
-# service then set this appropriately.
-- job_name: 'kubernetes-service-endpoints'
-
-  kubernetes_sd_configs:
-    - role: endpoints
-
-  relabel_configs:
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-      action: keep
-      regex: true
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-      action: replace
-      target_label: __scheme__
-      regex: (https?)
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-      action: replace
-      target_label: __metrics_path__
-      regex: (.+)
-    - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-      action: replace
-      target_label: __address__
-      regex: ([^:]+)(?::\d+)?;(\d+)
-      replacement: $1:$2
-    - action: labelmap
-      regex: __meta_kubernetes_service_label_(.+)
-    - source_labels: [__meta_kubernetes_namespace]
-      action: replace
-      target_label: kubernetes_namespace
-    - source_labels: [__meta_kubernetes_service_name]
-      action: replace
-      target_label: kubernetes_name
-    - source_labels: [__meta_kubernetes_pod_node_name]
-      action: replace
-      target_label: kubernetes_node
-
-  # Drop some redundant labels from kube-state-metrics
-  metric_relabel_configs:
-  - action: labeldrop
-    regex: uid|container_id
-  # Drop tmpfs metrics from node-exporter
-  - source_labels: [fstype]
-    regex: tmpfs
-    action: drop
-
-  # Scrape config for pods
-  #
-  # The relabeling allows the actual pod scrape endpoint to be configured via the
-  # following annotations:
-  #
-  # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
-  # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
-  # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
-- job_name: "kubernetes-pods"
-
-  kubernetes_sd_configs:
-  - role: pod
-
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
-    action: keep
-    regex: true
-  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
-    action: replace
-    target_label: __metrics_path__
-    regex: (.+)
-  - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
-    action: replace
-    regex: ([^:]+)(?::\d+)?;(\d+)
-    replacement: ${1}:${2}
-    target_label: __address__
-  - source_labels: [__meta_kubernetes_namespace]
-    action: replace
-    target_label: namespace
-  - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]
-    action: replace
-    target_label: role
-  - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance]
-    action: replace
-    target_label: instance
-  - source_labels: [__meta_kubernetes_pod_name]
-    action: replace
-    target_label: kubernetes_pod_name
-  # Explicitly drop all vector metrics
-  - source_labels: [namespace]
-    regex: 'vector'
-    action: drop
-
-{{ if .Values.monitoring.prometheus.remote_write.enabled }}
-{{ with .Values.monitoring.prometheus.remote_write }}
-remote_write:
-  - url: {{ .url }}
-    sigv4:
-      region: {{ .region }}
-    queue_config:
-      max_samples_per_send: 1000
-      max_shards: 200
-      capacity: 2500
-{{ end }}
-{{ end }}
-
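
The prometheus.io/* annotations documented in the two scrape configs above are what opt a workload into scraping. A sketch of a Service that the `kubernetes-service-endpoints` job would pick up -- the service name, selector, and port are hypothetical:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: aptos-fullnode-metrics        # hypothetical
  annotations:
    prometheus.io/scrape: "true"      # required to pass the keep relabel rule
    prometheus.io/path: "/metrics"    # optional; this is the default anyway
    prometheus.io/port: "9101"        # rewrites __address__ to <host>:9101
spec:
  selector:
    app.kubernetes.io/name: fullnode  # hypothetical
  ports:
    - name: metrics
      port: 9101
```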
diff --git a/terraform/helm/monitoring/files/rules/alerts.yml b/terraform/helm/monitoring/files/rules/alerts.yml
deleted file mode 100644
index 658692ac5b4cd..0000000000000
--- a/terraform/helm/monitoring/files/rules/alerts.yml
+++ /dev/null
@@ -1,166 +0,0 @@
-groups:
-- name: "Aptos alerts"
-  rules:
-{{- if .Values.validator.name }}
-  # consensus
-  - alert: Zero Block Commit Rate
-    expr: rate(aptos_consensus_last_committed_round{role="validator"}[1m]) == 0 OR absent(aptos_consensus_last_committed_round{role="validator"})
-    for: 20m
-    labels:
-      severity: error
-      summary: "The block commit rate is low"
-    annotations:
-  - alert: High local timeout rate
-    expr: rate(aptos_consensus_timeout_count{role="validator"}[1m]) > 0.5
-    for: 20m
-    labels:
-      severity: warning
-      summary: "Consensus timeout rate is high"
-    annotations:
-  - alert: High consensus error rate
-    expr: rate(aptos_consensus_error_count{role="validator"}[1m]) / on (role) rate(consensus_duration_count{op='main_loop', role="validator"}[1m]) > 0.25
-    for: 20m
-    labels:
-      severity: warning
-      summary: "Consensus error rate is high"
-    annotations:
-{{- end }}
-    # State sync alerts
-  - alert: State sync is not making progress
-    expr: rate(aptos_state_sync_version{type="synced"}[5m]) == 0 OR absent(aptos_state_sync_version{type="synced"})
-    for: 5m
-    labels:
-      severity: error
-      summary: "State sync is not making progress (i.e., the synced version is not increasing!)"
-    annotations:
-  - alert: State sync is lagging significantly
-    expr: (aptos_data_client_highest_advertised_data{data_type="transactions"} - on(kubernetes_pod_name, role) aptos_state_sync_version{type="synced"}) > 1000000
-    for: 5m
-    labels:
-      severity: error
-      summary: "State sync is lagging significantly (i.e., the lag is greater than 1 million versions)"
-    annotations:
-
-    # Mempool alerts
-  - alert: Mempool has no active upstream peers
-    expr: (sum by (kubernetes_pod_name) (aptos_mempool_active_upstream_peers_count)) == 0
-    for: 3m
-    labels:
-      severity: error
-      summary: "Mempool has no active upstream peers (unable to forward transactions to anyone!)"
-    annotations:
-  - alert: Mempool is at >80% capacity (count)
-    expr: aptos_core_mempool_index_size{index="system_ttl"} > 1600000 # assumes default mempool size 2_000_000
-    for: 5m
-    labels:
-      severity: warning
-      summary: "Mempool count is at >80% capacity (it may soon become full!)"
-    annotations:
-  - alert: Mempool is at >80% capacity (bytes)
-    expr: aptos_core_mempool_index_size{index="size_bytes"} > 1717986918 # assumes default mempool size 2 * 1024 * 1024 * 1024
-    for: 5m
-    labels:
-      severity: warning
-      summary: "Mempool bytes is at >80% capacity (it may soon become full!)"
-    annotations:
-  - alert: Mempool is growing at a significant rate (count)
-    expr: rate(aptos_core_mempool_index_size{index="system_ttl"}[1m]) > 60000 # 3% growth per minute - assumes default mempool size 2_000_000
-    for: 10m
-    labels:
-      severity: warning
-      summary: "Mempool count is growing at a significant rate (it may soon become full!)"
-    annotations:
-  - alert: Mempool is growing at a significant rate (bytes)
-    expr: rate(aptos_core_mempool_index_size{index="size_bytes"}[1m]) > 64424509 # 3% growth per minute - assumes default mempool size 2 * 1024 * 1024 * 1024
-    for: 10m
-    labels:
-      severity: warning
-      summary: "Mempool bytes is growing at a significant rate (it may soon become full!)"
-    annotations:
-
-  # Networking alerts
-  - alert: Validator Connected Peers
-    expr: 0 == min(aptos_network_peers{state="connected", role_type="validator", role="validator"})
-    for: 15m
-    labels:
-      severity: error
-      summary: "Validator node has zero connected peers"
-    annotations:
-
-  # Storage core metrics
-  - alert: Validator Low Disk Space (warning)
-    expr: (kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~".*(validator|fullnode)-e.*"} - kubelet_volume_stats_used_bytes) / 1024 / 1024 / 1024 < 200
-    for: 1h
-    labels:
-      severity: warning
-      summary: "Less than 200 GB of free space on Aptos Node."
-    annotations:
-      description: "(This is a warning, deal with it in working hours.) A validator or fullnode pod has less than 200 GB of disk space. Take these steps:
-        1. If only a few nodes have this issue, it might be that they are not typically spec'd or customized differently, \
-          it's most likely a expansion of the volume is needed soon. Talk to the PE team. Otherwise, it's a bigger issue.
-        2. Pass this issue on to the storage team. If you are the storage team, read on.
-        3. Go to the dashboard and look for the stacked up column family sizes. \
-          If the total size on that chart can't justify low free disk space, we need to log in to a node to see if something other than the AptosDB is eating up disk. \
-          Start from things under /opt/aptos/data.
-        3 Otherwise, if the total size on that chart is the majority of the disk consumption, zoom out and look for anomalies -- sudden increases overall or on a few \
-          specific Column Families, etc. Also check average size of each type of data. Reason about the anomaly with changes in recent releases in mind.
-        4 If everything made sense, it's a bigger issue, somehow our gas schedule didn't stop state explosion before an alert is triggered. Our recommended disk \
-          spec and/or default pruning configuration, as well as storage gas schedule need updates. Discuss with the ecosystem team and send out a PR on the docs site, \
-          form a plan to inform the node operator community and prepare for a on-chain proposal to update the gas schedule."
-  - alert: Validator Very Low Disk Space (critical)
-    expr: (kubelet_volume_stats_capacity_bytes{persistentvolumeclaim=~".*(validator|fullnode)-e.*"} - kubelet_volume_stats_used_bytes) / 1024 / 1024 / 1024 < 50
-    for: 5m
-    labels:
-      severity: critical
-      summary: "Less than 50 GB of free space on Aptos Node."
-    annotations:
-      description: "A validator or fullnode pod has less than 50 GB of disk space -- that's dangerously low. \
-        1. A warning level alert of disk space less than 200GB should've fired a few days ago at least, search on slack and understand why it's not dealt with.
-        2. Search in the code for the runbook of the warning alert, quickly go through that too determine if it's a bug. Involve the storage team and other team accordingly.
-      If no useful information is found, evaluate the trend of disk usage increasing, how long can we run further? If it can't last the night, you have these options to mitigate this:
-        1. Expand the disk if it's a cloud volume.
-        2. Shorten the pruner windows. Before that, find the latest version of these https://github.com/aptos-labs/aptos-core/blob/48cc64df8a64f2d13012c10d8bd5bf25d94f19dc/config/src/config/storage_config.rs#L166-L218 \
-          and read carefully the comments on the prune window config entries -- set safe values.
-        3. If you believe this is happening on nodes that are not run by us, involve the PE / Community / Ecosystem teams to coordinate efforts needed on those nodes.
-      "
-  - alert: AptosDB API Success Rate
-    expr: sum by(kubernetes_pod_name) (rate(aptos_storage_api_latency_seconds_count{result="Ok"}[1m])) / sum by(kubernetes_pod_name) (rate(aptos_storage_api_latency_seconds_count[1m])) < 0.99  # 99%
-    for: 5m
-    labels:
-      severity: error
-      summary: "AptosDB API success rate dropped."
-    annotations:
-      description: "AptosDB APIs started to return Error.
-      This must be looked at together with alerts / dashboards of upper level components -- it unfortunately can be either the cause or victim of issues over there. Things you can do:
-        1. Go to the storage dashboard and see if the errors are on specific APIs.
-        2. Look at logs and see storage related errors, understand if it's hardware / dependency errors or logical errors in our code.
-        3. Previous steps should narrow down the possibilities of the issue, at this point if it's still not clear, read the code to understand if the error is caused by a bug or a change of input pattern.
-        4. See if changes in recent releases can cause this issue.
-      "
-  - alert: RocksDB Read Latency
-    expr: sum by (kubernetes_pod_name) (rate(aptos_schemadb_get_latency_seconds_sum[1m])) / sum by (kubernetes_pod_name) (rate(aptos_schemadb_get_latency_seconds_count[1m])) > 0.001  # 1 millisecond
-    for: 5m
-    labels:
-      severity: warning
-      summary: "RocksDB read latency raised."
-    annotations:
-      description: "RocksDB read latency raised, which indicates bad performance.
-      If alerts on other components are not fired, this is probably not urgent. But things you can do:
-        1. On the system dashboard, see if we get a flat line on the IOPs panel -- it can be disk being throttled. It's either the node is not spec'd as expected, or we are using more IOPs than expected.
-        2. Check out the traffic pattern on various dashboards, is there a sudden increase in traffic? Verify that on the storage dashboard by looking at the number of API calls, per API if needed.
-        3. Check the system dashboard to see if we are bottle necked by the memory (we rely heavily on the filesystem cache) or the CPU. It might be helpful to restart one of the nodes that's having this issue.
-
-        9. After all those, our threshold was set strictly initially, so if everything looks fine, we can change the alarm threshold.
-      "
-  # Logging alerts
-  - alert: Logs Being Dropped
-    expr: 1 < (rate(aptos_struct_log_queue_error[1m]) + rate(aptos_struct_log_send_error[1m]))
-    for: 5m
-    labels:
-      severity: warning
-      summary: "Logs being dropped"
-    annotations:
-      description: "Logging Transmit Error rate is high \
-        check the logging dashboard and \
-        there may be network issues, downstream throughput issues, or something wrong with Vector \
-        TODO: Runbook"
diff --git a/terraform/helm/monitoring/templates/_helpers.tpl b/terraform/helm/monitoring/templates/_helpers.tpl
deleted file mode 100644
index f121a8d524ff9..0000000000000
--- a/terraform/helm/monitoring/templates/_helpers.tpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "aptos-monitoring.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "aptos-monitoring.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "aptos-monitoring.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "aptos-monitoring.labels" -}}
-helm.sh/chart: {{ include "aptos-monitoring.chart" . }}
-{{ include "aptos-monitoring.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "aptos-monitoring.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "aptos-monitoring.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "aptos-monitoring.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "aptos-monitoring.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
diff --git a/terraform/helm/monitoring/templates/monitoring.yaml b/terraform/helm/monitoring/templates/monitoring.yaml
deleted file mode 100644
index affba0e54a04b..0000000000000
--- a/terraform/helm/monitoring/templates/monitoring.yaml
+++ /dev/null
@@ -1,368 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-grafana
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-data:
-  prometheus.yml: |-
-    apiVersion: 1
-    datasources:
-      - name: Prometheus
-        type: prometheus
-        isDefault: true
-        access: proxy
-        url: http://localhost:9090
-
-  dashboards.yml: |-
-    apiVersion: 1
-    providers:
-    - name: 'default'
-      folder: 'aptos'
-      type: file
-      options:
-        path: /etc/grafana/dashboards/aptos
-  grafana.ini: |-
-{{ (tpl (.Files.Get "files/grafana.ini") .) | indent 4 }}
-
----
-
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-spec:
-  accessModes:
-  - ReadWriteOnce
-  storageClassName: {{ .Values.monitoring.prometheus.storage.class }}
-  resources:
-    requests:
-      storage: {{ .Values.monitoring.prometheus.storage.size }}
-
----
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-data:
-{{ (tpl (.Files.Glob "files/rules/*.yml").AsConfig .) | indent 2 }}
-  prometheus.yml: |-
-{{ (tpl (.Files.Get "files/prometheus.yml") .) | indent 4 }}
-  alertmanager.yml: |-
-{{ (tpl (.Files.Get "files/alertmanager.yml") .) | indent 4 }}
-  haproxy.cfg: |-
-{{ (tpl (.Files.Get "files/haproxy-mon.cfg") .) | indent 4 }}
-
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-dashboards
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-binaryData:
-{{ (.Files.Glob "files/dashboards/*.json.gz").AsSecrets | indent 2 }}
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  annotations:
-{{- toYaml .Values.monitoring.serviceAccount.annotations | nindent 4 }}
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-rules:
-- apiGroups: [""]
-  resources:
-  - nodes
-  - nodes/proxy
-  - services
-  - endpoints
-  - pods
-  verbs: ["get", "list", "watch"]
-- apiGroups:
-  - extensions
-  resources:
-  - ingresses
-  verbs: ["get", "list", "watch"]
-- nonResourceURLs: ["/metrics"]
-  verbs: ["get"]
-
----
-
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-subjects:
-- kind: ServiceAccount
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  namespace: {{ .Release.Namespace }}
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-  annotations:
-    service.beta.kubernetes.io/aws-load-balancer-type: nlb
-    {{- if .Values.service.domain }}
-    external-dns.alpha.kubernetes.io/hostname: mon.{{ .Values.service.domain }}
-    {{- end }}
-spec:
-  selector:
-    {{- include "aptos-monitoring.selectorLabels" . | nindent 4 }}
-    app.kubernetes.io/name: monitoring
-  ports:
-  - name: grafana-http
-    port: 80
-    targetPort: 3000
-  type: LoadBalancer
-  {{- with .Values.service.monitoring.loadBalancerSourceRanges }}
-  loadBalancerSourceRanges:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-
----
-
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}-prometheus
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-spec:
-  selector:
-    {{- include "aptos-monitoring.selectorLabels" . | nindent 4 }}
-    app.kubernetes.io/name: monitoring
-  ports:
-  - name: prometheus-http
-    port: 9090
-  type: ClusterIP
-
----
-
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
-  name: {{ include "aptos-monitoring.fullname" . }}
-  labels:
-    {{- include "aptos-monitoring.labels" . | nindent 4 }}
-    app.kubernetes.io/name: monitoring
-spec:
-  serviceName: {{ include "aptos-monitoring.fullname" . }}
-  replicas: 1
-  podManagementPolicy: Parallel
-  selector:
-    matchLabels:
-      {{- include "aptos-monitoring.selectorLabels" . | nindent 6 }}
-      app.kubernetes.io/name: monitoring
-  template:
-    metadata:
-      labels:
-        {{- include "aptos-monitoring.selectorLabels" . | nindent 8 }}
-        app.kubernetes.io/name: monitoring
-      annotations:
-        seccomp.security.alpha.kubernetes.io/pod: runtime/default
-        checksum/prometheus.yml: {{ tpl (.Files.Get "files/prometheus.yml") . | sha256sum }}
-        checksum/alertmanager.yml: {{ tpl (.Files.Get "files/alertmanager.yml") . | sha256sum }}
-        checksum/rules.yml: {{ (tpl (.Files.Glob "files/rules/*.yml").AsConfig .) | sha256sum }}
-        checksum/dashboards.json: {{ (.Files.Glob "files/dashboards/*.json.gz").AsSecrets | sha256sum }}
-    spec:
-      {{- with .Values.monitoring }}
-      containers:
-      - name: prometheus
-        image: {{ .prometheus.image.repo }}:{{ .prometheus.image.tag }}
-        imagePullPolicy: {{ .prometheus.image.pullPolicy }}
-        command:
-         - sh
-         - -c
-         - |
-           {{- if .prometheus.deleteWal }}
-           rm -r /prometheus/data/wal/*
-           {{- end }}
-           prometheus \
-            --web.enable-lifecycle \
-            --web.external-url=http://mon.{{ $.Values.service.domain }} \
-            --config.file=/etc/prometheus/prometheus.yml \
-            --storage.tsdb.retention.time={{ .prometheus.tsdb_retention_time }} \
-            --storage.tsdb.min-block-duration={{ .prometheus.tsdb_min_block_duration }} \
-            --storage.tsdb.max-block-duration={{ .prometheus.tsdb_max_block_duration }}
-        resources:
-          {{- toYaml .prometheus.resources | nindent 10 }}
-        ports:
-        - containerPort: 9090
-        livenessProbe:
-          httpGet:
-            path: /-/healthy
-            port: 9090
-          initialDelaySeconds: 10
-        readinessProbe:
-          httpGet:
-            path: /-/ready
-            port: 9090
-        volumeMounts:
-        - name: monitoring-config
-          mountPath: /etc/prometheus
-        - name: prometheus-data
-          mountPath: /prometheus
-        securityContext:
-          readOnlyRootFilesystem: true
-          runAsUser: 65534
-          runAsGroup: 65534
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-      - name: alertmanager
-        image: {{ .alertmanager.image.repo }}:{{ .alertmanager.image.tag }}
-        imagePullPolicy: {{ .alertmanager.image.pullPolicy }}
-        args:
-        - "--config.file=/etc/alertmanager/alertmanager.yml"
-        - "--storage.path=/alertmanager"
-        resources:
-          {{- toYaml .alertmanager.resources | nindent 10 }}
-        ports:
-        - containerPort: 9093
-        livenessProbe:
-          httpGet:
-            path: /-/healthy
-            port: 9093
-          initialDelaySeconds: 10
-        readinessProbe:
-          httpGet:
-            path: /-/ready
-            port: 9093
-        volumeMounts:
-        - name: monitoring-config
-          mountPath: /etc/alertmanager
-        - name: alertmanager-data
-          mountPath: /alertmanager
-        securityContext:
-          readOnlyRootFilesystem: true
-          runAsUser: 65534
-          runAsGroup: 65534
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-      - name: grafana
-        image: {{ .grafana.image.repo }}:{{ .grafana.image.tag }}
-        imagePullPolicy: {{ .grafana.image.pullPolicy }}
-        env:
-        {{- range $k, $v := .grafana.env }}
-        - name: {{ quote $k }}
-          value: {{ quote $v }}
-        {{- end }}
-        command: ["/bin/sh", "-c"]
-        args: ["cp /dashboards/* /etc/grafana/dashboards/aptos && gunzip -f /etc/grafana/dashboards/aptos/*.json.gz && exec /run.sh"]
-        resources:
-          {{- toYaml .grafana.resources | nindent 10 }}
-        ports:
-        - containerPort: 3000
-        livenessProbe:
-          httpGet:
-            path: /api/health
-            port: 3000
-          initialDelaySeconds: 10
-        readinessProbe:
-          httpGet:
-            path: /api/health
-            port: 3000
-        volumeMounts:
-        - name: grafana-config
-          mountPath: /etc/grafana
-        - name: grafana-provisioning
-          mountPath: /etc/grafana/provisioning
-        - name: grafana-dashboards-archive
-          mountPath: /dashboards
-        - name: grafana-dashboards
-          mountPath: /etc/grafana/dashboards/aptos
-        - name: grafana-data
-          mountPath: /var/lib/grafana
-        securityContext:
-          readOnlyRootFilesystem: true
-          runAsUser: 472
-          runAsGroup: 472
-          allowPrivilegeEscalation: false
-          capabilities:
-            drop:
-            - ALL
-      {{- with .nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      securityContext:
-        runAsNonRoot: true
-        fsGroup: 65534
-      {{- end }}
-      volumes:
-      - name: grafana-config
-        configMap:
-          name: {{ include "aptos-monitoring.fullname" . }}-grafana
-          items:
-          - key: grafana.ini
-            path: grafana.ini
-      - name: grafana-provisioning
-        configMap:
-          name: {{ include "aptos-monitoring.fullname" . }}-grafana
-          items:
-          - key: prometheus.yml
-            path: datasources/prometheus.yml
-          - key: dashboards.yml
-            path: dashboards/dashboards.yml
-      - name: grafana-dashboards-archive
-        configMap:
-          name: {{ include "aptos-monitoring.fullname" . }}-dashboards
-      - name: grafana-dashboards
-        emptyDir: {}
-      - name: monitoring-config
-        configMap:
-          name: {{ include "aptos-monitoring.fullname" . }}
-      - name: prometheus-data
-        persistentVolumeClaim:
-          claimName: {{ include "aptos-monitoring.fullname" . }}-prometheus
-      - name: pushgateway-data
-        emptyDir: {}
-      - name: alertmanager-data
-        emptyDir: {}
-      - name: grafana-data
-        emptyDir: {}
-      serviceAccountName: {{ include "aptos-monitoring.fullname" . }}-prometheus
-      {{- if .Values.imagePullSecret }}
-      imagePullSecrets:
-      - name: {{.Values.imagePullSecret}}
-      {{- end }}
diff --git a/terraform/helm/monitoring/templates/serviceaccount.yaml b/terraform/helm/monitoring/templates/serviceaccount.yaml
deleted file mode 100644
index 10457a743947f..0000000000000
--- a/terraform/helm/monitoring/templates/serviceaccount.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "aptos-monitoring.serviceAccountName" . }}
-  labels:
-    {{ include "aptos-monitoring.labels" . | nindent 4 }}
-{{- end -}}
diff --git a/terraform/helm/monitoring/values.yaml b/terraform/helm/monitoring/values.yaml
deleted file mode 100644
index 2b577afdd5e51..0000000000000
--- a/terraform/helm/monitoring/values.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
-chain:
-  name:
-validator:
-  name:
-fullnode:
-  name:
-
-monitoring:
-  prometheus:
-    fullKubernetesScrape: false
-    deleteWal: false
-    tsdb_retention_time: 15d
-    tsdb_min_block_duration: 30m
-    tsdb_max_block_duration: 1h
-    remote_write:
-      enabled: false
-      url:
-      region:
-    image:
-      repo: prom/prometheus
-      tag: v2.34.0@sha256:cb42332b66ac51a05c52f255e48a4496c0a172676093123bf28b37762009e78a
-      pullPolicy: IfNotPresent
-    resources:
-      limits:
-        cpu: 1
-        memory: 1.5Gi
-      requests:
-        cpu: 1
-        memory: 1.5Gi
-    storage:
-      class:
-      size: 100Gi
-  pushgateway:
-    image:
-      repo: prom/pushgateway
-      tag: v1.4.1@sha256:b561435cb17ee816c5d90c2408bcc1ffe25304f1608e18db16a3969f6cc44626
-      pullPolicy: IfNotPresent
-    resources:
-      limits:
-        cpu: 0.1
-        memory: 128Mi
-      requests:
-        cpu: 0.1
-        memory: 128Mi
-  alertmanager:
-    alertRouteTrees:
-      - match:
-          severity: critical
-        receiver: 'critical'
-      - match:
-          severity: error
-        receiver: 'error'
-    alertReceivers:
-      - name: 'critical'
-      - name: 'error'
-      - name: 'default'
-    image:
-      repo: prom/alertmanager
-      tag: v0.24.0@sha256:b1ba90841a82ea24d79d4e6255b96025a9e89275bec0fae87d75a5959461971e
-      pullPolicy: IfNotPresent
-    resources:
-      limits:
-        cpu: 0.1
-        memory: 128Mi
-      requests:
-        cpu: 0.1
-        memory: 128Mi
-  grafana:
-    image:
-      repo: grafana/grafana
-      tag: 9.0.9@sha256:4a6b9d8d88522d2851f947f8f84cca10b6a43ca26d5e93102daf3a87935f10a5
-      pullPolicy: IfNotPresent
-    resources:
-      limits:
-        cpu: 1
-        memory: 256Mi
-      requests:
-        cpu: 1
-        memory: 256Mi
-    googleAuth:
-    config:
-    env:
-      GF_AUTH_ANONYMOUS_ENABLED: true
-      GF_AUTH_ANONYMOUS_ORG_ROLE: Editor
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  serviceAccount:
-    annotations: {}
-
-service:
-  domain:
-  external:
-    type: LoadBalancer
-  monitoring:
-    loadBalancerSourceRanges:
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name:
-  annotations:
-
-kube-state-metrics:
-  enabled: false
-  namespaceOverride: kube-system
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "8080"
-
-prometheus-node-exporter:
-  enabled: false
-  namespaceOverride: kube-system
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "9100"
diff --git a/terraform/helm/node-health-checker/.helmignore b/terraform/helm/node-health-checker/.helmignore
deleted file mode 100644
index 50af031725419..0000000000000
--- a/terraform/helm/node-health-checker/.helmignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
diff --git a/terraform/helm/node-health-checker/Chart.yaml b/terraform/helm/node-health-checker/Chart.yaml
deleted file mode 100644
index 04bfc6661914e..0000000000000
--- a/terraform/helm/node-health-checker/Chart.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-apiVersion: v2
-name: node-health-checker
-version: 0.1
-appVersion: 0.1.0
-description: Node health checker
-home: https://aptoslabs.com/
-sources:
-- https://github.com/aptos-labs/aptos-core
diff --git a/terraform/helm/node-health-checker/README.md b/terraform/helm/node-health-checker/README.md
deleted file mode 100644
index ce159edc32631..0000000000000
--- a/terraform/helm/node-health-checker/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# node-health-checker
-
-![Version: 0.1](https://img.shields.io/badge/Version-0.1-informational?style=flat-square) ![AppVersion: 0.1.0](https://img.shields.io/badge/AppVersion-0.1.0-informational?style=flat-square)
-
-Node health checker
-
-**Homepage:** <https://aptoslabs.com/>
-
-## Source Code
-
-* <https://github.com/aptos-labs/aptos-core>
-
-## Values
-
-| Key | Type | Default | Description |
-|-----|------|---------|-------------|
-| imageTag | string | `"devnet"` | Default image tag to use for all aptos images |
-| node_health_checker.affinity | object | `{}` |  |
-| node_health_checker.baseline_node_url | string | `"http://aptos-node-0-validator:8080"` | The baseline node URL for the health checker. Defaults to the validator in your deployment |
-| node_health_checker.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy to use for node-checker image |
-| node_health_checker.image.repo | string | `"aptoslabs/node-checker"` | Image repo to use for node-checker image for running load tests |
-| node_health_checker.image.tag | string | `nil` | Image tag to use for node-checker image |
-| node_health_checker.mint_key | string | `nil` | The mint key for the validator used by node health checker |
-| node_health_checker.nodeSelector | object | `{}` |  |
-| node_health_checker.resources.limits.cpu | int | `1` |  |
-| node_health_checker.resources.limits.memory | string | `"512Mi"` |  |
-| node_health_checker.resources.requests.cpu | int | `1` |  |
-| node_health_checker.resources.requests.memory | string | `"512Mi"` |  |
-| node_health_checker.tolerations | list | `[]` |  |
-| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
-| serviceAccount.name | string | `nil` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
-
-----------------------------------------------
-Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
diff --git a/terraform/helm/node-health-checker/files/nhc_baseline_fullnode.yaml b/terraform/helm/node-health-checker/files/nhc_baseline_fullnode.yaml
deleted file mode 100644
index 377469473a8c6..0000000000000
--- a/terraform/helm/node-health-checker/files/nhc_baseline_fullnode.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
-# Based on config from https://github.com/aptos-labs/aptos-core/blob/main/ecosystem/node-checker/configuration_examples/single_node_validator.yaml
----
-node_address:
-  url: {{ .Values.node_health_checker.baseline_node_url }}
-  metrics_port: 9101
-  api_port: 8080
-  noise_port: 6180
-configuration_name: ait3_registration
-configuration_name_pretty: AIT3 Registration
-chain_id: ~
-role_type: ~
-evaluators:
-  - consensus_proposals
-  - performance_tps
-  - api_latency
-  - consensus_round
-  - consensus_timeouts
-  - state_sync_version
-  - api_transaction_availability
-evaluator_args:
-  build_version_args: {}
-  consensus_proposals_args: {}
-  consensus_round_args: {}
-  consensus_timeouts_args:
-    allowed_consensus_timeouts: 0
-  latency_args:
-    num_samples: 5
-    delay_between_samples_ms: 20
-    num_allowed_errors: 1
-    max_api_latency_ms: 1000
-  network_minimum_peers_args:
-    minimum_peers_inbound: 0
-    minimum_peers_outbound: 1
-  network_peers_tolerance_args:
-    inbound_peers_tolerance: 10
-    outbound_peers_tolerance: 10
-  node_identity_args: {}
-  state_sync_version_args:
-    version_delta_tolerance: 5000
-  tps_args:
-    emit_args:
-      mempool_backlog: 5000
-      target_tps: 0
-      txn_expiration_time_secs: 30
-      duration: 10
-      invalid_tx: 0
-      transaction_type: coin_transfer
-    mint_args:
-      mint_key:
-        key: {{ .Values.node_health_checker.mint_key }}
-      mint_file: ~
-    minimum_tps: 1000
-    repeat_target_count: 1
-  transaction_availability_args:
-    transaction_fetch_delay_secs: 5
-runner_args:
-  blocking_runner_args:
-    metrics_fetch_delay_secs: 5
-    api_client_timeout_secs: 4
diff --git a/terraform/helm/node-health-checker/templates/_helpers.tpl b/terraform/helm/node-health-checker/templates/_helpers.tpl
deleted file mode 100644
index 33733f9caac78..0000000000000
--- a/terraform/helm/node-health-checker/templates/_helpers.tpl
+++ /dev/null
@@ -1,63 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "node-health-checker.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "node-health-checker.fullname" -}}
-{{- if .Values.fullnameOverride -}}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- $name := default .Chart.Name .Values.nameOverride -}}
-{{- if contains $name .Release.Name -}}
-{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
-{{- else -}}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-{{- end -}}
-{{- end -}}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "node-health-checker.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
-{{- end -}}
-
-{{/*
-Common labels
-*/}}
-{{- define "node-health-checker.labels" -}}
-helm.sh/chart: {{ include "node-health-checker.chart" . }}
-{{ include "node-health-checker.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end -}}
-
-{{/*
-Selector labels
-*/}}
-{{- define "node-health-checker.selectorLabels" -}}
-app.kubernetes.io/part-of: {{ include "node-health-checker.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end -}}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "node-health-checker.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create -}}
-    {{ default (include "node-health-checker.fullname" .) .Values.serviceAccount.name }}
-{{- else -}}
-    {{ default "default" .Values.serviceAccount.name }}
-{{- end -}}
-{{- end -}}
diff --git a/terraform/helm/node-health-checker/templates/configmap.yaml b/terraform/helm/node-health-checker/templates/configmap.yaml
deleted file mode 100644
index 8370c6c7654cd..0000000000000
--- a/terraform/helm/node-health-checker/templates/configmap.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "node-health-checker.fullname" . }}
-  labels:
-    {{- include "node-health-checker.labels" . | nindent 4 }}
-data:
-  baseline_fullnode.yaml: |-
-{{ (tpl (.Files.Get "files/nhc_baseline_fullnode.yaml") .) | indent 4 }}
diff --git a/terraform/helm/node-health-checker/templates/deployment.yaml b/terraform/helm/node-health-checker/templates/deployment.yaml
deleted file mode 100644
index 61874ce1c8c2a..0000000000000
--- a/terraform/helm/node-health-checker/templates/deployment.yaml
+++ /dev/null
@@ -1,70 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "node-health-checker.fullname" . }}
-  labels:
-    {{- include "node-health-checker.labels" . | nindent 4 }}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      {{- include "node-health-checker.selectorLabels" . | nindent 6 }}
-      app.kubernetes.io/name: node-health-checker
-  template:
-    metadata:
-      labels:
-        {{- include "node-health-checker.selectorLabels" . | nindent 8 }}
-        app.kubernetes.io/name: node-health-checker
-      annotations:
-        seccomp.security.alpha.kubernetes.io/pod: runtime/default
-    spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "node-health-checker.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
-      containers:
-        - name: node-health-checker
-          image: "{{ .Values.node_health_checker.image.repo }}:{{ .Values.node_health_checker.image.tag | default .Values.imageTag }}"
-          imagePullPolicy: {{ .Values.node_health_checker.image.pullPolicy }}
-          command: ["aptos-node-checker"]
-          args: 
-            - server
-            - run
-            - --baseline-node-config-paths
-            - /nhc/baseline_fullnode.yaml
-          ports:
-            - containerPort: 20121
-          volumeMounts:
-            - name: node-health-checker-config
-              mountPath: /nhc
-          resources:
-            {{- toYaml .Values.node_health_checker.resources | nindent 12 }}
-          securityContext:
-            allowPrivilegeEscalation: false
-            capabilities:
-              drop:
-              - ALL
-      {{- with .Values.node_health_checker.nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.node_health_checker.affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.node_health_checker.tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      securityContext:
-        runAsNonRoot: true
-        runAsUser: 6180
-        runAsGroup: 6180
-        fsGroup: 6180
-      volumes:
-        - name: node-health-checker-config
-          configMap:
-            name: {{ include "node-health-checker.fullname" . }}
diff --git a/terraform/helm/node-health-checker/templates/service.yaml b/terraform/helm/node-health-checker/templates/service.yaml
deleted file mode 100644
index a59908042a8d6..0000000000000
--- a/terraform/helm/node-health-checker/templates/service.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "node-health-checker.fullname" . }}
-  labels:
-    {{- include "node-health-checker.labels" . | nindent 4 }}
-spec:
-  selector:
-    {{- include "node-health-checker.selectorLabels" . | nindent 4 }}
-    app.kubernetes.io/name: node-health-checker
-  type: ClusterIP
-  ports:
-    - port: 20121
-      protocol: TCP
-      name: http
diff --git a/terraform/helm/node-health-checker/values.yaml b/terraform/helm/node-health-checker/values.yaml
deleted file mode 100644
index 3f23ca843bdb9..0000000000000
--- a/terraform/helm/node-health-checker/values.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-# -- Default image tag to use for all aptos images
-imageTag: devnet
-
-node_health_checker:
-  image:
-    # -- Image repo to use for node-checker image for running load tests
-    repo: aptoslabs/node-checker
-    # -- Image tag to use for node-checker image
-    tag:
-    # -- Image pull policy to use for node-checker image
-    pullPolicy: IfNotPresent
-  resources:
-    limits:
-      cpu: 1
-      memory: 512Mi
-    requests:
-      cpu: 1
-      memory: 512Mi
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  # -- The baseline node URL for the health checker. Defaults to the validator in your deployment
-  baseline_node_url: http://aptos-node-0-validator:8080
-  # -- The mint key for the validator used by node health checker
-  mint_key:
-
-serviceAccount:
-  # -- Specifies whether a service account should be created
-  create: true
-  # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template
-  name:
diff --git a/terraform/helm/pfn-addons/README.md b/terraform/helm/pfn-addons/README.md
new file mode 100644
index 0000000000000..621a161f4c86a
--- /dev/null
+++ b/terraform/helm/pfn-addons/README.md
@@ -0,0 +1,54 @@
+# pfn-addons
+
+![Version: 0.1](https://img.shields.io/badge/Version-0.1-informational?style=flat-square)
+
+Additional components for a public fullnode fleet deployment
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| ingress.acm_certificate | string | `nil` |  |
+| ingress.class | string | `"alb"` |  |
+| ingress.cookieDurationSeconds | int | `86400` |  |
+| ingress.enableStickyness | bool | `true` |  |
+| ingress.gce_managed_certificate | string | `nil` |  |
+| ingress.gce_managed_certificate_domains | string | `nil` |  |
+| ingress.gce_security_policy | string | `nil` | Security policy to apply to the backend services behind the ingress |
+| ingress.loadBalancerSourceRanges | string | `nil` |  |
+| ingress.wafAclArn | string | `nil` |  |
+| load_test.affinity | object | `{}` |  |
+| load_test.config.duration | int | `300` | How long to emit transactions for |
+| load_test.config.expected_max_txns | int | `6000000` | Default 20k * $duration |
+| load_test.config.max_transactions_per_account | int | `5` |  |
+| load_test.config.mempool_backlog | int | `5000` | Number of transactions outstanding in mempool |
+| load_test.config.mint_key | string | `nil` | The private key used to mint to fund load test |
+| load_test.config.numFullnodeGroups | string | `nil` | The number of fullnode groups to run traffic against |
+| load_test.config.target_tps | int | `0` | Whether to target a constant TPS, or 0 if not used. Cannot be used with mempool_backlog. |
+| load_test.config.transaction_type | string | `"coin-transfer"` |  |
+| load_test.config.txn_expiration_time_secs | int | `30` | How long to wait for transactions to be expired |
+| load_test.config.use_pfns | bool | `true` | If true, run $numFullnodeGroups parallel load tests |
+| load_test.config.use_validators | bool | `false` | Whether to submit transactions through validator REST API |
+| load_test.enabled | bool | `false` | Whether to enable the load test CronJob |
+| load_test.fullnode | object | `{"groups":[{"name":"fullnode"}]}` | The fullnode groups to target |
+| load_test.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy to use for tools image |
+| load_test.image.repo | string | `"aptoslabs/tools"` | Image repo to use for tools image for running load tests |
+| load_test.image.tag | string | `nil` | Image tag to use for tools image |
+| load_test.intervalMins | int | `15` | How many minutes between load test runs |
+| load_test.nodeSelector | object | `{}` |  |
+| load_test.resources.limits.cpu | int | `1` |  |
+| load_test.resources.limits.memory | string | `"512Mi"` |  |
+| load_test.resources.requests.cpu | int | `1` |  |
+| load_test.resources.requests.memory | string | `"512Mi"` |  |
+| load_test.tolerations | list | `[]` |  |
+| service.aws_tags | string | `nil` |  |
+| service.domain | string | `nil` |  |
+| service.enableOnchainDiscovery | bool | `false` |  |
+| service.loadBalancerSourceRanges | string | `nil` |  |
+| service.sessionAffinity | string | `nil` |  |
+| serviceAccount.annotations | string | `nil` |  |
+| serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
+| serviceAccount.name | string | `nil` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
+
+----------------------------------------------
+Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0)
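
As a rough sketch, the load test CronJob documented above can be enabled through value overrides; the release name, namespace, and mint key below are placeholders, not values taken from this patch:

    # Enable the pfn-addons load test CronJob (hypothetical release and namespace).
    helm upgrade --install pfn-addons ./terraform/helm/pfn-addons \
      --namespace default \
      --set load_test.enabled=true \
      --set load_test.config.mint_key="$MINT_KEY" \
      --set load_test.intervalMins=15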
diff --git a/terraform/helm/pfn-addons/templates/ingress.yaml b/terraform/helm/pfn-addons/templates/ingress.yaml
index d45300600d23b..2beceb6a22626 100644
--- a/terraform/helm/pfn-addons/templates/ingress.yaml
+++ b/terraform/helm/pfn-addons/templates/ingress.yaml
@@ -34,7 +34,7 @@ metadata:
     {{ if eq .Values.ingress.class "gce" }}
     # kubernetes.io/ingress.global-static-ip-name: config.ingressConfig.staticIpName # may not be necessary
     {{- if .Values.ingress.gce_managed_certificate }}
-    kubernetes.io/ingress.allow-http: "false"
+    kubernetes.io/ingress.allow-http: "true"
     networking.gke.io/managed-certificates: {{ .Values.ingress.gce_managed_certificate }}
     networking.gke.io/v1beta1.FrontendConfig: ssl-redirect
     {{- end }}
diff --git a/terraform/helm/testnet-addons/templates/loadtest.yaml b/terraform/helm/pfn-addons/templates/loadtest.yaml
similarity index 91%
rename from terraform/helm/testnet-addons/templates/loadtest.yaml
rename to terraform/helm/pfn-addons/templates/loadtest.yaml
index c1a61feca8f44..e5a6048aa197a 100644
--- a/terraform/helm/testnet-addons/templates/loadtest.yaml
+++ b/terraform/helm/pfn-addons/templates/loadtest.yaml
@@ -2,9 +2,9 @@
 apiVersion: batch/v1
 kind: CronJob
 metadata:
-  name: {{ include "testnet-addons.fullname" . }}-load-test
+  name: {{ include "pfn-addons.fullname" . }}-load-test
   labels:
-    {{- include "testnet-addons.labels" . | nindent 4 }}
+    {{- include "pfn-addons.labels" . | nindent 4 }}
     app.kubernetes.io/name: load-test
 spec:
   concurrencyPolicy: Replace
@@ -14,11 +14,11 @@ spec:
       template:
         metadata:
           labels:
-            {{- include "testnet-addons.selectorLabels" . | nindent 12 }}
+            {{- include "pfn-addons.selectorLabels" . | nindent 12 }}
             app.kubernetes.io/name: load-test
         spec:
           restartPolicy: Never
-          priorityClassName: {{ include "testnet-addons.fullname" . }}-high
+          priorityClassName: {{ include "pfn-addons.fullname" . }}-high
           containers:
           - name: load-test
             image: {{ .Values.load_test.image.repo }}:{{ .Values.load_test.image.tag | default .Values.imageTag }}
@@ -27,7 +27,7 @@ spec:
             - aptos-transaction-emitter
             - emit-tx
             - --mint-key={{ .Values.load_test.config.mint_key }}
-            - --chain-id={{ .Values.genesis.chain_id }}
+            - --chain-id={{ .Values.load_test.config.chain_id }}
             # Build targets args for internal cluster targets
             {{- $numTargets := 0 }}
             {{- $targetSuffix := "" }}
@@ -115,7 +115,7 @@ spec:
             # - name: net.ipv4.tcp_tw_reuse
             #   value: "1"
           {{- end }}
-          serviceAccountName: {{ include "testnet-addons.serviceAccountName" . }}
+          serviceAccountName: {{ include "pfn-addons.serviceAccountName" . }}
           {{- if .Values.imagePullSecret }}
           imagePullSecrets:
           - name: {{.Values.imagePullSecret}}
diff --git a/terraform/helm/pfn-addons/templates/service.yaml b/terraform/helm/pfn-addons/templates/service.yaml
index eee24370f4cb4..4f555ad9f4706 100644
--- a/terraform/helm/pfn-addons/templates/service.yaml
+++ b/terraform/helm/pfn-addons/templates/service.yaml
@@ -5,7 +5,16 @@ metadata:
   labels:
     {{- include "pfn-addons.labels" . | nindent 4 }}
   annotations:
+    {{- if eq .Values.ingress.class "alb" }}
     alb.ingress.kubernetes.io/healthcheck-path: /v1/-/healthy
+    {{- end }}
+    {{- if eq .Values.ingress.class "gce" }}
+    {{- if .Values.ingress.backend_http2 }}
+    cloud.google.com/app-protocols: '{"default": "HTTP2"}'
+    {{- end }}
+    cloud.google.com/backend-config: '{"default":"{{ include "pfn-addons.fullname" . }}"}'
+    cloud.google.com/neg: '{"ingress": true}'
+    {{- end }}
 spec:
   selector:
     app.kubernetes.io/part-of: aptos-fullnode
@@ -15,3 +24,30 @@ spec:
     targetPort: 8080
   type: NodePort
   externalTrafficPolicy: Local
+---
+{{- if eq .Values.ingress.class "gce" }}
+apiVersion: cloud.google.com/v1
+kind: BackendConfig
+metadata:
+  name: {{ include "pfn-addons.fullname" . }}
+spec:
+  {{- if .Values.ingress.gce_security_policy }}
+  securityPolicy:
+    name: {{ .Values.ingress.gce_security_policy }}
+  {{- end }}
+  healthCheck:
+    checkIntervalSec: 30
+    timeoutSec: 5
+    healthyThreshold: 1
+    unhealthyThreshold: 2
+    type: HTTP
+    requestPath: /v1/-/healthy
+    # container targetPort
+    port: 8080
+  {{- if .Values.ingress.enableStickyness }}
+  sessionAffinity:
+    affinityType: "GENERATED_COOKIE"
+    affinityCookieTtlSec: {{ .Values.ingress.cookieDurationSeconds }}
+  {{- end }}
+---
+{{- end }}
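
To sanity-check the conditional rendering above, a local template pass works; the policy name is a placeholder, and only the "gce" ingress class should emit the BackendConfig:

    # Render locally and confirm BackendConfig appears only for ingress.class=gce.
    helm template pfn-addons ./terraform/helm/pfn-addons \
      --set ingress.class=gce \
      --set ingress.gce_security_policy=my-policy \
      | grep -A 4 'kind: BackendConfig'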
diff --git a/terraform/helm/node-health-checker/templates/serviceaccount.yaml b/terraform/helm/pfn-addons/templates/serviceaccount.yaml
similarity index 60%
rename from terraform/helm/node-health-checker/templates/serviceaccount.yaml
rename to terraform/helm/pfn-addons/templates/serviceaccount.yaml
index debfd370f2811..dae86c014e77a 100644
--- a/terraform/helm/node-health-checker/templates/serviceaccount.yaml
+++ b/terraform/helm/pfn-addons/templates/serviceaccount.yaml
@@ -2,9 +2,9 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: {{ include "node-health-checker.serviceAccountName" . }}
+  name: {{ include "pfn-addons.serviceAccountName" . }}
   labels:
-{{ include "node-health-checker.labels" . | nindent 4 }}
+{{ include "pfn-addons.labels" . | nindent 4 }}
   annotations:
     {{- toYaml .Values.serviceAccount.annotations | nindent 4 }}
 {{- end -}}
diff --git a/terraform/helm/pfn-addons/values.yaml b/terraform/helm/pfn-addons/values.yaml
index 14a6d0d6e4d03..7aa23fe82899d 100644
--- a/terraform/helm/pfn-addons/values.yaml
+++ b/terraform/helm/pfn-addons/values.yaml
@@ -7,6 +7,13 @@ service:
   domain:
   aws_tags:
 
+serviceAccount:
+  # -- Specifies whether a service account should be created
+  create: true
+  # -- The name of the service account to use. If not set and create is true, a name is generated using the fullname template
+  name:
+  annotations:
+
 ingress:
   class: alb
   # the below only work for alb ingress
@@ -18,3 +25,55 @@ ingress:
   # the below only work for gce ingress
   gce_managed_certificate:
   gce_managed_certificate_domains:
+  # -- Security policy to apply to the backend services behind the ingress
+  gce_security_policy:
+  # -- Enable HTTP/2 on the backend shards
+  backend_http2: false
+
+load_test:
+  # -- Whether to enable the load test CronJob
+  enabled: false
+  image:
+    # -- Image repo to use for tools image for running load tests
+    repo: aptoslabs/tools
+    # -- Image tag to use for tools image
+    tag:
+    # -- Image pull policy to use for tools image
+    pullPolicy: IfNotPresent
+  resources:
+    limits:
+      cpu: 4
+      memory: 4Gi
+    requests:
+      cpu: 4
+      memory: 4Gi
+  nodeSelector: {}
+  tolerations: []
+  affinity: {}
+  # -- How many minutes between load test runs
+  intervalMins: 15
+  # -- The fullnode groups to target
+  fullnode:
+    groups:
+      - name: fullnode
+  config:
+    # -- The number of fullnode groups to run traffic against
+    numFullnodeGroups:
+    # -- The private key used to mint to fund load test
+    mint_key:
+    # -- Number of transactions outstanding in mempool
+    mempool_backlog: 5000
+    # -- Whether to target a constant TPS, or 0 if not used. Cannot be used with mempool_backlog.
+    target_tps: 0
+    # -- How long to emit transactions for
+    duration: 300
+    # -- How long to wait for transactions to be expired
+    txn_expiration_time_secs: 30
+    # -- Whether to submit transactions through validator REST API
+    use_validators: false
+    # -- If true, run $numFullnodeGroups parallel load tests
+    use_pfns: true
+    # -- Default 20k * $duration
+    expected_max_txns: 6000000
+    max_transactions_per_account: 5
+    transaction_type: coin-transfer
diff --git a/terraform/helm/testnet-addons/Chart.yaml b/terraform/helm/testnet-addons/Chart.yaml
index 4aa559546d831..ee84f7ce7decd 100644
--- a/terraform/helm/testnet-addons/Chart.yaml
+++ b/terraform/helm/testnet-addons/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: testnet-addons
-version: 0.1
+version: "0.1"
 appVersion: 0.1.0
 description: Additional components for aptos-nodes testnet
 home: https://aptoslabs.com/
 sources:
-- https://github.com/aptos-labs/aptos-core
+  - https://github.com/aptos-labs/aptos-core
diff --git a/terraform/helm/testnet-addons/README.md b/terraform/helm/testnet-addons/README.md
index 2e898549d11a2..242b66fbc2702 100644
--- a/terraform/helm/testnet-addons/README.md
+++ b/terraform/helm/testnet-addons/README.md
@@ -23,29 +23,10 @@ Additional components for aptos-nodes testnet
 | ingress.cookieDurationSeconds | int | `86400` | If stickiness is enabled, how long the session cookie should last |
 | ingress.enableStickyness | bool | `true` | Whether to enable session stickiness on the underlying load balancer |
 | ingress.gce_managed_certificate | string | `nil` | The GCE certificate to install on the ingress |
+| ingress.gce_security_policy | string | `nil` | Security policy to apply to the backend services behind the ingress |
 | ingress.gce_static_ip | string | `nil` | The GCE static IP to install on the ingress |
 | ingress.loadBalancerSourceRanges | string | `nil` | List of CIDRs to accept traffic from |
 | ingress.wafAclArn | string | `nil` | The ARN of the WAF ACL to install on the ingress |
-| load_test.affinity | object | `{}` |  |
-| load_test.config.duration | int | `300` | How long to emit transactions for |
-| load_test.config.mempool_backlog | int | `5000` | Number of transactions outstanding in mempool |
-| load_test.config.mint_key | string | `nil` | The private key used to mint to fund load test |
-| load_test.config.numFullnodeGroups | string | `nil` | The number of fullnode groups to run traffic against |
-| load_test.config.target_tps | int | `0` | Whether to target a constant TPS, or 0 if not used. Cannot be used with mempool_backlog. |
-| load_test.config.txn_expiration_time_secs | int | `30` | How long to wait for transactions to be expired |
-| load_test.config.use_validators | bool | `false` | Whether to submit transactions through validator REST API |
-| load_test.enabled | bool | `false` | Whether to enable the load test CronJob |
-| load_test.fullnode | object | `{"groups":[{"name":"fullnode"}]}` | The fullnode groups to target |
-| load_test.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy to use for tools image |
-| load_test.image.repo | string | `"aptoslabs/tools"` | Image repo to use for tools image for running load tests |
-| load_test.image.tag | string | `nil` | Image tag to use for tools image |
-| load_test.intervalMins | int | `15` | How many minutes between load test runs |
-| load_test.nodeSelector | object | `{}` |  |
-| load_test.resources.limits.cpu | int | `1` |  |
-| load_test.resources.limits.memory | string | `"512Mi"` |  |
-| load_test.resources.requests.cpu | int | `1` |  |
-| load_test.resources.requests.memory | string | `"512Mi"` |  |
-| load_test.tolerations | list | `[]` |  |
 | service.domain | string | `nil` | If set, the base domain name to use for External DNS |
 | serviceAccount.create | bool | `true` | Specifies whether a service account should be created |
 | serviceAccount.name | string | `nil` | The name of the service account to use. If not set and create is true, a name is generated using the fullname template |
diff --git a/terraform/helm/testnet-addons/templates/ingress.yaml b/terraform/helm/testnet-addons/templates/ingress.yaml
index 632a9fc2fe545..46e6113d407af 100644
--- a/terraform/helm/testnet-addons/templates/ingress.yaml
+++ b/terraform/helm/testnet-addons/templates/ingress.yaml
@@ -36,23 +36,13 @@ metadata:
     # Allow HTTP but always return 301 because we have redirectToHttps enabled
     kubernetes.io/ingress.allow-http: "true"
     kubernetes.io/ingress.global-static-ip-name: {{ .Values.ingress.gce_static_ip }}
+    {{- if .Values.ingress.gce_managed_certificate }}
     networking.gke.io/managed-certificates: {{ .Values.ingress.gce_managed_certificate }}
     networking.gke.io/v1beta1.FrontendConfig: {{ include "testnet-addons.fullname" . }}
+    {{- end }} # ingress.gce_managed_certificate
     {{- end }} # "GKE"
 spec:
   rules:
-  {{- if .Values.service.domain }}
-  - host: api.{{ .Values.service.domain }}
-    http:
-      paths:
-      - path: /
-        pathType: Prefix
-        backend:
-          service:
-            name: {{ include "testnet-addons.fullname" . }}-api
-            port:
-              number: 80
-  {{- end }}
   - host: {{ .Values.service.domain }}
     http:
       paths:
@@ -78,7 +68,7 @@ spec:
             port:
               number: 80
 ---
-{{- if eq .Values.cloud "GKE" }}
+{{- if .Values.ingress.gce_managed_certificate }}
 apiVersion: networking.gke.io/v1beta1
 kind: FrontendConfig
 metadata:
@@ -94,6 +84,5 @@ metadata:
 spec:
   domains:
     - {{ .Values.service.domain }}
-    - api.{{ .Values.service.domain }}
 ---
 {{- end }}
diff --git a/terraform/helm/testnet-addons/templates/service.yaml b/terraform/helm/testnet-addons/templates/service.yaml
index 74416c999ce53..a300e915f736f 100644
--- a/terraform/helm/testnet-addons/templates/service.yaml
+++ b/terraform/helm/testnet-addons/templates/service.yaml
@@ -30,6 +30,10 @@ metadata:
   name: {{ include "testnet-addons.fullname" . }}-api
   namespace: default
 spec:
+  {{- if .Values.ingress.gce_security_policy }}
+  securityPolicy:
+    name: {{ .Values.ingress.gce_security_policy }}
+  {{- end }}
   healthCheck:
     checkIntervalSec: 30
     timeoutSec: 5
diff --git a/terraform/helm/testnet-addons/templates/waypoint.yaml b/terraform/helm/testnet-addons/templates/waypoint.yaml
index 154fa3f986ed5..792f0ad1a3b9b 100644
--- a/terraform/helm/testnet-addons/templates/waypoint.yaml
+++ b/terraform/helm/testnet-addons/templates/waypoint.yaml
@@ -29,6 +29,10 @@ metadata:
   name: {{ include "testnet-addons.fullname" . }}-waypoint
   namespace: default
 spec:
+  {{- if .Values.ingress.gce_security_policy }}
+  securityPolicy:
+    name: {{ .Values.ingress.gce_security_policy }}
+  {{- end }}
   healthCheck:
     checkIntervalSec: 30
     timeoutSec: 5
diff --git a/terraform/helm/testnet-addons/values.yaml b/terraform/helm/testnet-addons/values.yaml
index bd5d5b9900935..93800823784bb 100644
--- a/terraform/helm/testnet-addons/values.yaml
+++ b/terraform/helm/testnet-addons/values.yaml
@@ -28,54 +28,6 @@ waypoint:
       cpu: 200m
       memory: 512Mi
 
-load_test:
-  # -- Whether to enable the load test CronJob
-  enabled: false
-  image:
-    # -- Image repo to use for tools image for running load tests
-    repo: aptoslabs/tools
-    # -- Image tag to use for tools image
-    tag:
-    # -- Image pull policy to use for tools image
-    pullPolicy: IfNotPresent
-  resources:
-    limits:
-      cpu: 1
-      memory: 512Mi
-    requests:
-      cpu: 1
-      memory: 512Mi
-  nodeSelector: {}
-  tolerations: []
-  affinity: {}
-  # -- How many minutes between load test runs
-  intervalMins: 15
-  # -- The fullnode groups to target
-  fullnode:
-    groups:
-      - name: fullnode
-  config:
-    # -- The number of fullnode groups to run traffic against
-    numFullnodeGroups:
-    # -- The private key used to mint to fund load test
-    mint_key:
-    # -- Number of transactions outstanding in mempool
-    mempool_backlog: 5000
-    # -- Whether to target a constant TPS, or 0 if not used. Cannot be used with mempool_backlog.
-    target_tps: 0
-    # -- How long to emit transactions for
-    duration: 300
-    # -- How long to wait for transactions to be expired
-    txn_expiration_time_secs: 30
-    # -- Whether to submit transactions through validator REST API
-    use_validators: false
-    # -- If true, run $numFullnodeGroups parallel load tests
-    use_pfns: true
-    # -- Default 20k * $duration
-    expected_max_txns: 6000000
-    max_transactions_per_account: 5
-    transaction_type: coin-transfer
-
 serviceAccount:
   # -- Specifies whether a service account should be created
   create: true
@@ -93,6 +45,8 @@ ingress:
   gce_static_ip:
   # -- The GCE certificate to install on the ingress
   gce_managed_certificate:
+  # -- Security policy to apply to the backend services behind the ingress
+  gce_security_policy:
   # -- The ARN of the WAF ACL to install on the ingress
   wafAclArn:
   # -- List of CIDRs to accept traffic from
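
A minimal sketch of wiring an existing Cloud Armor policy through the new `ingress.gce_security_policy` value (the policy name below is a placeholder):

    # Attach a pre-created Cloud Armor policy to the testnet-addons backends.
    helm upgrade testnet-addons ./terraform/helm/testnet-addons \
      --reuse-values \
      --set ingress.gce_security_policy=my-cloud-armor-policy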
diff --git a/terraform/helm/vector-log-agent/Chart.yaml b/terraform/helm/vector-log-agent/Chart.yaml
index d06993717e7e5..2fae2ad2f9393 100644
--- a/terraform/helm/vector-log-agent/Chart.yaml
+++ b/terraform/helm/vector-log-agent/Chart.yaml
@@ -1,3 +1,3 @@
 apiVersion: v2
 name: aptos-vector-log-agent
-version: 0.1.0
+version: 0.2.0
diff --git a/terraform/helm/vector-log-agent/files/vector-config.yaml b/terraform/helm/vector-log-agent/files/vector-config.yaml
index 4f1e580bc8b49..a7374a4ed994d 100644
--- a/terraform/helm/vector-log-agent/files/vector-config.yaml
+++ b/terraform/helm/vector-log-agent/files/vector-config.yaml
@@ -1,6 +1,5 @@
 data_dir: /vector-data-dir
-# TODO: change this to expire_metrics_sec after vector 0.25 has been released.
-expire_metrics: { secs: 1800, nanos: 0 } # expire metrics when no sample has been received after 30 minutes
+expire_metrics_secs: 1800 # expire metrics when no sample has been received after 30 minutes
 api:
   enabled: true
   address: "127.0.0.1:8686"
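
The rename tracks the `expire_metrics_secs` key that replaced `expire_metrics` in Vector 0.25+. A quick validation sketch against the pinned image; the concrete tag is illustrative, and validating this file in isolation may still complain about sources/sinks defined in the other config files:

    # Validate the config with the same Vector major version the chart pins.
    docker run --rm \
      -v "$PWD/terraform/helm/vector-log-agent/files:/etc/vector:ro" \
      timberio/vector:0.34.0-distroless-libc \
      validate --no-environment /etc/vector/vector-config.yaml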
diff --git a/terraform/helm/vector-log-agent/files/vector-transforms.yaml b/terraform/helm/vector-log-agent/files/vector-transforms.yaml
index 3f4b5185eea39..69b42e84934f9 100644
--- a/terraform/helm/vector-log-agent/files/vector-transforms.yaml
+++ b/terraform/helm/vector-log-agent/files/vector-transforms.yaml
@@ -36,7 +36,6 @@ transforms:
       del(.k8s.annotations."kubectl.kubernetes.io/last-applied-configuration")
       del(.k8s.annotations."seccomp.security.alpha.kubernetes.io/pod")
       del(.k8s.annotations."checksum/validator.yaml")
-      del(.k8s.annotations."kubernetes.io/psp")
 
       del(.k8s.labels."app.kubernetes.io/managed-by")
       del(.k8s.labels."app.kubernetes.io/part-of")
@@ -75,7 +74,7 @@ transforms:
       if !exists(.message) && exists(.msg) {
         .message = del(.msg)
       }
-      parsed_timestamp, err = to_timestamp(.timestamp)
+      parsed_timestamp, err = parse_timestamp(.timestamp, "%+") # parse as ISO 8601 / RFC 3339 according to https://github.com/vectordotdev/vrl/blob/650547870a16c66dcfab01ec382cfdc23415d85b/lib/core/src/conversion.rs#L249C6-L249C8
       if err == null {
         .timestamp = parsed_timestamp
       }
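
The swap from `to_timestamp` to `parse_timestamp` can be exercised in the VRL REPL that ships with the vector binary; the sample event below is illustrative:

    # "vector vrl" with no arguments starts an interactive VRL REPL.
    vector vrl
    # At the prompt:
    #   .timestamp = "2022-07-24T03:39:54.744745Z"
    #   parse_timestamp(.timestamp, "%+")   # returns a timestamp; err is null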
diff --git a/terraform/helm/vector-log-agent/testing/test1.json b/terraform/helm/vector-log-agent/testing/test1.json
index 5732b04282174..4aa13039a63bc 100644
--- a/terraform/helm/vector-log-agent/testing/test1.json
+++ b/terraform/helm/vector-log-agent/testing/test1.json
@@ -26,7 +26,6 @@
     "annotations": {
       "kubectl.kubernetes.io/last-applied-configuration": "{\"some_very_long_json\":\"foo_bar\"}",
       "checksum/validator.yaml": "8430318f1be488c63b67a5041443bd9b70be34179068a89b42f52a1118f850e2",
-      "kubernetes.io/psp": "aptos-node",
       "seccomp.security.alpha.kubernetes.io/pod": "runtime/default"
     },
     "pod_ip": "192.168.130.132",
@@ -48,4 +47,4 @@
   "message": "2022-07-24T03:39:54.744745Z [consensus] INFO consensus/src/round_manager.rs:314 Local state SyncInfo[certified_round: 966, ordered_round: 965, timeout round: 0, commit_info: BlockInfo: [epoch: 2, round: 963, id: 6d4833ab, executed_state_id: 81472687, version: 1380763, timestamp (us): 1658633992630447, next_epoch_state: None]], remote state SyncInfo[certified_round: 967, ordered_round: 966, timeout round: 0, commit_info: BlockInfo: [epoch: 2, round: 963, id: 6d4833ab, executed_state_id: 81472687, version: 1380763, timestamp (us): 1658633992630447, next_epoch_state: None]] {\"epoch\":2,\"event\":\"ReceiveNewCertificate\",\"remote_peer\":\"065f70c398566ebd3b806cbd11f7e86dd8e39d9616f2bb45a1bda1a0748c7c88\",\"round\":967}",
   "source_type": "kubernetes_logs",
   "stream": "stderr"
-}
\ No newline at end of file
+}
diff --git a/terraform/helm/vector-log-agent/values.yaml b/terraform/helm/vector-log-agent/values.yaml
index 9e330245c7ef8..9b5db1308a616 100644
--- a/terraform/helm/vector-log-agent/values.yaml
+++ b/terraform/helm/vector-log-agent/values.yaml
@@ -1,7 +1,7 @@
 image:
   repository: timberio/vector
   pullPolicy: IfNotPresent
-  tag: "0.25.X-distroless-libc"
+  tag: "0.34.X-distroless-libc"
 
 
 # -- Choose any (you can choose multiple) logging sinks supported by vector as found here https://vector.dev/docs/reference/configuration/sinks/
diff --git a/terraform/modules/eks/cluster.tf b/terraform/modules/eks/cluster.tf
index 0657f3120abca..ea6d2f864fa00 100644
--- a/terraform/modules/eks/cluster.tf
+++ b/terraform/modules/eks/cluster.tf
@@ -12,7 +12,7 @@ resource "aws_eks_cluster" "aptos" {
   tags                      = local.default_tags
 
   vpc_config {
-    subnet_ids              = concat(aws_subnet.public.*.id, aws_subnet.private.*.id)
+    subnet_ids              = concat(aws_subnet.public[*].id, aws_subnet.private[*].id)
     public_access_cidrs     = var.k8s_api_sources
     endpoint_private_access = true
     security_group_ids      = [aws_security_group.cluster.id]
@@ -218,11 +218,17 @@ data "aws_iam_policy_document" "cluster-autoscaler" {
   statement {
     sid = "DescribeAutoscaling"
     actions = [
-      "autoscaling:DescribeAutoScalingInstances",
+      "autoscaling:DescribeLaunchConfigurations",
       "autoscaling:DescribeAutoScalingGroups",
-      "ec2:DescribeLaunchTemplateVersions",
+      "autoscaling:DescribeAutoScalingInstances",
+      "autoscaling:DescribeLaunchConfigurations",
+      "autoscaling:DescribeScalingActivities",
       "autoscaling:DescribeTags",
-      "autoscaling:DescribeLaunchConfigurations"
+      "ec2:DescribeInstanceTypes",
+      "ec2:DescribeLaunchTemplateVersions",
+      "ec2:DescribeImages",
+      "ec2:GetInstanceTypesFromInstanceRequirements",
+      "eks:DescribeNodegroup"
     ]
     resources = ["*"]
   }
@@ -275,4 +281,4 @@ resource "helm_release" "autoscaling" {
     name  = "chart_sha1"
     value = sha1(join("", [for f in fileset(local.autoscaling_helm_chart_path, "**") : filesha1("${local.autoscaling_helm_chart_path}/${f}")]))
   }
-}
\ No newline at end of file
+}
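
The added `ec2:Describe*`/`eks:DescribeNodegroup` actions match what recent cluster-autoscaler releases request. A sketch for double-checking what ended up attached after apply (the role name is a placeholder):

    # Inspect the autoscaler role's policies (names illustrative).
    aws iam list-attached-role-policies --role-name aptos-cluster-autoscaler
    aws iam list-role-policies --role-name aptos-cluster-autoscaler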
diff --git a/terraform/modules/eks/kubernetes.tf b/terraform/modules/eks/kubernetes.tf
index 8a7d7f257c012..5de01b8e2875f 100644
--- a/terraform/modules/eks/kubernetes.tf
+++ b/terraform/modules/eks/kubernetes.tf
@@ -1,6 +1,6 @@
 provider "kubernetes" {
   host                   = aws_eks_cluster.aptos.endpoint
-  cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority.0.data)
+  cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority[0].data)
   token                  = data.aws_eks_cluster_auth.aptos.token
 }
 
@@ -72,71 +72,18 @@ resource "kubernetes_storage_class" "gp2" {
   depends_on = [null_resource.delete-gp2]
 }
 
-# FIXME: Remove when migrating to K8s 1.25
-resource "kubernetes_role_binding" "psp-kube-system" {
-  metadata {
-    name      = "eks:podsecuritypolicy:privileged"
-    namespace = "kube-system"
-  }
-
-  role_ref {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "ClusterRole"
-    name      = "eks:podsecuritypolicy:privileged"
-  }
-
-  subject {
-    api_group = "rbac.authorization.k8s.io"
-    kind      = "Group"
-    name      = "system:serviceaccounts:kube-system"
-  }
-}
-
 locals {
   kubeconfig = "/tmp/kube.config.${md5(timestamp())}"
 }
 
-# FIXME: Remove when migrating to K8s 1.25
-resource "null_resource" "delete-psp-authenticated" {
-  provisioner "local-exec" {
-    command = <<-EOT
-      aws --region ${var.region} eks update-kubeconfig --name ${aws_eks_cluster.aptos.name} --kubeconfig ${local.kubeconfig} &&
-      kubectl --kubeconfig ${local.kubeconfig} delete --ignore-not-found clusterrolebinding eks:podsecuritypolicy:authenticated
-    EOT
-  }
-
-  depends_on = [kubernetes_role_binding.psp-kube-system]
-}
-
 provider "helm" {
   kubernetes {
     host                   = aws_eks_cluster.aptos.endpoint
-    cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority.0.data)
+    cluster_ca_certificate = base64decode(aws_eks_cluster.aptos.certificate_authority[0].data)
     token                  = data.aws_eks_cluster_auth.aptos.token
   }
 }
 
-resource "kubernetes_namespace" "tigera-operator" {
-  metadata {
-    annotations = {
-      name = "tigera-operator"
-    }
-
-    name = "tigera-operator"
-  }
-}
-
-resource "helm_release" "calico" {
-  name       = "calico"
-  repository = "https://docs.projectcalico.org/charts"
-  chart      = "tigera-operator"
-  version    = "3.23.3"
-  namespace  = "tigera-operator"
-  depends_on = [
-    kubernetes_namespace.tigera-operator
-  ]
-}
-
 resource "kubernetes_cluster_role" "debug" {
   metadata {
     name = "debug"
@@ -247,7 +194,7 @@ resource "local_file" "kubernetes" {
   filename = "${local.workspace_name}-kubernetes.json"
   content = jsonencode({
     kubernetes_host        = aws_eks_cluster.aptos.endpoint
-    kubernetes_ca_cert     = base64decode(aws_eks_cluster.aptos.certificate_authority.0.data)
+    kubernetes_ca_cert     = base64decode(aws_eks_cluster.aptos.certificate_authority[0].data)
     issuer                 = aws_eks_cluster.aptos.identity[0].oidc[0].issuer
     service_account_prefix = "aptos-pfn"
     pod_cidrs              = aws_subnet.private[*].cidr_block
diff --git a/terraform/modules/eks/network.tf b/terraform/modules/eks/network.tf
index 89983b601f27a..e42f1218aa8da 100644
--- a/terraform/modules/eks/network.tf
+++ b/terraform/modules/eks/network.tf
@@ -45,7 +45,7 @@ resource "aws_route_table" "public" {
 
 resource "aws_route_table_association" "public" {
   count          = length(local.aws_availability_zones)
-  subnet_id      = element(aws_subnet.public.*.id, count.index)
+  subnet_id      = element(aws_subnet.public[*].id, count.index)
   route_table_id = aws_route_table.public.id
 }
 
@@ -91,7 +91,7 @@ resource "aws_route_table" "private" {
 
 resource "aws_route_table_association" "private" {
   count          = length(local.aws_availability_zones)
-  subnet_id      = element(aws_subnet.private.*.id, count.index)
+  subnet_id      = element(aws_subnet.private[*].id, count.index)
   route_table_id = aws_route_table.private.id
 }
 
diff --git a/terraform/modules/eks/variables.tf b/terraform/modules/eks/variables.tf
index 32572bf2b95bd..cd6ee985b67c0 100644
--- a/terraform/modules/eks/variables.tf
+++ b/terraform/modules/eks/variables.tf
@@ -5,7 +5,8 @@ variable "region" {
 
 variable "kubernetes_version" {
   description = "Version of Kubernetes to use for EKS cluster"
-  default     = "1.22"
+  type        = string
+  default     = "1.27"
 }
 
 variable "eks_cluster_name" {
@@ -15,6 +16,7 @@ variable "eks_cluster_name" {
 
 variable "k8s_api_sources" {
   description = "List of CIDR subnets which can access the Kubernetes API endpoint"
+  type        = list(string)
   default     = ["0.0.0.0/0"]
 }
 
@@ -55,32 +57,37 @@ variable "k8s_debugger_roles" {
 }
 
 variable "iam_path" {
-  default     = "/"
   description = "Path to use when naming IAM objects"
+  type        = string
+  default     = "/"
 }
 
 variable "permissions_boundary_policy" {
-  default     = ""
   description = "ARN of IAM policy to set as permissions boundary on created roles"
+  type        = string
 }
 
 variable "vpc_cidr_block" {
-  default     = "192.168.0.0/16"
   description = "VPC CIDR Block"
+  type        = string
+  default     = "192.168.0.0/16"
 }
 
 variable "utility_instance_type" {
   description = "Instance type used for utilities"
+  type        = string
   default     = "t3.medium"
 }
 
 variable "fullnode_instance_type" {
   description = "Instance type used for validator and fullnodes"
+  type        = string
   default     = "c6i.8xlarge"
 }
 
 variable "num_fullnodes" {
   description = "Number of fullnodes to deploy"
+  type        = number
   default     = 1
 }
 
@@ -92,10 +99,12 @@ variable "node_pool_sizes" {
 
 variable "workspace_name_override" {
   description = "If specified, overrides the usage of Terraform workspace for naming purposes"
+  type        = string
   default     = ""
 }
 
 variable "num_extra_instance" {
-  default     = 0
   description = "Number of extra instances to add into node pool"
+  type        = number
+  default     = 0
 }
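
With explicit types declared, malformed inputs now fail at plan time instead of deep inside a module; note that `permissions_boundary_policy` loses its empty default and must be supplied. A sketch, with illustrative values and the remaining required variables left to interactive prompts:

    # Typed variables are validated up front during plan (run from the module root).
    terraform plan \
      -var 'kubernetes_version=1.27' \
      -var 'num_fullnodes=2' \
      -var 'k8s_api_sources=["10.0.0.0/8"]' \
      -var 'permissions_boundary_policy=arn:aws:iam::123456789012:policy/boundary'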
diff --git a/terraform/modules/eks/versions.tf b/terraform/modules/eks/versions.tf
index a2b7631af994b..db37affd89991 100644
--- a/terraform/modules/eks/versions.tf
+++ b/terraform/modules/eks/versions.tf
@@ -1,5 +1,5 @@
 terraform {
-  required_version = "~> 1.3.6"
+  required_version = "~> 1.5.6"
   required_providers {
     aws = {
       source = "hashicorp/aws"
diff --git a/terraform/modules/resources/instance.tf b/terraform/modules/resources/instance.tf
new file mode 100644
index 0000000000000..120dc71dae119
--- /dev/null
+++ b/terraform/modules/resources/instance.tf
@@ -0,0 +1,159 @@
+### Inputs
+
+variable "instance_type" {
+  description = "The instance type"
+  type        = string
+  default     = ""
+
+  validation {
+    condition     = can(regex("^(e2|n2d|t2d)-standard-(4|8|16|32|48|60)$", var.instance_type))
+    error_message = "Unknown machine type"
+  }
+}
+
+variable "utility_instance_type" {
+  description = "The utilities instance type"
+  type        = string
+  default     = "e2-standard-8"
+
+  validation {
+    condition     = can(regex("^(e2|n2d|t2d)-standard-(4|8|16|32|48|60)$", var.utility_instance_type))
+    error_message = "Unknown machine type"
+  }
+}
+
+variable "max_instances" {
+  description = "The maximum number of instances"
+  type        = number
+  default     = 100
+}
+
+variable "app_service" {
+  description = "Application service labeled using app.kubernetes.io/part-of"
+  type        = string
+  default     = ""
+}
+
+### Computation
+
+locals {
+  machine_family         = split("-", var.instance_type)[0]
+  utility_machine_family = split("-", var.utility_instance_type)[0]
+  machine_shapes = {
+    "t2d-standard-8"  = { cores = 8, memory = 32 }
+    "t2d-standard-16" = { cores = 16, memory = 64 }
+    "t2d-standard-32" = { cores = 32, memory = 128 }
+    "t2d-standard-48" = { cores = 48, memory = 192 }
+    "t2d-standard-60" = { cores = 60, memory = 240 }
+  }
+  # leave 1 core for the system
+  available_cores = local.machine_shapes[var.instance_type].cores - 1
+  # leave 4 GB for the system
+  available_memory = local.machine_shapes[var.instance_type].memory - 4
+
+  node_affinity = {
+    podAntiAffinity = { # don't schedule two of these pods on the same host
+      requiredDuringSchedulingIgnoredDuringExecution = [
+        {
+          labelSelector = {
+            matchExpressions = [
+              {
+                key      = "app.kubernetes.io/part-of",
+                operator = "In",
+                values   = [var.app_service]
+              }
+            ]
+          }
+          topologyKey = "kubernetes.io/hostname"
+        }
+      ]
+    }
+    nodeAffinity = { # affinity for the right instance types
+      requiredDuringSchedulingIgnoredDuringExecution = {
+        nodeSelectorTerms = [
+          {
+            matchExpressions = [
+              {
+                key      = "cloud.google.com/machine-family",
+                operator = "In",
+                values   = [local.machine_family],
+              }
+            ]
+          }
+        ]
+      }
+    }
+  }
+
+  utility_affinity = {
+    podAntiAffinity = { # don't schedule two of these pods on the same host
+      requiredDuringSchedulingIgnoredDuringExecution = [
+        {
+          labelSelector = {
+            matchExpressions = [
+              {
+                key      = "app.kubernetes.io/part-of",
+                operator = "In",
+                values   = [var.app_service]
+              }
+            ]
+          }
+          topologyKey = "kubernetes.io/hostname"
+        }
+      ]
+    }
+    nodeAffinity = { # affinity for the right instance types
+      requiredDuringSchedulingIgnoredDuringExecution = {
+        nodeSelectorTerms = [
+          {
+            matchExpressions = [
+              {
+                key      = "cloud.google.com/machine-family",
+                operator = "In",
+                values   = [local.utility_machine_family],
+              }
+            ]
+          }
+        ]
+      }
+    }
+  }
+}
+
+### Outputs
+
+output "resources" {
+  description = "Resources for the instance"
+  value = {
+    limits = {
+      cpu               = local.available_cores
+      memory            = "${local.available_memory}G"
+      ephemeral-storage = "5Gi"
+    }
+    requests = {
+      cpu               = local.available_cores
+      memory            = "${local.available_memory}G"
+      ephemeral-storage = "5Gi"
+    }
+  }
+}
+
+output "max_cpu" {
+  description = "Maximum CPU for the Node autoprovisioning"
+  value       = local.machine_shapes[var.instance_type].cores * var.max_instances
+}
+
+output "max_memory" {
+  description = "Maximum RAM for the Node autoprovisioning"
+  value       = local.machine_shapes[var.instance_type].memory * var.max_instances
+}
+
+output "node_affinity" {
+  description = "Node affinity for the validator instances"
+  value       = local.node_affinity
+}
+
+output "utility_affinity" {
+  description = "Node affinity for the utility instances"
+  value       = local.utility_affinity
+}
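
A throwaway root module makes it easy to eyeball the computed shapes; the path is a placeholder, and the expected numbers follow the t2d table above (16 cores and 64 GB, minus the 1-core/4-GB system reservations):

    # Inspect module outputs without touching real infrastructure.
    mkdir -p /tmp/shape-check && cd /tmp/shape-check
    cat > main.tf <<'EOF'
    module "shape" {
      source        = "/path/to/aptos-core/terraform/modules/resources" # placeholder path
      instance_type = "t2d-standard-16"
      app_service   = "aptos-fullnode"
    }
    output "resources" { value = module.shape.resources }
    EOF
    terraform init && terraform apply -auto-approve
    terraform output resources   # expect cpu = 15, memory = "60G"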
diff --git a/terraform/scripts/migrate_cluster_psp_to_pss.sh b/terraform/scripts/migrate_cluster_psp_to_pss.sh
index a73b48e99bdd8..40c89a67ed666 100755
--- a/terraform/scripts/migrate_cluster_psp_to_pss.sh
+++ b/terraform/scripts/migrate_cluster_psp_to_pss.sh
@@ -1,66 +1,66 @@
 #!/usr/bin/env bash
 
 function msg() {
-    if [[ ${VERBOSE} == true ]]; then
-        echo ${@} 2>&1
-    fi
+  if [[ ${VERBOSE} == true ]]; then
+    echo ${@} 2>&1
+  fi
 }
 
-function disable_psp_ns () {
-    local _ns=${1}
-    msg "Disabling PodSecurityPolicy on namespace ${_ns}"
-    kubectl delete -n "${_ns}" rolebinding disable-psp 2>/dev/null
-    kubectl create -n "${_ns}" rolebinding disable-psp \
-            --clusterrole privileged-psp --group "system:serviceaccounts:${_ns}"
+function disable_psp_ns() {
+  local _ns=${1}
+  msg "Disabling PodSecurityPolicy on namespace ${_ns}"
+  kubectl delete -n "${_ns}" rolebinding disable-psp 2> /dev/null
+  kubectl create -n "${_ns}" rolebinding disable-psp \
+    --clusterrole privileged-psp --group "system:serviceaccounts:${_ns}"
 }
 
 function set_pss_label() {
-    local _ns=${1}
-    local _policy=${2}
-    msg "Namespace ${_ns}: setting policy ${_policy}"
-    kubectl label --overwrite ns "${_ns}" "${_policy}"
+  local _ns=${1}
+  local _policy=${2}
+  msg "Namespace ${_ns}: setting policy ${_policy}"
+  kubectl label --overwrite ns "${_ns}" "${_policy}"
 }
 
 function set_pss_labels_ns() {
-    local _ns=${1}
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/enforce=privileged"
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/enforce-version=${POLICY_VERSION}"
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/warn=baseline"
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/warn-version=${POLICY_VERSION}"
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/audit=baseline"
-    set_pss_label "${_ns}" "pod-security.kubernetes.io/audit-version=${POLICY_VERSION}"
+  local _ns=${1}
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/enforce=privileged"
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/enforce-version=${POLICY_VERSION}"
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/warn=baseline"
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/warn-version=${POLICY_VERSION}"
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/audit=baseline"
+  set_pss_label "${_ns}" "pod-security.kubernetes.io/audit-version=${POLICY_VERSION}"
 }
 
 function list_ns() {
-    kubectl get ns | grep Active | awk '{ print $1 }'
+  kubectl get ns | grep Active | awk '{ print $1 }'
 }
 
 function migrate() {
-    msg "Creating resource PodSecurityPolicy/privileged-psp"
-    local scriptdir=$(dirname $(readlink -f ${0}))
-    kubectl apply -f "${scriptdir}"/privileged-psp.yaml
+  msg "Creating resource PodSecurityPolicy/privileged-psp"
+  local scriptdir=$(dirname $(readlink -f ${0}))
+  kubectl apply -f "${scriptdir}"/privileged-psp.yaml
 
-    msg "Creating role 'privileged-psp'"
-    kubectl delete clusterrole privileged-psp 2>/dev/null
-    kubectl create clusterrole privileged-psp \
-            --verb use --resource podsecuritypolicies --resource-name privileged-psp
+  msg "Creating role 'privileged-psp'"
+  kubectl delete clusterrole privileged-psp 2> /dev/null
+  kubectl create clusterrole privileged-psp \
+    --verb use --resource podsecuritypolicies --resource-name privileged-psp
 
-    local _ns
-    for _ns in $(list_ns); do
-        disable_psp_ns "${_ns}"
-        # set_pss_labels_ns "${_ns}" "${POLICY_VERSION}"
-    done
-    set_pss_labels_ns default "${POLICY_VERSION}"
+  local _ns
+  for _ns in $(list_ns); do
+    disable_psp_ns "${_ns}"
+    # set_pss_labels_ns "${_ns}" "${POLICY_VERSION}"
+  done
+  set_pss_labels_ns default "${POLICY_VERSION}"
 }
 
 function clean() {
-    msg "Cleaning up PSP resources"
-    kubectl delete clusterrole privileged-psp 2>/dev/null
+  msg "Cleaning up PSP resources"
+  kubectl delete clusterrole privileged-psp 2> /dev/null
 
-    local _ns
-    for _ns in $(list_ns); do
-        kubectl delete -n "${_ns}" rolebinding disable-psp 2>/dev/null
-    done
+  local _ns
+  for _ns in $(list_ns); do
+    kubectl delete -n "${_ns}" rolebinding disable-psp 2> /dev/null
+  done
 }
 
 POLICY_VERSION=v1.24
@@ -70,68 +70,69 @@ cmd=""
 
 optspec="h-:"
 while getopts "$optspec" optchar; do
-    case "${optchar}" in
-        -)
-            case "${OPTARG}" in
-                debug)
-                    DEBUG=true
-                    set +x
-                    ;;
-                verbose)
-                    VERBOSE=true
-                    ;;
-                policy-version=*)
-                    val=${OPTARG#*=}
-                    opt=${OPTARG%=$val}
-                    POLICY_VERSION=${val}
-                    ;;
-                *)
-                    if [ "$OPTERR" = 1 ] && [ "${optspec:0:1}" != ":" ]; then
-                        echo "Unknown option --${OPTARG}" >&2
-                    fi
-                    ;;
-            esac;;
+  case "${optchar}" in
+    -)
+      case "${OPTARG}" in
+        debug)
+          DEBUG=true
+          set +x
+          ;;
+        verbose)
+          VERBOSE=true
+          ;;
+        policy-version=*)
+          val=${OPTARG#*=}
+          opt=${OPTARG%=$val}
+          POLICY_VERSION=${val}
+          ;;
         *)
-            echo "Unknown argument: '-${OPTARG}'" >&2
-            exit 2
-            ;;
-    esac
+          if [ "$OPTERR" = 1 ] && [ "${optspec:0:1}" != ":" ]; then
+            echo "Unknown option --${OPTARG}" >&2
+          fi
+          ;;
+      esac
+      ;;
+    *)
+      echo "Unknown argument: '-${OPTARG}'" >&2
+      exit 2
+      ;;
+  esac
 done
-shift $((OPTIND -1))
+shift $((OPTIND - 1))
 
 case $# in
-    0)
-        cmd="usage"
-        ;;
-    1)
-        cmd=${1}
-        ;;
-    *)
-        echo "Too many parameters on the command line" >&2
-        exit 2
-        ;;
+  0)
+    cmd="usage"
+    ;;
+  1)
+    cmd=${1}
+    ;;
+  *)
+    echo "Too many parameters on the command line" >&2
+    exit 2
+    ;;
 esac
 
 case ${cmd} in
-    usage)
-        echo "Usage: $(basename ${0}) [--verbose] [--debug] [--policy-version=] check | migrate | clean" >&2
-        echo "Default PSS policy version: ${POLICY_VERSION}" >&2
-        exit 1
-        ;;
-    check)
-        echo "Hint: you can get the list of labels with kubectl get ns --show-labels"
-        kubectl label --dry-run=server \
-          --overwrite ns --all \
-          pod-security.kubernetes.io/enforce=baseline
-        ;;
-    clean)
-        clean
-        ;;
-    migrate)
-        migrate
-        ;;
-    *)
-        echo "Unknown command:" ${cmd}
-        exit 2
-        ;;
+  usage)
+    echo "Usage: $(basename ${0}) [--verbose] [--debug] [--policy-version=] check | migrate | clean" >&2
+    echo "Default PSS policy version: ${POLICY_VERSION}" >&2
+    exit 1
+    ;;
+  check)
+    echo "Hint: you can get the list of labels with kubectl get ns --show-labels"
+    kubectl label --dry-run=server \
+      --overwrite ns --all \
+      pod-security.kubernetes.io/enforce=baseline
+    ;;
+  clean)
+    clean
+    ;;
+  migrate)
+    migrate
+    ;;
+  *)
+    echo "Unknown command:" ${cmd}
+    exit 2
+    ;;
 esac

From 63e7330828050abf3c8ca9dc0030c8991a570a23 Mon Sep 17 00:00:00 2001
From: Josh Lind 
Date: Mon, 29 Jan 2024 11:01:09 -0500
Subject: [PATCH 38/44] [Network] Improve noise error log

---
 network/framework/src/noise/error.rs     | 4 ++--
 network/framework/src/noise/handshake.rs | 4 +++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/network/framework/src/noise/error.rs b/network/framework/src/noise/error.rs
index 7395dd3b65ef3..67f2d657a6397 100644
--- a/network/framework/src/noise/error.rs
+++ b/network/framework/src/noise/error.rs
@@ -44,9 +44,9 @@ pub enum NoiseHandshakeError {
 
     #[error(
         "noise server: client {0}: client is expecting us to have a different \
-         public key: {1}"
+         public key. Expected: {1}, Actual: {2}"
     )]
-    ClientExpectingDifferentPubkey(ShortHexStr, String),
+    ClientExpectingDifferentPubkey(ShortHexStr, String, String),
 
     #[error("noise server: client {0}: error parsing handshake init message: {1}")]
     ServerParseClient(ShortHexStr, NoiseError),
diff --git a/network/framework/src/noise/handshake.rs b/network/framework/src/noise/handshake.rs
index 0c61cc0490aab..1e7c2c51d358a 100644
--- a/network/framework/src/noise/handshake.rs
+++ b/network/framework/src/noise/handshake.rs
@@ -348,10 +348,12 @@ impl NoiseUpgrader {
         }
 
         // verify that this is indeed our public key
-        if self_expected_public_key != self.noise_config.public_key().as_slice() {
+        let actual_public_key = self.noise_config.public_key();
+        if self_expected_public_key != actual_public_key.as_slice() {
             return Err(NoiseHandshakeError::ClientExpectingDifferentPubkey(
                 remote_peer_short,
                 hex::encode(self_expected_public_key),
+                hex::encode(actual_public_key.as_slice()),
             ));
         }
 

From b9b1e65163ffc2bc4343e94bf2ed82996988ec83 Mon Sep 17 00:00:00 2001
From: Josh Lind 
Date: Mon, 29 Jan 2024 11:20:46 -0500
Subject: [PATCH 39/44] [Network] Add peer ping latencies to dialing.

---
 Cargo.lock                                    |   2 +
 config/src/config/network_config.rs           |   3 +
 network/builder/src/builder.rs                |   4 +
 network/framework/Cargo.toml                  |   4 +
 .../src/connectivity_manager/builder.rs       |   2 +
 .../framework/src/connectivity_manager/mod.rs | 515 ++++++++++++++----
 .../src/connectivity_manager/selection.rs     | 223 ++++++++
 .../src/connectivity_manager/test.rs          |   3 +-
 network/framework/src/counters.rs             |  40 ++
 9 files changed, 679 insertions(+), 117 deletions(-)
 create mode 100644 network/framework/src/connectivity_manager/selection.rs

diff --git a/Cargo.lock b/Cargo.lock
index a38195b3653b5..6c65cd6ebf71c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2882,10 +2882,12 @@ dependencies = [
  "itertools 0.10.5",
  "maplit",
  "once_cell",
+ "ordered-float 3.9.2",
  "pin-project 1.1.3",
  "proptest",
  "proptest-derive 0.4.0",
  "rand 0.7.3",
+ "rand 0.8.5",
  "rand_core 0.5.1",
  "serde",
  "serde_bytes",
diff --git a/config/src/config/network_config.rs b/config/src/config/network_config.rs
index e0b006e6c4ffd..c2e9d0a280989 100644
--- a/config/src/config/network_config.rs
+++ b/config/src/config/network_config.rs
@@ -125,6 +125,8 @@ pub struct NetworkConfig {
     pub max_message_size: usize,
     /// The maximum number of parallel message deserialization tasks that can run (per application)
     pub max_parallel_deserialization_tasks: Option<usize>,
+    /// Whether or not to enable latency aware peer dialing
+    pub enable_latency_aware_dialing: bool,
 }
 
 impl Default for NetworkConfig {
@@ -166,6 +168,7 @@ impl NetworkConfig {
             outbound_rx_buffer_size_bytes: None,
             outbound_tx_buffer_size_bytes: None,
             max_parallel_deserialization_tasks: None,
+            enable_latency_aware_dialing: true,
         };
 
         // Configure the number of parallel deserialization tasks
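
The new flag defaults to true, as shown in the hunk above. A minimal sketch of opting out, assuming a NetworkConfig built via its Default impl (illustrative only):

    // Fall back to purely random peer selection when dialing.
    let mut network_config = NetworkConfig::default();
    network_config.enable_latency_aware_dialing = false;
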
diff --git a/network/builder/src/builder.rs b/network/builder/src/builder.rs
index 09b76c724acd9..238f4aa07dc33 100644
--- a/network/builder/src/builder.rs
+++ b/network/builder/src/builder.rs
@@ -158,6 +158,7 @@ impl NetworkBuilder {
             CONNECTIVITY_CHECK_INTERVAL_MS,
             NETWORK_CHANNEL_SIZE,
             mutual_authentication,
+            true, /* enable_latency_aware_dialing */
         );
 
         builder
@@ -224,6 +225,7 @@ impl NetworkBuilder {
             config.connectivity_check_interval_ms,
             config.network_channel_size,
             config.mutual_authentication,
+            config.enable_latency_aware_dialing,
         );
 
         network_builder.discovery_listeners = Some(Vec::new());
@@ -342,6 +344,7 @@ impl NetworkBuilder {
         connectivity_check_interval_ms: u64,
         channel_size: usize,
         mutual_authentication: bool,
+        enable_latency_aware_dialing: bool,
     ) -> &mut Self {
         let pm_conn_mgr_notifs_rx = self.peer_manager_builder.add_connection_event_listener();
         let outbound_connection_limit = if !self.network_context.network_id().is_validator_network()
@@ -364,6 +367,7 @@ impl NetworkBuilder {
             pm_conn_mgr_notifs_rx,
             outbound_connection_limit,
             mutual_authentication,
+            enable_latency_aware_dialing,
         ));
         self
     }
diff --git a/network/framework/Cargo.toml b/network/framework/Cargo.toml
index 92da1769e93bd..e2baf89a9513c 100644
--- a/network/framework/Cargo.toml
+++ b/network/framework/Cargo.toml
@@ -43,10 +43,14 @@ hex = { workspace = true }
 itertools = { workspace = true }
 maplit = { workspace = true }
 once_cell = { workspace = true }
+ordered-float = { workspace = true }
 pin-project = { workspace = true }
 proptest = { workspace = true, optional = true }
 proptest-derive = { workspace = true, optional = true }
 rand = { workspace = true, features = ["small_rng"] }
+# Note: we cannot rely on the workspace version of rand. So we use this workaround. See:
+# https://github.com/aptos-labs/aptos-core/blob/main/state-sync/aptos-data-client/Cargo.toml#L41.
+rand_latest = { package = "rand", version = "0.8.5" }
 serde = { workspace = true }
 serde_bytes = { workspace = true }
 serde_json = { workspace = true }
diff --git a/network/framework/src/connectivity_manager/builder.rs b/network/framework/src/connectivity_manager/builder.rs
index 5c41895a8d892..26e25fad5c971 100644
--- a/network/framework/src/connectivity_manager/builder.rs
+++ b/network/framework/src/connectivity_manager/builder.rs
@@ -35,6 +35,7 @@ impl ConnectivityManagerBuilder {
         connection_notifs_rx: conn_notifs_channel::Receiver,
         outbound_connection_limit: Option<usize>,
         mutual_authentication: bool,
+        enable_latency_aware_dialing: bool,
     ) -> Self {
         let (conn_mgr_reqs_tx, conn_mgr_reqs_rx) = aptos_channels::new(
             channel_size,
@@ -56,6 +57,7 @@ impl ConnectivityManagerBuilder {
                 Duration::from_millis(max_connection_delay_ms),
                 outbound_connection_limit,
                 mutual_authentication,
+                enable_latency_aware_dialing,
             )),
         }
     }
diff --git a/network/framework/src/connectivity_manager/mod.rs b/network/framework/src/connectivity_manager/mod.rs
index 51a73e0a2b83e..2f1c7773a4c99 100644
--- a/network/framework/src/connectivity_manager/mod.rs
+++ b/network/framework/src/connectivity_manager/mod.rs
@@ -39,6 +39,7 @@ use aptos_config::{
     network_id::NetworkContext,
 };
 use aptos_crypto::x25519;
+use aptos_infallible::RwLock;
 use aptos_logger::prelude::*;
 use aptos_netcore::transport::ConnectionOrigin;
 use aptos_num_variants::NumVariants;
@@ -50,21 +51,23 @@ use futures::{
     future::{BoxFuture, FutureExt},
     stream::{FuturesUnordered, StreamExt},
 };
-use rand::{
-    prelude::{SeedableRng, SmallRng},
-    seq::SliceRandom,
-};
+use futures_util::future::join_all;
+use itertools::Itertools;
+use rand_latest::Rng;
 use serde::Serialize;
 use std::{
     cmp::{min, Ordering},
     collections::{hash_map::Entry, HashMap, HashSet},
     fmt,
+    net::{Shutdown, TcpStream, ToSocketAddrs},
     sync::Arc,
-    time::{Duration, SystemTime},
+    time::{Duration, Instant, SystemTime},
 };
+use tokio::task::JoinHandle;
 use tokio_retry::strategy::jitter;
 
 pub mod builder;
+mod selection;
 #[cfg(test)]
 mod test;
 
@@ -76,6 +79,13 @@ mod test;
 /// around the same time at startup.
 const MAX_CONNECTION_DELAY_JITTER: Duration = Duration::from_millis(100);
 
+/// The maximum amount of time to wait before timing out a connection attempt.
+/// This should be relatively small to avoid blocking dials for too long.
+const MAX_CONNECTION_TIMEOUT_SECS: u64 = 2;
+
+/// The maximum number of socket addresses to ping for a single address
+const MAX_SOCKET_ADDRESSES_TO_PING: usize = 2;
+
 /// The amount of time to try other peers until dialing this peer again.
 ///
 /// It's currently set to 5 minutes to ensure rotation through all (or most) peers
@@ -91,7 +101,7 @@ pub struct ConnectivityManager<TBackoff> {
     /// PeerId and address of remote peers to which this peer is connected.
     connected: HashMap<PeerId, ConnectionMetadata>,
     /// All information about peers from discovery sources.
-    discovered_peers: DiscoveredPeerSet,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
     /// Channel to send connection requests to PeerManager.
     connection_reqs_tx: ConnectionRequestSender,
     /// Channel to receive notifications from PeerManager.
@@ -115,10 +125,10 @@ pub struct ConnectivityManager {
     event_id: u32,
     /// A way to limit the number of connected peers by outgoing dials.
     outbound_connection_limit: Option<usize>,
-    /// Random for shuffling which peers will be dialed
-    rng: SmallRng,
     /// Whether we are using mutual authentication or not
     mutual_authentication: bool,
+    /// Whether or not to enable latency aware peer dialing
+    enable_latency_aware_dialing: bool,
 }
 
 /// Different sources for peer addresses, ordered by priority (Onchain=highest,
@@ -163,36 +173,50 @@ pub enum ConnectivityRequest {
 }
 
 #[derive(Clone, Debug, Default, PartialEq, Serialize)]
-struct DiscoveredPeerSet(HashMap<PeerId, DiscoveredPeer>);
+struct DiscoveredPeerSet {
+    peer_set: HashMap<PeerId, DiscoveredPeer>,
+}
 
 impl DiscoveredPeerSet {
-    fn get_mut(&mut self, peer_id: &PeerId) -> Option<&mut DiscoveredPeer> {
-        self.0.get_mut(peer_id)
+    /// Gets the eligible peers from the discovered peer set
+    fn get_eligible_peers(&self) -> PeerSet {
+        self.peer_set
+            .iter()
+            .filter(|(_, peer)| peer.is_eligible())
+            .map(|(peer_id, peer)| (*peer_id, peer.into()))
+            .collect()
     }
 
-    fn try_remove_empty(&mut self, peer_id: &PeerId) -> bool {
-        match self.0.entry(*peer_id) {
-            Entry::Occupied(entry) => {
-                let peer = entry.get();
-                if peer.addrs.is_empty() && peer.keys.is_empty() {
-                    entry.remove();
-                    true
-                } else {
-                    false
-                }
-            },
-            Entry::Vacant(_) => true,
+    /// Removes the specified peer from the set if the state is empty
+    fn remove_peer_if_empty(&mut self, peer_id: &PeerId) {
+        if let Entry::Occupied(entry) = self.peer_set.entry(*peer_id) {
+            if entry.get().is_empty() {
+                entry.remove();
+            }
         }
     }
 
-    /// Converts `DiscoveredPeerSet` into a `PeerSet`, however disregards the source of discovery
-    /// TODO: Provide smarter merging based on discovery source
-    pub fn to_eligible_peers(&self) -> PeerSet {
-        self.0
-            .iter()
-            .filter(|(_, peer)| peer.is_eligible())
-            .map(|(peer_id, peer)| (*peer_id, peer.into()))
-            .collect()
+    /// Updates the last dial time for the specified peer (if one was found)
+    fn update_last_dial_time(&mut self, peer_id: &PeerId) {
+        if let Some(discovered_peer) = self.peer_set.get_mut(peer_id) {
+            discovered_peer.update_last_dial_time()
+        }
+    }
+
+    /// Returns the ping latency for the specified peer (if one was found)
+    fn get_ping_latency_ms(&self, peer_id: &PeerId) -> Option<u64> {
+        if let Some(discovered_peer) = self.peer_set.get(peer_id) {
+            discovered_peer.ping_latency_ms
+        } else {
+            None
+        }
+    }
+
+    /// Updates the ping latency for the specified peer (if one was found)
+    fn update_ping_latency_ms(&mut self, peer_id: &PeerId, latency: u64) {
+        if let Some(discovered_peer) = self.peer_set.get_mut(peer_id) {
+            discovered_peer.set_ping_latency_ms(latency)
+        }
     }
 }
 
@@ -204,6 +228,8 @@ struct DiscoveredPeer {
     keys: PublicKeys,
     /// The last time the node was dialed
     last_dial_time: SystemTime,
+    /// The calculated peer ping latency (ms)
+    ping_latency_ms: Option<u64>,
 }
 
 impl DiscoveredPeer {
@@ -213,6 +239,7 @@ impl DiscoveredPeer {
             addrs: Addresses::default(),
             keys: PublicKeys::default(),
             last_dial_time: SystemTime::UNIX_EPOCH,
+            ping_latency_ms: None,
         }
     }
 
@@ -226,15 +253,25 @@ impl DiscoveredPeer {
         self.is_eligible() && !self.addrs.is_empty()
     }
 
+    /// Returns true iff the peer's addresses and keys are empty
+    pub fn is_empty(&self) -> bool {
+        self.addrs.is_empty() && self.keys.is_empty()
+    }
+
     /// Updates the last time we tried to connect to this node
-    pub fn set_last_dial_time(&mut self, time: SystemTime) {
-        self.last_dial_time = time;
+    pub fn update_last_dial_time(&mut self) {
+        self.last_dial_time = SystemTime::now();
+    }
+
+    /// Updates the ping latency for this peer
+    pub fn set_ping_latency_ms(&mut self, latency: u64) {
+        self.ping_latency_ms = Some(latency);
     }
 
     /// Based on input, backoff on amount of time to dial a peer again
-    pub fn has_dialed_recently(&self, backoff_duration: Duration) -> bool {
+    pub fn has_dialed_recently(&self) -> bool {
         if let Ok(duration_since_last_dial) = self.last_dial_time.elapsed() {
-            duration_since_last_dial < backoff_duration
+            duration_since_last_dial < TRY_DIAL_BACKOFF_TIME
         } else {
             false
         }
@@ -243,8 +280,8 @@ impl DiscoveredPeer {
 
 impl PartialOrd for DiscoveredPeer {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-        let self_dialed_recently = self.has_dialed_recently(TRY_DIAL_BACKOFF_TIME);
-        let other_dialed_recently = other.has_dialed_recently(TRY_DIAL_BACKOFF_TIME);
+        let self_dialed_recently = self.has_dialed_recently();
+        let other_dialed_recently = other.has_dialed_recently();
 
         // Less recently dialed is prioritized over recently dialed
         if !self_dialed_recently && other_dialed_recently {
@@ -313,6 +350,7 @@ where
         max_delay: Duration,
         outbound_connection_limit: Option<usize>,
         mutual_authentication: bool,
+        enable_latency_aware_dialing: bool,
     ) -> Self {
         // Verify that the trusted peers set exists and that it is empty
         let trusted_peers = peers_and_metadata
@@ -336,7 +374,7 @@ where
             time_service,
             peers_and_metadata,
             connected: HashMap::new(),
-            discovered_peers: DiscoveredPeerSet::default(),
+            discovered_peers: Arc::new(RwLock::new(DiscoveredPeerSet::default())),
             connection_reqs_tx,
             connection_notifs_rx,
             requests_rx,
@@ -347,11 +385,11 @@ where
             max_delay,
             event_id: 0,
             outbound_connection_limit,
-            rng: SmallRng::from_entropy(),
             mutual_authentication,
+            enable_latency_aware_dialing,
         };
 
-        // set the initial config addresses and pubkeys
+        // Set the initial seed config addresses and public keys
         connmgr.handle_update_discovered_peers(DiscoverySource::Config, seeds);
         connmgr
     }
@@ -510,68 +548,163 @@ where
         }
     }
 
-    fn dial_eligible_peers<'a>(
+    /// Identifies a set of peers to dial and queues them for dialing
+    async fn dial_eligible_peers<'a>(
         &'a mut self,
         pending_dials: &'a mut FuturesUnordered<BoxFuture<'a, PeerId>>,
     ) {
-        let to_connect = self.choose_peers_to_dial();
-        for (peer_id, peer) in to_connect {
+        for (peer_id, peer) in self.choose_peers_to_dial().await {
             self.queue_dial_peer(peer_id, peer, pending_dials);
         }
     }
 
-    fn choose_peers_to_dial(&mut self) -> Vec<(PeerId, DiscoveredPeer)> {
+    /// Selects a set of peers to dial
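+    /// Eligible peers are filtered first (not already connected or
+    /// queued for dialing, and with a dialable role), the count is
+    /// capped by the outbound connection limit, and the final choice
+    /// is latency-weighted on the public network or random otherwise.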
+    async fn choose_peers_to_dial(&mut self) -> Vec<(PeerId, DiscoveredPeer)> {
+        // Get the eligible peers to dial
         let network_id = self.network_context.network_id();
         let role = self.network_context.role();
         let roles_to_dial = network_id.upstream_roles(&role);
-        let mut eligible: Vec<_> = self
-            .discovered_peers
-            .0
-            .iter()
+        let discovered_peers = self.discovered_peers.read().peer_set.clone();
+        let eligible_peers: Vec<_> = discovered_peers
+            .into_iter()
             .filter(|(peer_id, peer)| {
                 peer.is_eligible_to_be_dialed() // The node is eligible to dial
-                    && !self.connected.contains_key(peer_id) // The node is not already connected.
-                    && !self.dial_queue.contains_key(peer_id) // There is no pending dial to this node.
+                    && !self.connected.contains_key(peer_id) // The node is not already connected
+                    && !self.dial_queue.contains_key(peer_id) // There is no pending dial to this node
                     && roles_to_dial.contains(&peer.role) // We can dial this role
             })
             .collect();
 
-        // Prioritize by PeerRole
-        // Shuffle so we don't get stuck on certain peers
-        eligible.shuffle(&mut self.rng);
-
-        // Sort by peer priority
-        eligible
-            .sort_by(|(_, peer), (_, other)| peer.partial_cmp(other).unwrap_or(Ordering::Equal));
-
-        // Limit the number of dialed connections from a Full Node
-        // This does not limit the number of incoming connections
-        // It enforces that a full node cannot have more outgoing connections than `connection_limit`
-        // including in flight dials.
-        let num_eligible = eligible.len();
-        let to_connect = if let Some(conn_limit) = self.outbound_connection_limit {
-            let outbound_connections = self
-                .connected
-                .iter()
-                .filter(|(_, metadata)| metadata.origin == ConnectionOrigin::Outbound)
-                .count();
-            min(
-                conn_limit
-                    .saturating_sub(outbound_connections.saturating_add(self.dial_queue.len())),
-                num_eligible,
+        // Initialize the dial state for any new peers
+        for (peer_id, _) in &eligible_peers {
+            self.dial_states
+                .entry(*peer_id)
+                .or_insert_with(|| DialState::new(self.backoff_strategy.clone()));
+        }
+
+        // Limit the number of dialed connections from a fullnode. Note: this does not
+        // limit the number of incoming connections. It only enforces that a fullnode
+        // cannot have more outgoing connections than the limit (including in-flight dials).
+        let num_eligible_peers = eligible_peers.len();
+        let num_peers_to_dial =
+            if let Some(outbound_connection_limit) = self.outbound_connection_limit {
+                // Get the number of outbound connections
+                let num_outbound_connections = self
+                    .connected
+                    .iter()
+                    .filter(|(_, metadata)| metadata.origin == ConnectionOrigin::Outbound)
+                    .count();
+
+                // Add any pending dials to the count
+                let total_outbound_connections =
+                    num_outbound_connections.saturating_add(self.dial_queue.len());
+
+                // Calculate the potential number of peers to dial
+                let num_peers_to_dial =
+                    outbound_connection_limit.saturating_sub(total_outbound_connections);
+
+                // Limit the number of peers to dial by the total number of eligible peers
+                min(num_peers_to_dial, num_eligible_peers)
+            } else {
+                num_eligible_peers // Otherwise, we attempt to dial all eligible peers
+            };
+
+        // If we have no peers to dial, return early
+        if num_peers_to_dial == 0 {
+            return vec![];
+        }
+
+        // Prioritize the eligible peers and select the peers to dial
+        if selection::should_select_peers_by_latency(
+            &self.network_context,
+            self.enable_latency_aware_dialing,
+        ) {
+            // Ping the eligible peers (so that we can fetch missing ping latency information)
+            self.ping_eligible_peers(eligible_peers.clone()).await;
+
+            // Choose the peers to dial (weighted by ping latency)
+            selection::choose_random_peers_by_ping_latency(
+                self.network_context,
+                eligible_peers,
+                num_peers_to_dial,
+                self.discovered_peers.clone(),
             )
         } else {
-            num_eligible
-        };
+            // Choose the peers randomly
+            selection::choose_peers_to_dial_randomly(eligible_peers, num_peers_to_dial)
+        }
+    }
 
-        // Take peers to connect to in priority order
-        eligible
-            .iter()
-            .take(to_connect)
-            .map(|(peer_id, peer)| (**peer_id, (*peer).clone()))
-            .collect()
+    /// Pings the eligible peers to calculate their ping latencies
+    /// and updates the discovered peer state accordingly.
+    async fn ping_eligible_peers(&mut self, eligible_peers: Vec<(PeerId, DiscoveredPeer)>) {
+        // Identify the eligible peers that don't already have latency information
+        let peers_to_ping = eligible_peers
+            .into_iter()
+            .filter(|(_, peer)| peer.ping_latency_ms.is_none())
+            .collect::<Vec<_>>();
+
+        // If there are no peers to ping, return early
+        let num_peers_to_ping = peers_to_ping.len();
+        if num_peers_to_ping == 0 {
+            return;
+        }
+
+        // Spawn a task that pings each peer concurrently
+        let ping_start_time = Instant::now();
+        let mut ping_tasks = vec![];
+        for (peer_id, peer) in peers_to_ping.into_iter() {
+            // Get the network address for the peer
+            let network_context = self.network_context;
+            let network_address = match self.dial_states.get(&peer_id) {
+                Some(dial_state) => match dial_state.random_addr(&peer.addrs) {
+                    Some(network_address) => network_address.clone(),
+                    None => {
+                        warn!(
+                            NetworkSchema::new(&network_context),
+                            "Peer {} does not have a network address!",
+                            peer_id.short_str()
+                        );
+                        continue; // Continue onto the next peer
+                    },
+                },
+                None => {
+                    warn!(
+                        NetworkSchema::new(&network_context),
+                        "Peer {} does not have a dial state!",
+                        peer_id.short_str()
+                    );
+                    continue; // Continue onto the next peer
+                },
+            };
+
+            // Ping the peer
+            let ping_task = spawn_latency_ping_task(
+                network_context,
+                peer_id,
+                network_address,
+                self.discovered_peers.clone(),
+            );
+
+            // Add the task to the list of ping tasks
+            ping_tasks.push(ping_task);
+        }
+
+        // Wait for all the ping tasks to complete (or timeout)
+        let num_ping_tasks = ping_tasks.len();
+        join_all(ping_tasks).await;
+
+        // Log the peer ping latencies
+        log_peer_ping_latencies(
+            self.network_context,
+            self.discovered_peers.clone(),
+            num_peers_to_ping,
+            num_ping_tasks,
+            ping_start_time,
+        );
     }
 
+    /// Queues a dial to the specified peer
     fn queue_dial_peer<'a>(
         &'a mut self,
         peer_id: PeerId,
@@ -582,19 +715,36 @@ where
         // newly eligible, but not connected to peers, have their counter initialized properly.
         counters::peer_connected(&self.network_context, &peer_id, 0);
 
-        let connection_reqs_tx = self.connection_reqs_tx.clone();
-        // The initial dial state; it has zero dial delay and uses the first
-        // address.
-        let init_dial_state = DialState::new(self.backoff_strategy.clone());
-        let dial_state = self
-            .dial_states
-            .entry(peer_id)
-            .or_insert_with(|| init_dial_state);
+        // Get the peer's dial state
+        let dial_state = match self.dial_states.get_mut(&peer_id) {
+            Some(dial_state) => dial_state,
+            None => {
+                // The peer should have a dial state! If not, log an error and return.
+                error!(
+                    NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
+                    "{} Peer {} does not have a dial state!",
+                    self.network_context,
+                    peer_id.short_str()
+                );
+                return;
+            },
+        };
 
         // Choose the next addr to dial for this peer. Currently, we just
         // round-robin the selection, i.e., try the sequence:
         // addr[0], .., addr[len-1], addr[0], ..
-        let addr = dial_state.next_addr(&peer.addrs).clone();
+        let addr = match dial_state.next_addr(&peer.addrs) {
+            Some(addr) => addr.clone(),
+            None => {
+                warn!(
+                    NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
+                    "{} Peer {} does not have any network addresses!",
+                    self.network_context,
+                    peer_id.short_str()
+                );
+                return;
+            },
+        };
 
         // Using the DialState's backoff strategy, compute the delay until
         // the next dial attempt for this peer.
@@ -606,6 +756,7 @@ where
         let network_context = self.network_context;
         // Create future which completes by either dialing after calculated
         // delay or on cancellation.
+        let connection_reqs_tx = self.connection_reqs_tx.clone();
         let f = async move {
             // We dial after a delay. The dial can be canceled by sending to or dropping
             // `cancel_rx`.
@@ -634,9 +785,9 @@ where
         pending_dials.push(f.boxed());
 
         // Update last dial time
-        if let Some(discovered_peer) = self.discovered_peers.get_mut(&peer_id) {
-            discovered_peer.set_last_dial_time(SystemTime::now())
-        }
+        self.discovered_peers
+            .write()
+            .update_last_dial_time(&peer_id);
         self.dial_queue.insert(peer_id, cancel_tx);
     }
 
@@ -658,7 +809,7 @@ where
             info!(
                 NetworkSchema::new(&self.network_context),
                 discovered_peers = ?self.discovered_peers,
-                "Current eligible peers"
+                "Active discovered peers"
             )
         });
 
@@ -668,12 +819,19 @@ where
         self.close_stale_connections().await;
         // Dial peers which are eligible but are neither connected nor queued for dialing in the
         // future.
-        self.dial_eligible_peers(pending_dials);
+        self.dial_eligible_peers(pending_dials).await;
+
+        // Update the metrics for any connected peer ping latencies
+        self.update_connected_ping_latency_metrics();
     }
 
-    fn reset_dial_state(&mut self, peer_id: &PeerId) {
-        if let Some(dial_state) = self.dial_states.get_mut(peer_id) {
-            *dial_state = DialState::new(self.backoff_strategy.clone());
+    /// Updates the metrics for tracking connected peer ping latencies
+    fn update_connected_ping_latency_metrics(&self) {
+        for peer_id in self.connected.keys() {
+            if let Some(ping_latency_ms) = self.discovered_peers.read().get_ping_latency_ms(peer_id)
+            {
+                counters::observe_connected_ping_time(&self.network_context, ping_latency_ms);
+            }
         }
     }
 
@@ -704,11 +862,14 @@ where
         }
     }
 
+    /// Handles an update for newly discovered peers. This typically
+    /// occurs at node startup, and on epoch changes.
     fn handle_update_discovered_peers(
         &mut self,
         src: DiscoverySource,
         new_discovered_peers: PeerSet,
     ) {
+        // Log the update event
         info!(
             NetworkSchema::new(&self.network_context),
             "{} Received updated list of discovered peers! Source: {:?}, num peers: {:?}",
@@ -717,13 +878,10 @@ where
             new_discovered_peers.len()
         );
 
-        let self_peer_id = self.network_context.peer_id();
+        // Remove peers that no longer have relevant network information
         let mut keys_updated = false;
-
         let mut peers_to_check_remove = Vec::new();
-
-        // Remove peer info that no longer have information to use them
-        for (peer_id, peer) in self.discovered_peers.0.iter_mut() {
+        for (peer_id, peer) in self.discovered_peers.write().peer_set.iter_mut() {
             let new_peer = new_discovered_peers.get(peer_id);
             let check_remove = if let Some(new_peer) = new_peer {
                 if new_peer.keys.is_empty() {
@@ -745,24 +903,25 @@ where
 
         // Remove peers that no longer have state
         for peer_id in peers_to_check_remove {
-            self.discovered_peers.try_remove_empty(&peer_id);
+            self.discovered_peers.write().remove_peer_if_empty(&peer_id);
         }
 
         // Make updates to the peers accordingly
         for (peer_id, discovered_peer) in new_discovered_peers {
             // Don't include ourselves, because we don't need to dial ourselves
-            if peer_id == self_peer_id {
+            if peer_id == self.network_context.peer_id() {
                 continue;
             }
 
             // Create the new `DiscoveredPeer`, role is set when a `Peer` is first discovered
-            let peer = self
-                .discovered_peers
-                .0
+            let mut discovered_peers = self.discovered_peers.write();
+            let peer = discovered_peers
+                .peer_set
                 .entry(peer_id)
                 .or_insert_with(|| DiscoveredPeer::new(discovered_peer.role));
+
+            // Update the peer's pubkeys
             let mut peer_updated = false;
-            // Update peer's pubkeys
             if peer.keys.update(src, discovered_peer.keys) {
                 info!(
                     NetworkSchema::new(&self.network_context)
@@ -777,7 +936,7 @@ where
                 peer_updated = true;
             }
 
-            // Update peer's addresses
+            // Update the peer's addresses
             if peer.addrs.update(src, discovered_peer.addresses) {
                 info!(
                     NetworkSchema::new(&self.network_context).remote_peer(&peer_id),
@@ -797,7 +956,9 @@ where
             // fresh backoff (since the current backoff delay might be maxed
             // out if we can't reach any of their previous addresses).
             if peer_updated {
-                self.reset_dial_state(&peer_id)
+                if let Some(dial_state) = self.dial_states.get_mut(&peer_id) {
+                    *dial_state = DialState::new(self.backoff_strategy.clone());
+                }
             }
         }
 
@@ -805,7 +966,7 @@ where
         if keys_updated {
             // For each peer, union all of the pubkeys from each discovery source
             // to generate the new eligible peers set.
-            let new_eligible = self.discovered_peers.to_eligible_peers();
+            let new_eligible = self.discovered_peers.read().get_eligible_peers();
 
             // Swap in the new eligible peers set
             if let Err(error) = self
@@ -933,6 +1094,114 @@ fn log_dial_result(
     }
 }
 
+/// Logs the total and individual ping latencies
+fn log_peer_ping_latencies(
+    network_context: NetworkContext,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+    total_peers_to_ping: usize,
+    num_peers_pinged: usize,
+    ping_start_time: Instant,
+) {
+    // Log the total ping latency time
+    let ping_latency_duration = Instant::now().duration_since(ping_start_time);
+    info!(
+        NetworkSchema::new(&network_context),
+        "Finished pinging eligible peers! Total peers to ping: {}, num peers pinged: {}, time: {}ms",
+        total_peers_to_ping,
+        num_peers_pinged,
+        ping_latency_duration.as_millis()
+    );
+
+    // Log the ping latencies for the eligible peers (sorted by latency)
+    let eligible_peers = discovered_peers.read().peer_set.clone();
+    let eligible_peers_and_latencies = eligible_peers
+        .into_iter()
+        .map(|(peer_id, peer)| (peer_id, peer.ping_latency_ms))
+        .collect::<Vec<_>>();
+    let sorted_eligible_peers_and_latencies = eligible_peers_and_latencies
+        .iter()
+        .sorted_by_key(|(_, ping_latency_ms)| ping_latency_ms)
+        .collect::<Vec<_>>();
+    info!(
+        NetworkSchema::new(&network_context),
+        "Sorted eligible peers with recorded ping latencies: {:?}",
+        sorted_eligible_peers_and_latencies
+    );
+}
+
+/// Spawns a task that pings the peer at the specified
+/// network address and updates the peer's ping latency.
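+/// The "ping" is a timed TCP connect, bounded by
+/// MAX_CONNECTION_TIMEOUT_SECS, rather than an ICMP echo, so it
+/// measures connection setup time rather than raw round-trip time.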
+fn spawn_latency_ping_task(
+    network_context: NetworkContext,
+    peer_id: AccountAddress,
+    network_address: NetworkAddress,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+) -> JoinHandle<()> {
+    tokio::task::spawn_blocking(move || {
+        // Extract the socket addresses from the network address
+        let socket_addresses = match network_address.to_socket_addrs() {
+            Ok(socket_addresses) => socket_addresses.collect::<Vec<_>>(),
+            Err(error) => {
+                warn!(
+                    NetworkSchema::new(&network_context),
+                    "Failed to resolve network address {:?}: {}", network_address, error
+                );
+                return;
+            },
+        };
+
+        // If no socket addresses were found, log an error and return
+        if socket_addresses.is_empty() {
+            warn!(
+                NetworkSchema::new(&network_context),
+                "Peer {} does not have any socket addresses for network address {:?}!",
+                peer_id.short_str(),
+                network_address,
+            );
+            return;
+        }
+
+        // Limit the number of socket addresses we'll try to connect to
+        let socket_addresses = socket_addresses
+            .iter()
+            .take(MAX_SOCKET_ADDRESSES_TO_PING)
+            .collect::<Vec<_>>();
+
+        // Attempt to connect to the socket addresses over TCP and time the connection
+        for socket_address in socket_addresses {
+            // Start the ping timer
+            let start_time = Instant::now();
+
+            // Attempt to connect to the socket address
+            if let Ok(tcp_stream) = TcpStream::connect_timeout(
+                socket_address,
+                Duration::from_secs(MAX_CONNECTION_TIMEOUT_SECS),
+            ) {
+                // We connected successfully, update the peer's ping latency
+                let ping_latency_ms = start_time.elapsed().as_millis() as u64;
+                discovered_peers
+                    .write()
+                    .update_ping_latency_ms(&peer_id, ping_latency_ms);
+
+                // Update the ping latency metrics
+                counters::observe_pre_dial_ping_time(&network_context, ping_latency_ms);
+
+                // Attempt to terminate the TCP stream cleanly
+                if let Err(error) = tcp_stream.shutdown(Shutdown::Both) {
+                    warn!(
+                        NetworkSchema::new(&network_context),
+                        "Failed to terminate TCP stream to peer {} after pinging: {}",
+                        peer_id.short_str(),
+                        error
+                    );
+                }
+
+                return;
+            }
+        }
+    })
+}
+
 /////////////////////
 // DiscoverySource //
 /////////////////////
@@ -1058,13 +1327,27 @@ where
         }
     }
 
-    fn next_addr<'a>(&mut self, addrs: &'a Addresses) -> &'a NetworkAddress {
-        assert!(!addrs.is_empty());
+    /// Returns the address to dial (specified by the index) for this peer
+    fn get_addr_at_index<'a>(
+        &self,
+        addr_index: usize,
+        addrs: &'a Addresses,
+    ) -> Option<&'a NetworkAddress> {
+        addrs.get(addr_index % addrs.len())
+    }
 
-        let addr_idx = self.addr_idx;
+    /// Returns the current address to dial for this peer and updates
+    /// the internal state to point to the next address.
+    fn next_addr<'a>(&mut self, addrs: &'a Addresses) -> Option<&'a NetworkAddress> {
+        let curr_addr = self.get_addr_at_index(self.addr_idx, addrs);
         self.addr_idx = self.addr_idx.wrapping_add(1);
+        curr_addr
+    }
 
-        addrs.get(addr_idx % addrs.len()).unwrap()
+    /// Returns a random address to dial for this peer
+    fn random_addr<'a>(&self, addrs: &'a Addresses) -> Option<&'a NetworkAddress> {
+        let addr_index = ::rand_latest::thread_rng().gen_range(0..addrs.len());
+        self.get_addr_at_index(addr_index, addrs)
     }
 
     fn next_backoff_delay(&mut self, max_delay: Duration) -> Duration {
diff --git a/network/framework/src/connectivity_manager/selection.rs b/network/framework/src/connectivity_manager/selection.rs
new file mode 100644
index 0000000000000..3e7d70b5f7a09
--- /dev/null
+++ b/network/framework/src/connectivity_manager/selection.rs
@@ -0,0 +1,223 @@
+// Copyright © Aptos Foundation
+// SPDX-License-Identifier: Apache-2.0
+
+use crate::{
+    connectivity_manager::{DiscoveredPeer, DiscoveredPeerSet},
+    logging::NetworkSchema,
+};
+use aptos_config::network_id::NetworkContext;
+use aptos_infallible::RwLock;
+use aptos_logger::error;
+use aptos_types::PeerId;
+use maplit::hashset;
+use ordered_float::OrderedFloat;
+use rand_latest::prelude::*;
+use std::{cmp::Ordering, collections::HashSet, sync::Arc};
+
+/// Chooses peers to dial randomly from the given list of eligible
+/// peers. We take last dial times into account to ensure that we
+/// don't dial the same peers too frequently.
+pub fn choose_peers_to_dial_randomly(
+    mut eligible_peers: Vec<(PeerId, DiscoveredPeer)>,
+    num_peers_to_dial: usize,
+) -> Vec<(PeerId, DiscoveredPeer)> {
+    // Shuffle the peers (so that we don't always dial the same ones first)
+    eligible_peers.shuffle(&mut ::rand_latest::thread_rng());
+
+    // Sort the peers by priority (this takes into account last dial times)
+    eligible_peers
+        .sort_by(|(_, peer), (_, other)| peer.partial_cmp(other).unwrap_or(Ordering::Equal));
+
+    // Select the peers to dial
+    eligible_peers.into_iter().take(num_peers_to_dial).collect()
+}
+
+/// Chooses peers randomly weighted by latency from the given list of peers
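+/// Selection runs in up to three rounds: a latency-weighted draw from
+/// peers not dialed recently, a latency-weighted draw from the remaining
+/// eligible peers, and a purely random fill if too few peers have
+/// recorded ping latencies.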
+pub fn choose_random_peers_by_ping_latency(
+    network_context: NetworkContext,
+    eligible_peers: Vec<(PeerId, DiscoveredPeer)>,
+    num_peers_to_choose: usize,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+) -> Vec<(PeerId, DiscoveredPeer)> {
+    // Get all eligible peer IDs
+    let eligible_peer_ids = eligible_peers
+        .iter()
+        .map(|(peer_id, _)| *peer_id)
+        .collect::<HashSet<_>>();
+
+    // Identify the peer IDs that haven't been dialed recently
+    let non_recently_dialed_peer_ids = eligible_peers
+        .iter()
+        .filter(|(_, peer)| !peer.has_dialed_recently())
+        .map(|(peer_id, _)| *peer_id)
+        .collect::<HashSet<_>>();
+
+    // Choose peers (weighted by latency) from the non-recently dialed peers
+    let mut selected_peer_ids = choose_peers_by_ping_latency(
+        &network_context,
+        &non_recently_dialed_peer_ids,
+        num_peers_to_choose,
+        discovered_peers.clone(),
+    );
+
+    // If not enough peers were selected, choose additional peers weighted by latency
+    let num_selected_peer_ids = selected_peer_ids.len();
+    if num_selected_peer_ids < num_peers_to_choose {
+        // Filter out the already selected peers
+        let unselected_peer_ids = get_unselected_peer_ids(&eligible_peer_ids, &selected_peer_ids);
+
+        // Choose the remaining peers weighted by latency
+        let num_remaining_peers = num_peers_to_choose.saturating_sub(num_selected_peer_ids);
+        let remaining_selected_peer_ids = choose_peers_by_ping_latency(
+            &network_context,
+            &unselected_peer_ids,
+            num_remaining_peers,
+            discovered_peers.clone(),
+        );
+
+        // Extend the selected peers with the remaining peers
+        selected_peer_ids.extend(remaining_selected_peer_ids);
+    }
+
+    // Extend the selected peers with random peers (if necessary)
+    let selected_peer_ids =
+        extend_with_random_peers(selected_peer_ids, &eligible_peer_ids, num_peers_to_choose);
+
+    // Return the selected peers
+    get_discovered_peers_for_ids(selected_peer_ids, discovered_peers)
+}
+
+/// Returns true iff peers should be selected by ping latency. Note: this only
+/// makes sense for the public network, as the validator and VFN networks
+/// establish all-to-all connections.
+pub fn should_select_peers_by_latency(
+    network_context: &NetworkContext,
+    enable_latency_aware_dialing: bool,
+) -> bool {
+    network_context.network_id().is_public_network() && enable_latency_aware_dialing
+}
+
+/// Selects the specified number of peers from the list of potential
+/// peers. Peer selection is weighted by peer latencies (i.e., the
+/// lower the ping latency, the higher the probability of selection).
+fn choose_peers_by_ping_latency(
+    network_context: &NetworkContext,
+    peer_ids: &HashSet<PeerId>,
+    num_peers_to_choose: usize,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+) -> HashSet<PeerId> {
+    // If no peers can be chosen, return an empty list
+    if num_peers_to_choose == 0 || peer_ids.is_empty() {
+        return hashset![];
+    }
+
+    // Gather the latency weights for all peers
+    let mut peer_ids_and_latency_weights = vec![];
+    for peer_id in peer_ids {
+        if let Some(ping_latency_ms) = discovered_peers.read().get_ping_latency_ms(peer_id) {
+            let latency_weight = convert_latency_to_weight(ping_latency_ms);
+            peer_ids_and_latency_weights.push((peer_id, OrderedFloat(latency_weight)));
+        }
+    }
+
+    // Get the random peers by weight
+    let weighted_selected_peers = peer_ids_and_latency_weights
+        .choose_multiple_weighted(
+            &mut ::rand_latest::thread_rng(),
+            num_peers_to_choose,
+            |peer| peer.1,
+        )
+        .map(|peers| peers.into_iter().map(|peer| *peer.0).collect::<Vec<_>>());
+
+    // Return the random peers by weight
+    weighted_selected_peers
+        .unwrap_or_else(|error| {
+            // We failed to select any peers
+            error!(
+                NetworkSchema::new(network_context),
+                "Failed to choose peers by latency for network context: {:?}. Error: {:?}",
+                network_context,
+                error
+            );
+            vec![]
+        })
+        .into_iter()
+        .collect::<HashSet<_>>()
+}
+
+/// Converts the given latency measurement to a weight. The weight
+/// is calculated as the inverse of the latency, with a scaling
+/// factor to ensure that low latency peers are highly weighted.
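+/// For example, a 50ms ping yields a base weight of 1000/50 = 20.0 and
+/// 50/25 = 2 halvings, giving 5.0; a 100ms ping yields 10.0 with 4
+/// halvings, giving 0.625 (one-eighth the weight of the 50ms peer).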
+fn convert_latency_to_weight(latency_ms: u64) -> f64 {
+    // If the latency is <= 0, something has gone wrong, so return 0.
+    let latency_ms = latency_ms as f64;
+    if latency_ms <= 0.0 {
+        return 0.0;
+    }
+
+    // Invert the latency to get the weight
+    let mut weight = 1000.0 / latency_ms;
+
+    // For every 25ms of latency, reduce the weight by 1/2 (to
+    // ensure that low latency peers are highly weighted)
+    let num_reductions = (latency_ms / 25.0) as usize;
+    for _ in 0..num_reductions {
+        weight /= 2.0;
+    }
+
+    weight
+}
+
+/// If the number of selected peers is less than the number of required peers,
+/// select remaining peers from the serviceable peers (at random).
+fn extend_with_random_peers(
+    mut selected_peer_ids: HashSet<PeerId>,
+    peer_ids: &HashSet<PeerId>,
+    num_required_peers: usize,
+) -> HashSet<PeerId> {
+    // Only select random peers if we don't have enough peers
+    let num_selected_peers = selected_peer_ids.len();
+    if num_selected_peers < num_required_peers {
+        // Filter out the already selected peers
+        let unselected_peer_ids = get_unselected_peer_ids(peer_ids, &selected_peer_ids);
+
+        // Randomly select the remaining peers
+        let num_remaining_peers = num_required_peers.saturating_sub(num_selected_peers);
+        let remaining_peer_ids = unselected_peer_ids
+            .into_iter()
+            .choose_multiple(&mut ::rand_latest::thread_rng(), num_remaining_peers);
+
+        // Add the remaining peers to the selected peers
+        selected_peer_ids.extend(remaining_peer_ids);
+    }
+
+    selected_peer_ids
+}
+
+/// Returns the discovered peer states for the given peer ids
+fn get_discovered_peers_for_ids(
+    peer_ids: HashSet<PeerId>,
+    discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+) -> Vec<(PeerId, DiscoveredPeer)> {
+    peer_ids
+        .into_iter()
+        .filter_map(|peer_id| {
+            discovered_peers
+                .read()
+                .peer_set
+                .get(&peer_id)
+                .map(|peer| (peer_id, peer.clone()))
+        })
+        .collect()
+}
+
+/// Returns the unselected peer IDs from the given set of eligible and selected peer IDs
+fn get_unselected_peer_ids(
+    eligible_peer_ids: &HashSet<PeerId>,
+    selected_peer_ids: &HashSet<PeerId>,
+) -> HashSet<PeerId> {
+    eligible_peer_ids
+        .difference(selected_peer_ids)
+        .cloned()
+        .collect()
+}
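
To make the selection API concrete, a small sketch of the random path (illustrative only: PeerId::random() comes from aptos-types and PeerRole::Upstream from aptos-config; freshly constructed peers have never been dialed, so none are deprioritized by the recency sort):

    let eligible_peers: Vec<(PeerId, DiscoveredPeer)> = (0..10)
        .map(|_| (PeerId::random(), DiscoveredPeer::new(PeerRole::Upstream)))
        .collect();
    // Shuffles, sorts by dial-recency priority, and takes the first three.
    let selected = choose_peers_to_dial_randomly(eligible_peers, 3);
    assert_eq!(selected.len(), 3);
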
diff --git a/network/framework/src/connectivity_manager/test.rs b/network/framework/src/connectivity_manager/test.rs
index 70af39cacc1be..7165e8e3e6e2f 100644
--- a/network/framework/src/connectivity_manager/test.rs
+++ b/network/framework/src/connectivity_manager/test.rs
@@ -19,7 +19,7 @@ use aptos_time_service::{MockTimeService, TimeService};
 use aptos_types::{account_address::AccountAddress, network_address::NetworkAddress};
 use futures::{executor::block_on, future, SinkExt};
 use maplit::{hashmap, hashset};
-use rand::rngs::StdRng;
+use rand::{rngs::StdRng, SeedableRng};
 use std::{io, str::FromStr};
 use tokio_retry::strategy::FixedInterval;
 
@@ -106,6 +106,7 @@ impl TestHarness {
             MAX_CONNECTION_DELAY,
             Some(MAX_TEST_CONNECTIONS),
             true, /* mutual_authentication */
+            true, /* enable_latency_aware_dialing */
         );
         let mock = Self {
             network_context,
diff --git a/network/framework/src/counters.rs b/network/framework/src/counters.rs
index 600bdd003839e..64bbe6cddc822 100644
--- a/network/framework/src/counters.rs
+++ b/network/framework/src/counters.rs
@@ -13,6 +13,7 @@ use aptos_netcore::transport::ConnectionOrigin;
 use aptos_short_hex_str::AsShortHexStr;
 use aptos_types::PeerId;
 use once_cell::sync::Lazy;
+use std::time::Duration;
 
 // some type labels
 pub const REQUEST_LABEL: &str = "request";
@@ -31,6 +32,10 @@ pub const FAILED_LABEL: &str = "failed";
 pub const INBOUND_LABEL: &str = "inbound";
 pub const OUTBOUND_LABEL: &str = "outbound";
 
+// Peer ping labels
+const CONNECTED_LABEL: &str = "connected";
+const PRE_DIAL_LABEL: &str = "pre_dial";
+
 // Serialization labels
 pub const SERIALIZATION_LABEL: &str = "serialization";
 pub const DESERIALIZATION_LABEL: &str = "deserialization";
@@ -597,3 +602,38 @@ pub fn start_serialization_timer(protocol_id: ProtocolId, operation: &str) -> Hi
         .with_label_values(&[protocol_id.as_str(), operation])
         .start_timer()
 }
+
+/// Counters related to peer ping times (before and after dialing)
+pub static NETWORK_PRE_DIAL_PING_TIME: Lazy<HistogramVec> = Lazy::new(|| {
+    register_histogram_vec!(
+        "aptos_network_peer_ping_times",
+        "Counters related to peer ping times (before and after dialing)",
+        &["network_id", "label"],
+    )
+    .unwrap()
+});
+
+/// Starts and returns the timer for peer pings (before dialing)
+pub fn start_pre_dial_ping_timer(network_context: &NetworkContext) -> HistogramTimer {
+    NETWORK_PRE_DIAL_PING_TIME
+        .with_label_values(&[network_context.network_id().as_str(), PRE_DIAL_LABEL])
+        .start_timer()
+}
+
+/// Observes the ping time for a connected peer
+pub fn observe_connected_ping_time(network_context: &NetworkContext, ping_latency_ms: u64) {
+    observe_ping_time(network_context, ping_latency_ms, CONNECTED_LABEL);
+}
+
+/// Observes the ping time for a peer before dialing
+pub fn observe_pre_dial_ping_time(network_context: &NetworkContext, ping_latency_ms: u64) {
+    observe_ping_time(network_context, ping_latency_ms, PRE_DIAL_LABEL);
+}
+
+/// Observes the ping time for the given label
+fn observe_ping_time(network_context: &NetworkContext, ping_latency_ms: u64, label: &str) {
+    let ping_latency_secs = Duration::from_millis(ping_latency_ms).as_secs_f64();
+    NETWORK_PRE_DIAL_PING_TIME
+        .with_label_values(&[network_context.network_id().as_str(), label])
+        .observe(ping_latency_secs);
+}
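
Both helpers feed the same histogram under different labels, with latencies recorded in seconds. A small sketch (NetworkContext::mock() is the test constructor used later in this series):

    let network_context = NetworkContext::mock();
    observe_pre_dial_ping_time(&network_context, 42);   // 0.042s, "pre_dial"
    observe_connected_ping_time(&network_context, 100); // 0.1s, "connected"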

From 7d1078ab5c53e3700bf8a2ed92f80964f89918c0 Mon Sep 17 00:00:00 2001
From: Josh Lind 
Date: Mon, 29 Jan 2024 11:28:39 -0500
Subject: [PATCH 40/44] [Network] Add simple tests for latency aware dialing.

---
 .../framework/src/connectivity_manager/mod.rs |   7 +
 .../src/connectivity_manager/selection.rs     | 532 ++++++++++++++++++
 2 files changed, 539 insertions(+)

diff --git a/network/framework/src/connectivity_manager/mod.rs b/network/framework/src/connectivity_manager/mod.rs
index 2f1c7773a4c99..20ac38af49837 100644
--- a/network/framework/src/connectivity_manager/mod.rs
+++ b/network/framework/src/connectivity_manager/mod.rs
@@ -178,6 +178,13 @@ struct DiscoveredPeerSet {
 }
 
 impl DiscoveredPeerSet {
+    #[cfg(test)]
+    /// Creates a new discovered peer set from the
+    /// specified peer set. This is used for testing.
+    pub fn new_from_peer_set(peer_set: HashMap<PeerId, DiscoveredPeer>) -> Self {
+        Self { peer_set }
+    }
+
     /// Gets the eligible peers from the discovered peer set
     fn get_eligible_peers(&self) -> PeerSet {
         self.peer_set
diff --git a/network/framework/src/connectivity_manager/selection.rs b/network/framework/src/connectivity_manager/selection.rs
index 3e7d70b5f7a09..40e3db0a049ac 100644
--- a/network/framework/src/connectivity_manager/selection.rs
+++ b/network/framework/src/connectivity_manager/selection.rs
@@ -221,3 +221,535 @@ fn get_unselected_peer_ids(
         .cloned()
         .collect()
 }
+
+#[cfg(test)]
+mod test {
+    use super::*;
+    use aptos_config::{
+        config::{PeerRole, RoleType},
+        network_id::NetworkId,
+    };
+    use aptos_types::account_address::AccountAddress;
+    use rand::Rng;
+    use std::collections::{BinaryHeap, HashMap};
+
+    #[test]
+    fn test_choose_random_peers() {
+        // Create an empty eligible peers set
+        let eligible_peers = vec![];
+
+        // Choose several peers randomly and verify none are selected
+        let selected_peers = choose_peers_to_dial_randomly(eligible_peers, 5);
+        assert!(selected_peers.is_empty());
+
+        // Create a large set of eligible peers
+        let eligible_peers = create_eligible_peers(100);
+
+        // Choose several peers randomly and verify the number of selected peers
+        let num_peers_to_dial = 5;
+        let selected_peers = choose_peers_to_dial_randomly(eligible_peers, num_peers_to_dial);
+        assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+        // Create a small set of eligible peers
+        let num_eligible_peers = 5;
+        let eligible_peers = create_eligible_peers(num_eligible_peers);
+
+        // Choose many peers randomly and verify the number of selected peers
+        let selected_peers = choose_peers_to_dial_randomly(eligible_peers, 20);
+        assert_eq!(selected_peers.len(), num_eligible_peers);
+    }
+
+    #[test]
+    fn test_choose_random_peers_shuffle() {
+        // Create a set of 10 eligible peers
+        let num_eligible_peers = 10;
+        let eligible_peers = create_eligible_peers(num_eligible_peers);
+
+        // Choose all the peers randomly and verify the number of selected peers
+        let selected_peers_1 =
+            choose_peers_to_dial_randomly(eligible_peers.clone(), num_eligible_peers);
+        assert_eq!(selected_peers_1.len(), num_eligible_peers);
+
+        // Choose all the peers randomly again and verify the number of selected peers
+        let selected_peers_2 = choose_peers_to_dial_randomly(eligible_peers, num_eligible_peers);
+        assert_eq!(selected_peers_2.len(), num_eligible_peers);
+
+        // Verify the selected peer sets are identical
+        for peer in selected_peers_1.clone() {
+            assert!(selected_peers_2.contains(&peer));
+        }
+
+        // Verify that the peer orders are different (the peers were shuffled randomly!)
+        assert_ne!(selected_peers_1, selected_peers_2);
+    }
+
+    #[test]
+    fn test_choose_random_peers_recently_dialed() {
+        // Create a set of eligible peers
+        let mut eligible_peers = vec![];
+
+        // Add peers that have not been dialed recently
+        let num_non_dialed_peers = 20;
+        let non_dialed_peers = insert_non_dialed_peers(num_non_dialed_peers, &mut eligible_peers);
+
+        // Add peers that have been dialed recently
+        let num_dialed_peers = 60;
+        let dialed_peers = insert_dialed_peers(num_dialed_peers, &mut eligible_peers);
+
+        // Choose various peers randomly (until the max non-dialed peers) and verify the selection
+        for num_peers_to_dial in 1..=num_non_dialed_peers {
+            // Choose peers randomly and verify the number of selected peers
+            let selected_peers =
+                choose_peers_to_dial_randomly(eligible_peers.clone(), num_peers_to_dial);
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Verify that all of the selected peers were not dialed recently
+            for (peer_id, _) in selected_peers {
+                assert!(non_dialed_peers.contains(&peer_id));
+                assert!(!dialed_peers.contains(&peer_id));
+            }
+        }
+
+        // Choose various peers randomly (beyond the max non-dialed peers) and verify the selection
+        let mut non_dialed_peer_selected = false;
+        let mut dialed_peer_selected = false;
+        let total_num_peers = num_non_dialed_peers + num_dialed_peers;
+        for num_peers_to_dial in num_non_dialed_peers + 1..=total_num_peers {
+            // Choose peers randomly and verify the number of selected peers
+            let selected_peers =
+                choose_peers_to_dial_randomly(eligible_peers.clone(), num_peers_to_dial);
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Update the selected peer flags
+            for (peer_id, _) in selected_peers {
+                if non_dialed_peers.contains(&peer_id) {
+                    non_dialed_peer_selected = true;
+                }
+                if dialed_peers.contains(&peer_id) {
+                    dialed_peer_selected = true;
+                }
+            }
+
+            // Verify that at least one of each peer type was selected
+            assert!(non_dialed_peer_selected);
+            assert!(dialed_peer_selected);
+        }
+    }
+
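
The policy these assertions encode is: exhaust the peers that have not been dialed recently before touching recently-dialed ones. A minimal sketch of that shape, assuming the `rand` crate; the helper name and signature are hypothetical, and this is not the production `choose_peers_to_dial_randomly`:

    use rand::seq::SliceRandom;

    // Hypothetical sketch: prefer peers that were not dialed recently, and
    // only fill any remaining slots from the recently-dialed set.
    fn choose_preferring_non_dialed<T: Clone>(
        non_dialed: &[T],
        dialed: &[T],
        num_to_select: usize,
    ) -> Vec<T> {
        let mut rng = rand::thread_rng();
        // Take as many non-dialed peers as possible (up to num_to_select)
        let mut selected: Vec<T> = non_dialed
            .choose_multiple(&mut rng, num_to_select)
            .cloned()
            .collect();
        // Top up any shortfall from the recently-dialed peers
        let shortfall = num_to_select.saturating_sub(selected.len());
        selected.extend(dialed.choose_multiple(&mut rng, shortfall).cloned());
        selected
    }
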
+    #[test]
+    fn test_choose_peers_by_latency_dialed() {
+        // Create a set of eligible peers
+        let mut eligible_peers = vec![];
+
+        // Add peers that have not been dialed recently
+        let num_non_dialed_peers = 30;
+        let non_dialed_peers = insert_non_dialed_peers(num_non_dialed_peers, &mut eligible_peers);
+
+        // Add peers that have been dialed recently
+        let num_dialed_peers = 30;
+        let dialed_peers = insert_dialed_peers(num_dialed_peers, &mut eligible_peers);
+
+        // Create the discovered peer set
+        let discovered_peers = create_discovered_peers(eligible_peers.clone(), true);
+
+        // Choose peers by latency (until the max non-dialed peers) and verify the selection
+        for num_peers_to_dial in 1..=num_non_dialed_peers {
+            // Choose peers by latency and verify the number of selected peers
+            let selected_peers = choose_random_peers_by_ping_latency(
+                NetworkContext::mock(),
+                eligible_peers.clone(),
+                num_peers_to_dial,
+                discovered_peers.clone(),
+            );
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Verify that all of the selected peers were not dialed recently
+            for (peer_id, _) in selected_peers {
+                assert!(non_dialed_peers.contains(&peer_id));
+                assert!(!dialed_peers.contains(&peer_id));
+            }
+        }
+
+        // Choose peers by latency (beyond the max non-dialed peers) and verify the selection
+        let total_num_peers = num_non_dialed_peers + num_dialed_peers;
+        for num_peers_to_dial in num_non_dialed_peers + 1..=total_num_peers {
+            // Choose peers by latency and verify the number of selected peers
+            let selected_peers = choose_random_peers_by_ping_latency(
+                NetworkContext::mock(),
+                eligible_peers.clone(),
+                num_peers_to_dial,
+                discovered_peers.clone(),
+            );
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Get the selected peer IDs
+            let selected_peer_ids = selected_peers
+                .iter()
+                .map(|(peer_id, _)| *peer_id)
+                .collect::<HashSet<_>>();
+
+            // Verify the peer selection
+            for non_dialed_peer in non_dialed_peers.clone() {
+                assert!(selected_peer_ids.contains(&non_dialed_peer));
+            }
+
+            // Verify that at least some dialed peers were selected
+            let dialed_selected_peers = selected_peer_ids
+                .difference(&non_dialed_peers)
+                .cloned()
+                .collect::<HashSet<_>>();
+            assert!(!dialed_selected_peers.is_empty());
+            assert!(dialed_peers.is_superset(&dialed_selected_peers));
+        }
+    }
+
+    #[test]
+    fn test_choose_peers_by_latency_missing_pings() {
+        // Create an empty set of eligible peers
+        let mut eligible_peers = vec![];
+
+        // Choose several peers by latency and verify none are selected
+        let network_context = NetworkContext::mock();
+        let discovered_peers = Arc::new(RwLock::new(DiscoveredPeerSet::default()));
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            5,
+            discovered_peers.clone(),
+        );
+        assert!(selected_peers.is_empty());
+
+        // Add peers that have not been dialed recently
+        let num_non_dialed_peers = 30;
+        let _ = insert_non_dialed_peers(num_non_dialed_peers, &mut eligible_peers);
+
+        // Create the discovered peer set (without ping latencies)
+        let discovered_peers = create_discovered_peers(eligible_peers.clone(), false);
+
+        // Choose several peers by latency and verify the number of selected peers
+        let num_peers_to_choose = 5;
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_peers_to_choose,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_peers_to_choose);
+
+        // Choose all peers by latency and verify the number of selected peers
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_non_dialed_peers,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_non_dialed_peers);
+
+        // Choose more peers by latency than are available and verify the number of selected peers
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_non_dialed_peers + 1,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_non_dialed_peers);
+
+        // Add peers that have been dialed recently (with no ping latencies)
+        let num_dialed_peers = 30;
+        let _ = insert_dialed_peers(num_dialed_peers, &mut eligible_peers);
+
+        // Create the discovered peer set (without ping latencies)
+        let discovered_peers = create_discovered_peers(eligible_peers.clone(), false);
+
+        // Choose more peers than non dialed-peers and verify the number of selected peers
+        let num_peers_to_choose = num_non_dialed_peers + 10;
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_peers_to_choose,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_peers_to_choose);
+
+        // Choose all peers by latency and verify the number of selected peers
+        let num_peers_to_choose = num_non_dialed_peers + num_dialed_peers;
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_peers_to_choose,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_peers_to_choose);
+
+        // Choose more peers than are available and verify the number of selected peers
+        let num_total_peers = num_non_dialed_peers + num_dialed_peers;
+        let selected_peers = choose_random_peers_by_ping_latency(
+            network_context,
+            eligible_peers.clone(),
+            num_total_peers + 10,
+            discovered_peers.clone(),
+        );
+        assert_eq!(selected_peers.len(), num_total_peers);
+    }
+
+    #[test]
+    fn test_choose_peers_by_latency_prioritized_dialed() {
+        // Create a set of eligible peers
+        let mut eligible_peers = vec![];
+
+        // Add peers that have been dialed recently
+        let num_dialed_peers = 100;
+        let dialed_peers = insert_dialed_peers(num_dialed_peers, &mut eligible_peers);
+
+        // Create the discovered peer set
+        let discovered_peers = create_discovered_peers(eligible_peers.clone(), true);
+
+        // Add peers that have not been dialed recently (with no ping latencies)
+        let num_non_dialed_peers = 100;
+        let non_dialed_peers = insert_non_dialed_peers(num_non_dialed_peers, &mut eligible_peers);
+
+        // Choose peers by latency (multiple times) and verify the selection
+        let mut peer_selection_counts = HashMap::new();
+        for _ in 0..5000 {
+            // Choose a single peer by latency and verify the number of selected peers
+            let num_peers_to_dial = 1;
+            let selected_peers = choose_random_peers_by_ping_latency(
+                NetworkContext::mock(),
+                eligible_peers.clone(),
+                num_peers_to_dial,
+                discovered_peers.clone(),
+            );
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Verify the selection and update the peer selection counts
+            for (peer_id, _) in selected_peers {
+                // Verify that the peer was dialed recently
+                assert!(!non_dialed_peers.contains(&peer_id));
+                assert!(dialed_peers.contains(&peer_id));
+
+                // Update the peer selection counts
+                let count = peer_selection_counts.entry(peer_id).or_insert(0);
+                *count += 1;
+            }
+        }
+
+        // Verify the top 10% of selected peers are the lowest latency peers
+        verify_highest_peer_selection_latencies(discovered_peers.clone(), &peer_selection_counts);
+    }
+
+    #[test]
+    fn test_choose_peers_by_latency_prioritized_non_dialed() {
+        // Create a set of eligible peers
+        let mut eligible_peers = vec![];
+
+        // Add peers that have not been dialed recently
+        let num_non_dialed_peers = 100;
+        let non_dialed_peers = insert_non_dialed_peers(num_non_dialed_peers, &mut eligible_peers);
+
+        // Add peers that have been dialed recently
+        let num_dialed_peers = 100;
+        let dialed_peers = insert_dialed_peers(num_dialed_peers, &mut eligible_peers);
+
+        // Create the discovered peer set (with ping latencies)
+        let discovered_peers = create_discovered_peers(eligible_peers.clone(), true);
+
+        // Choose peers by latency (multiple times) and verify the selection
+        let mut peer_selection_counts = HashMap::new();
+        for _ in 0..5000 {
+            // Choose a single peer by latency and verify the number of selected peers
+            let num_peers_to_dial = 1;
+            let selected_peers = choose_random_peers_by_ping_latency(
+                NetworkContext::mock(),
+                eligible_peers.clone(),
+                num_peers_to_dial,
+                discovered_peers.clone(),
+            );
+            assert_eq!(selected_peers.len(), num_peers_to_dial);
+
+            // Verify the selection and update the peer selection counts
+            for (peer_id, _) in selected_peers {
+                // Verify that the peer was not dialed recently
+                assert!(non_dialed_peers.contains(&peer_id));
+                assert!(!dialed_peers.contains(&peer_id));
+
+                // Update the peer selection counts
+                let count = peer_selection_counts.entry(peer_id).or_insert(0);
+                *count += 1;
+            }
+        }
+
+        // Verify the top 10% of selected peers are the lowest latency peers
+        verify_highest_peer_selection_latencies(discovered_peers.clone(), &peer_selection_counts);
+    }
+
+    #[test]
+    fn test_latency_to_weights() {
+        // Verify that a latency of 0 has a weight of 0
+        assert_eq!(convert_latency_to_weight(0), 0.0);
+
+        // Verify that latencies are scaled exponentially
+        assert_eq!(convert_latency_to_weight(1), 1000.0);
+        assert_eq!(convert_latency_to_weight(5), 200.0);
+        assert_eq!(convert_latency_to_weight(10), 100.0);
+        assert_eq!(convert_latency_to_weight(20), 50.0);
+        assert_eq!(convert_latency_to_weight(25), 20.0);
+        assert_eq!(convert_latency_to_weight(50), 5.0);
+        assert_eq!(convert_latency_to_weight(100), 0.625);
+        assert_eq!(convert_latency_to_weight(200), 0.01953125);
+    }
+
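
These expected values follow a simple rule, visible in the `convert_latency_to_weight` definition changed later in this series: invert the latency (scaled so 1 ms maps to 1000.0), then halve the weight once per full 25 ms of latency. A self-contained sketch that reproduces the asserted values:

    // Sketch of the weighting rule the assertions above encode: weight is
    // 1000 / latency_ms, halved once for every full 25 ms of latency.
    fn latency_weight_ms(latency_ms: u64) -> f64 {
        if latency_ms == 0 {
            return 0.0; // zero latency is treated as invalid
        }
        let mut weight = 1000.0 / latency_ms as f64;
        for _ in 0..(latency_ms / 25) {
            weight /= 2.0;
        }
        weight // e.g., 100 ms: base 10.0 halved 4 times = 0.625
    }
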
+    #[test]
+    fn test_should_select_peers_by_latency() {
+        // Create a validator network context
+        let validator_network_context =
+            NetworkContext::new(RoleType::Validator, NetworkId::Validator, PeerId::random());
+
+        // Verify that we don't select peers by latency for the validator network
+        let enable_latency_aware_dialing = true;
+        assert!(!should_select_peers_by_latency(
+            &validator_network_context,
+            enable_latency_aware_dialing
+        ));
+
+        // Create a VFN network context
+        let vfn_network_context =
+            NetworkContext::new(RoleType::FullNode, NetworkId::Vfn, PeerId::random());
+
+        // Verify that we don't select peers by latency for the VFN network
+        let enable_latency_aware_dialing = true;
+        assert!(!should_select_peers_by_latency(
+            &vfn_network_context,
+            enable_latency_aware_dialing
+        ));
+
+        // Create a public network context
+        let public_network_context =
+            NetworkContext::new(RoleType::FullNode, NetworkId::Public, PeerId::random());
+
+        // Verify that we select peers by latency for the public network
+        let enable_latency_aware_dialing = true;
+        assert!(should_select_peers_by_latency(
+            &public_network_context,
+            enable_latency_aware_dialing
+        ));
+
+        // Disable peer ping latencies and verify that we don't select peers by latency
+        let enable_latency_aware_dialing = false;
+        assert!(!should_select_peers_by_latency(
+            &public_network_context,
+            enable_latency_aware_dialing
+        ));
+    }
+
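
Taken together, these cases pin down the behavior: latency-aware dialing applies only to the public network, and only when the config flag is enabled. A sketch consistent with the assertions; this is inferred from the test, since the implementation is outside this hunk, and it assumes `NetworkId` supports direct equality comparison:

    // Inferred behavior: only public-network dialing is latency-aware
    fn should_select_peers_by_latency_sketch(
        network_context: &NetworkContext,
        enable_latency_aware_dialing: bool,
    ) -> bool {
        enable_latency_aware_dialing && network_context.network_id() == NetworkId::Public
    }
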
+    /// Creates a set of discovered peers from the given eligible
+    /// peers. If `set_ping_latencies` is true, random ping latencies
+    /// are set for each peer.
+    fn create_discovered_peers(
+        eligible_peers: Vec<(PeerId, DiscoveredPeer)>,
+        set_ping_latencies: bool,
+    ) -> Arc<RwLock<DiscoveredPeerSet>> {
+        // Create a new discovered peer set
+        let mut peer_set = HashMap::new();
+        for (peer_id, mut peer) in eligible_peers {
+            // Set a random ping latency between 1 and 1000 ms (if required)
+            if set_ping_latencies {
+                let ping_latency_ms = rand::thread_rng().gen_range(1, 1000);
+                peer.set_ping_latency_ms(ping_latency_ms);
+            }
+
+            // Insert the peer into the set
+            peer_set.insert(peer_id, peer.clone());
+        }
+
+        // Create and return the discovered peers
+        Arc::new(RwLock::new(DiscoveredPeerSet::new_from_peer_set(peer_set)))
+    }
+
+    /// Creates a set of eligible peers (as specified by the number of peers)
+    fn create_eligible_peers(num_eligible_peers: usize) -> Vec<(PeerId, DiscoveredPeer)> {
+        let mut eligible_peers = vec![];
+        for _ in 0..num_eligible_peers {
+            eligible_peers.push((
+                AccountAddress::random(),
+                DiscoveredPeer::new(PeerRole::PreferredUpstream),
+            ));
+        }
+        eligible_peers
+    }
+
+    /// Creates and inserts a set of dialed peers into the eligible peers
+    /// set, and returns the set of dialed peer IDs.
+    fn insert_dialed_peers(
+        num_dialed_peers: usize,
+        eligible_peers: &mut Vec<(PeerId, DiscoveredPeer)>,
+    ) -> HashSet<PeerId> {
+        let mut dialed_peers = hashset![];
+        for _ in 0..num_dialed_peers {
+            // Create a dialed peer
+            let peer_id = AccountAddress::random();
+            let mut peer = DiscoveredPeer::new(PeerRole::PreferredUpstream);
+            dialed_peers.insert(peer_id);
+
+            // Set the last dial time to be recent
+            peer.update_last_dial_time();
+
+            // Add the peer to the eligible peers
+            eligible_peers.push((peer_id, peer));
+        }
+        dialed_peers
+    }
+
+    /// Creates and inserts a set of non-dialed peers into the eligible peers
+    /// set, and returns the set of non-dialed peer IDs.
+    fn insert_non_dialed_peers(
+        num_non_dialed_peers: usize,
+        eligible_peers: &mut Vec<(PeerId, DiscoveredPeer)>,
+    ) -> HashSet<PeerId> {
+        let mut non_dialed_peers = hashset![];
+        for _ in 0..num_non_dialed_peers {
+            // Create a non-dialed peer
+            let peer_id = AccountAddress::random();
+            non_dialed_peers.insert(peer_id);
+
+            // Add the peer to the eligible peers
+            eligible_peers.push((peer_id, DiscoveredPeer::new(PeerRole::ValidatorFullNode)));
+        }
+        non_dialed_peers
+    }
+
+    /// Verifies the top 10% of selected peers are the lowest latency peers
+    fn verify_highest_peer_selection_latencies(
+        discovered_peers: Arc<RwLock<DiscoveredPeerSet>>,
+        peers_and_selection_counts: &HashMap<PeerId, u64>,
+    ) {
+        // Build a max-heap of all peers by their selection counts
+        let mut max_heap_selection_counts = BinaryHeap::new();
+        for (peer, selection_count) in peers_and_selection_counts.clone() {
+            max_heap_selection_counts.push((selection_count, peer));
+        }
+
+        // Verify the top 10% of polled peers are the lowest latency peers
+        let peers_to_verify = peers_and_selection_counts.len() / 10;
+        let mut highest_seen_latency = 0.0;
+        for _ in 0..peers_to_verify {
+            // Get the peer
+            let (_, peer) = max_heap_selection_counts.pop().unwrap();
+
+            // Get the peer's ping latency
+            let discovered_peers = discovered_peers.read();
+            let discovered_peer = discovered_peers.peer_set.get(&peer).unwrap();
+            let ping_latency = discovered_peer.ping_latency_ms.unwrap() as f64;
+
+            // Verify that the ping latencies are increasing
+            if ping_latency <= highest_seen_latency {
+                // The ping latencies did not increase. This should only be
+                // possible if the latencies are nearly identical (i.e.,
+                // within the small comparison tolerance used below).
+                if (highest_seen_latency - ping_latency) > 0.1 {
+                    panic!("The ping latencies are not increasing! Are peers weighted by latency?");
+                }
+            }
+
+            // Update the highest seen latency
+            highest_seen_latency = ping_latency;
+        }
+    }
+}
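
For reference, the weighted draw exercised by these tests can be sketched with the `rand` crate's `WeightedIndex`; the function below is illustrative, not the production `choose_random_peers_by_ping_latency`:

    use rand::distributions::{Distribution, WeightedIndex};

    // Hypothetical sketch: sample one peer, with probability proportional
    // to its latency-derived weight (lower latency => larger weight).
    fn pick_one_weighted<T: Copy>(peers_and_weights: &[(T, f64)]) -> Option<T> {
        let weights: Vec<f64> = peers_and_weights.iter().map(|(_, w)| *w).collect();
        // WeightedIndex::new fails if the list is empty or all weights are zero
        let dist = WeightedIndex::new(&weights).ok()?;
        let mut rng = rand::thread_rng();
        Some(peers_and_weights[dist.sample(&mut rng)].0)
    }
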

From a86669f3cc84a6ee83e407daba228030985c735d Mon Sep 17 00:00:00 2001
From: Josh Lind 
Date: Thu, 1 Feb 2024 11:16:13 -0500
Subject: [PATCH 41/44] [Network] Adopt f64 for ping latencies.

---
 .../framework/src/connectivity_manager/mod.rs | 65 ++++++++++++-------
 .../src/connectivity_manager/selection.rs     | 36 +++++-----
 network/framework/src/counters.rs             | 23 ++-----
 3 files changed, 65 insertions(+), 59 deletions(-)

diff --git a/network/framework/src/connectivity_manager/mod.rs b/network/framework/src/connectivity_manager/mod.rs
index 20ac38af49837..920c274835fe9 100644
--- a/network/framework/src/connectivity_manager/mod.rs
+++ b/network/framework/src/connectivity_manager/mod.rs
@@ -53,6 +53,7 @@ use futures::{
 };
 use futures_util::future::join_all;
 use itertools::Itertools;
+use ordered_float::OrderedFloat;
 use rand_latest::Rng;
 use serde::Serialize;
 use std::{
@@ -211,18 +212,18 @@ impl DiscoveredPeerSet {
     }
 
     /// Returns the ping latency for the specified peer (if one was found)
-    fn get_ping_latency_ms(&self, peer_id: &PeerId) -> Option<u64> {
+    fn get_ping_latency_secs(&self, peer_id: &PeerId) -> Option<f64> {
         if let Some(discovered_peer) = self.peer_set.get(peer_id) {
-            discovered_peer.ping_latency_ms
+            discovered_peer.ping_latency_secs
         } else {
             None
         }
     }
 
     /// Updates the ping latency for the specified peer (if one was found)
-    fn update_ping_latency_ms(&mut self, peer_id: &PeerId, latency: u64) {
+    fn update_ping_latency_secs(&mut self, peer_id: &PeerId, latency_secs: f64) {
         if let Some(discovered_peer) = self.peer_set.get_mut(peer_id) {
-            discovered_peer.set_ping_latency_ms(latency)
+            discovered_peer.set_ping_latency_secs(latency_secs)
         }
     }
 }
@@ -235,8 +236,8 @@ struct DiscoveredPeer {
     keys: PublicKeys,
     /// The last time the node was dialed
     last_dial_time: SystemTime,
-    /// The calculated peer ping latency (ms)
-    ping_latency_ms: Option<u64>,
+    /// The calculated peer ping latency (secs)
+    ping_latency_secs: Option<f64>,
 }
 
 impl DiscoveredPeer {
@@ -246,7 +247,7 @@ impl DiscoveredPeer {
             addrs: Addresses::default(),
             keys: PublicKeys::default(),
             last_dial_time: SystemTime::UNIX_EPOCH,
-            ping_latency_ms: None,
+            ping_latency_secs: None,
         }
     }
 
@@ -271,8 +272,8 @@ impl DiscoveredPeer {
     }
 
     /// Updates the ping latency for this peer
-    pub fn set_ping_latency_ms(&mut self, latency: u64) {
-        self.ping_latency_ms = Some(latency);
+    pub fn set_ping_latency_secs(&mut self, latency_secs: f64) {
+        self.ping_latency_secs = Some(latency_secs);
     }
 
     /// Based on input, backoff on amount of time to dial a peer again
@@ -648,7 +649,7 @@ where
         // Identify the eligible peers that don't already have latency information
         let peers_to_ping = eligible_peers
             .into_iter()
-            .filter(|(_, peer)| peer.ping_latency_ms.is_none())
+            .filter(|(_, peer)| peer.ping_latency_secs.is_none())
             .collect::<Vec<_>>();
 
         // If there are no peers to ping, return early
@@ -828,16 +829,25 @@ where
         // future.
         self.dial_eligible_peers(pending_dials).await;
 
-        // Update the metrics for any connected peer ping latencies
-        self.update_connected_ping_latency_metrics();
+        // Update the metrics for any peer ping latencies
+        self.update_ping_latency_metrics();
     }
 
-    /// Updates the metrics for tracking connected peer ping latencies
-    fn update_connected_ping_latency_metrics(&self) {
+    /// Updates the metrics for tracking pre-dial and connected peer ping latencies
+    fn update_ping_latency_metrics(&self) {
+        // Update the pre-dial peer ping latencies
+        for (_, peer) in self.discovered_peers.read().peer_set.iter() {
+            if let Some(ping_latency_secs) = peer.ping_latency_secs {
+                counters::observe_pre_dial_ping_time(&self.network_context, ping_latency_secs);
+            }
+        }
+
+        // Update the connected peer ping latencies
         for peer_id in self.connected.keys() {
-            if let Some(ping_latency_ms) = self.discovered_peers.read().get_ping_latency_ms(peer_id)
+            if let Some(ping_latency_secs) =
+                self.discovered_peers.read().get_ping_latency_secs(peer_id)
             {
-                counters::observe_connected_ping_time(&self.network_context, ping_latency_ms);
+                counters::observe_connected_ping_time(&self.network_context, ping_latency_secs);
             }
         }
     }
@@ -1113,21 +1123,21 @@ fn log_peer_ping_latencies(
     let ping_latency_duration = Instant::now().duration_since(ping_start_time);
     info!(
         NetworkSchema::new(&network_context),
-        "Finished pinging eligible peers! Total peers to ping: {}, num peers pinged: {}, time: {}ms",
+        "Finished pinging eligible peers! Total peers to ping: {}, num peers pinged: {}, time: {} secs",
         total_peers_to_ping,
         num_peers_pinged,
-        ping_latency_duration.as_millis()
+        ping_latency_duration.as_secs_f64()
     );
 
     // Log the ping latencies for the eligible peers (sorted by latency)
     let eligible_peers = discovered_peers.read().peer_set.clone();
     let eligible_peers_and_latencies = eligible_peers
         .into_iter()
-        .map(|(peer_id, peer)| (peer_id, peer.ping_latency_ms))
+        .map(|(peer_id, peer)| (peer_id, peer.ping_latency_secs))
         .collect::<Vec<_>>();
     let sorted_eligible_peers_and_latencies = eligible_peers_and_latencies
         .iter()
-        .sorted_by_key(|(_, ping_latency_ms)| ping_latency_ms)
+        .sorted_by_key(|(_, ping_latency_secs)| ping_latency_secs.map(OrderedFloat))
         .collect::<Vec<_>>();
     info!(
         NetworkSchema::new(&network_context),
@@ -1185,13 +1195,10 @@ fn spawn_latency_ping_task(
                 Duration::from_secs(MAX_CONNECTION_TIMEOUT_SECS),
             ) {
                 // We connected successfully, update the peer's ping latency
-                let ping_latency_ms = start_time.elapsed().as_millis() as u64;
+                let ping_latency_secs = start_time.elapsed().as_secs_f64();
                 discovered_peers
                     .write()
-                    .update_ping_latency_ms(&peer_id, ping_latency_ms);
-
-                // Update the ping latency metrics
-                counters::observe_pre_dial_ping_time(&network_context, ping_latency_ms);
+                    .update_ping_latency_secs(&peer_id, ping_latency_secs);
 
                 // Attempt to terminate the TCP stream cleanly
                 if let Err(error) = tcp_stream.shutdown(Shutdown::Both) {
@@ -1204,6 +1211,14 @@ fn spawn_latency_ping_task(
                 }
 
                 return;
+            } else {
+                // Log a message if we failed to connect to the socket address
+                info!(
+                    NetworkSchema::new(&network_context),
+                    "Failed to ping peer {} at socket address {:?}",
+                    peer_id.short_str(),
+                    socket_address
+                );
             }
         }
     })
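
The measurement pattern in this hunk, timing a raw TCP connect and recording fractional seconds, boils down to the following sketch; the timeout value here is illustrative rather than the production `MAX_CONNECTION_TIMEOUT_SECS`:

    use std::{
        net::{Shutdown, SocketAddr, TcpStream},
        time::{Duration, Instant},
    };

    // Sketch: measure how long a plain TCP connect takes, in fractional seconds
    fn measure_connect_latency_secs(address: SocketAddr) -> Option<f64> {
        let start_time = Instant::now();
        let tcp_stream = TcpStream::connect_timeout(&address, Duration::from_secs(2)).ok()?;
        let latency_secs = start_time.elapsed().as_secs_f64();
        let _ = tcp_stream.shutdown(Shutdown::Both); // best-effort cleanup, as above
        Some(latency_secs)
    }
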
diff --git a/network/framework/src/connectivity_manager/selection.rs b/network/framework/src/connectivity_manager/selection.rs
index 40e3db0a049ac..f31d5759f58d6 100644
--- a/network/framework/src/connectivity_manager/selection.rs
+++ b/network/framework/src/connectivity_manager/selection.rs
@@ -114,8 +114,8 @@ fn choose_peers_by_ping_latency(
     // Gather the latency weights for all peers
     let mut peer_ids_and_latency_weights = vec![];
     for peer_id in peer_ids {
-        if let Some(ping_latency_ms) = discovered_peers.read().get_ping_latency_ms(peer_id) {
-            let latency_weight = convert_latency_to_weight(ping_latency_ms);
+        if let Some(ping_latency_secs) = discovered_peers.read().get_ping_latency_secs(peer_id) {
+            let latency_weight = convert_latency_to_weight(ping_latency_secs);
             peer_ids_and_latency_weights.push((peer_id, OrderedFloat(latency_weight)));
         }
     }
@@ -148,19 +148,18 @@ fn choose_peers_by_ping_latency(
 /// Converts the given latency measurement to a weight. The weight
 /// is calculated as the inverse of the latency, with a scaling
 /// factor to ensure that low latency peers are highly weighted.
-fn convert_latency_to_weight(latency_ms: u64) -> f64 {
+fn convert_latency_to_weight(latency_secs: f64) -> f64 {
     // If the latency is <= 0, something has gone wrong, so return 0.
-    let latency_ms = latency_ms as f64;
-    if latency_ms <= 0.0 {
+    if latency_secs <= 0.0 {
         return 0.0;
     }
 
     // Invert the latency to get the weight
-    let mut weight = 1000.0 / latency_ms;
+    let mut weight = 1.0 / latency_secs;
 
     // For every 25ms of latency, reduce the weight by 1/2 (to
     // ensure that low latency peers are highly weighted)
-    let num_reductions = (latency_ms / 25.0) as usize;
+    let num_reductions = (latency_secs / 0.025) as usize;
     for _ in 0..num_reductions {
         weight /= 2.0;
     }
@@ -584,17 +583,17 @@ mod test {
     #[test]
     fn test_latency_to_weights() {
         // Verify that a latency of 0 has a weight of 0
-        assert_eq!(convert_latency_to_weight(0), 0.0);
+        assert_eq!(convert_latency_to_weight(0.0), 0.0);
 
         // Verify that latencies are scaled exponentially
-        assert_eq!(convert_latency_to_weight(1), 1000.0);
-        assert_eq!(convert_latency_to_weight(5), 200.0);
-        assert_eq!(convert_latency_to_weight(10), 100.0);
-        assert_eq!(convert_latency_to_weight(20), 50.0);
-        assert_eq!(convert_latency_to_weight(25), 20.0);
-        assert_eq!(convert_latency_to_weight(50), 5.0);
-        assert_eq!(convert_latency_to_weight(100), 0.625);
-        assert_eq!(convert_latency_to_weight(200), 0.01953125);
+        assert_eq!(convert_latency_to_weight(0.001), 1000.0);
+        assert_eq!(convert_latency_to_weight(0.005), 200.0);
+        assert_eq!(convert_latency_to_weight(0.01), 100.0);
+        assert_eq!(convert_latency_to_weight(0.02), 50.0);
+        assert_eq!(convert_latency_to_weight(0.025), 20.0);
+        assert_eq!(convert_latency_to_weight(0.05), 5.0);
+        assert_eq!(convert_latency_to_weight(0.1), 0.625);
+        assert_eq!(convert_latency_to_weight(0.2), 0.01953125);
     }
 
     #[test]
@@ -653,7 +652,8 @@ mod test {
             // Set a random ping latency between 1 and 1000 ms (if required)
             if set_ping_latencies {
                 let ping_latency_ms = rand::thread_rng().gen_range(1, 1000);
-                peer.set_ping_latency_ms(ping_latency_ms);
+                let ping_latency_secs = ping_latency_ms as f64 / 1000.0;
+                peer.set_ping_latency_secs(ping_latency_secs);
             }
 
             // Insert the peer into the set
@@ -737,7 +737,7 @@ mod test {
             // Get the peer's ping latency
             let discovered_peers = discovered_peers.read();
             let discovered_peer = discovered_peers.peer_set.get(&peer).unwrap();
-            let ping_latency = discovered_peer.ping_latency_ms.unwrap() as f64;
+            let ping_latency = discovered_peer.ping_latency_secs.unwrap();
 
             // Verify that the ping latencies are increasing
             if ping_latency <= highest_seen_latency {
diff --git a/network/framework/src/counters.rs b/network/framework/src/counters.rs
index 64bbe6cddc822..e5b5b68c8aacd 100644
--- a/network/framework/src/counters.rs
+++ b/network/framework/src/counters.rs
@@ -13,7 +13,6 @@ use aptos_netcore::transport::ConnectionOrigin;
 use aptos_short_hex_str::AsShortHexStr;
 use aptos_types::PeerId;
 use once_cell::sync::Lazy;
-use std::time::Duration;
 
 // some type labels
 pub const REQUEST_LABEL: &str = "request";
@@ -604,7 +603,7 @@ pub fn start_serialization_timer(protocol_id: ProtocolId, operation: &str) -> Hi
 }
 
 /// Counters related to peer ping times (before and after dialing)
-pub static NETWORK_PRE_DIAL_PING_TIME: Lazy<HistogramVec> = Lazy::new(|| {
+pub static NETWORK_PEER_PING_TIMES: Lazy<HistogramVec> = Lazy::new(|| {
     register_histogram_vec!(
         "aptos_network_peer_ping_times",
         "Counters related to peer ping times (before and after dialing)",
@@ -613,27 +612,19 @@ pub static NETWORK_PRE_DIAL_PING_TIME: Lazy = Lazy::new(|| {
     .unwrap()
 });
 
-/// Starts and returns the timer for peer pings (before dialing)
-pub fn start_pre_dial_ping_timer(network_context: &NetworkContext) -> HistogramTimer {
-    NETWORK_PRE_DIAL_PING_TIME
-        .with_label_values(&[network_context.network_id().as_str(), PRE_DIAL_LABEL])
-        .start_timer()
-}
-
 /// Observes the ping time for a connected peer
-pub fn observe_connected_ping_time(network_context: &NetworkContext, ping_latency_ms: u64) {
-    observe_ping_time(network_context, ping_latency_ms, CONNECTED_LABEL);
+pub fn observe_connected_ping_time(network_context: &NetworkContext, ping_latency_secs: f64) {
+    observe_ping_time(network_context, ping_latency_secs, CONNECTED_LABEL);
 }
 
 /// Observes the ping time for a peer before dialing
-pub fn observe_pre_dial_ping_time(network_context: &NetworkContext, ping_latency_ms: u64) {
-    observe_ping_time(network_context, ping_latency_ms, PRE_DIAL_LABEL);
+pub fn observe_pre_dial_ping_time(network_context: &NetworkContext, ping_latency_secs: f64) {
+    observe_ping_time(network_context, ping_latency_secs, PRE_DIAL_LABEL);
 }
 
 /// Observes the ping time for the given label
-fn observe_ping_time(network_context: &NetworkContext, ping_latency_ms: u64, label: &str) {
-    let ping_latency_secs = Duration::from_millis(ping_latency_ms).as_secs_f64();
-    NETWORK_PRE_DIAL_PING_TIME
+fn observe_ping_time(network_context: &NetworkContext, ping_latency_secs: f64, label: &str) {
+    NETWORK_PEER_PING_TIMES
         .with_label_values(&[network_context.network_id().as_str(), label])
         .observe(ping_latency_secs);
 }
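
The consolidated metric above follows the standard prometheus-rs histogram pattern: one registered family, labeled by network and ping phase, observed in seconds. A minimal standalone sketch; the metric name and label names here are illustrative, not the production ones:

    use once_cell::sync::Lazy;
    use prometheus::{register_histogram_vec, HistogramVec};

    // Illustrative histogram family, labeled by network and ping phase
    static PEER_PING_SECONDS: Lazy<HistogramVec> = Lazy::new(|| {
        register_histogram_vec!(
            "example_peer_ping_seconds",
            "Peer ping times in seconds (before and after dialing)",
            &["network_id", "phase"]
        )
        .unwrap()
    });

    fn observe_ping(network_id: &str, phase: &str, latency_secs: f64) {
        PEER_PING_SECONDS
            .with_label_values(&[network_id, phase])
            .observe(latency_secs);
    }
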

From 0da935350140051ffe4ad9e8893053dd90aac5a3 Mon Sep 17 00:00:00 2001
From: Teng Zhang 
Date: Thu, 1 Feb 2024 11:14:42 -0800
Subject: [PATCH 42/44] Revert "fix type checking issue due to name conflicts"
 (#11848)

This reverts commit 577f267911185dad8b3a7a3129e31ca074b62d0f.
---
 .../tests/checking/specs/len_ok.exp           | 27 -------------------
 .../tests/checking/specs/len_ok.move          |  9 -------
 .../checking/specs/len_same_fun_name_err.exp  |  7 -----
 .../checking/specs/len_same_fun_name_err.move | 14 ----------
 .../tests/move_check/typing/len_err.exp       |  8 ------
 .../tests/move_check/typing/len_err.move      | 10 -------
 .../move-model/src/builder/exp_builder.rs     |  9 -------
 7 files changed, 84 deletions(-)
 delete mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
 delete mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
 delete mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
 delete mode 100644 third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
 delete mode 100644 third_party/move/move-compiler/tests/move_check/typing/len_err.exp
 delete mode 100644 third_party/move/move-compiler/tests/move_check/typing/len_err.move

diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
deleted file mode 100644
index b6f5e04220537..0000000000000
--- a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.exp
+++ /dev/null
@@ -1,27 +0,0 @@
-
-Diagnostics:
-warning: Unused local variable `len`. Consider removing or prefixing with an underscore: `_len`
-  ┌─ tests/checking/specs/len_ok.move:4:13
-  │
-4 │         let len = 5;
-  │             ^^^
-
-// ---- Model Dump
-module 0x42::m {
-    private fun f(gallery: &vector) {
-        {
-          let len: u64 = 5;
-          spec {
-            assert Ge(Len($t0), 0);
-          }
-          ;
-          Tuple()
-        }
-    }
-    spec fun $f(gallery: vector) {
-        {
-          let len: u256 = 5;
-          Tuple()
-        }
-    }
-} // end 0x42::m
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move b/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
deleted file mode 100644
index a082f4b344719..0000000000000
--- a/third_party/move/move-compiler-v2/tests/checking/specs/len_ok.move
+++ /dev/null
@@ -1,9 +0,0 @@
-module 0x42::m {
-
-    fun f(gallery: &vector) {
-        let len = 5;
-        spec {
-            assert len(gallery) >= 0;
-        };
-    }
-}
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
deleted file mode 100644
index 9cf694dbbac09..0000000000000
--- a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.exp
+++ /dev/null
@@ -1,7 +0,0 @@
-
-Diagnostics:
-error: invalid call of `m::len`: argument count mismatch (expected 0 but found 1)
-   ┌─ tests/checking/specs/len_same_fun_name_err.move:10:20
-   │
-10 │             assert len(gallery) >= 0; // err is raised here because the built-in one is shadowed.
-   │                    ^^^^^^^^^^^^
diff --git a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move b/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
deleted file mode 100644
index 48c56deb9d945..0000000000000
--- a/third_party/move/move-compiler-v2/tests/checking/specs/len_same_fun_name_err.move
+++ /dev/null
@@ -1,14 +0,0 @@
-module 0x42::m {
-
-    fun len(): bool {
-        true
-    }
-
-    fun f(gallery: &vector) {
-        let len = 5;
-        spec {
-            assert len(gallery) >= 0; // err is raised here because the built-in one is shadowed.
-            assert len();
-        };
-    }
-}
diff --git a/third_party/move/move-compiler/tests/move_check/typing/len_err.exp b/third_party/move/move-compiler/tests/move_check/typing/len_err.exp
deleted file mode 100644
index 514580232fb50..0000000000000
--- a/third_party/move/move-compiler/tests/move_check/typing/len_err.exp
+++ /dev/null
@@ -1,8 +0,0 @@
-error[E02010]: invalid name
-  ┌─ tests/move_check/typing/len_err.move:5:9
-  │  
-5 │ ╭         spec {
-6 │ │             assert len(gallery) >= len;
-7 │ │         };
-  │ ╰─────────^ Conflicting name 'len' is used as both a variable and a function pointer (including built-in functions) in spec
-
diff --git a/third_party/move/move-compiler/tests/move_check/typing/len_err.move b/third_party/move/move-compiler/tests/move_check/typing/len_err.move
deleted file mode 100644
index 73485764cbc63..0000000000000
--- a/third_party/move/move-compiler/tests/move_check/typing/len_err.move
+++ /dev/null
@@ -1,10 +0,0 @@
-module 0x42::m {
-
-    fun f_err(gallery: &vector) {
-        let len = 5;
-        spec {
-            assert len(gallery) >= len;
-        };
-    }
-
-}
diff --git a/third_party/move/move-model/src/builder/exp_builder.rs b/third_party/move/move-model/src/builder/exp_builder.rs
index 37c68250ab6ef..111afca6ea52a 100644
--- a/third_party/move/move-model/src/builder/exp_builder.rs
+++ b/third_party/move/move-model/src/builder/exp_builder.rs
@@ -1957,15 +1957,6 @@ impl<'env, 'translator, 'module_translator> ExpTranslator<'env, 'translator, 'mo
                 if n.value.as_str() == "update_field" {
                     return Some(self.translate_update_field(expected_type, loc, generics, args));
                 }
-                let builtin_module = self.parent.parent.builtin_module();
-                let full_name = QualifiedSymbol {
-                    module_name: builtin_module,
-                    symbol: self.symbol_pool().make(&n.value),
-                };
-                // For other built-in functions, type check is performed in translate_call
-                if self.parent.parent.spec_fun_table.get(&full_name).is_some() {
-                    return None;
-                }
             }
         }
         if let EA::ModuleAccess_::Name(n) = &maccess.value {

From fb97dc7aed52c3ad3c0902fa4276951a12e67ec3 Mon Sep 17 00:00:00 2001
From: Justin Chang <37165464+just-in-chang@users.noreply.github.com>
Date: Thu, 1 Feb 2024 12:12:10 -0800
Subject: [PATCH 43/44] [NFT Metadata Crawler] Add IF EXISTS to migration
 (#11866)

* Add IF EXISTS to migration

* not
---
 .../2024-01-31-221845_add_not_parsable_column/down.sql          | 2 +-
 .../migrations/2024-01-31-221845_add_not_parsable_column/up.sql | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
index cdf49e8755b12..271ce0e95d00e 100644
--- a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
+++ b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/down.sql
@@ -1 +1 @@
-ALTER TABLE nft_metadata_crawler.parsed_asset_uris DROP COLUMN do_not_parse;
+ALTER TABLE IF EXISTS nft_metadata_crawler.parsed_asset_uris DROP COLUMN do_not_parse;
diff --git a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql
index 1e480c0d5d43f..59313b9765afb 100644
--- a/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql
+++ b/ecosystem/nft-metadata-crawler-parser/migrations/2024-01-31-221845_add_not_parsable_column/up.sql
@@ -1 +1 @@
-ALTER TABLE nft_metadata_crawler.parsed_asset_uris ADD COLUMN do_not_parse BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE IF EXISTS nft_metadata_crawler.parsed_asset_uris ADD COLUMN IF NOT EXISTS do_not_parse BOOLEAN NOT NULL DEFAULT FALSE;

From e7f9189f3a3290238b918dc5052f19ae9d34326f Mon Sep 17 00:00:00 2001
From: Max Kaplan <1482859+CapCap@users.noreply.github.com>
Date: Thu, 1 Feb 2024 13:04:30 -0800
Subject: [PATCH 44/44] [GRPC] Nits + Clone reduction (#11820)

---
 .../indexer-grpc-data-access/src/gcs.rs       |  4 +-
 .../indexer-grpc-utils/src/counters.rs        | 96 +++++++++++--------
 2 files changed, 56 insertions(+), 44 deletions(-)

diff --git a/ecosystem/indexer-grpc/indexer-grpc-data-access/src/gcs.rs b/ecosystem/indexer-grpc/indexer-grpc-data-access/src/gcs.rs
index 10b68c58f8ea4..4dafefabe97f1 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-data-access/src/gcs.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-data-access/src/gcs.rs
@@ -110,13 +110,13 @@ impl From for StorageReadError {
                 ),
                 false => StorageReadError::PermenantError(
                     GCS_STORAGE_NAME,
-                    anyhow::Error::new(e).context("Failed to download object; it's permernant."),
+                    anyhow::Error::new(e).context("Failed to download object; it's permanent."),
                 ),
             },
             Error::TokenSource(e) => StorageReadError::PermenantError(
                 GCS_STORAGE_NAME,
                 anyhow::anyhow!(e.to_string())
-                    .context("Failed to download object; authenication/token error."),
+                    .context("Failed to download object; authentication/token error."),
             ),
         }
     }
diff --git a/ecosystem/indexer-grpc/indexer-grpc-utils/src/counters.rs b/ecosystem/indexer-grpc/indexer-grpc-utils/src/counters.rs
index fe1e21f50fff4..652c8dec70562 100644
--- a/ecosystem/indexer-grpc/indexer-grpc-utils/src/counters.rs
+++ b/ecosystem/indexer-grpc/indexer-grpc-utils/src/counters.rs
@@ -8,32 +8,54 @@ use once_cell::sync::Lazy;
 use prometheus::{register_int_counter_vec, IntCounterVec};
 
 pub enum IndexerGrpcStep {
-    DataServiceNewRequestReceived,   // [Data Service] New request received.
-    DataServiceWaitingForCacheData,  // [Data Service] Waiting for data from cache.
-    DataServiceDataFetchedCache,     // [Data Service] Fetched data from Redis cache.
-    DataServiceDataFetchedFilestore, // [Data Service] Fetched data from Filestore.
-    DataServiceTxnsDecoded,          // [Data Service] Decoded transactions.
-    DataServiceChunkSent, // [Data Service] One chunk of transactions sent to GRPC response channel.
-    DataServiceAllChunksSent, // [Data Service] All chunks of transactions sent to GRPC response channel. Current batch finished.
+    // [Data Service] New request received.
+    DataServiceNewRequestReceived,
+    // [Data Service] Waiting for data from cache.
+    DataServiceWaitingForCacheData,
+    // [Data Service] Fetched data from Redis cache.
+    DataServiceDataFetchedCache,
+    // [Data Service] Fetched data from Filestore.
+    DataServiceDataFetchedFilestore,
+    // [Data Service] Decoded transactions.
+    DataServiceTxnsDecoded,
+    // [Data Service] One chunk of transactions sent to GRPC response channel.
+    DataServiceChunkSent,
+    // [Data Service] All chunks of transactions sent to GRPC response channel. Current batch finished.
+    DataServiceAllChunksSent,
 
-    CacheWorkerReceivedTxns, // [Indexer Cache] Received transactions from fullnode.
-    CacheWorkerTxnEncoded,   // [Indexer Cache] Encoded transactions.
-    CacheWorkerTxnsProcessed, // [Indexer Cache] Processed transactions in a batch.
-    CacheWorkerBatchProcessed, // [Indexer Cache] Successfully process current batch.
+    // [Indexer Cache] Received transactions from fullnode.
+    CacheWorkerReceivedTxns,
+    // [Indexer Cache] Encoded transactions.
+    CacheWorkerTxnEncoded,
+    // [Indexer Cache] Processed transactions in a batch.
+    CacheWorkerTxnsProcessed,
+    // [Indexer Cache] Successfully process current batch.
+    CacheWorkerBatchProcessed,
 
-    FilestoreFetchTxns,      // [File worker] Fetch transactions from cache.
-    FilestoreUploadTxns,     // [File worker] Upload transactions to filestore.
-    FilestoreUpdateMetadata, // [File worker] Update metadata to filestore.
-    FilestoreProcessedBatch, // [File worker] Successfully process current batch.
-    FileStoreEncodedTxns,    // [File worker] Encoded transactions.
+    // [File worker] Fetch transactions from cache.
+    FilestoreFetchTxns,
+    // [File worker] Upload transactions to filestore.
+    FilestoreUploadTxns,
+    // [File worker] Update metadata to filestore.
+    FilestoreUpdateMetadata,
+    // [File worker] Successfully process current batch.
+    FilestoreProcessedBatch,
+    // [File worker] Encoded transactions.
+    FileStoreEncodedTxns,
 
-    FullnodeFetchedBatch, // [Indexer Fullnode] Fetched batch of transactions from fullnode
-    FullnodeDecodedBatch, // [Indexer Fullnode] Decoded batch of transactions from fullnode
-    FullnodeProcessedBatch, // [Indexer Fullnode] Processed batch of transactions from fullnode
-    FullnodeSentBatch,    // [Indexer Fullnode] Sent batch successfully
+    // [Indexer Fullnode] Fetched batch of transactions from fullnode
+    FullnodeFetchedBatch,
+    // [Indexer Fullnode] Decoded batch of transactions from fullnode
+    FullnodeDecodedBatch,
+    // [Indexer Fullnode] Processed batch of transactions from fullnode
+    FullnodeProcessedBatch,
+    // [Indexer Fullnode] Sent batch successfully
+    FullnodeSentBatch,
 
-    TableInfoProcessedBatch, // [Indexer Table Info] Processed batch of transactions from fullnode
-    TableInfoProcessed,      // [Indexer Table Info] Processed transactions from fullnode
+    // [Indexer Table Info] Processed batch of transactions from fullnode
+    TableInfoProcessedBatch,
+    // [Indexer Table Info] Processed transactions from fullnode
+    TableInfoProcessed,
 }
 
 impl IndexerGrpcStep {
@@ -74,7 +96,7 @@ impl IndexerGrpcStep {
             // Data service steps
             IndexerGrpcStep::DataServiceNewRequestReceived => {
                 "[Data Service] New request received."
-            },
+            }
             IndexerGrpcStep::DataServiceWaitingForCacheData => {
                 "[Data Service] Waiting for data from cache."
             }
@@ -104,10 +126,10 @@ impl IndexerGrpcStep {
             // Table info service steps
             IndexerGrpcStep::TableInfoProcessedBatch => {
                 "[Indexer Table Info] Processed batch successfully"
-            },
+            }
             IndexerGrpcStep::TableInfoProcessed => {
                 "[Indexer Table Info] Processed successfully"
-            },
+            }
         }
     }
 }
@@ -220,6 +242,7 @@ pub fn log_grpc_step(
             step.get_label(),
         );
     } else {
+        let request_metadata = request_metadata.clone().unwrap();
         tracing::info!(
             start_version,
             end_version,
@@ -229,23 +252,12 @@ pub fn log_grpc_step(
             duration_in_secs,
             size_in_bytes,
             // Request metadata variables
-            request_name = request_metadata.clone().unwrap().processor_name.as_str(),
-            request_email = request_metadata.clone().unwrap().request_email.as_str(),
-            request_api_key_name = request_metadata
-                .clone()
-                .unwrap()
-                .request_api_key_name
-                .as_str(),
-            processor_name = request_metadata.clone().unwrap().processor_name.as_str(),
-            connection_id = request_metadata
-                .clone()
-                .unwrap()
-                .request_connection_id
-                .as_str(),
-            request_user_classification = request_metadata
-                .unwrap()
-                .request_user_classification
-                .as_str(),
+            request_name = request_metadata.processor_name.as_str(),
+            request_email = request_metadata.request_email.as_str(),
+            request_api_key_name = request_metadata.request_api_key_name.as_str(),
+            processor_name = request_metadata.processor_name.as_str(),
+            connection_id = request_metadata.request_connection_id.as_str(),
+            request_user_classification = request_metadata.request_user_classification.as_str(),
             service_type,
             step = step.get_step(),
             "{}",