diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c3831c73c77ba..8c6b774f2ad25c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,9 @@ Release channels have their own copy of this changelog: * Added allow_commission_decrease_at_any_time feature which will allow commission on a vote account to be decreased even in the second half of epochs when the commission_updates_only_allowed_in_first_half_of_epoch feature would have prevented it + * Updated local ledger storage so that the RPC endpoint + `getSignaturesForAddress` always returns signatures in block-inclusion order + * RPC's `simulateTransaction` now returns `innerInstructions` as `json`/`jsonParsed` (#34313). * Upgrade Notes * `solana-program` and `solana-sdk` default to support for Borsh v1, with limited backward compatibility for v0.10 and v0.9. Please upgrade to Borsh v1. diff --git a/Cargo.lock b/Cargo.lock index 8453cfea9516e8..7f9f53572d74fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -163,9 +163,9 @@ checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355" [[package]] name = "aquamarine" @@ -450,7 +450,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -604,7 +604,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -772,7 +772,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "syn_derive", ] @@ -1399,9 +1399,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1432,9 +1432,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if 1.0.0", ] @@ -1539,7 +1539,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1550,7 +1550,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1675,9 +1675,9 @@ dependencies = [ [[package]] name = "dir-diff" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" +checksum = "a7ad16bf5f84253b50d6557681c58c3ab67c47c77d39fed9aeb56e947290bd10" dependencies = [ "walkdir", ] @@ -1734,7 +1734,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1840,7 +1840,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -2124,7 +2124,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -2517,9 +2517,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2532,7 +2532,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2937,9 +2937,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" @@ -3405,7 +3405,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3509,7 +3509,7 @@ dependencies = [ "proc-macro-crate 1.1.0", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3521,7 +3521,7 @@ dependencies = [ "proc-macro-crate 2.0.0", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4028,7 +4028,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4206,7 +4206,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4529,9 +4529,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "async-compression", "base64 0.21.5", @@ -4909,7 +4909,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4963,7 +4963,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4980,9 +4980,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.9.27" +version = "0.9.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c" +checksum = "9269cfafc7e0257ee4a42f3f68a307f458c63d9e7c8ba4b58c5d15f1b7d7e8d3" dependencies = [ "indexmap 2.1.0", "itoa", @@ -5013,7 +5013,7 @@ checksum = "91d129178576168c589c9ec973feedf7d3126c01ac2bf08795109aa35b69fb8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -5495,7 +5495,7 @@ dependencies = [ "rand 0.8.5", 
"rayon", "serde_json", - "serde_yaml 0.9.27", + "serde_yaml 0.9.28", "serial_test", "solana-clap-utils", "solana-cli-config", @@ -5765,7 +5765,7 @@ dependencies = [ "lazy_static", "serde", "serde_derive", - "serde_yaml 0.9.27", + "serde_yaml 0.9.28", "solana-clap-utils", "solana-sdk", "url 2.5.0", @@ -5972,6 +5972,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-turbine", + "solana-unified-scheduler-pool", "solana-version", "solana-vote", "solana-vote-program", @@ -6143,7 +6144,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.0", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -6156,7 +6157,7 @@ dependencies = [ "itertools", "serde", "serde_json", - "serde_yaml 0.9.27", + "serde_yaml 0.9.28", "solana-accounts-db", "solana-clap-utils", "solana-cli-config", @@ -6289,7 +6290,7 @@ dependencies = [ "semver 1.0.20", "serde", "serde_yaml 0.8.26", - "serde_yaml 0.9.27", + "serde_yaml 0.9.28", "solana-clap-utils", "solana-config-program", "solana-logger", @@ -6431,6 +6432,7 @@ dependencies = [ "solana-storage-bigtable", "solana-streamer", "solana-transaction-status", + "solana-unified-scheduler-pool", "solana-version", "solana-vote-program", "solana_rbpf", @@ -7188,7 +7190,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7544,6 +7546,24 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-unified-scheduler-logic" +version = "1.18.0" + +[[package]] +name = "solana-unified-scheduler-pool" +version = "1.18.0" +dependencies = [ + "assert_matches", + "solana-ledger", + "solana-logger", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + "solana-unified-scheduler-logic", + "solana-vote", +] + [[package]] name = "solana-upload-perf" version = "1.18.0" @@ -7578,7 +7598,7 @@ dependencies = [ "rayon", "serde", "serde_json", - "serde_yaml 0.9.27", + "serde_yaml 0.9.28", "signal-hook", "solana-account-decoder", "solana-accounts-db", @@ -7863,7 +7883,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7875,7 +7895,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.40", + "syn 2.0.42", "thiserror", ] @@ -7933,7 +7953,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8038,9 +8058,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stream-cancel" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a9eb2715209fb8cc0d942fcdff45674bfc9f0090a0d897e85a22955ad159b" +checksum = "5f9fbf9bd71e4cf18d68a8a0951c0e5b7255920c0cd992c4ff51cddd6ef514a3" dependencies = [ "futures-core", "pin-project", @@ -8106,9 +8126,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.40" +version = "2.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13fa70a4ee923979ffb522cacce59d34421ebdea5625e1073c4326ef9d2dd42e" +checksum = "5b7d0a2c048d661a1a59fcd7355baa232f7ed34e0ee4df2eef3c1c1c0d3852d8" dependencies = [ "proc-macro2", "quote", @@ -8124,7 +8144,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8296,7 +8316,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8308,7 +8328,7 @@ dependencies = [ "proc-macro-error", 
"proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "test-case-core", ] @@ -8329,22 +8349,22 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8481,7 +8501,7 @@ source = "git+https://github.com/solana-labs/solana-tokio.git?rev=7cf47705faacf7 dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8876,9 +8896,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "untrusted" @@ -9023,7 +9043,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "wasm-bindgen-shared", ] @@ -9057,7 +9077,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9345,22 +9365,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -9380,7 +9400,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 05f41f5d48b335..e453797198416b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -108,6 +108,8 @@ members = [ "transaction-status", "turbine", "udp-client", + "unified-scheduler-logic", + "unified-scheduler-pool", "upload-perf", "validator", "version", @@ -136,7 +138,7 @@ Inflector = "0.11.4" aquamarine = "0.3.3" aes-gcm-siv = "0.10.3" ahash = "0.8.6" -anyhow = "1.0.75" +anyhow = "1.0.76" ark-bn254 = "0.4.0" ark-ec = "0.4.0" ark-ff = "0.4.0" @@ -177,7 +179,7 @@ const_format = "0.2.32" core_affinity = "0.5.10" criterion = "0.5.1" criterion-stats = "0.3.0" -crossbeam-channel = "0.5.8" +crossbeam-channel = "0.5.9" csv = "1.3.0" ctrlc = "3.4.1" curve25519-dalek = "3.2.1" @@ -185,7 +187,7 @@ dashmap = "5.5.3" derivation-path = { version = "0.2.0", 
default-features = false } dialoguer = "0.10.4" digest = "0.10.7" -dir-diff = "0.3.2" +dir-diff = "0.3.3" dirs-next = "2.0.0" dlopen2 = "0.5.0" eager = "0.1.0" @@ -214,7 +216,7 @@ histogram = "0.6.9" hmac = "0.12.1" http = "0.2.11" humantime = "2.0.1" -hyper = "0.14.27" +hyper = "0.14.28" hyper-proxy = "0.9.1" im = "15.1.0" index_list = "0.2.11" @@ -234,7 +236,7 @@ jsonrpc-ipc-server = "18.0.0" jsonrpc-pubsub = "18.0.0" jsonrpc-server-utils = "18.0.0" lazy_static = "1.4.0" -libc = "0.2.150" +libc = "0.2.151" libloading = "0.7.4" libsecp256k1 = "0.6.0" light-poseidon = "0.2.0" @@ -282,7 +284,7 @@ rayon = "1.8.0" rcgen = "0.10.0" reed-solomon-erasure = "6.0.0" regex = "1.10.2" -reqwest = { version = "0.11.22", default-features = false } +reqwest = { version = "0.11.23", default-features = false } rolling-file = "0.2.0" rpassword = "7.3" rustc_version = "0.4" @@ -296,7 +298,7 @@ serde_bytes = "0.11.12" serde_derive = "1.0.103" serde_json = "1.0.108" serde_with = { version = "2.3.3", default-features = false } -serde_yaml = "0.9.27" +serde_yaml = "0.9.28" serial_test = "2.0.0" sha2 = "0.10.8" sha3 = "0.10.4" @@ -357,6 +359,8 @@ solana-pubsub-client = { path = "pubsub-client", version = "=1.18.0" } solana-quic-client = { path = "quic-client", version = "=1.18.0" } solana-rayon-threadlimit = { path = "rayon-threadlimit", version = "=1.18.0" } solana-remote-wallet = { path = "remote-wallet", version = "=1.18.0", default-features = false } +solana-unified-scheduler-logic = { path = "unified-scheduler-logic", version = "=1.18.0" } +solana-unified-scheduler-pool = { path = "unified-scheduler-pool", version = "=1.18.0" } solana-rpc = { path = "rpc", version = "=1.18.0" } solana-rpc-client = { path = "rpc-client", version = "=1.18.0", default-features = false } solana-rpc-client-api = { path = "rpc-client-api", version = "=1.18.0" } @@ -393,7 +397,7 @@ spl-token = "=4.0.0" spl-token-2022 = "=0.9.0" spl-token-metadata-interface = "=0.2.0" static_assertions = "1.1.0" -stream-cancel = "0.8.1" +stream-cancel = "0.8.2" strum = "0.24" strum_macros = "0.24" subtle = "2.4.1" @@ -406,7 +410,7 @@ tar = "0.4.40" tarpc = "0.29.0" tempfile = "3.8.1" test-case = "3.3.1" -thiserror = "1.0.50" +thiserror = "1.0.51" tiny-bip39 = "0.8.2" # Update solana-tokio patch below when updating this version tokio = "1.29.1" diff --git a/accounts-bench/src/main.rs b/accounts-bench/src/main.rs index 60ff67735d0d94..88d15ea72482aa 100644 --- a/accounts-bench/src/main.rs +++ b/accounts-bench/src/main.rs @@ -10,6 +10,7 @@ use { accounts_db::{ test_utils::{create_test_accounts, update_accounts_bench}, AccountShrinkThreshold, AccountsDb, CalcAccountsHashDataSource, + ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, }, accounts_index::AccountSecondaryIndexes, ancestors::Ancestors, @@ -69,11 +70,14 @@ fn main() { if fs::remove_dir_all(path.clone()).is_err() { println!("Warning: Couldn't remove {path:?}"); } - let accounts_db = AccountsDb::new_with_config_for_benches( + let accounts_db = AccountsDb::new_with_config( vec![path], &ClusterType::Testnet, AccountSecondaryIndexes::default(), AccountShrinkThreshold::default(), + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), ); let accounts = Accounts::new(Arc::new(accounts_db)); println!("Creating {num_accounts} accounts"); diff --git a/accounts-db/src/accounts.rs b/accounts-db/src/accounts.rs index 992f864c89d4ff..8eb0702967790e 100644 --- a/accounts-db/src/accounts.rs +++ b/accounts-db/src/accounts.rs @@ -824,8 +824,6 @@ mod tests { use { super::*, crate::{ - 
accounts_db::AccountShrinkThreshold, - accounts_index::AccountSecondaryIndexes, rent_collector::RentCollector, transaction_results::{DurableNonceFee, TransactionExecutionDetails}, }, @@ -834,7 +832,6 @@ mod tests { solana_sdk::{ account::{AccountSharedData, WritableAccount}, address_lookup_table::state::LookupTableMeta, - genesis_config::ClusterType, hash::Hash, instruction::{CompiledInstruction, InstructionError}, message::{Message, MessageHeader}, @@ -939,12 +936,7 @@ mod tests { #[test] fn test_load_lookup_table_addresses_account_not_found() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); @@ -967,12 +959,7 @@ mod tests { #[test] fn test_load_lookup_table_addresses_invalid_account_owner() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); @@ -999,12 +986,7 @@ mod tests { #[test] fn test_load_lookup_table_addresses_invalid_account_data() { let ancestors = vec![(0, 0)].into_iter().collect(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let invalid_table_key = Pubkey::new_unique(); @@ -1031,12 +1013,7 @@ mod tests { #[test] fn test_load_lookup_table_addresses() { let ancestors = vec![(1, 1), (0, 0)].into_iter().collect(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let table_key = Pubkey::new_unique(); @@ -1077,12 +1054,7 @@ mod tests { #[test] fn test_load_by_program_slot() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); // Load accounts owned by various programs into AccountsDb @@ -1106,12 +1078,7 @@ mod tests { #[test] fn test_accounts_empty_bank_hash_stats() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); assert!(accounts.accounts_db.get_bank_hash_stats(0).is_some()); assert!(accounts.accounts_db.get_bank_hash_stats(1).is_none()); @@ -1119,12 +1086,7 @@ mod tests { #[test] fn test_lock_accounts_with_duplicates() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - 
AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let keypair = Keypair::new(); @@ -1144,12 +1106,7 @@ mod tests { #[test] fn test_lock_accounts_with_too_many_accounts() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let keypair = Keypair::new(); @@ -1210,12 +1167,7 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); @@ -1320,12 +1272,7 @@ mod tests { let account1 = AccountSharedData::new(2, 0, &Pubkey::default()); let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); @@ -1402,12 +1349,7 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); @@ -1479,12 +1421,7 @@ mod tests { let account2 = AccountSharedData::new(3, 0, &Pubkey::default()); let account3 = AccountSharedData::new(4, 0, &Pubkey::default()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); accounts.store_for_tests(0, &keypair0.pubkey(), &account0); accounts.store_for_tests(0, &keypair1.pubkey(), &account1); @@ -1639,12 +1576,7 @@ mod tests { let mut loaded = vec![loaded0, loaded1]; - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); { accounts @@ -1691,12 +1623,7 @@ mod tests { #[test] fn huge_clean() { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let 
accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let mut old_pubkey = Pubkey::default(); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -2031,12 +1958,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let txs = vec![tx]; let execution_results = vec![new_execution_result( @@ -2145,12 +2067,7 @@ mod tests { let mut loaded = vec![loaded]; let durable_nonce = DurableNonce::from_blockhash(&Hash::new_unique()); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let txs = vec![tx]; let execution_results = vec![new_execution_result( @@ -2187,12 +2104,7 @@ mod tests { #[test] fn test_load_largest_accounts() { - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); /* This test assumes pubkey0 < pubkey1 < pubkey2. diff --git a/accounts-db/src/accounts_db.rs b/accounts-db/src/accounts_db.rs index 743768e485dfbd..570ff8c26a415c 100644 --- a/accounts-db/src/accounts_db.rs +++ b/accounts-db/src/accounts_db.rs @@ -9522,40 +9522,6 @@ pub(crate) enum UpdateIndexThreadSelection { // These functions/fields are only usable from a dev context (i.e. 
tests and benches) #[cfg(feature = "dev-context-only-utils")] impl AccountsDb { - pub fn new_with_config_for_tests( - paths: Vec<PathBuf>, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - Some(ACCOUNTS_DB_CONFIG_FOR_TESTING), - None, - Arc::default(), - ) - } - - pub fn new_with_config_for_benches( - paths: Vec<PathBuf>, - cluster_type: &ClusterType, - account_indexes: AccountSecondaryIndexes, - shrink_ratio: AccountShrinkThreshold, - ) -> Self { - Self::new_with_config( - paths, - cluster_type, - account_indexes, - shrink_ratio, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ) - } - pub fn load_without_fixed_root( &self, ancestors: &Ancestors, @@ -9855,13 +9821,6 @@ pub mod tests { } impl AccountsDb { - pub fn new_sized(paths: Vec<PathBuf>, file_size: u64) -> Self { - AccountsDb { - file_size, - ..AccountsDb::new_for_tests(paths, &ClusterType::Development) - } - } - pub fn get_append_vec_id(&self, pubkey: &Pubkey, slot: Slot) -> Option<AppendVecId> { let ancestors = vec![(slot, 1)].into_iter().collect(); let result = self.accounts_index.get(pubkey, Some(&ancestors), None); @@ -11578,7 +11537,10 @@ pub mod tests { fn test_account_grow_many() { let (_accounts_dir, paths) = get_temp_accounts_paths(2).unwrap(); let size = 4096; - let accounts = AccountsDb::new_sized(paths, size); + let accounts = AccountsDb { + file_size: size, + ..AccountsDb::new_for_tests(paths, &ClusterType::Development) + }; let mut keys = vec![]; for i in 0..9 { let key = solana_sdk::pubkey::new_rand(); @@ -11965,12 +11927,10 @@ fn test_clean_old_with_both_normal_and_zero_lamport_accounts() { solana_logger::setup(); - let mut accounts = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - spl_token_mint_index_enabled(), - AccountShrinkThreshold::default(), - ); + let mut accounts = AccountsDb { + account_indexes: spl_token_mint_index_enabled(), + ..AccountsDb::new_single_for_tests() + }; let pubkey1 = solana_sdk::pubkey::new_rand(); let pubkey2 = solana_sdk::pubkey::new_rand(); @@ -12341,7 +12301,10 @@ let min_file_bytes = std::mem::size_of::<StoredMeta>() + std::mem::size_of::<AccountMeta>(); - let db = Arc::new(AccountsDb::new_sized(Vec::new(), min_file_bytes as u64)); + let db = Arc::new(AccountsDb { + file_size: min_file_bytes as u64, + ..AccountsDb::new_single_for_tests() + }); db.add_root(slot); let thread_hdls: Vec<_> = (0..num_threads) @@ -12867,7 +12830,10 @@ #[test] fn test_storage_finder() { solana_logger::setup(); - let db = AccountsDb::new_sized(Vec::new(), 16 * 1024); + let db = AccountsDb { + file_size: 16 * 1024, + ..AccountsDb::new_single_for_tests() + }; let key = solana_sdk::pubkey::new_rand(); let lamports = 100; let data_len = 8190; @@ -13009,7 +12975,10 @@ let zero_lamport_account = AccountSharedData::new(zero_lamport, data_size, &owner); let mut current_slot = 0; - let accounts = AccountsDb::new_sized(Vec::new(), store_size); + let accounts = AccountsDb { + file_size: store_size, + ..AccountsDb::new_single_for_tests() + }; // A: Initialize AccountsDb with pubkey1 and pubkey2 current_slot += 1; @@ -13653,7 +13622,10 @@ #[test] fn test_store_reuse() { solana_logger::setup(); - let accounts = AccountsDb::new_sized(vec![], 4096); + let accounts = AccountsDb { + file_size: 4096, + ..AccountsDb::new_single_for_tests() + }; let size = 100; let num_accounts: usize = 100;
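The accounts-db hunks above replace the bespoke `new_sized` / `new_with_config_for_tests` / `new_with_config_for_benches` helpers with `new_single_for_tests()` (or `new_for_tests()`) plus Rust's struct-update syntax. A minimal sketch of that pattern, using illustrative stand-in types rather than the real `AccountsDb` fields:

```rust
// Illustrative stand-in for AccountsDb; not the real type or fields.
#[derive(Debug)]
struct TestDb {
    file_size: u64,
    paths: Vec<std::path::PathBuf>,
}

impl TestDb {
    // Stand-in for AccountsDb::new_single_for_tests().
    fn new_single_for_tests() -> Self {
        Self {
            file_size: 16 * 1024 * 1024,
            paths: Vec::new(),
        }
    }
}

fn main() {
    // Override just the field under test at the call site, instead of
    // keeping a dedicated `new_sized(paths, file_size)` constructor around:
    let db = TestDb {
        file_size: 4096,
        ..TestDb::new_single_for_tests()
    };
    assert_eq!(db.file_size, 4096);
    assert!(db.paths.is_empty());
}
```

Overriding one field inline keeps the dev-only constructor surface small, which is the point of the removals above.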
diff --git a/accounts-db/src/tiered_storage/hot.rs b/accounts-db/src/tiered_storage/hot.rs index c460d3b9b27587..92c0114e1ce359 100644 --- a/accounts-db/src/tiered_storage/hot.rs +++ b/accounts-db/src/tiered_storage/hot.rs @@ -281,9 +281,16 @@ impl HotStorageReader { &self, account_offset: HotAccountOffset, ) -> TieredStorageResult<&HotAccountMeta> { - let internal_account_offset = account_offset.offset(); - - let (meta, _) = get_pod::<HotAccountMeta>(&self.mmap, internal_account_offset)?; + let offset = account_offset.offset(); + + assert!( + offset.saturating_add(std::mem::size_of::<HotAccountMeta>()) + <= self.footer.index_block_offset as usize, + "reading HotAccountOffset ({}) would exceed the accounts block boundary ({}).", + offset, + self.footer.index_block_offset, + ); + let (meta, _) = get_pod::<HotAccountMeta>(&self.mmap, offset)?; Ok(meta) } @@ -538,7 +545,7 @@ pub mod tests { .collect(); let account_offsets: Vec<_>; - let footer = TieredStorageFooter { + let mut footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, account_entry_count: NUM_ACCOUNTS, ..TieredStorageFooter::default() }; @@ -557,6 +564,7 @@ .collect(); // while the test only focuses on account metas, writing a footer // here is necessary to make it a valid tiered-storage file. + footer.index_block_offset = current_offset as u64; footer.write_footer_block(&file).unwrap(); } @@ -566,9 +574,37 @@ let meta = hot_storage.get_account_meta_from_offset(*offset).unwrap(); assert_eq!(meta, expected_meta); } + assert_eq!(&footer, hot_storage.footer()); } + #[test] + #[should_panic(expected = "would exceed the accounts block boundary")] + fn test_get_account_meta_from_offset_out_of_bounds() { + // Generate a new temp path that is guaranteed to NOT already have a file. + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_meta_from_offset_out_of_bounds"); + + let footer = TieredStorageFooter { + account_meta_format: AccountMetaFormat::Hot, + index_block_offset: 160, + ..TieredStorageFooter::default() + }; + + { + let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let hot_storage = HotStorageReader::new_from_path(&path).unwrap(); + let offset = HotAccountOffset::new(footer.index_block_offset as usize).unwrap(); + // Read from index_block_offset, which does not belong to the + // account blocks. Expect an assert failure here. + hot_storage.get_account_meta_from_offset(offset).unwrap(); + } + #[test] fn test_hot_storage_get_account_offset_and_address() { // Generate a new temp path that is guaranteed to NOT already have a file. @@ -594,7 +630,7 @@ }) .collect(); - let footer = TieredStorageFooter { + let mut footer = TieredStorageFooter { account_meta_format: AccountMetaFormat::Hot, account_entry_count: NUM_ACCOUNTS, // Set index_block_offset to 0 as we didn't write any account ..TieredStorageFooter::default() }; @@ -605,13 +641,11 @@ { let file = TieredStorageFile::new_writable(&path).unwrap(); - footer + let cursor = footer .index_block_format .write_index_block(&file, &index_writer_entries) .unwrap(); - - // while the test only focuses on account metas, writing a footer - // here is necessary to make it a valid tiered-storage file.
+ footer.owners_block_offset = cursor as u64; + footer.write_footer_block(&file).unwrap(); } diff --git a/accounts-db/src/tiered_storage/index.rs b/accounts-db/src/tiered_storage/index.rs index 06bb48491dad32..6567b6311558d4 100644 --- a/accounts-db/src/tiered_storage/index.rs +++ b/accounts-db/src/tiered_storage/index.rs @@ -83,13 +83,23 @@ impl IndexBlockFormat { footer: &TieredStorageFooter, index_offset: IndexOffset, ) -> TieredStorageResult<&'a Pubkey> { - let account_offset = match self { + let offset = match self { Self::AddressAndBlockOffsetOnly => { + debug_assert!(index_offset.0 < footer.account_entry_count); footer.index_block_offset as usize + std::mem::size_of::<Pubkey>() * (index_offset.0 as usize) } }; - let (address, _) = get_pod::<Pubkey>(mmap, account_offset)?; + + debug_assert!( + offset.saturating_add(std::mem::size_of::<Pubkey>()) + <= footer.owners_block_offset as usize, + "reading IndexOffset ({}) would exceed the index block boundary ({}).", + offset, + footer.owners_block_offset, + ); + + let (address, _) = get_pod::<Pubkey>(mmap, offset)?; Ok(address) } @@ -139,7 +149,7 @@ mod tests { #[test] fn test_address_and_offset_indexer() { const ENTRY_COUNT: usize = 100; - let footer = TieredStorageFooter { + let mut footer = TieredStorageFooter { account_entry_count: ENTRY_COUNT as u32, ..TieredStorageFooter::default() }; @@ -163,7 +173,8 @@ { let file = TieredStorageFile::new_writable(&path).unwrap(); let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; - indexer.write_index_block(&file, &index_entries).unwrap(); + let cursor = indexer.write_index_block(&file, &index_entries).unwrap(); + footer.owners_block_offset = cursor as u64; } let indexer = IndexBlockFormat::AddressAndBlockOffsetOnly; @@ -184,4 +195,75 @@ assert_eq!(index_entry.address, address); } } + + #[test] + #[should_panic(expected = "index_offset.0 < footer.account_entry_count")] + fn test_get_account_address_out_of_bounds() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_address_out_of_bounds"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here, as the test should hit an assert + // failure before it actually reads the file. + let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + footer + .index_block_format + .get_account_address(&mmap, &footer, IndexOffset(footer.account_entry_count)) + .unwrap(); + } + + #[test] + #[should_panic(expected = "would exceed the index block boundary")] + fn test_get_account_address_exceeds_index_block_boundary() { + let temp_dir = TempDir::new().unwrap(); + let path = temp_dir + .path() + .join("test_get_account_address_exceeds_index_block_boundary"); + + let footer = TieredStorageFooter { + account_entry_count: 100, + index_block_format: IndexBlockFormat::AddressAndBlockOffsetOnly, + index_block_offset: 1024, + // only holds one index entry + owners_block_offset: 1024 + std::mem::size_of::<Pubkey>() as u64, + ..TieredStorageFooter::default() + }; + + { + // we only write a footer here, as the test should hit an assert + // failure before it actually reads the file.
+ let file = TieredStorageFile::new_writable(&path).unwrap(); + footer.write_footer_block(&file).unwrap(); + } + + let file = OpenOptions::new() + .read(true) + .create(false) + .open(&path) + .unwrap(); + let mmap = unsafe { MmapOptions::new().map(&file).unwrap() }; + // The IndexOffset does not exceed account_entry_count, but does exceed + // the index block boundary. + footer + .index_block_format + .get_account_address(&mmap, &footer, IndexOffset(2)) + .unwrap(); + } } diff --git a/ci/run-sanity.sh b/ci/run-sanity.sh index 3e674d92f4eb25..8108d13a061fd5 100755 --- a/ci/run-sanity.sh +++ b/ci/run-sanity.sh @@ -39,4 +39,5 @@ $solana_ledger_tool create-snapshot --ledger config/ledger "$snapshot_slot" conf cp config/ledger/genesis.tar.bz2 config/snapshot-ledger $solana_ledger_tool copy --ledger config/ledger \ --target-db config/snapshot-ledger --starting-slot "$snapshot_slot" --ending-slot "$latest_slot" -$solana_ledger_tool verify --ledger config/snapshot-ledger +$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method blockstore-processor +$solana_ledger_tool verify --ledger config/snapshot-ledger --block-verification-method unified-scheduler diff --git a/core/Cargo.toml b/core/Cargo.toml index 0bc1a3fe3770aa..bc1bd4549f6751 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -69,6 +69,7 @@ solana-streamer = { workspace = true } solana-tpu-client = { workspace = true } solana-transaction-status = { workspace = true } solana-turbine = { workspace = true } +solana-unified-scheduler-pool = { workspace = true } solana-version = { workspace = true } solana-vote = { workspace = true } solana-vote-program = { workspace = true } diff --git a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs index bfdead250996e1..52f7dda718e722 100644 --- a/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs +++ b/core/src/banking_stage/transaction_scheduler/prio_graph_scheduler.rs @@ -46,10 +46,17 @@ impl PrioGraphScheduler { } } - /// Schedule transactions from the given `TransactionStateContainer` to be consumed by the - /// worker threads. Returns summary of scheduling, or an error. - /// `filter` is used to filter out transactions that should be skipped and dropped, and - /// should set `false` for transactions that should be dropped, and `true` otherwise. + /// Schedule transactions from the given `TransactionStateContainer` to be + /// consumed by the worker threads. Returns a summary of scheduling, or an + /// error. + /// `pre_graph_filter` is used to filter out transactions that should be + /// skipped and dropped before insertion into the prio-graph. This fn should + /// set `false` for transactions that should be dropped, and `true` + /// otherwise. + /// `pre_lock_filter` is used to filter out transactions after they have + /// made it to the top of the prio-graph, and immediately before locks are + /// checked and taken. This fn should return `true` for transactions that + /// should be scheduled, and `false` otherwise. /// /// Uses a `PrioGraph` to perform look-ahead during the scheduling of transactions.
/// This, combined with internal tracking of threads' in-flight transactions, allows @@ -58,7 +65,8 @@ pub(crate) fn schedule( &mut self, container: &mut TransactionStateContainer, - filter: impl Fn(&[&SanitizedTransaction], &mut [bool]), + pre_graph_filter: impl Fn(&[&SanitizedTransaction], &mut [bool]), + pre_lock_filter: impl Fn(&SanitizedTransaction) -> bool, ) -> Result<SchedulingSummary, SchedulerError> { let num_threads = self.consume_work_senders.len(); let mut batches = Batches::new(num_threads); @@ -100,7 +108,8 @@ impl PrioGraphScheduler { txs.push(&transaction.transaction); }); - let (_, filter_us) = measure_us!(filter(&txs, &mut filter_array[..chunk_size])); + let (_, filter_us) = + measure_us!(pre_graph_filter(&txs, &mut filter_array[..chunk_size])); saturating_add_assign!(total_filter_time_us, filter_us); for (id, filter_result) in ids.iter().zip(&filter_array[..chunk_size]) { @@ -148,6 +157,10 @@ }; let transaction = &transaction_state.transaction_ttl().transaction; + if !pre_lock_filter(transaction) { + container.remove_by_id(&id.id); + continue; + } // Check if this transaction conflicts with any blocked transactions if !blocking_locks.check_locks(transaction.message()) { @@ -601,10 +614,14 @@ .unzip() } - fn test_filter(_txs: &[&SanitizedTransaction], results: &mut [bool]) { + fn test_pre_graph_filter(_txs: &[&SanitizedTransaction], results: &mut [bool]) { results.fill(true); } + fn test_pre_lock_filter(_tx: &SanitizedTransaction) -> bool { + true + } + #[test] fn test_schedule_disconnected_channel() { let (mut scheduler, work_receivers, _finished_work_sender) = create_test_frame(1); @@ -612,7 +629,7 @@ drop(work_receivers); // explicitly drop receivers assert_matches!( - scheduler.schedule(&mut container, test_filter), + scheduler.schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter), Err(SchedulerError::DisconnectedSendChannel(_)) ); } @@ -625,7 +642,9 @@ (&Keypair::new(), &[Pubkey::new_unique()], 2, 2), ]); - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 2); assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, vec![txids!([1, 0])]); @@ -640,7 +659,9 @@ (&Keypair::new(), &[pubkey], 1, 2), ]); - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 2); assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( @@ -658,7 +679,9 @@ ); // expect 4 full batches to be scheduled - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!( scheduling_summary.num_scheduled, 4 * TARGET_NUM_TRANSACTIONS_PER_BATCH @@ -678,7 +701,9 @@ let mut container = create_container((0..4).map(|i| (Keypair::new(), [Pubkey::new_unique()], 1, i))); - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap();
assert_eq!(scheduling_summary.num_scheduled, 4); assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!(collect_work(&work_receivers[0]).1, [txids!([3, 1])]); @@ -710,7 +735,9 @@ mod tests { // fact they eventually join means that the scheduler will schedule them // onto the same thread to avoid causing [4], which conflicts with both // chains, to be un-schedulable. - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 5); assert_eq!(scheduling_summary.num_unschedulable, 0); assert_eq!( @@ -751,7 +778,9 @@ mod tests { // Because the look-ahead window is shortened to a size of 4, the scheduler does // not have knowledge of the joining at transaction [4] until after [0] and [1] // have been scheduled. - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 4); assert_eq!(scheduling_summary.num_unschedulable, 2); let (thread_0_work, thread_0_ids) = collect_work(&work_receivers[0]); @@ -762,7 +791,9 @@ mod tests { ); // Cannot schedule even on next pass because of lock conflicts - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 0); assert_eq!(scheduling_summary.num_unschedulable, 2); @@ -774,7 +805,9 @@ mod tests { }) .unwrap(); scheduler.receive_completed(&mut container).unwrap(); - let scheduling_summary = scheduler.schedule(&mut container, test_filter).unwrap(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, test_pre_lock_filter) + .unwrap(); assert_eq!(scheduling_summary.num_scheduled, 2); assert_eq!(scheduling_summary.num_unschedulable, 0); @@ -783,4 +816,29 @@ mod tests { [txids!([4]), txids!([5])] ); } + + #[test] + fn test_schedule_pre_lock_filter() { + let (mut scheduler, work_receivers, _finished_work_sender) = create_test_frame(1); + let pubkey = Pubkey::new_unique(); + let keypair = Keypair::new(); + let mut container = create_container([ + (&Keypair::new(), &[pubkey], 1, 1), + (&keypair, &[pubkey], 1, 2), + (&Keypair::new(), &[pubkey], 1, 3), + ]); + + // 2nd transaction should be filtered out and dropped before locking. 
+ let pre_lock_filter = + |tx: &SanitizedTransaction| tx.message().fee_payer() != &keypair.pubkey(); + let scheduling_summary = scheduler + .schedule(&mut container, test_pre_graph_filter, pre_lock_filter) + .unwrap(); + assert_eq!(scheduling_summary.num_scheduled, 2); + assert_eq!(scheduling_summary.num_unschedulable, 0); + assert_eq!( + collect_work(&work_receivers[0]).1, + vec![txids!([2]), txids!([0])] + ); + } } diff --git a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs index f7601a75686559..930d2fe1d067c3 100644 --- a/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs +++ b/core/src/banking_stage/transaction_scheduler/scheduler_controller.rs @@ -114,12 +114,13 @@ impl SchedulerController { ) -> Result<(), SchedulerError> { match decision { BufferedPacketsDecision::Consume(bank_start) => { - let (scheduling_summary, schedule_time_us) = - measure_us!(self - .scheduler - .schedule(&mut self.container, |txs, results| { - Self::pre_scheduling_filter(txs, results, &bank_start.working_bank) - })?); + let (scheduling_summary, schedule_time_us) = measure_us!(self.scheduler.schedule( + &mut self.container, + |txs, results| { + Self::pre_graph_filter(txs, results, &bank_start.working_bank) + }, + |_| true // no pre-lock filter for now + )?); saturating_add_assign!( self.count_metrics.num_scheduled, scheduling_summary.num_scheduled @@ -152,11 +153,7 @@ impl SchedulerController { Ok(()) } - fn pre_scheduling_filter( - transactions: &[&SanitizedTransaction], - results: &mut [bool], - bank: &Bank, - ) { + fn pre_graph_filter(transactions: &[&SanitizedTransaction], results: &mut [bool], bank: &Bank) { let lock_results = vec![Ok(()); transactions.len()]; let mut error_counters = TransactionErrorMetrics::default(); let check_results = bank.check_transactions( diff --git a/core/src/replay_stage.rs b/core/src/replay_stage.rs index a175fffa29914d..82c86ffc12079b 100644 --- a/core/src/replay_stage.rs +++ b/core/src/replay_stage.rs @@ -67,6 +67,7 @@ use { }, solana_sdk::{ clock::{BankId, Slot, MAX_PROCESSING_AGE, NUM_CONSECUTIVE_LEADER_SLOTS}, + feature_set, genesis_config::ClusterType, hash::Hash, pubkey::Pubkey, @@ -1228,8 +1229,12 @@ impl ReplayStage { let duplicate_slots = blockstore .duplicate_slots_iterator(bank_forks.root_bank().slot()) .unwrap(); - let duplicate_slot_hashes = duplicate_slots - .filter_map(|slot| bank_forks.bank_hash(slot).map(|hash| (slot, hash))); + let duplicate_slot_hashes = duplicate_slots.filter_map(|slot| { + let bank = bank_forks.get(slot)?; + bank.feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + .then_some((slot, bank.hash())) + }); ( bank_forks.root_bank(), bank_forks.frozen_banks().values().cloned().collect(), @@ -2110,7 +2115,11 @@ impl ReplayStage { ); // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&slot) && blockstore.get_duplicate_slot(slot).is_some() + if bank + .feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + && !duplicate_slots_tracker.contains(&slot) + && blockstore.get_duplicate_slot(slot).is_some() { let duplicate_state = DuplicateState::new_from_state( slot, @@ -2920,7 +2929,10 @@ impl ReplayStage { SlotStateUpdate::BankFrozen(bank_frozen_state), ); // If we previously marked this slot as duplicate in blockstore, let the state machine know - if !duplicate_slots_tracker.contains(&bank.slot()) + if 
bank + .feature_set + .is_active(&feature_set::consume_blockstore_duplicate_proofs::id()) + && !duplicate_slots_tracker.contains(&bank.slot()) && blockstore.get_duplicate_slot(bank.slot()).is_some() { let duplicate_state = DuplicateState::new_from_state( diff --git a/core/src/validator.rs b/core/src/validator.rs index df5ec80f431582..13c454631625a0 100644 --- a/core/src/validator.rs +++ b/core/src/validator.rs @@ -108,7 +108,7 @@ use { clock::Slot, epoch_schedule::MAX_LEADER_SCHEDULE_EPOCH_OFFSET, exit::Exit, - genesis_config::GenesisConfig, + genesis_config::{ClusterType, GenesisConfig}, hash::Hash, pubkey::Pubkey, shred_version::compute_shred_version, @@ -118,6 +118,7 @@ solana_send_transaction_service::send_transaction_service, solana_streamer::{socket::SocketAddrSpace, streamer::StakedNodes}, solana_turbine::{self, broadcast_stage::BroadcastStageType}, + solana_unified_scheduler_pool::DefaultSchedulerPool, solana_vote_program::vote_state, solana_wen_restart::wen_restart::wait_for_wen_restart, std::{ @@ -144,6 +145,7 @@ const WAIT_FOR_SUPERMAJORITY_THRESHOLD_PERCENT: u64 = 80; pub enum BlockVerificationMethod { #[default] BlockstoreProcessor, + UnifiedScheduler, } impl BlockVerificationMethod { @@ -469,12 +471,12 @@ pub struct Validator { blockstore_metric_report_service: BlockstoreMetricReportService, accounts_background_service: AccountsBackgroundService, accounts_hash_verifier: AccountsHashVerifier, - turbine_quic_endpoint: Endpoint, + turbine_quic_endpoint: Option<Endpoint>, turbine_quic_endpoint_runtime: Option<TokioRuntime>, - turbine_quic_endpoint_join_handle: solana_turbine::quic_endpoint::AsyncTryJoinHandle, - repair_quic_endpoint: Endpoint, + turbine_quic_endpoint_join_handle: Option<solana_turbine::quic_endpoint::AsyncTryJoinHandle>, + repair_quic_endpoint: Option<Endpoint>, repair_quic_endpoint_runtime: Option<TokioRuntime>, - repair_quic_endpoint_join_handle: repair::quic_endpoint::AsyncTryJoinHandle, + repair_quic_endpoint_join_handle: Option<repair::quic_endpoint::AsyncTryJoinHandle>, } impl Validator { @@ -813,6 +815,24 @@ // (by both replay stage and banking stage) let prioritization_fee_cache = Arc::new(PrioritizationFeeCache::default()); + match &config.block_verification_method { + BlockVerificationMethod::BlockstoreProcessor => { + info!("no scheduler pool is installed for block verification..."); + } + BlockVerificationMethod::UnifiedScheduler => { + let scheduler_pool = DefaultSchedulerPool::new_dyn( + config.runtime_config.log_messages_bytes_limit, + transaction_status_sender.clone(), + Some(replay_vote_sender.clone()), + prioritization_fee_cache.clone(), + ); + bank_forks + .write() + .unwrap() + .install_scheduler_pool(scheduler_pool); + } + } + let leader_schedule_cache = Arc::new(leader_schedule_cache); let entry_notification_sender = entry_notifier_service .as_ref() @@ -1150,58 +1170,74 @@ // Outside test-validator crate, we always need a tokio runtime (and // the respective handle) to initialize the turbine QUIC endpoint.
let current_runtime_handle = tokio::runtime::Handle::try_current(); - let turbine_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| { - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name("solTurbineQuic") - .build() - .unwrap() - }); + let turbine_quic_endpoint_runtime = (current_runtime_handle.is_err() + && genesis_config.cluster_type != ClusterType::MainnetBeta) + .then(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("solTurbineQuic") + .build() + .unwrap() + }); let (turbine_quic_endpoint_sender, turbine_quic_endpoint_receiver) = unbounded(); let ( turbine_quic_endpoint, turbine_quic_endpoint_sender, turbine_quic_endpoint_join_handle, - ) = solana_turbine::quic_endpoint::new_quic_endpoint( - turbine_quic_endpoint_runtime - .as_ref() - .map(TokioRuntime::handle) - .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), - &identity_keypair, - node.sockets.tvu_quic, - node.info - .tvu(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? - .ip(), - turbine_quic_endpoint_sender, - bank_forks.clone(), - ) - .unwrap(); - - // Repair quic endpoint. - let repair_quic_endpoint_runtime = current_runtime_handle.is_err().then(|| { - tokio::runtime::Builder::new_multi_thread() - .enable_all() - .thread_name("solRepairQuic") - .build() - .unwrap() - }); - let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = - repair::quic_endpoint::new_quic_endpoint( - repair_quic_endpoint_runtime + ) = if genesis_config.cluster_type == ClusterType::MainnetBeta { + let (sender, _receiver) = tokio::sync::mpsc::channel(1); + (None, sender, None) + } else { + solana_turbine::quic_endpoint::new_quic_endpoint( + turbine_quic_endpoint_runtime .as_ref() .map(TokioRuntime::handle) .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), &identity_keypair, - node.sockets.serve_repair_quic, + node.sockets.tvu_quic, node.info - .serve_repair(Protocol::QUIC) - .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? + .tvu(Protocol::QUIC) + .map_err(|err| format!("Invalid QUIC TVU address: {err:?}"))? .ip(), - repair_quic_endpoint_sender, + turbine_quic_endpoint_sender, bank_forks.clone(), ) - .unwrap(); + .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle))) + .unwrap() + }; + + // Repair quic endpoint. + let repair_quic_endpoint_runtime = (current_runtime_handle.is_err() + && genesis_config.cluster_type != ClusterType::MainnetBeta) + .then(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .thread_name("solRepairQuic") + .build() + .unwrap() + }); + let (repair_quic_endpoint, repair_quic_endpoint_sender, repair_quic_endpoint_join_handle) = + if genesis_config.cluster_type == ClusterType::MainnetBeta { + let (sender, _receiver) = tokio::sync::mpsc::channel(1); + (None, sender, None) + } else { + repair::quic_endpoint::new_quic_endpoint( + repair_quic_endpoint_runtime + .as_ref() + .map(TokioRuntime::handle) + .unwrap_or_else(|| current_runtime_handle.as_ref().unwrap()), + &identity_keypair, + node.sockets.serve_repair_quic, + node.info + .serve_repair(Protocol::QUIC) + .map_err(|err| format!("Invalid QUIC serve-repair address: {err:?}"))? 
+ .ip(), + repair_quic_endpoint_sender, + bank_forks.clone(), + ) + .map(|(endpoint, sender, join_handle)| (Some(endpoint), sender, Some(join_handle))) + .unwrap() + }; let in_wen_restart = config.wen_restart_proto_path.is_some() && !waited_for_supermajority; let tower = match process_blockstore.process_to_create_tower() { @@ -1494,14 +1530,18 @@ impl Validator { } self.gossip_service.join().expect("gossip_service"); - repair::quic_endpoint::close_quic_endpoint(&self.repair_quic_endpoint); + if let Some(repair_quic_endpoint) = &self.repair_quic_endpoint { + repair::quic_endpoint::close_quic_endpoint(repair_quic_endpoint); + } self.serve_repair_service .join() .expect("serve_repair_service"); - self.repair_quic_endpoint_runtime - .map(|runtime| runtime.block_on(self.repair_quic_endpoint_join_handle)) - .transpose() - .unwrap(); + if let Some(repair_quic_endpoint_join_handle) = self.repair_quic_endpoint_join_handle { + self.repair_quic_endpoint_runtime + .map(|runtime| runtime.block_on(repair_quic_endpoint_join_handle)) + .transpose() + .unwrap(); + }; self.stats_reporter_service .join() .expect("stats_reporter_service"); @@ -1514,13 +1554,17 @@ impl Validator { self.accounts_hash_verifier .join() .expect("accounts_hash_verifier"); - solana_turbine::quic_endpoint::close_quic_endpoint(&self.turbine_quic_endpoint); + if let Some(turbine_quic_endpoint) = &self.turbine_quic_endpoint { + solana_turbine::quic_endpoint::close_quic_endpoint(turbine_quic_endpoint); + } self.tpu.join().expect("tpu"); self.tvu.join().expect("tvu"); - self.turbine_quic_endpoint_runtime - .map(|runtime| runtime.block_on(self.turbine_quic_endpoint_join_handle)) - .transpose() - .unwrap(); + if let Some(turbine_quic_endpoint_join_handle) = self.turbine_quic_endpoint_join_handle { + self.turbine_quic_endpoint_runtime + .map(|runtime| runtime.block_on(turbine_quic_endpoint_join_handle)) + .transpose() + .unwrap(); + } self.completed_data_sets_service .join() .expect("completed_data_sets_service"); diff --git a/core/src/window_service.rs b/core/src/window_service.rs index a68a20e2078471..a36692cc7ef3ab 100644 --- a/core/src/window_service.rs +++ b/core/src/window_service.rs @@ -1,6 +1,7 @@ //! `window_service` handles the data plane incoming shreds, storing them in //! blockstore and retransmitting where required //! 
+ use { crate::{ cluster_info_vote_listener::VerifiedVoteReceiver, @@ -28,7 +29,12 @@ use { solana_metrics::inc_new_counter_error, solana_perf::packet::{Packet, PacketBatch}, solana_rayon_threadlimit::get_thread_count, - solana_sdk::clock::Slot, + solana_runtime::bank_forks::BankForks, + solana_sdk::{ + clock::{Slot, DEFAULT_MS_PER_SLOT}, + feature_set, + }, + solana_turbine::cluster_nodes, std::{ cmp::Reverse, collections::{HashMap, HashSet}, @@ -142,12 +148,31 @@ fn run_check_duplicate( blockstore: &Blockstore, shred_receiver: &Receiver, duplicate_slots_sender: &DuplicateSlotSender, + bank_forks: &RwLock, ) -> Result<()> { + let mut root_bank = bank_forks.read().unwrap().root_bank(); + let mut last_updated = Instant::now(); let check_duplicate = |shred: PossibleDuplicateShred| -> Result<()> { + if last_updated.elapsed().as_millis() as u64 > DEFAULT_MS_PER_SLOT { + // Grabs bank forks lock once a slot + last_updated = Instant::now(); + root_bank = bank_forks.read().unwrap().root_bank(); + } let shred_slot = shred.slot(); + let send_index_and_erasure_conflicts = cluster_nodes::check_feature_activation( + &feature_set::index_erasure_conflict_duplicate_proofs::id(), + shred_slot, + &root_bank, + ); let (shred1, shred2) = match shred { - PossibleDuplicateShred::LastIndexConflict(shred, conflict) => (shred, conflict), - PossibleDuplicateShred::ErasureConflict(shred, conflict) => (shred, conflict), + PossibleDuplicateShred::LastIndexConflict(shred, conflict) + | PossibleDuplicateShred::ErasureConflict(shred, conflict) => { + if send_index_and_erasure_conflicts { + (shred, conflict) + } else { + return Ok(()); + } + } PossibleDuplicateShred::Exists(shred) => { // Unlike the other cases we have to wait until here to decide to handle the duplicate and store // in blockstore. 
This is because the duplicate could have been part of the same insert batch, @@ -342,6 +367,7 @@ impl WindowService { let outstanding_requests = Arc::>::default(); let cluster_info = repair_info.cluster_info.clone(); + let bank_forks = repair_info.bank_forks.clone(); let repair_service = RepairService::new( blockstore.clone(), @@ -366,6 +392,7 @@ impl WindowService { blockstore.clone(), duplicate_receiver, duplicate_slots_sender, + bank_forks, ); let t_insert = Self::start_window_insert_thread( @@ -392,6 +419,7 @@ impl WindowService { blockstore: Arc, duplicate_receiver: Receiver, duplicate_slots_sender: DuplicateSlotSender, + bank_forks: Arc>, ) -> JoinHandle<()> { let handle_error = || { inc_new_counter_error!("solana-check-duplicate-error", 1, 1); @@ -405,6 +433,7 @@ impl WindowService { &blockstore, &duplicate_receiver, &duplicate_slots_sender, + &bank_forks, ) { if Self::should_exit_on_error(e, &handle_error) { break; @@ -507,9 +536,11 @@ mod test { solana_gossip::contact_info::ContactInfo, solana_ledger::{ blockstore::{make_many_slot_entries, Blockstore}, + genesis_utils::create_genesis_config, get_tmp_ledger_path_auto_delete, shred::{ProcessShredsStats, Shredder}, }, + solana_runtime::bank::Bank, solana_sdk::{ hash::Hash, signature::{Keypair, Signer}, @@ -556,6 +587,8 @@ mod test { #[test] fn test_run_check_duplicate() { let ledger_path = get_tmp_ledger_path_auto_delete!(); + let genesis_config = create_genesis_config(10_000).genesis_config; + let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); let blockstore = Arc::new(Blockstore::open(ledger_path.path()).unwrap()); let (sender, receiver) = unbounded(); let (duplicate_slot_sender, duplicate_slot_receiver) = unbounded(); @@ -587,6 +620,7 @@ mod test { &blockstore, &receiver, &duplicate_slot_sender, + &bank_forks, ) .unwrap(); @@ -616,6 +650,8 @@ mod test { Arc::new(keypair), SocketAddrSpace::Unspecified, )); + let genesis_config = create_genesis_config(10_000).genesis_config; + let bank_forks = BankForks::new_rw_arc(Bank::new_for_tests(&genesis_config)); // Start duplicate thread receiving and inserting duplicates let t_check_duplicate = WindowService::start_check_duplicate_thread( @@ -624,6 +660,7 @@ mod test { blockstore.clone(), duplicate_shred_receiver, duplicate_slot_sender, + bank_forks, ); let handle_duplicate = |shred| { diff --git a/docs/src/cli/install.md b/docs/src/cli/install.md index 06efc82851ba0a..7773631dda59d3 100644 --- a/docs/src/cli/install.md +++ b/docs/src/cli/install.md @@ -4,8 +4,8 @@ sidebar_label: Installation sidebar_position: 1 --- -There are multiple ways to install the Solana tools on your computer -depending on your preferred workflow: +There are multiple ways to install the Solana tools on your computer depending +on your preferred workflow: - [Use Solana's Install Tool (Simplest option)](#use-solanas-install-tool) - [Download Prebuilt Binaries](#download-prebuilt-binaries) @@ -19,8 +19,8 @@ depending on your preferred workflow: - Open your favorite Terminal application - Install the Solana release - [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) on your - machine by running: + [LATEST_SOLANA_RELEASE_VERSION](https://github.com/solana-labs/solana/releases/tag/LATEST_SOLANA_RELEASE_VERSION) + on your machine by running: ```bash sh -c "$(curl -sSfL https://release.solana.com/LATEST_SOLANA_RELEASE_VERSION/install)" @@ -41,15 +41,14 @@ Active release directory: 
/home/solana/.local/share/solana/install/active_releas
Update successful
```

-- Depending on your system, the end of the installer messaging may prompt you
-  to
+- Depending on your system, the end of the installer messaging may prompt you to

  ```bash
  Please update your PATH environment variable to include the solana programs:
  ```

-- If you get the above message, copy and paste the recommended command below
-  it to update `PATH`
+- If you get the above message, copy and paste the recommended command below it
+  to update `PATH`
- Confirm you have the desired version of `solana` installed by running:

  ```bash
@@ -65,10 +64,10 @@ solana --version

- Open a Command Prompt (`cmd.exe`) as an Administrator

-  - Search for Command Prompt in the Windows search bar. When the Command
-    Prompt app appears, right-click and select “Open as Administrator”.
-    If you are prompted by a pop-up window asking “Do you want to allow this app to
-    make changes to your device?”, click Yes.
+  - Search for Command Prompt in the Windows search bar. When the Command Prompt
+    app appears, right-click and select “Open as Administrator”. If you are
+    prompted by a pop-up window asking “Do you want to allow this app to make
+    changes to your device?”, click Yes.

- Copy and paste the following command, then press Enter to download the Solana
  installer into a temporary directory:

@@ -149,8 +148,100 @@ set PATH=%cd%/bin;%PATH%
 ## Build From Source

 If you are unable to use the prebuilt binaries or prefer to build it yourself
-from source, navigate to
-[https://github.com/solana-labs/solana/releases/latest](https://github.com/solana-labs/solana/releases/latest),
+from source, follow these steps, ensuring you have the necessary prerequisites
+installed on your system.
+
+### Prerequisites
+
+Before building from source, make sure to install the following prerequisites:
+
+#### For Debian and Other Linux Distributions:
+
+Rust Programming Language: Check "Install Rust" at
+[https://www.rust-lang.org/tools/install](https://www.rust-lang.org/tools/install),
+which recommends the following command:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+Install build dependencies:
+
+- Build essential
+- Package config
+- Udev & LLVM & libclang
+- Protocol buffers
+
+```bash
+apt-get install \
+    build-essential \
+    pkg-config \
+    libudev-dev llvm libclang-dev \
+    protobuf-compiler
+```
+
+#### For Other Linux Distributions:
+
+Replace `apt` with your distribution's package manager (e.g., `yum`, `dnf`,
+`pacman`) and adjust package names as needed.
+
+#### For macOS:
+
+Install Homebrew (if not already installed): check "Install Homebrew" at
+[https://brew.sh/](https://brew.sh/), which recommends the following command:
+
+```bash
+/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
+```
+
+Install the necessary tools and libraries using Homebrew:
+
+```bash
+brew install rust pkg-config libudev protobuf llvm coreutils
+```
+
+Follow the instructions given at the end of the brew install command about
+`PATH` configurations.
+
+#### For Windows:
+
+Rust Programming Language: Check "Install Rust" at
+[https://www.rust-lang.org/tools/install](https://www.rust-lang.org/tools/install),
+which recommends the following command:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+- Download and install the Build Tools for Visual Studio (2019 or later) from
+  the
+  [Visual Studio downloads page](https://visualstudio.microsoft.com/downloads/).
+  Make sure to include the C++ build tools in the installation.
+- Install LLVM: Download and install LLVM from the
+  [official LLVM download page](https://releases.llvm.org/download.html).
+- Install Protocol Buffers Compiler (protoc): Download `protoc` from the
+  [GitHub releases page of Protocol Buffers](https://github.com/protocolbuffers/protobuf/releases),
+  and add it to your `PATH`.
+
+:::info
+
+Users on Windows 10 or 11 may need to install
+[Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/install)
+(WSL) in order to build from source. WSL provides a Linux environment that runs
+inside your existing Windows installation. You can then run regular Linux
+software, including the Linux version of the Solana CLI.
+
+After installing WSL, run `wsl` from your Windows terminal, then follow the
+[Debian and Other Linux Distributions](#for-debian-and-other-linux-distributions)
+steps above.
+
+:::
+
+### Building from Source
+
+After installing the prerequisites, build Solana from source: navigate to
+[Solana's GitHub releases page](https://github.com/solana-labs/solana/releases/latest),
 and download the **Source Code** archive. Extract the code and build the
 binaries with:
@@ -168,15 +259,16 @@ solana-install init

 ## Use Homebrew

-This option requires you to have [Homebrew](https://brew.sh/) package manager on your MacOS or Linux machine.
+This option requires you to have the [Homebrew](https://brew.sh/) package
+manager on your macOS or Linux machine.

 ### MacOS & Linux

- Follow instructions at: https://formulae.brew.sh/formula/solana

[Homebrew formulae](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/solana.rb)
-is updated after each `solana` release, however it is possible that
-the Homebrew version is outdated.
+is updated after each `solana` release; however, it is possible that the
+Homebrew version is outdated.

- Confirm you have the desired version of `solana` installed by entering:

diff --git a/ledger-tool/Cargo.toml b/ledger-tool/Cargo.toml
index 94d9bbe470d5fa..ddc1ca9b564e94 100644
--- a/ledger-tool/Cargo.toml
+++ b/ledger-tool/Cargo.toml
@@ -45,6 +45,7 @@ solana-stake-program = { workspace = true }
 solana-storage-bigtable = { workspace = true }
 solana-streamer = { workspace = true }
 solana-transaction-status = { workspace = true }
+solana-unified-scheduler-pool = { workspace = true }
 solana-version = { workspace = true }
 solana-vote-program = { workspace = true }
 solana_rbpf = { workspace = true, features = ["debugger"] }
diff --git a/ledger-tool/src/bigtable.rs b/ledger-tool/src/bigtable.rs
index 7809ec82357492..987a03f2ef99cc 100644
--- a/ledger-tool/src/bigtable.rs
+++ b/ledger-tool/src/bigtable.rs
@@ -1,6 +1,9 @@
 //!
The `bigtable` subcommand use { - crate::{ledger_path::canonicalize_ledger_path, output::CliEntries}, + crate::{ + ledger_path::canonicalize_ledger_path, + output::{CliBlockWithEntries, CliEntries, EncodedConfirmedBlockWithEntries}, + }, clap::{ value_t, value_t_or_exit, values_t_or_exit, App, AppSettings, Arg, ArgMatches, SubCommand, }, @@ -23,8 +26,8 @@ use { solana_sdk::{clock::Slot, pubkey::Pubkey, signature::Signature}, solana_storage_bigtable::CredentialType, solana_transaction_status::{ - BlockEncodingOptions, ConfirmedBlock, EncodeError, TransactionDetails, - UiTransactionEncoding, VersionedConfirmedBlock, + BlockEncodingOptions, ConfirmedBlock, EncodeError, EncodedConfirmedBlock, + TransactionDetails, UiTransactionEncoding, VersionedConfirmedBlock, }, std::{ cmp::min, @@ -113,6 +116,7 @@ async fn first_available_block( async fn block( slot: Slot, output_format: OutputFormat, + show_entries: bool, config: solana_storage_bigtable::LedgerStorageConfig, ) -> Result<(), Box> { let bigtable = solana_storage_bigtable::LedgerStorage::new_with_config(config) @@ -126,7 +130,7 @@ async fn block( BlockEncodingOptions { transaction_details: TransactionDetails::Full, show_rewards: true, - max_supported_transaction_version: None, + max_supported_transaction_version: Some(0), }, ) .map_err(|err| match err { @@ -134,12 +138,25 @@ async fn block( format!("Failed to process unsupported transaction version ({version}) in block") } })?; - - let cli_block = CliBlock { - encoded_confirmed_block: encoded_block.into(), - slot, - }; - println!("{}", output_format.formatted_string(&cli_block)); + let encoded_block: EncodedConfirmedBlock = encoded_block.into(); + + if show_entries { + let entries = bigtable.get_entries(slot).await?; + let cli_block = CliBlockWithEntries { + encoded_confirmed_block: EncodedConfirmedBlockWithEntries::try_from( + encoded_block, + entries, + )?, + slot, + }; + println!("{}", output_format.formatted_string(&cli_block)); + } else { + let cli_block = CliBlock { + encoded_confirmed_block: encoded_block, + slot, + }; + println!("{}", output_format.formatted_string(&cli_block)); + } Ok(()) } @@ -823,6 +840,12 @@ impl BigTableSubCommand for App<'_, '_> { .takes_value(true) .index(1) .required(true), + ) + .arg( + Arg::with_name("show_entries") + .long("show-entries") + .required(false) + .help("Display the transactions in their entries"), ), ) .subcommand( @@ -1117,13 +1140,14 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { } ("block", Some(arg_matches)) => { let slot = value_t_or_exit!(arg_matches, "slot", Slot); + let show_entries = arg_matches.is_present("show_entries"); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() }; - runtime.block_on(block(slot, output_format, config)) + runtime.block_on(block(slot, output_format, show_entries, config)) } ("entries", Some(arg_matches)) => { let slot = value_t_or_exit!(arg_matches, "slot", Slot); @@ -1139,7 +1163,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1151,7 +1175,7 @@ pub fn 
bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let starting_slot = value_t_or_exit!(arg_matches, "starting_slot", Slot); let limit = value_t_or_exit!(arg_matches, "limit", usize); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() @@ -1168,7 +1192,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { let ref_app_profile_id = value_t_or_exit!(arg_matches, "reference_app_profile_id", String); let ref_config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, credential_type: CredentialType::Filepath(credential_path), instance_name: ref_instance_name, app_profile_id: ref_app_profile_id, @@ -1184,7 +1208,7 @@ pub fn bigtable_process_command(ledger_path: &Path, matches: &ArgMatches<'_>) { .parse() .expect("Invalid signature"); let config = solana_storage_bigtable::LedgerStorageConfig { - read_only: false, + read_only: true, instance_name, app_profile_id, ..solana_storage_bigtable::LedgerStorageConfig::default() diff --git a/ledger-tool/src/ledger_utils.rs b/ledger-tool/src/ledger_utils.rs index e3f1b48a0a965c..e72804c201c136 100644 --- a/ledger-tool/src/ledger_utils.rs +++ b/ledger-tool/src/ledger_utils.rs @@ -30,6 +30,7 @@ use { PrunedBanksRequestHandler, SnapshotRequestHandler, }, bank_forks::BankForks, + prioritization_fee_cache::PrioritizationFeeCache, snapshot_config::SnapshotConfig, snapshot_hash::StartingSnapshotHashes, snapshot_utils::{ @@ -42,6 +43,7 @@ use { timing::timestamp, }, solana_streamer::socket::SocketAddrSpace, + solana_unified_scheduler_pool::DefaultSchedulerPool, std::{ path::{Path, PathBuf}, process::exit, @@ -305,6 +307,25 @@ pub fn load_and_process_ledger( "Using: block-verification-method: {}", block_verification_method, ); + match block_verification_method { + BlockVerificationMethod::BlockstoreProcessor => { + info!("no scheduler pool is installed for block verification..."); + } + BlockVerificationMethod::UnifiedScheduler => { + let no_transaction_status_sender = None; + let no_replay_vote_sender = None; + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + bank_forks + .write() + .unwrap() + .install_scheduler_pool(DefaultSchedulerPool::new_dyn( + process_options.runtime_config.log_messages_bytes_limit, + no_transaction_status_sender, + no_replay_vote_sender, + ignored_prioritization_fee_cache, + )); + } + } let node_id = Arc::new(Keypair::new()); let cluster_info = Arc::new(ClusterInfo::new( diff --git a/ledger-tool/src/output.rs b/ledger-tool/src/output.rs index 2c0db44372e1b2..7770b7967c1734 100644 --- a/ledger-tool/src/output.rs +++ b/ledger-tool/src/output.rs @@ -1,8 +1,14 @@ use { + chrono::{Local, TimeZone}, serde::{Deserialize, Serialize}, - solana_cli_output::{QuietDisplay, VerboseDisplay}, - solana_sdk::clock::Slot, - solana_transaction_status::EntrySummary, + solana_cli_output::{display::writeln_transaction, QuietDisplay, VerboseDisplay}, + solana_sdk::{ + clock::{Slot, UnixTimestamp}, + native_token::lamports_to_sol, + }, + solana_transaction_status::{ + EncodedConfirmedBlock, EncodedTransactionWithStatusMeta, EntrySummary, Rewards, + }, std::fmt::{self, Display, Formatter, Result}, }; @@ -70,6 +76,14 @@ impl Display for SlotBounds<'_> { } } +fn writeln_entry(f: &mut dyn fmt::Write, i: usize, entry: &CliEntry, prefix: &str) -> fmt::Result { + writeln!( + f, + "{prefix}Entry {} - num_hashes: {}, 
hash: {}, transactions: {}, starting_transaction_index: {}", + i, entry.num_hashes, entry.hash, entry.num_transactions, entry.starting_transaction_index, + ) +} + #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct CliEntries { @@ -85,15 +99,7 @@ impl fmt::Display for CliEntries { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { writeln!(f, "Slot {}", self.slot)?; for (i, entry) in self.entries.iter().enumerate() { - writeln!( - f, - " Entry {} - num_hashes: {}, hash: {}, transactions: {}, starting_transaction_index: {}", - i, - entry.num_hashes, - entry.hash, - entry.num_transactions, - entry.starting_transaction_index, - )?; + writeln_entry(f, i, entry, " ")?; } Ok(()) } @@ -118,3 +124,182 @@ impl From for CliEntry { } } } + +impl From<&CliPopulatedEntry> for CliEntry { + fn from(populated_entry: &CliPopulatedEntry) -> Self { + Self { + num_hashes: populated_entry.num_hashes, + hash: populated_entry.hash.clone(), + num_transactions: populated_entry.num_transactions, + starting_transaction_index: populated_entry.starting_transaction_index, + } + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliPopulatedEntry { + num_hashes: u64, + hash: String, + num_transactions: u64, + starting_transaction_index: usize, + transactions: Vec, +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct CliBlockWithEntries { + #[serde(flatten)] + pub encoded_confirmed_block: EncodedConfirmedBlockWithEntries, + #[serde(skip_serializing)] + pub slot: Slot, +} + +impl QuietDisplay for CliBlockWithEntries {} +impl VerboseDisplay for CliBlockWithEntries {} + +impl fmt::Display for CliBlockWithEntries { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + writeln!(f, "Slot: {}", self.slot)?; + writeln!( + f, + "Parent Slot: {}", + self.encoded_confirmed_block.parent_slot + )?; + writeln!(f, "Blockhash: {}", self.encoded_confirmed_block.blockhash)?; + writeln!( + f, + "Previous Blockhash: {}", + self.encoded_confirmed_block.previous_blockhash + )?; + if let Some(block_time) = self.encoded_confirmed_block.block_time { + writeln!( + f, + "Block Time: {:?}", + Local.timestamp_opt(block_time, 0).unwrap() + )?; + } + if let Some(block_height) = self.encoded_confirmed_block.block_height { + writeln!(f, "Block Height: {block_height:?}")?; + } + if !self.encoded_confirmed_block.rewards.is_empty() { + let mut rewards = self.encoded_confirmed_block.rewards.clone(); + rewards.sort_by(|a, b| a.pubkey.cmp(&b.pubkey)); + let mut total_rewards = 0; + writeln!(f, "Rewards:")?; + writeln!( + f, + " {:<44} {:^15} {:<15} {:<20} {:>14} {:>10}", + "Address", "Type", "Amount", "New Balance", "Percent Change", "Commission" + )?; + for reward in rewards { + let sign = if reward.lamports < 0 { "-" } else { "" }; + + total_rewards += reward.lamports; + #[allow(clippy::format_in_format_args)] + writeln!( + f, + " {:<44} {:^15} {:>15} {} {}", + reward.pubkey, + if let Some(reward_type) = reward.reward_type { + format!("{reward_type}") + } else { + "-".to_string() + }, + format!( + "{}◎{:<14.9}", + sign, + lamports_to_sol(reward.lamports.unsigned_abs()) + ), + if reward.post_balance == 0 { + " - -".to_string() + } else { + format!( + "◎{:<19.9} {:>13.9}%", + lamports_to_sol(reward.post_balance), + (reward.lamports.abs() as f64 + / (reward.post_balance as f64 - reward.lamports as f64)) + * 100.0 + ) + }, + reward + .commission + .map(|commission| format!("{commission:>9}%")) + .unwrap_or_else(|| " -".to_string()) + )?; + } + + let 
sign = if total_rewards < 0 { "-" } else { "" }; + writeln!( + f, + "Total Rewards: {}◎{:<12.9}", + sign, + lamports_to_sol(total_rewards.unsigned_abs()) + )?; + } + for (index, entry) in self.encoded_confirmed_block.entries.iter().enumerate() { + writeln_entry(f, index, &entry.into(), "")?; + for (index, transaction_with_meta) in entry.transactions.iter().enumerate() { + writeln!(f, " Transaction {index}:")?; + writeln_transaction( + f, + &transaction_with_meta.transaction.decode().unwrap(), + transaction_with_meta.meta.as_ref(), + " ", + None, + None, + )?; + } + } + Ok(()) + } +} + +#[derive(Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct EncodedConfirmedBlockWithEntries { + pub previous_blockhash: String, + pub blockhash: String, + pub parent_slot: Slot, + pub entries: Vec, + pub rewards: Rewards, + pub block_time: Option, + pub block_height: Option, +} + +impl EncodedConfirmedBlockWithEntries { + pub fn try_from( + block: EncodedConfirmedBlock, + entries_iterator: impl Iterator, + ) -> std::result::Result { + let mut entries = vec![]; + for (i, entry) in entries_iterator.enumerate() { + let ending_transaction_index = entry + .starting_transaction_index + .saturating_add(entry.num_transactions as usize); + let transactions = block + .transactions + .get(entry.starting_transaction_index..ending_transaction_index) + .ok_or(format!( + "Mismatched entry data and transactions: entry {:?}", + i + ))?; + entries.push(CliPopulatedEntry { + num_hashes: entry.num_hashes, + hash: entry.hash.to_string(), + num_transactions: entry.num_transactions, + starting_transaction_index: entry.starting_transaction_index, + transactions: transactions.to_vec(), + }); + } + Ok(Self { + previous_blockhash: block.previous_blockhash, + blockhash: block.blockhash, + parent_slot: block.parent_slot, + entries, + rewards: block.rewards, + block_time: block.block_time, + block_height: block.block_height, + }) + } +} diff --git a/ledger/src/blockstore_processor.rs b/ledger/src/blockstore_processor.rs index e2208ce557e12d..cc8a4e5cb607ac 100644 --- a/ledger/src/blockstore_processor.rs +++ b/ledger/src/blockstore_processor.rs @@ -74,7 +74,7 @@ use { thiserror::Error, }; -struct TransactionBatchWithIndexes<'a, 'b> { +pub struct TransactionBatchWithIndexes<'a, 'b> { pub batch: TransactionBatch<'a, 'b>, pub transaction_indexes: Vec, } @@ -134,7 +134,7 @@ fn get_first_error( first_err } -fn execute_batch( +pub fn execute_batch( batch: &TransactionBatchWithIndexes, bank: &Arc, transaction_status_sender: Option<&TransactionStatusSender>, @@ -1832,7 +1832,7 @@ pub struct TransactionStatusBatch { pub transaction_indexes: Vec, } -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct TransactionStatusSender { pub sender: Sender, } @@ -1947,7 +1947,9 @@ pub mod tests { genesis_utils::{ self, create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs, }, - installed_scheduler_pool::{MockInstalledScheduler, SchedulingContext, WaitReason}, + installed_scheduler_pool::{ + MockInstalledScheduler, MockUninstalledScheduler, SchedulingContext, + }, }, solana_sdk::{ account::{AccountSharedData, WritableAccount}, @@ -4545,11 +4547,12 @@ pub mod tests { let txs = create_test_transactions(&mint_keypair, &genesis_config.hash()); let mut mocked_scheduler = MockInstalledScheduler::new(); - let mut seq = mockall::Sequence::new(); + let seq = Arc::new(Mutex::new(mockall::Sequence::new())); + let seq_cloned = seq.clone(); mocked_scheduler .expect_context() .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut 
seq.lock().unwrap()) .return_const(context); mocked_scheduler .expect_schedule_execution() @@ -4557,15 +4560,21 @@ pub mod tests { .returning(|_| ()); mocked_scheduler .expect_wait_for_termination() - .with(mockall::predicate::eq(WaitReason::DroppedFromBankForks)) - .times(1) - .in_sequence(&mut seq) - .returning(|_| None); - mocked_scheduler - .expect_return_to_pool() + .with(mockall::predicate::eq(true)) .times(1) - .in_sequence(&mut seq) - .returning(|| ()); + .in_sequence(&mut seq.lock().unwrap()) + .returning(move |_| { + let mut mocked_uninstalled_scheduler = MockUninstalledScheduler::new(); + mocked_uninstalled_scheduler + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq_cloned.lock().unwrap()) + .returning(|| ()); + ( + (Ok(()), ExecuteTimings::default()), + Box::new(mocked_uninstalled_scheduler), + ) + }); let bank = BankWithScheduler::new(bank, Some(Box::new(mocked_scheduler))); let batch = bank.prepare_sanitized_batch(&txs); diff --git a/local-cluster/tests/local_cluster.rs b/local-cluster/tests/local_cluster.rs index c2a94b5049e346..955123df34e155 100644 --- a/local-cluster/tests/local_cluster.rs +++ b/local-cluster/tests/local_cluster.rs @@ -4,6 +4,7 @@ use { crossbeam_channel::{unbounded, Receiver}, gag::BufferRedirect, log::*, + rand::seq::IteratorRandom, serial_test::serial, solana_accounts_db::{ accounts_db::create_accounts_run_and_snapshot_dirs, hardened_unpack::open_genesis_config, @@ -15,7 +16,7 @@ use { }, optimistic_confirmation_verifier::OptimisticConfirmationVerifier, replay_stage::DUPLICATE_THRESHOLD, - validator::ValidatorConfig, + validator::{BlockVerificationMethod, ValidatorConfig}, }, solana_download_utils::download_snapshot_archive, solana_entry::entry::create_ticks, @@ -239,10 +240,7 @@ fn test_local_cluster_signature_subscribe() { ); let (mut sig_subscribe_client, receiver) = PubsubClient::signature_subscribe( - &format!( - "ws://{}", - &non_bootstrap_info.rpc_pubsub().unwrap().to_string() - ), + &format!("ws://{}", non_bootstrap_info.rpc_pubsub().unwrap()), &transaction.signatures[0], Some(RpcSignatureSubscribeConfig { commitment: Some(CommitmentConfig::processed()), @@ -5456,6 +5454,44 @@ fn test_duplicate_shreds_switch_failure() { ); } +#[test] +#[serial] +fn test_randomly_mixed_block_verification_methods_between_bootstrap_and_not() { + // tailored logging just to see two block verification methods are working correctly + solana_logger::setup_with_default( + "solana_metrics::metrics=warn,\ + solana_core=warn,\ + solana_runtime::installed_scheduler_pool=trace,\ + solana_ledger::blockstore_processor=debug,\ + info", + ); + + let num_nodes = 2; + let mut config = ClusterConfig::new_with_equal_stakes( + num_nodes, + DEFAULT_CLUSTER_LAMPORTS, + DEFAULT_NODE_STAKE, + ); + + // Randomly switch to use unified scheduler + config + .validator_configs + .iter_mut() + .choose(&mut rand::thread_rng()) + .unwrap() + .block_verification_method = BlockVerificationMethod::UnifiedScheduler; + + let local = LocalCluster::new(&mut config, SocketAddrSpace::Unspecified); + cluster_tests::spend_and_verify_all_nodes( + &local.entry_point_info, + &local.funding_keypair, + num_nodes, + HashSet::new(), + SocketAddrSpace::Unspecified, + &local.connection_cache, + ); +} + /// Forks previous marked invalid should be marked as such in fork choice on restart #[test] #[serial] diff --git a/program-runtime/src/loaded_programs.rs b/program-runtime/src/loaded_programs.rs index 75bf3a3c0f2ea7..b7b92a0409c800 100644 --- a/program-runtime/src/loaded_programs.rs +++ 
b/program-runtime/src/loaded_programs.rs @@ -3,9 +3,9 @@ use { invoke_context::{BuiltinFunctionWithContext, InvokeContext}, timings::ExecuteDetailsTimings, }, - itertools::Itertools, log::{debug, error, log_enabled, trace}, percentage::PercentageInteger, + rand::{thread_rng, Rng}, solana_measure::measure::Measure, solana_rbpf::{ elf::Executable, @@ -25,7 +25,7 @@ use { fmt::{Debug, Formatter}, sync::{ atomic::{AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, Condvar, Mutex, RwLock, }, }, }; @@ -129,6 +129,8 @@ pub struct LoadedProgram { pub tx_usage_counter: AtomicU64, /// How often this entry was used by an instruction pub ix_usage_counter: AtomicU64, + /// Latest slot in which the entry was used + pub latest_access_slot: AtomicU64, } #[derive(Debug, Default)] @@ -348,6 +350,7 @@ impl LoadedProgram { tx_usage_counter: AtomicU64::new(0), program, ix_usage_counter: AtomicU64::new(0), + latest_access_slot: AtomicU64::new(0), }) } @@ -360,6 +363,7 @@ impl LoadedProgram { maybe_expiration_slot: self.maybe_expiration_slot, tx_usage_counter: AtomicU64::new(self.tx_usage_counter.load(Ordering::Relaxed)), ix_usage_counter: AtomicU64::new(self.ix_usage_counter.load(Ordering::Relaxed)), + latest_access_slot: AtomicU64::new(self.latest_access_slot.load(Ordering::Relaxed)), }) } @@ -381,6 +385,7 @@ impl LoadedProgram { tx_usage_counter: AtomicU64::new(0), program: LoadedProgramType::Builtin(BuiltinProgram::new_builtin(function_registry)), ix_usage_counter: AtomicU64::new(0), + latest_access_slot: AtomicU64::new(0), } } @@ -395,6 +400,7 @@ impl LoadedProgram { maybe_expiration_slot, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::new(0), }; debug_assert!(tombstone.is_tombstone()); tombstone @@ -416,6 +422,16 @@ impl LoadedProgram { && slot >= self.deployment_slot && slot < self.effective_slot } + + pub fn update_access_slot(&self, slot: Slot) { + let _ = self.latest_access_slot.fetch_max(slot, Ordering::Relaxed); + } + + pub fn decayed_usage_counter(&self, now: Slot) -> u64 { + let last_access = self.latest_access_slot.load(Ordering::Relaxed); + let decaying_for = now.saturating_sub(last_access); + self.tx_usage_counter.load(Ordering::Relaxed) >> decaying_for + } } #[derive(Clone, Debug)] @@ -439,11 +455,66 @@ impl Default for ProgramRuntimeEnvironments { } } +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq)] +pub struct LoadingTaskCookie(u64); + +impl LoadingTaskCookie { + fn new() -> Self { + Self(0) + } + + fn update(&mut self) { + let LoadingTaskCookie(cookie) = self; + *cookie = cookie.wrapping_add(1); + } +} + +/// Prevents excessive polling during cooperative loading +#[derive(Debug, Default)] +pub struct LoadingTaskWaiter { + cookie: Mutex, + cond: Condvar, +} + +impl LoadingTaskWaiter { + pub fn new() -> Self { + Self { + cookie: Mutex::new(LoadingTaskCookie::new()), + cond: Condvar::new(), + } + } + + pub fn cookie(&self) -> LoadingTaskCookie { + *self.cookie.lock().unwrap() + } + + pub fn notify(&self) { + let mut cookie = self.cookie.lock().unwrap(); + cookie.update(); + self.cond.notify_all(); + } + + pub fn wait(&self, cookie: LoadingTaskCookie) -> LoadingTaskCookie { + let cookie_guard = self.cookie.lock().unwrap(); + *self + .cond + .wait_while(cookie_guard, |current_cookie| *current_cookie == cookie) + .unwrap() + } +} + +#[derive(Debug, Default)] +struct SecondLevel { + slot_versions: Vec>, + /// Contains the bank and TX batch a program at this address is currently being loaded + cooperative_loading_lock: 
Option<(Slot, std::thread::ThreadId)>, +} + pub struct LoadedPrograms { /// A two level index: /// - /// Pubkey is the address of a program, multiple versions can coexists simultaneously under the same address (in different slots). - entries: HashMap>>, + /// The first level is for the address at which programs are deployed and the second level for the slot (and thus also fork). + entries: HashMap, /// The slot of the last rerooting pub latest_root_slot: Slot, /// The epoch of the last rerooting @@ -460,6 +531,7 @@ pub struct LoadedPrograms { pub programs_to_recompile: Vec<(Pubkey, Arc)>, pub stats: Stats, pub fork_graph: Option>>, + pub loading_task_waiter: Arc, } impl Debug for LoadedPrograms { @@ -482,11 +554,6 @@ pub struct LoadedProgramsForTxBatch { pub environments: ProgramRuntimeEnvironments, } -pub struct ExtractedPrograms { - pub loaded: LoadedProgramsForTxBatch, - pub missing: HashMap, -} - impl LoadedProgramsForTxBatch { pub fn new(slot: Slot, environments: ProgramRuntimeEnvironments) -> Self { Self { @@ -557,6 +624,7 @@ impl LoadedPrograms { programs_to_recompile: Vec::default(), stats: Stats::default(), fork_graph: None, + loading_task_waiter: Arc::new(LoadingTaskWaiter::default()), } } @@ -582,12 +650,12 @@ impl LoadedPrograms { key: Pubkey, entry: Arc, ) -> (bool, Arc) { - let second_level = self.entries.entry(key).or_default(); - let index = second_level + let slot_versions = &mut self.entries.entry(key).or_default().slot_versions; + let index = slot_versions .iter() .position(|at| at.effective_slot >= entry.effective_slot); if let Some((existing, entry_index)) = - index.and_then(|index| second_level.get(index).map(|value| (value, index))) + index.and_then(|index| slot_versions.get(index).map(|value| (value, index))) { if existing.deployment_slot == entry.deployment_slot && existing.effective_slot == entry.effective_slot @@ -603,13 +671,13 @@ impl LoadedPrograms { existing.ix_usage_counter.load(Ordering::Relaxed), Ordering::Relaxed, ); - second_level.remove(entry_index); + slot_versions.remove(entry_index); } else if existing.is_tombstone() != entry.is_tombstone() { // Either the old entry is tombstone and the new one is not. // (Let's give the new entry a chance). // Or, the old entry is not a tombstone and the new one is a tombstone. // (Remove the old entry, as the tombstone makes it obsolete). 
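The `LoadingTaskWaiter` added earlier in this loaded_programs.rs diff pairs a monotonically advancing cookie with a `Condvar` so threads waiting on cooperative loading block instead of busy-polling. A stripped-down, runnable demo of the same handshake; `Waiter` here is an illustrative stand-in, not the real type:

```rust
// Standalone demo of the cookie + condvar pattern behind LoadingTaskWaiter:
// a waiter sleeps until the cookie it last observed has been advanced.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

#[derive(Default)]
struct Waiter {
    cookie: Mutex<u64>,
    cond: Condvar,
}

impl Waiter {
    fn cookie(&self) -> u64 {
        *self.cookie.lock().unwrap()
    }

    fn notify(&self) {
        // Advance the cookie under the lock, then wake every waiter.
        *self.cookie.lock().unwrap() += 1;
        self.cond.notify_all();
    }

    fn wait(&self, seen: u64) -> u64 {
        // wait_while re-checks the predicate under the lock, so a notify()
        // that lands before we block here is never lost.
        let guard = self.cookie.lock().unwrap();
        *self
            .cond
            .wait_while(guard, |current| *current == seen)
            .unwrap()
    }
}

fn main() {
    let waiter = Arc::new(Waiter::default());
    let seen = waiter.cookie();
    let worker = {
        let waiter = Arc::clone(&waiter);
        thread::spawn(move || waiter.wait(seen))
    };
    waiter.notify(); // loading finished: release the waiter
    assert_eq!(worker.join().unwrap(), 1);
}
```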
- second_level.remove(entry_index); + slot_versions.remove(entry_index); } else { self.stats.replacements.fetch_add(1, Ordering::Relaxed); return (true, existing.clone()); @@ -617,7 +685,7 @@ impl LoadedPrograms { } } self.stats.insertions.fetch_add(1, Ordering::Relaxed); - second_level.insert(index.unwrap_or(second_level.len()), entry.clone()); + slot_versions.insert(index.unwrap_or(slot_versions.len()), entry.clone()); (false, entry) } @@ -633,7 +701,9 @@ impl LoadedPrograms { pub fn prune_by_deployment_slot(&mut self, slot: Slot) { for second_level in self.entries.values_mut() { - second_level.retain(|entry| entry.deployment_slot != slot); + second_level + .slot_versions + .retain(|entry| entry.deployment_slot != slot); } self.remove_programs_with_no_entries(); } @@ -661,7 +731,8 @@ impl LoadedPrograms { // Remove entries un/re/deployed on orphan forks let mut first_ancestor_found = false; let mut first_ancestor_env = None; - *second_level = second_level + second_level.slot_versions = second_level + .slot_versions .iter() .rev() .filter(|entry| { @@ -717,7 +788,7 @@ impl LoadedPrograms { }) .cloned() .collect(); - second_level.reverse(); + second_level.slot_versions.reverse(); } self.remove_programs_with_no_entries(); debug_assert!(self.latest_root_slot <= new_root_slot); @@ -770,82 +841,108 @@ impl LoadedPrograms { /// Extracts a subset of the programs relevant to a transaction batch /// and returns which program accounts the accounts DB needs to load. pub fn extract( - &self, - current_slot: Slot, - keys: impl Iterator, - ) -> Arc> { + &mut self, + search_for: &mut Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))>, + loaded_programs_for_tx_batch: &mut LoadedProgramsForTxBatch, + ) -> Option<(Pubkey, u64)> { debug_assert!(self.fork_graph.is_some()); let locked_fork_graph = self.fork_graph.as_ref().unwrap().read().unwrap(); - let current_epoch = locked_fork_graph.slot_epoch(current_slot).unwrap(); - let environments = self.get_environments_for_epoch(current_epoch); - let extracted = Arc::new(Mutex::new(ExtractedPrograms { - loaded: LoadedProgramsForTxBatch { - entries: HashMap::new(), - slot: current_slot, - environments: environments.clone(), - }, - missing: HashMap::new(), - })); - let mut extracting = extracted.lock().unwrap(); - extracting.loaded.entries = keys - .filter_map(|(key, (match_criteria, usage_count))| { - let mut reloading = false; - if let Some(second_level) = self.entries.get(&key) { - for entry in second_level.iter().rev() { - let is_ancestor = matches!( - locked_fork_graph.relationship(entry.deployment_slot, current_slot), - BlockRelation::Ancestor - ); - - if entry.deployment_slot <= self.latest_root_slot - || entry.deployment_slot == current_slot - || is_ancestor - { - let entry_to_return = if current_slot >= entry.effective_slot { - if !Self::is_entry_usable(entry, current_slot, &match_criteria) - || !Self::matches_environment(entry, environments) - { - break; - } + let mut cooperative_loading_task = None; + search_for.retain(|(key, (match_criteria, usage_count))| { + if let Some(second_level) = self.entries.get_mut(key) { + for entry in second_level.slot_versions.iter().rev() { + let is_ancestor = matches!( + locked_fork_graph + .relationship(entry.deployment_slot, loaded_programs_for_tx_batch.slot), + BlockRelation::Ancestor + ); - if let LoadedProgramType::Unloaded(_environment) = &entry.program { - reloading = true; - break; - } + if entry.deployment_slot <= self.latest_root_slot + || entry.deployment_slot == loaded_programs_for_tx_batch.slot + || is_ancestor + 
{ + let entry_to_return = if loaded_programs_for_tx_batch.slot + >= entry.effective_slot + && Self::matches_environment( + entry, + &loaded_programs_for_tx_batch.environments, + ) { + if !Self::is_entry_usable( + entry, + loaded_programs_for_tx_batch.slot, + match_criteria, + ) { + break; + } - entry.clone() - } else if entry.is_implicit_delay_visibility_tombstone(current_slot) { - // Found a program entry on the current fork, but it's not effective - // yet. It indicates that the program has delayed visibility. Return - // the tombstone to reflect that. - Arc::new(LoadedProgram::new_tombstone( - entry.deployment_slot, - LoadedProgramType::DelayVisibility, - )) - } else { - continue; - }; - entry_to_return - .tx_usage_counter - .fetch_add(usage_count, Ordering::Relaxed); - return Some((key, entry_to_return)); - } + if let LoadedProgramType::Unloaded(_environment) = &entry.program { + break; + } + entry.clone() + } else if entry.is_implicit_delay_visibility_tombstone( + loaded_programs_for_tx_batch.slot, + ) { + // Found a program entry on the current fork, but it's not effective + // yet. It indicates that the program has delayed visibility. Return + // the tombstone to reflect that. + Arc::new(LoadedProgram::new_tombstone( + entry.deployment_slot, + LoadedProgramType::DelayVisibility, + )) + } else { + continue; + }; + entry_to_return.update_access_slot(loaded_programs_for_tx_batch.slot); + entry_to_return + .tx_usage_counter + .fetch_add(*usage_count, Ordering::Relaxed); + loaded_programs_for_tx_batch + .entries + .insert(*key, entry_to_return); + return false; } } - extracting.missing.insert(key, (usage_count, reloading)); - None - }) - .collect::>>(); - + } + if cooperative_loading_task.is_none() { + // We have not selected a task so far + let second_level = self.entries.entry(*key).or_default(); + if second_level.cooperative_loading_lock.is_none() { + // Select this missing entry which is not selected by any other TX batch yet + cooperative_loading_task = Some((*key, *usage_count)); + second_level.cooperative_loading_lock = Some(( + loaded_programs_for_tx_batch.slot, + std::thread::current().id(), + )); + } + } + true + }); drop(locked_fork_graph); self.stats .misses - .fetch_add(extracting.missing.len() as u64, Ordering::Relaxed); - self.stats - .hits - .fetch_add(extracting.loaded.entries.len() as u64, Ordering::Relaxed); - drop(extracting); - extracted + .fetch_add(search_for.len() as u64, Ordering::Relaxed); + self.stats.hits.fetch_add( + loaded_programs_for_tx_batch.entries.len() as u64, + Ordering::Relaxed, + ); + cooperative_loading_task + } + + /// Called by Bank::replenish_program_cache() for each program that is done loading. + pub fn finish_cooperative_loading_task( + &mut self, + slot: Slot, + key: Pubkey, + loaded_program: Arc, + ) { + let second_level = self.entries.entry(key).or_default(); + debug_assert_eq!( + second_level.cooperative_loading_lock, + Some((slot, std::thread::current().id())) + ); + second_level.cooperative_loading_lock = None; + self.assign_program(key, loaded_program); + self.loading_task_waiter.notify(); } pub fn merge(&mut self, tx_batch_cache: &LoadedProgramsForTxBatch) { @@ -854,18 +951,18 @@ impl LoadedPrograms { }) } - /// Returns the list of loaded programs which are verified and compiled sorted by `tx_usage_counter`. - /// - /// Entries from program runtime v1 and v2 can be individually filtered. - pub fn get_entries_sorted_by_tx_usage( + /// Returns the list of loaded programs which are verified and compiled. 
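The reworked `extract()` above inverts the old contract: matches are drained out of the caller's `search_for` list into the per-batch cache, and at most one still-missing key is handed back as this thread's cooperative loading task. A toy, self-contained model of that flow, with plain `HashMap`s and `u32` keys standing in for `LoadedPrograms`, `LoadedProgramsForTxBatch`, and `Pubkey`:

```rust
use std::collections::HashMap;

// Mock of the extract() contract: resolved keys move from `search_for` into
// `batch`; the first miss is claimed as the cooperative loading task and the
// remaining misses stay in `search_for` for a later pass.
fn extract(
    cache: &HashMap<u32, &'static str>,
    search_for: &mut Vec<u32>,
    batch: &mut HashMap<u32, &'static str>,
) -> Option<u32> {
    let mut task = None;
    search_for.retain(|key| match cache.get(key) {
        Some(&entry) => {
            batch.insert(*key, entry);
            false // resolved: drop it from the search list
        }
        None => {
            task = task.or(Some(*key)); // first miss becomes this thread's task
            true // still missing: keep it
        }
    });
    task
}

fn main() {
    let cache = HashMap::from([(1, "program1"), (3, "program3")]);
    let mut search_for = vec![1, 2, 3];
    let mut batch = HashMap::new();
    // Key 2 is absent, so it is handed back as the loading task...
    assert_eq!(extract(&cache, &mut search_for, &mut batch), Some(2));
    // ...while the hits have been moved into the per-batch map.
    assert_eq!(search_for, vec![2]);
    assert_eq!(batch.len(), 2);
}
```

In the real code the claim also records `(slot, thread id)` in the entry's `cooperative_loading_lock`, which `finish_cooperative_loading_task()` later asserts and clears before notifying the waiter.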
+ pub fn get_flattened_entries( &self, include_program_runtime_v1: bool, include_program_runtime_v2: bool, ) -> Vec<(Pubkey, Arc)> { self.entries .iter() - .flat_map(|(id, list)| { - list.iter() + .flat_map(|(id, second_level)| { + second_level + .slot_versions + .iter() .filter_map(move |program| match program.program { LoadedProgramType::LegacyV0(_) | LoadedProgramType::LegacyV1(_) if include_program_runtime_v1 => @@ -880,19 +977,54 @@ impl LoadedPrograms { _ => None, }) }) - .sorted_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed)) .collect() } /// Unloads programs which were used infrequently pub fn sort_and_unload(&mut self, shrink_to: PercentageInteger) { - let sorted_candidates = self.get_entries_sorted_by_tx_usage(true, true); + let mut sorted_candidates = self.get_flattened_entries(true, true); + sorted_candidates + .sort_by_cached_key(|(_id, program)| program.tx_usage_counter.load(Ordering::Relaxed)); let num_to_unload = sorted_candidates .len() .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT)); self.unload_program_entries(sorted_candidates.iter().take(num_to_unload)); } + /// Evicts programs using 2's random selection, choosing the least used program out of the two entries. + /// The eviction is performed enough number of times to reduce the cache usage to the given percentage. + pub fn evict_using_2s_random_selection(&mut self, shrink_to: PercentageInteger, now: Slot) { + let mut candidates = self.get_flattened_entries(true, true); + let num_to_unload = candidates + .len() + .saturating_sub(shrink_to.apply_to(MAX_LOADED_ENTRY_COUNT)); + fn random_index_and_usage_counter( + candidates: &[(Pubkey, Arc)], + now: Slot, + ) -> (usize, u64) { + let mut rng = thread_rng(); + let index = rng.gen_range(0..candidates.len()); + let usage_counter = candidates + .get(index) + .expect("Failed to get cached entry") + .1 + .decayed_usage_counter(now); + (index, usage_counter) + } + + for _ in 0..num_to_unload { + let (index1, usage_counter1) = random_index_and_usage_counter(&candidates, now); + let (index2, usage_counter2) = random_index_and_usage_counter(&candidates, now); + + let (program, entry) = if usage_counter1 < usage_counter2 { + candidates.swap_remove(index1) + } else { + candidates.swap_remove(index2) + }; + self.unload_program_entry(&program, &entry); + } + } + /// Removes all the entries at the given keys, if they exist pub fn remove_programs(&mut self, keys: impl Iterator) { for k in keys { @@ -901,8 +1033,8 @@ impl LoadedPrograms { } fn unload_program(&mut self, id: &Pubkey) { - if let Some(entries) = self.entries.get_mut(id) { - for entry in entries.iter_mut() { + if let Some(second_level) = self.entries.get_mut(id) { + for entry in second_level.slot_versions.iter_mut() { if let Some(unloaded) = entry.to_unloaded() { *entry = Arc::new(unloaded); self.stats @@ -910,6 +1042,11 @@ impl LoadedPrograms { .entry(*id) .and_modify(|c| saturating_add_assign!(*c, 1)) .or_insert(1); + } else { + error!( + "Failed to create an unloaded cache entry for a program type {:?}", + entry.program + ); } } } @@ -920,32 +1057,47 @@ impl LoadedPrograms { keys.iter().for_each(|key| self.unload_program(key)); } + /// This function removes the given entry for the given program from the cache. + /// The function expects that the program and entry exists in the cache. Otherwise it'll panic. 
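`evict_using_2s_random_selection()` above is power-of-two-choices eviction: sample two cached entries at random and unload whichever has the lower usage counter after decay, where usage halves for every slot since `latest_access_slot`. A runnable toy model of one selection round; `Entry` is an illustrative stand-in for `LoadedProgram`, and `checked_shr` is used here where the diff shifts directly:

```rust
use rand::{thread_rng, Rng};

struct Entry {
    name: &'static str,
    tx_usage_counter: u64,
    latest_access_slot: u64,
}

impl Entry {
    // Mirrors decayed_usage_counter(): halve the usage for each slot since
    // the last access; checked_shr saturates to zero for very old entries.
    fn decayed_usage(&self, now: u64) -> u64 {
        let decaying_for = now.saturating_sub(self.latest_access_slot);
        self.tx_usage_counter
            .checked_shr(decaying_for as u32)
            .unwrap_or(0)
    }
}

fn main() {
    let mut candidates = vec![
        Entry { name: "hot", tx_usage_counter: 100, latest_access_slot: 20 },
        Entry { name: "warm", tx_usage_counter: 25, latest_access_slot: 18 },
        Entry { name: "cold", tx_usage_counter: 4, latest_access_slot: 10 },
    ];
    let now = 21;
    let mut rng = thread_rng();
    // Two independent draws (possibly the same index, as in the real loop);
    // keep the busier entry, evict the other.
    let a = rng.gen_range(0..candidates.len());
    let b = rng.gen_range(0..candidates.len());
    let loser = if candidates[a].decayed_usage(now) < candidates[b].decayed_usage(now) {
        a
    } else {
        b
    };
    let evicted = candidates.swap_remove(loser);
    println!("evicting {} (decayed usage {})", evicted.name, evicted.decayed_usage(now));
}
```

Two-choice sampling approximates least-used eviction without sorting the whole candidate list, which is why `get_flattened_entries` no longer sorts; the legacy `sort_and_unload` path now sorts explicitly instead.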
+ fn unload_program_entry(&mut self, program: &Pubkey, remove_entry: &Arc) { + let second_level = self.entries.get_mut(program).expect("Cache lookup failed"); + let candidate = second_level + .slot_versions + .iter_mut() + .find(|entry| entry == &remove_entry) + .expect("Program entry not found"); + + // Certain entry types cannot be unloaded, such as tombstones, or already unloaded entries. + // For such entries, `to_unloaded()` will return None. + // These entry types do not occupy much memory. + if let Some(unloaded) = candidate.to_unloaded() { + if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 { + self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed); + } + self.stats + .evictions + .entry(*program) + .and_modify(|c| saturating_add_assign!(*c, 1)) + .or_insert(1); + *candidate = Arc::new(unloaded); + } + } + fn unload_program_entries<'a>( &mut self, remove: impl Iterator)>, ) { - for (id, program) in remove { - if let Some(entries) = self.entries.get_mut(id) { - if let Some(candidate) = entries.iter_mut().find(|entry| entry == &program) { - if let Some(unloaded) = candidate.to_unloaded() { - if candidate.tx_usage_counter.load(Ordering::Relaxed) == 1 { - self.stats.one_hit_wonders.fetch_add(1, Ordering::Relaxed); - } - self.stats - .evictions - .entry(*id) - .and_modify(|c| saturating_add_assign!(*c, 1)) - .or_insert(1); - *candidate = Arc::new(unloaded); - } - } - } + for (program, entry) in remove { + self.unload_program_entry(program, entry); } } fn remove_programs_with_no_entries(&mut self) { let num_programs_before_removal = self.entries.len(); - self.entries.retain(|_, programs| !programs.is_empty()); + self.entries.retain(|_, second_level| { + !second_level.slot_versions.is_empty() + || second_level.cooperative_loading_lock.is_some() + }); if self.entries.len() < num_programs_before_removal { self.stats.empty_entries.fetch_add( num_programs_before_removal.saturating_sub(self.entries.len()) as u64, @@ -975,8 +1127,8 @@ impl solana_frozen_abi::abi_example::AbiExample for LoadedProgram mod tests { use { crate::loaded_programs::{ - BlockRelation, ExtractedPrograms, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, ProgramRuntimeEnvironment, + BlockRelation, ForkGraph, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, + LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, assert_matches::assert_matches, @@ -987,7 +1139,7 @@ mod tests { ops::ControlFlow, sync::{ atomic::{AtomicU64, Ordering}, - Arc, Mutex, RwLock, + Arc, RwLock, }, }, }; @@ -1035,6 +1187,7 @@ mod tests { maybe_expiration_slot: expiry, tx_usage_counter: usage_counter, ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::new(deployment_slot), }) } @@ -1047,6 +1200,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }) } @@ -1075,6 +1229,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), } .to_unloaded() .expect("Failed to unload the program"), @@ -1090,8 +1245,9 @@ mod tests { cache .entries .values() - .map(|programs| { - programs + .map(|second_level| { + second_level + .slot_versions .iter() .filter(|program| predicate(&program.program)) .count() @@ -1099,6 +1255,181 @@ mod tests { .sum() } + #[test] + fn 
test_usage_counter_decay() { + let _cache = new_mock_cache::(); + let program = new_test_loaded_program_with_usage(10, 11, AtomicU64::new(32)); + program.update_access_slot(15); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(16), 16); + assert_eq!(program.decayed_usage_counter(17), 8); + assert_eq!(program.decayed_usage_counter(18), 4); + assert_eq!(program.decayed_usage_counter(19), 2); + assert_eq!(program.decayed_usage_counter(20), 1); + assert_eq!(program.decayed_usage_counter(21), 0); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(14), 32); + + program.update_access_slot(18); + assert_eq!(program.decayed_usage_counter(15), 32); + assert_eq!(program.decayed_usage_counter(16), 32); + assert_eq!(program.decayed_usage_counter(17), 32); + assert_eq!(program.decayed_usage_counter(18), 32); + assert_eq!(program.decayed_usage_counter(19), 16); + assert_eq!(program.decayed_usage_counter(20), 8); + assert_eq!(program.decayed_usage_counter(21), 4); + } + + #[test] + fn test_random_eviction() { + let mut programs = vec![]; + + let mut cache = new_mock_cache::(); + + // This test adds different kind of entries to the cache. + // Tombstones and unloaded entries are expected to not be evicted. + // It also adds multiple entries for three programs as it tries to create a typical cache instance. + let program1 = Pubkey::new_unique(); + let program1_deployment_slots = [0, 10, 20]; + let program1_usage_counters = [4, 5, 25]; + program1_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program1_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program1, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program1, *deployment_slot, usage_counter)); + }); + + let env = Arc::new(BuiltinProgram::new_mock()); + for slot in 21..31 { + set_tombstone( + &mut cache, + program1, + slot, + LoadedProgramType::FailedVerification(env.clone()), + ); + } + + for slot in 31..41 { + insert_unloaded_program(&mut cache, program1, slot); + } + + let program2 = Pubkey::new_unique(); + let program2_deployment_slots = [5, 11]; + let program2_usage_counters = [0, 2]; + program2_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program2_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program2, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program2, *deployment_slot, usage_counter)); + }); + + for slot in 21..31 { + set_tombstone( + &mut cache, + program2, + slot, + LoadedProgramType::DelayVisibility, + ); + } + + for slot in 31..41 { + insert_unloaded_program(&mut cache, program2, slot); + } + + let program3 = Pubkey::new_unique(); + let program3_deployment_slots = [0, 5, 15]; + let program3_usage_counters = [100, 3, 20]; + program3_deployment_slots + .iter() + .enumerate() + .for_each(|(i, deployment_slot)| { + let usage_counter = *program3_usage_counters.get(i).unwrap_or(&0); + cache.replenish( + program3, + new_test_loaded_program_with_usage( + *deployment_slot, + (*deployment_slot) + 2, + AtomicU64::new(usage_counter), + ), + ); + programs.push((program3, *deployment_slot, usage_counter)); + }); + + for slot in 21..31 { + set_tombstone(&mut cache, program3, slot, LoadedProgramType::Closed); + } + + for slot in 31..41 { + 
insert_unloaded_program(&mut cache, program3, slot); + } + + programs.sort_by_key(|(_id, _slot, usage_count)| *usage_count); + + let num_loaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::TestLoaded(_)) + }); + let num_unloaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::Unloaded(_)) + }); + let num_tombstones = num_matching_entries(&cache, |program_type| { + matches!( + program_type, + LoadedProgramType::DelayVisibility + | LoadedProgramType::FailedVerification(_) + | LoadedProgramType::Closed + ) + }); + + // Test that the cache is constructed with the expected number of entries. + assert_eq!(num_loaded, 8); + assert_eq!(num_unloaded, 30); + assert_eq!(num_tombstones, 30); + + // Evicting to 2% should update cache with + // * 5 active entries + // * 33 unloaded entries (3 active programs will get unloaded) + // * 30 tombstones (tombstones are not evicted) + cache.evict_using_2s_random_selection(Percentage::from(2), 21); + + let num_loaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::TestLoaded(_)) + }); + let num_unloaded = num_matching_entries(&cache, |program_type| { + matches!(program_type, LoadedProgramType::Unloaded(_)) + }); + let num_tombstones = num_matching_entries(&cache, |program_type| { + matches!( + program_type, + LoadedProgramType::DelayVisibility + | LoadedProgramType::FailedVerification(_) + | LoadedProgramType::Closed + ) + }); + + // Test that expected number of loaded entries get evicted/unloaded. + assert_eq!(num_loaded, 5); + assert_eq!(num_unloaded, 33); + assert_eq!(num_tombstones, 30); + } + #[test] fn test_eviction() { let mut programs = vec![]; @@ -1231,8 +1562,8 @@ mod tests { let unloaded = cache .entries .iter() - .flat_map(|(id, cached_programs)| { - cached_programs.iter().filter_map(|program| { + .flat_map(|(id, second_level)| { + second_level.slot_versions.iter().filter_map(|program| { matches!(program.program, LoadedProgramType::Unloaded(_)) .then_some((*id, program.tx_usage_counter.load(Ordering::Relaxed))) }) @@ -1285,8 +1616,8 @@ mod tests { }); assert_eq!(num_unloaded, 1); - cache.entries.values().for_each(|programs| { - programs.iter().for_each(|program| { + cache.entries.values().for_each(|second_level| { + second_level.slot_versions.iter().for_each(|program| { if matches!(program.program, LoadedProgramType::Unloaded(_)) { // Test that the usage counter is retained for the unloaded program assert_eq!(program.tx_usage_counter.load(Ordering::Relaxed), 10); @@ -1303,8 +1634,8 @@ mod tests { new_test_loaded_program_with_usage(0, 2, AtomicU64::new(0)), ); - cache.entries.values().for_each(|programs| { - programs.iter().for_each(|program| { + cache.entries.values().for_each(|second_level| { + second_level.slot_versions.iter().for_each(|program| { if matches!(program.program, LoadedProgramType::Unloaded(_)) && program.deployment_slot == 0 && program.effective_slot == 2 @@ -1362,8 +1693,8 @@ mod tests { .entries .get(&program1) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 1); - assert!(second_level.first().unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 1); + assert!(second_level.slot_versions.first().unwrap().is_tombstone()); assert_eq!(tombstone.deployment_slot, 10); assert_eq!(tombstone.effective_slot, 10); @@ -1378,8 +1709,8 @@ mod tests { .entries .get(&program2) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 1); - 
assert!(!second_level.first().unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 1); + assert!(!second_level.slot_versions.first().unwrap().is_tombstone()); let tombstone = set_tombstone( &mut cache, @@ -1391,9 +1722,9 @@ mod tests { .entries .get(&program2) .expect("Failed to find the entry"); - assert_eq!(second_level.len(), 2); - assert!(!second_level.first().unwrap().is_tombstone()); - assert!(second_level.get(1).unwrap().is_tombstone()); + assert_eq!(second_level.slot_versions.len(), 2); + assert!(!second_level.slot_versions.first().unwrap().is_tombstone()); + assert!(second_level.slot_versions.get(1).unwrap().is_tombstone()); assert!(tombstone.is_tombstone()); assert_eq!(tombstone.deployment_slot, 60); assert_eq!(tombstone.effective_slot, 60); @@ -1491,6 +1822,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }); let (existing, program) = cache.replenish(program1, updated_program.clone()); assert!(!existing); @@ -1502,6 +1834,7 @@ mod tests { .entries .get(&program1) .expect("failed to find the program") + .slot_versions .len(), 2 ); @@ -1514,20 +1847,25 @@ mod tests { .entries .get(&program1) .expect("failed to find the program") + .slot_versions .len(), 2 ); cache.prune(22, cache.latest_root_epoch.saturating_add(1)); - let entries = cache + let second_level = cache .entries .get(&program1) .expect("failed to find the program"); // Test that prune removed 1 entry, since epoch changed - assert_eq!(entries.len(), 1); + assert_eq!(second_level.slot_versions.len(), 1); - let entry = entries.first().expect("Failed to get the program").clone(); + let entry = second_level + .slot_versions + .first() + .expect("Failed to get the program") + .clone(); // Test that the correct entry remains in the cache assert_eq!(entry, updated_program); } @@ -1574,31 +1912,25 @@ mod tests { } fn match_slot( - extracted: &Arc>, + extracted: &LoadedProgramsForTxBatch, program: &Pubkey, deployment_slot: Slot, working_slot: Slot, ) -> bool { - let extracted = extracted.lock().unwrap(); - assert_eq!(extracted.loaded.slot, working_slot); + assert_eq!(extracted.slot, working_slot); extracted - .loaded - .find(program) + .entries + .get(program) .map(|entry| entry.deployment_slot == deployment_slot) .unwrap_or(false) } fn match_missing( - extracted: &Arc>, - key: &Pubkey, - reload: bool, + missing: &[(Pubkey, (LoadedProgramMatchCriteria, u64))], + program: &Pubkey, + _reload: bool, ) -> bool { - let extracted = extracted.lock().unwrap(); - extracted - .missing - .get(key) - .filter(|(_count, reloading)| *reloading == reload) - .is_some() + missing.iter().any(|(key, _)| key == program) } #[test] @@ -1679,34 +2011,30 @@ mod tests { // 23 // Testing fork 0 - 10 - 12 - 22 with current slot at 22 - let extracted = cache.extract( - 22, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 2)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 3)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 2)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 3)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 4)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); + cache.extract(&mut missing, &mut 
extracted); assert!(match_slot(&extracted, &program1, 20, 22)); assert!(match_slot(&extracted, &program4, 0, 22)); - assert!(match_missing(&extracted, &program2, false)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program2, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 15 - let extracted = cache.extract( - 15, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 15)); assert!(match_slot(&extracted, &program2, 11, 15)); @@ -1714,27 +2042,22 @@ mod tests { // The effective slot of program4 deployed in slot 15 is 18. So it should not be usable in slot 15. // A delay visibility tombstone should be returned here. let tombstone = extracted - .lock() - .unwrap() - .loaded .find(&program4) .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 15); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing the same fork above, but current slot is now 18 (equal to effective slot of program4). - let extracted = cache.extract( - 18, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(18, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 18)); assert!(match_slot(&extracted, &program2, 11, 18)); @@ -1742,19 +2065,17 @@ mod tests { // The effective slot of program4 deployed in slot 15 is 18. So it should be usable in slot 18. assert!(match_slot(&extracted, &program4, 15, 18)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing the same fork above, but current slot is now 23 (later than the effective slot of program4).
- let extracted = cache.extract( - 23, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); @@ -1762,33 +2083,28 @@ mod tests { // The effective slot of program4 deployed in slot 15 is 18. So it should be usable in slot 23. assert!(match_slot(&extracted, &program4, 15, 23)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 with current slot at 11 - let extracted = cache.extract( - 11, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(11, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 11)); // program2 was updated at slot 11, but is not effective till slot 12. The result should contain a tombstone, per the delay-visibility rule sketched below.
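The tombstone assertions on either side of this point exercise one rule: a program version deployed at slot N only becomes visible to transactions once the current slot reaches its effective slot, and until then lookups on the deploying fork see a DelayVisibility tombstone. A minimal sketch of that rule with illustrative names (only LoadedProgramType::DelayVisibility above comes from the crate):

#[derive(Debug, PartialEq)]
enum Visibility {
    Usable,
    DelayVisibilityTombstone,
}

// Sketch: visibility of one program version at `current_slot`, given its
// `effective_slot` (deployment slot plus the delay-visibility offset).
fn visibility(effective_slot: u64, current_slot: u64) -> Visibility {
    if current_slot >= effective_slot {
        Visibility::Usable
    } else {
        Visibility::DelayVisibilityTombstone
    }
}

fn main() {
    // program2 below: updated at slot 11, effective at slot 12.
    assert_eq!(visibility(12, 11), Visibility::DelayVisibilityTombstone);
    assert_eq!(visibility(12, 12), Visibility::Usable);
}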
let tombstone = extracted - .lock() - .unwrap() - .loaded .find(&program2) .expect("Failed to find the tombstone"); assert_matches!(tombstone.program, LoadedProgramType::DelayVisibility); assert_eq!(tombstone.deployment_slot, 11); assert!(match_slot(&extracted, &program4, 5, 11)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // The following is a special case, where there's an expiration slot let test_program = Arc::new(LoadedProgram { @@ -1799,50 +2115,47 @@ mod tests { maybe_expiration_slot: Some(21), tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }); assert!(!cache.replenish(program4, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let extracted = cache.extract( - 19, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); // Program4 deployed at slot 19 should not be expired yet assert!(match_slot(&extracted, &program4, 19, 19)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 21 // This would cause program4 deployed at slot 19 to be expired. 
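The expiry rule the next extraction exercises is worth spelling out: an entry with maybe_expiration_slot = Some(s) stops matching as soon as the current slot reaches s. A hedged sketch (is_expired is an illustrative helper, not an API of the cache):

fn is_expired(maybe_expiration_slot: Option<u64>, current_slot: u64) -> bool {
    // Entries without an expiration slot never expire this way.
    maybe_expiration_slot.map_or(false, |expiration| current_slot >= expiration)
}

fn main() {
    // program4 above: deployed at slot 19 with maybe_expiration_slot = Some(21).
    assert!(!is_expired(Some(21), 19)); // still matched when extracting at slot 19
    assert!(is_expired(Some(21), 21)); // reported as missing when extracting at slot 21
}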
- let extracted = cache.extract( - 21, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 21)); assert!(match_slot(&extracted, &program2, 11, 21)); - assert!(match_missing(&extracted, &program3, false)); - assert!(match_missing(&extracted, &program4, false)); + assert!(match_missing(&missing, &program3, false)); + assert!(match_missing(&missing, &program4, false)); // Remove the expired entry to let the rest of the test continue - if let Some(programs) = cache.entries.get_mut(&program4) { - programs.pop(); + if let Some(second_level) = cache.entries.get_mut(&program4) { + second_level.slot_versions.pop(); } cache.prune(5, 0); @@ -1863,35 +2176,31 @@ mod tests { // 23 // Testing fork 11 - 15 - 16 - 19 - 22 with root at 5 and current slot at 21 - let extracted = cache.extract( - 21, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(21, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); // Since the fork was pruned, we should not find the entry deployed at slot 20.
assert!(match_slot(&extracted, &program1, 0, 21)); assert!(match_slot(&extracted, &program2, 11, 21)); assert!(match_slot(&extracted, &program4, 15, 21)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let extracted = cache.extract( - 27, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); @@ -1916,23 +2225,21 @@ mod tests { // 23 // Testing fork 16, 19, 23, with root at 15, current slot at 23 - let extracted = cache.extract( - 23, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program4, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(23, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 23)); assert!(match_slot(&extracted, &program2, 11, 23)); assert!(match_slot(&extracted, &program4, 15, 23)); // program3 was deployed at slot 25, which has been pruned - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); } #[test] @@ -1974,42 +2281,38 @@ mod tests { assert!(!cache.replenish(program3, new_test_loaded_program(25, 26)).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 12 - let extracted = cache.extract( - 12, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 12)); assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Test the same fork, but request the program modified at a later slot than what's in the cache.
- let extracted = cache.extract( - 12, - vec![ - ( - program1, - (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1), - ), - ( - program2, - (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1), - ), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + ( + program1, + (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1), + ), + ( + program2, + (LoadedProgramMatchCriteria::DeployedOnOrAfterSlot(5), 1), + ), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(match_missing(&extracted, &program1, false)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program1, false)); + assert!(match_missing(&missing, &program3, false)); } #[test] @@ -2068,52 +2371,46 @@ mod tests { ); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 19 - let extracted = cache.extract( - 19, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(19, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 19)); assert!(match_slot(&extracted, &program2, 11, 19)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 25 - 27 with current slot at 27 - let extracted = cache.extract( - 27, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(27, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 27)); assert!(match_slot(&extracted, &program2, 11, 27)); - assert!(match_missing(&extracted, &program3, true)); + assert!(match_missing(&missing, &program3, true)); // Testing fork 0 - 10 - 20 - 22 with current slot at 22 - let extracted = cache.extract( - 22, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(22, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 20, 22)); - assert!(match_missing(&extracted, &program2, false)); - assert!(match_missing(&extracted, &program3, true)); + 
assert!(match_missing(&missing, &program2, false)); + assert!(match_missing(&missing, &program3, true)); } #[test] @@ -2162,42 +2459,39 @@ mod tests { maybe_expiration_slot: Some(15), tx_usage_counter: AtomicU64::default(), ix_usage_counter: AtomicU64::default(), + latest_access_slot: AtomicU64::default(), }); assert!(!cache.replenish(program1, test_program).0); // Testing fork 0 - 5 - 11 - 15 - 16 - 19 - 21 - 23 with current slot at 12 - let extracted = cache.extract( - 12, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(12, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); // Program1 deployed at slot 11 should not be expired yet assert!(match_slot(&extracted, &program1, 11, 12)); assert!(match_slot(&extracted, &program2, 11, 12)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program3, false)); // Testing fork 0 - 5 - 11 - 12 - 15 - 16 - 19 - 21 - 23 with current slot at 15 // This would cause program1, which expires at slot 15, to be expired. - let extracted = cache.extract( - 15, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program3, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(15, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program2, 11, 15)); - assert!(match_missing(&extracted, &program1, false)); - assert!(match_missing(&extracted, &program3, false)); + assert!(match_missing(&missing, &program1, false)); + assert!(match_missing(&missing, &program3, false)); // Test that the program still exists in the cache, even though it is expired.
assert_eq!( @@ -2205,6 +2499,7 @@ mod tests { .entries .get(&program1) .expect("Didn't find program1") + .slot_versions .len(), 3 ); @@ -2216,10 +2511,14 @@ mod tests { .entries .get(&program1) .expect("Didn't find program1") + .slot_versions .len(), 1 ); + // Unlock the cooperative loading lock so that the subsequent prune can do its job + cache.finish_cooperative_loading_task(15, program1, new_test_loaded_program(0, 1)); + // New root 15 should evict the expired entry for program1 cache.prune(15, 0); assert!(cache.entries.get(&program1).is_none()); @@ -2251,19 +2550,14 @@ mod tests { cache.prune(10, 0); - let extracted = cache.extract( - 20, - vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))].into_iter(), - ); + let mut missing = vec![(program1, (LoadedProgramMatchCriteria::NoCriteria, 1))]; + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); // The cache should have the program deployed at slot 0 assert_eq!( extracted - .lock() - .unwrap() - .loaded - .entries - .get(&program1) + .find(&program1) .expect("Did not find the program") .deployment_slot, 0 @@ -2297,73 +2591,63 @@ mod tests { let program2 = Pubkey::new_unique(); assert!(!cache.replenish(program2, new_test_loaded_program(10, 11)).0); - let extracted = cache.extract( - 20, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); - let extracted = cache.extract( - 6, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 5, 6)); - assert!(match_missing(&extracted, &program2, false)); + assert!(match_missing(&missing, &program2, false)); // Pruning slot 5 will remove program1 entry deployed at slot 5. // On fork chaining from slot 5, the entry deployed at slot 0 will become visible. 
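A hedged sketch of the prune_by_deployment_slot behavior the next few assertions rely on; a bare Vec<u64> of deployment slots stands in here for the real second-level slot_versions list of LoadedProgram entries:

fn prune_by_deployment_slot(slot_versions: &mut Vec<u64>, pruned_slot: u64) {
    // Drop every version deployed exactly at the pruned slot; versions from
    // earlier, still-rooted slots survive and become visible again.
    slot_versions.retain(|&deployment_slot| deployment_slot != pruned_slot);
}

fn main() {
    // program1 below has versions deployed at slots 0 and 5.
    let mut slot_versions = vec![0, 5];
    prune_by_deployment_slot(&mut slot_versions, 5);
    assert_eq!(slot_versions, vec![0]); // extract() at slot 20 now finds slot 0
}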
cache.prune_by_deployment_slot(5); - let extracted = cache.extract( - 20, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 20)); assert!(match_slot(&extracted, &program2, 10, 20)); - let extracted = cache.extract( - 6, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(6, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 6)); - assert!(match_missing(&extracted, &program2, false)); + assert!(match_missing(&missing, &program2, false)); // Pruning slot 10 will remove program2 entry deployed at slot 10. // As there is no other entry for program2, extract() will return it as missing. cache.prune_by_deployment_slot(10); - let extracted = cache.extract( - 20, - vec![ - (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), - (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), - ] - .into_iter(), - ); + let mut missing = vec![ + (program1, (LoadedProgramMatchCriteria::NoCriteria, 1)), + (program2, (LoadedProgramMatchCriteria::NoCriteria, 1)), + ]; + let mut extracted = LoadedProgramsForTxBatch::new(20, cache.environments.clone()); + cache.extract(&mut missing, &mut extracted); assert!(match_slot(&extracted, &program1, 0, 20)); - assert!(match_missing(&extracted, &program2, false)); + assert!(match_missing(&missing, &program2, false)); } #[test] diff --git a/program-test/src/lib.rs b/program-test/src/lib.rs index 37e848471a8b3a..3ee531ae3b6f77 100644 --- a/program-test/src/lib.rs +++ b/program-test/src/lib.rs @@ -8,7 +8,10 @@ use { base64::{prelude::BASE64_STANDARD, Engine}, chrono_humanize::{Accuracy, HumanTime, Tense}, log::*, - solana_accounts_db::epoch_accounts_hash::EpochAccountsHash, + solana_accounts_db::{ + accounts_db::AccountShrinkThreshold, accounts_index::AccountSecondaryIndexes, + epoch_accounts_hash::EpochAccountsHash, + }, solana_banks_client::start_client, solana_banks_server::banks_server::start_local_server, solana_bpf_loader_program::serialization::serialize_parameters, @@ -805,7 +808,7 @@ impl ProgramTest { debug!("Payer address: {}", mint_keypair.pubkey()); debug!("Genesis config: {}", genesis_config); - let mut bank = Bank::new_with_runtime_config_for_tests( + let mut bank = Bank::new_with_paths( &genesis_config, Arc::new(RuntimeConfig { compute_budget: self.compute_max_units.map(|max_units| ComputeBudget { @@ -815,6 +818,15 @@ impl ProgramTest { transaction_account_lock_limit: self.transaction_account_lock_limit, ..RuntimeConfig::default() }), + Vec::default(), + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + None, + None, + Arc::default(), ); // Add commonly-used SPL programs as a convenience to the user diff --git a/programs/bpf_loader/src/lib.rs b/programs/bpf_loader/src/lib.rs index 25b2df318f7f19..281d314cb4b5b4 100644 --- 
a/programs/bpf_loader/src/lib.rs +++ b/programs/bpf_loader/src/lib.rs @@ -4021,6 +4021,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), + latest_access_slot: AtomicU64::new(0), }; invoke_context .programs_modified_by_tx @@ -4061,6 +4062,7 @@ mod tests { maybe_expiration_slot: None, tx_usage_counter: AtomicU64::new(100), ix_usage_counter: AtomicU64::new(100), + latest_access_slot: AtomicU64::new(0), }; invoke_context .programs_modified_by_tx diff --git a/programs/sbf/Cargo.lock b/programs/sbf/Cargo.lock index aaa54ae1a104cc..10a8c8d9980403 100644 --- a/programs/sbf/Cargo.lock +++ b/programs/sbf/Cargo.lock @@ -152,9 +152,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "59d2a3357dde987206219e78ecfbbb6e8dad06cbb65292758d3270e6254f7355" [[package]] name = "aquamarine" @@ -425,7 +425,7 @@ checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -579,7 +579,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -732,7 +732,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "syn_derive", ] @@ -1140,9 +1140,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = "14c3242926edf34aec4ac3a77108ad4854bffaa2e4ddc1824124ce59231302d5" dependencies = [ "cfg-if 1.0.0", "crossbeam-utils", @@ -1174,9 +1174,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.14" +version = "0.8.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" +checksum = "c06d96137f14f244c37f989d9fff8f95e6c18b918e71f36638f8c49112e4c78f" dependencies = [ "cfg-if 1.0.0", ] @@ -1251,7 +1251,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.10.0", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1262,7 +1262,7 @@ checksum = "29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1387,9 +1387,9 @@ dependencies = [ [[package]] name = "dir-diff" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2860407d7d7e2e004bb2128510ad9e8d669e76fa005ccf567977b5d71b8b4a0b" +checksum = "a7ad16bf5f84253b50d6557681c58c3ab67c47c77d39fed9aeb56e947290bd10" dependencies = [ "walkdir", ] @@ -1446,7 +1446,7 @@ checksum = "a6cbae11b3de8fce2a456e8ea3dada226b35fe791f0dc1d360c0941f0bb681f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1555,7 +1555,7 @@ checksum = "eecf8589574ce9b895052fa12d69af7a233f99e6107f5cb8dd1044f2a17bfdcb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -1822,7 +1822,7 @@ checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -2151,9 +2151,9 @@ checksum = 
"3c1ad908cc71012b7bea4d0c53ba96a8cba9962f048fa68d143376143d863b7a" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2166,7 +2166,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2560,9 +2560,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" @@ -3058,7 +3058,7 @@ checksum = "cfb77679af88f8b125209d354a202862602672222e7f2313fdd6dc349bad4712" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3140,7 +3140,7 @@ dependencies = [ "proc-macro-crate 1.1.3", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3152,7 +3152,7 @@ dependencies = [ "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3618,7 +3618,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -3769,7 +3769,7 @@ checksum = "9e2e25ee72f5b24d773cae88422baddefff7714f97aab68d96fe2b6fc4a28fb2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4030,9 +4030,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "async-compression", "base64 0.21.5", @@ -4368,7 +4368,7 @@ checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -4413,14 +4413,14 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] name = "serde_yaml" -version = "0.9.27" +version = "0.9.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cc7a1570e38322cfe4154732e5110f887ea57e22b76f4bfd32b5bdd3368666c" +checksum = "9269cfafc7e0257ee4a42f3f68a307f458c63d9e7c8ba4b58c5d15f1b7d7e8d3" dependencies = [ "indexmap 2.1.0", "itoa", @@ -5041,6 +5041,7 @@ dependencies = [ "solana-tpu-client", "solana-transaction-status", "solana-turbine", + "solana-unified-scheduler-pool", "solana-version", "solana-vote", "solana-vote-program", @@ -5161,7 +5162,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -6290,7 +6291,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -6545,6 +6546,22 @@ dependencies = [ "tokio", ] +[[package]] +name = "solana-unified-scheduler-logic" +version = "1.18.0" + +[[package]] +name = 
"solana-unified-scheduler-pool" +version = "1.18.0" +dependencies = [ + "solana-ledger", + "solana-program-runtime", + "solana-runtime", + "solana-sdk", + "solana-unified-scheduler-logic", + "solana-vote", +] + [[package]] name = "solana-validator" version = "1.18.0" @@ -6792,7 +6809,7 @@ checksum = "fadbefec4f3c678215ca72bd71862697bb06b41fd77c0088902dd3203354387b" dependencies = [ "quote", "spl-discriminator-syn", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -6804,7 +6821,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.40", + "syn 2.0.42", "thiserror", ] @@ -6852,7 +6869,7 @@ dependencies = [ "proc-macro2", "quote", "sha2 0.10.8", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -6957,9 +6974,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "stream-cancel" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0a9eb2715209fb8cc0d942fcdff45674bfc9f0090a0d897e85a22955ad159b" +checksum = "5f9fbf9bd71e4cf18d68a8a0951c0e5b7255920c0cd992c4ff51cddd6ef514a3" dependencies = [ "futures-core", "pin-project", @@ -7025,9 +7042,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.40" +version = "2.0.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13fa70a4ee923979ffb522cacce59d34421ebdea5625e1073c4326ef9d2dd42e" +checksum = "5b7d0a2c048d661a1a59fcd7355baa232f7ed34e0ee4df2eef3c1c1c0d3852d8" dependencies = [ "proc-macro2", "quote", @@ -7043,7 +7060,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7201,7 +7218,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7213,7 +7230,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "test-case-core", ] @@ -7234,22 +7251,22 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "f11c217e1416d6f036b870f14e0413d480dbf28edbee1f877abaf0206af43bb7" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "01742297787513b79cf8e29d1056ede1313e2420b7b3b15d0a768b4921f549df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7372,7 +7389,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -7734,9 +7751,9 @@ dependencies = [ [[package]] name = "unsafe-libyaml" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1865806a559042e51ab5414598446a5871b561d21b6764f2eabb0dd481d880a6" +checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" [[package]] name = "untrusted" @@ -7872,7 +7889,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "wasm-bindgen-shared", ] @@ -7906,7 +7923,7 @@ checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8185,22 +8202,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ba595b9f2772fbee2312de30eeb80ec773b4cb2f1e8098db024afadda6c06f" +checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.15" +version = "0.7.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772666c41fb6dceaf520b564b962d738a8e1a83b41bd48945f50837aed78bb1d" +checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] @@ -8220,7 +8237,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.40", + "syn 2.0.42", ] [[package]] diff --git a/programs/sbf/tests/programs.rs b/programs/sbf/tests/programs.rs index 6476aa842fa7df..ec76a5cca349a4 100644 --- a/programs/sbf/tests/programs.rs +++ b/programs/sbf/tests/programs.rs @@ -2097,10 +2097,13 @@ fn test_program_sbf_invoke_in_same_tx_as_redeployment() { ); // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective - // after 2 slots. So we need to advance the bank client by 2 slots here. + // after 2 slots. They need to be called individually to create the correct fork graph in between. + bank_client + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) + .unwrap(); let bank = bank_client - .advance_slot(2, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance slot"); + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) + .unwrap(); // Prepare redeployment let buffer_keypair = Keypair::new(); @@ -2193,10 +2196,13 @@ fn test_program_sbf_invoke_in_same_tx_as_undeployment() { ); // load_upgradeable_program sets clock sysvar to 1, which causes the program to be effective - // after 2 slots. So we need to advance the bank client by 2 slots here. + // after 2 slots. They need to be called individually to create the correct fork graph in between. 
+ bank_client + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) + .unwrap(); let bank = bank_client - .advance_slot(2, bank_forks.as_ref(), &Pubkey::default()) - .expect("Failed to advance slot"); + .advance_slot(1, bank_forks.as_ref(), &Pubkey::default()) + .unwrap(); // Prepare undeployment let (programdata_address, _) = Pubkey::find_program_address( @@ -3861,7 +3867,6 @@ fn test_program_fees() { &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) .unwrap_or_default() .into(), - true, false, ); bank_client @@ -3885,7 +3890,6 @@ fn test_program_fees() { &process_compute_budget_instructions(sanitized_message.program_instructions_iter()) .unwrap_or_default() .into(), - true, false, ); assert!(expected_normal_fee < expected_prioritized_fee); diff --git a/runtime/benches/accounts.rs b/runtime/benches/accounts.rs index 2822c2533193cf..7efc0a11ac0d75 100644 --- a/runtime/benches/accounts.rs +++ b/runtime/benches/accounts.rs @@ -11,7 +11,7 @@ use { accounts::{AccountAddressFilter, Accounts}, accounts_db::{ test_utils::create_test_accounts, AccountShrinkThreshold, AccountsDb, - VerifyAccountsHashAndLamportsConfig, + VerifyAccountsHashAndLamportsConfig, ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, }, accounts_index::{AccountSecondaryIndexes, ScanConfig}, ancestors::Ancestors, @@ -36,6 +36,18 @@ use { test::Bencher, }; +fn new_accounts_db(account_paths: Vec) -> AccountsDb { + AccountsDb::new_with_config( + account_paths, + &ClusterType::Development, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), + ) +} + fn deposit_many(bank: &Bank, pubkeys: &mut Vec, num: usize) -> Result<(), LamportsError> { for t in 0..num { let pubkey = solana_sdk::pubkey::new_rand(); @@ -99,12 +111,7 @@ fn test_accounts_squash(bencher: &mut Bencher) { #[bench] fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![PathBuf::from("bench_accounts_hash_internal")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("bench_accounts_hash_internal")]); let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec = vec![]; let num_accounts = 60_000; @@ -137,12 +144,7 @@ fn test_accounts_hash_bank_hash(bencher: &mut Bencher) { #[bench] fn test_update_accounts_hash(bencher: &mut Bencher) { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![PathBuf::from("update_accounts_hash")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("update_accounts_hash")]); let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec = vec![]; create_test_accounts(&accounts, &mut pubkeys, 50_000, 0); @@ -157,12 +159,7 @@ fn test_update_accounts_hash(bencher: &mut Bencher) { #[bench] fn test_accounts_delta_hash(bencher: &mut Bencher) { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![PathBuf::from("accounts_delta_hash")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delta_hash")]); let accounts = Accounts::new(Arc::new(accounts_db)); let mut pubkeys: Vec = vec![]; create_test_accounts(&accounts, &mut 
pubkeys, 100_000, 0); @@ -174,12 +171,7 @@ fn test_accounts_delta_hash(bencher: &mut Bencher) { #[bench] fn bench_delete_dependencies(bencher: &mut Bencher) { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![PathBuf::from("accounts_delete_deps")], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from("accounts_delete_deps")]); let accounts = Accounts::new(Arc::new(accounts_db)); let mut old_pubkey = Pubkey::default(); let zero_account = AccountSharedData::new(0, 0, AccountSharedData::default().owner()); @@ -204,15 +196,10 @@ fn store_accounts_with_possible_contention( F: Fn(&Accounts, &[Pubkey]) + Send + Copy, { let num_readers = 5; - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![ - PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string())) - .join(bench_name), - ], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join(bench_name)]); let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); let num_keys = 1000; let slot = 0; @@ -341,15 +328,10 @@ fn bench_rwlock_hashmap_single_reader_with_n_writers(bencher: &mut Bencher) { } fn setup_bench_dashmap_iter() -> (Arc, DashMap) { - let accounts_db = AccountsDb::new_with_config_for_benches( - vec![ - PathBuf::from(std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string())) - .join("bench_dashmap_par_iter"), - ], - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(vec![PathBuf::from( + std::env::var("FARF_DIR").unwrap_or_else(|_| "farf".to_string()), + ) + .join("bench_dashmap_par_iter")]); let accounts = Arc::new(Accounts::new(Arc::new(accounts_db))); let dashmap = DashMap::new(); @@ -399,12 +381,7 @@ fn bench_dashmap_iter(bencher: &mut Bencher) { #[bench] fn bench_load_largest_accounts(b: &mut Bencher) { - let accounts_db = AccountsDb::new_with_config_for_benches( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = new_accounts_db(Vec::new()); let accounts = Accounts::new(Arc::new(accounts_db)); let mut rng = rand::thread_rng(); for _ in 0..10_000 { diff --git a/runtime/src/accounts/mod.rs b/runtime/src/accounts/mod.rs index c5ffa76e36b56b..28343a056f087f 100644 --- a/runtime/src/accounts/mod.rs +++ b/runtime/src/accounts/mod.rs @@ -29,7 +29,6 @@ use { bpf_loader_upgradeable::{self, UpgradeableLoaderState}, feature_set::{ include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, simplify_writable_program_account_check, FeatureSet, }, fee::FeeStructure, @@ -83,8 +82,6 @@ pub(super) fn load_accounts( ) .unwrap_or_default() .into(), - feature_set - .is_active(&remove_congestion_multiplier_from_fee_calculation::id()), feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -202,12 +199,8 @@ fn load_transaction_accounts( .then_some(()) .and_then(|_| loaded_programs.find(key)) { - // This condition block does special handling for accounts that are passed - // as instruction account to any of the instructions in the transaction. 
- // It's been noticed that some programs are reading other program accounts - // (that are passed to the program as instruction accounts). So such accounts - // are needed to be loaded even though corresponding compiled program may - // already be present in the cache. + // Optimization to skip loading of accounts which are only used as + // programs in top-level instructions and not passed as instruction accounts. account_shared_data_from_program(key, program_accounts) .map(|program_account| (program.account_size, program_account, 0))? } else { @@ -536,10 +529,7 @@ mod tests { use { super::*, nonce::state::Versions as NonceVersions, - solana_accounts_db::{ - accounts::Accounts, accounts_db::AccountShrinkThreshold, - accounts_index::AccountSecondaryIndexes, rent_collector::RentCollector, - }, + solana_accounts_db::{accounts::Accounts, rent_collector::RentCollector}, solana_program_runtime::{ compute_budget_processor, prioritization_fee::{PrioritizationFeeDetails, PrioritizationFeeType}, @@ -548,7 +538,6 @@ mod tests { account::{AccountSharedData, WritableAccount}, compute_budget::ComputeBudgetInstruction, epoch_schedule::EpochSchedule, - genesis_config::ClusterType, hash::Hash, instruction::CompiledInstruction, message::{Message, SanitizedMessage}, @@ -573,12 +562,7 @@ mod tests { ) -> Vec { let mut hash_queue = BlockhashQueue::new(100); hash_queue.register_hash(&tx.message().recent_blockhash, lamports_per_signature); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); for ka in ka.iter() { accounts.accounts_db.store_for_tests(0, &[(&ka.0, &ka.1)]); @@ -737,7 +721,6 @@ mod tests { &process_compute_budget_instructions(message.program_instructions_iter()) .unwrap_or_default() .into(), - true, false, ); assert_eq!(fee, lamports_per_signature); @@ -1384,12 +1367,7 @@ mod tests { #[test] fn test_instructions() { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let instructions_key = solana_sdk::sysvar::instructions::id(); @@ -1411,12 +1389,7 @@ mod tests { #[test] fn test_overrides() { solana_logger::setup(); - let accounts_db = AccountsDb::new_with_config_for_tests( - Vec::new(), - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_single_for_tests(); let accounts = Accounts::new(Arc::new(accounts_db)); let mut account_overrides = AccountOverrides::default(); let slot_history_id = sysvar::slot_history::id(); @@ -1583,7 +1556,6 @@ mod tests { &process_compute_budget_instructions(message.program_instructions_iter()) .unwrap_or_default() .into(), - true, false, ); assert_eq!(fee, lamports_per_signature + prioritization_fee); diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs index 8cabce9e413585..4fbb3b88bd2d21 100644 --- a/runtime/src/bank.rs +++ b/runtime/src/bank.rs @@ -33,6 +33,8 @@ //! It offers a high-level API that signs transactions //! on behalf of the caller, and a low-level API for when they have //! already been signed and verified. 
+#[cfg(feature = "dev-context-only-utils")] +use solana_accounts_db::accounts_db::ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS; #[allow(deprecated)] use solana_sdk::recent_blockhashes_account; pub use solana_sdk::reward_type::RewardType; @@ -76,7 +78,7 @@ use { accounts_db::{ AccountShrinkThreshold, AccountStorageEntry, AccountsDb, AccountsDbConfig, CalcAccountsHashDataSource, VerifyAccountsHashAndLamportsConfig, - ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS, ACCOUNTS_DB_CONFIG_FOR_TESTING, + ACCOUNTS_DB_CONFIG_FOR_TESTING, }, accounts_hash::{ AccountHash, AccountsHash, CalcAccountsHashConfig, HashStats, IncrementalAccountsHash, @@ -112,8 +114,8 @@ use { compute_budget_processor::process_compute_budget_instructions, invoke_context::BuiltinFunctionWithContext, loaded_programs::{ - ExtractedPrograms, LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, - LoadedProgramType, LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, + LoadProgramMetrics, LoadedProgram, LoadedProgramMatchCriteria, LoadedProgramType, + LoadedPrograms, LoadedProgramsForTxBatch, ProgramRuntimeEnvironment, ProgramRuntimeEnvironments, DELAY_VISIBILITY_SLOT_OFFSET, }, log_collector::LogCollector, @@ -139,10 +141,7 @@ use { epoch_info::EpochInfo, epoch_schedule::EpochSchedule, feature, - feature_set::{ - self, include_loaded_accounts_data_size_in_fee_calculation, - remove_congestion_multiplier_from_fee_calculation, FeatureSet, - }, + feature_set::{self, include_loaded_accounts_data_size_in_fee_calculation, FeatureSet}, fee::FeeStructure, fee_calculator::{FeeCalculator, FeeRateGovernor}, genesis_config::{ClusterType, GenesisConfig}, @@ -955,10 +954,6 @@ pub(super) enum RewardInterval { } impl Bank { - pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self { - Self::new_with_paths_for_benches(genesis_config, Vec::new()) - } - /// Intended for use by tests only. /// create new bank with the given configs. pub fn new_with_runtime_config_for_tests( @@ -1070,24 +1065,6 @@ impl Bank { ) } - /// Intended for use by benches only. - /// create new bank with the given config and paths. 
- pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec) -> Self { - Self::new_with_paths( - genesis_config, - Arc::::default(), - paths, - None, - None, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - false, - Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), - None, - Arc::default(), - ) - } - #[allow(clippy::too_many_arguments)] pub fn new_with_paths( genesis_config: &GenesisConfig, @@ -1476,10 +1453,10 @@ impl Bank { } loaded_programs_cache.upcoming_environments = Some(upcoming_environments); loaded_programs_cache.programs_to_recompile = loaded_programs_cache - .get_entries_sorted_by_tx_usage( - changed_program_runtime_v1, - changed_program_runtime_v2, - ); + .get_flattened_entries(changed_program_runtime_v1, changed_program_runtime_v2); + loaded_programs_cache + .programs_to_recompile + .sort_by_cached_key(|(_id, program)| program.decayed_usage_counter(slot)); } }); @@ -4072,8 +4049,6 @@ impl Bank { &process_compute_budget_instructions(message.program_instructions_iter()) .unwrap_or_default() .into(), - self.feature_set - .is_active(&remove_congestion_multiplier_from_fee_calculation::id()), self.feature_set .is_active(&include_loaded_accounts_data_size_in_fee_calculation::id()), ) @@ -4290,7 +4265,7 @@ impl Bank { } /// Prepare a transaction batch from a single transaction without locking accounts - pub(crate) fn prepare_unlocked_batch_from_single_tx<'a>( + pub fn prepare_unlocked_batch_from_single_tx<'a>( &'a self, transaction: &'a SanitizedTransaction, ) -> TransactionBatch<'_, '_> { @@ -4799,6 +4774,7 @@ impl Bank { loaded_program.ix_usage_counter = AtomicU64::new(recompile.ix_usage_counter.load(Ordering::Relaxed)); } + loaded_program.update_access_slot(self.slot()); Arc::new(loaded_program) } @@ -4990,7 +4966,7 @@ impl Bank { &self, program_accounts_map: &HashMap, ) -> LoadedProgramsForTxBatch { - let programs_and_slots: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = + let mut missing_programs: Vec<(Pubkey, (LoadedProgramMatchCriteria, u64))> = if self.check_program_modification_slot { program_accounts_map .iter() @@ -5016,39 +4992,55 @@ impl Bank { .collect() }; - let ExtractedPrograms { - loaded: mut loaded_programs_for_txs, - missing, - } = { - // Lock the global cache to figure out which programs need to be loaded - let loaded_programs_cache = self.loaded_programs_cache.read().unwrap(); - Mutex::into_inner( - Arc::into_inner( - loaded_programs_cache.extract(self.slot, programs_and_slots.into_iter()), - ) - .unwrap(), - ) - .unwrap() - }; - - // Load missing programs while global cache is unlocked - let missing_programs: Vec<(Pubkey, Arc)> = missing - .iter() - .map(|(key, (count, reloading))| { - let program = self.load_program(key, *reloading, None); - program.tx_usage_counter.store(*count, Ordering::Relaxed); - (*key, program) - }) - .collect(); + let mut loaded_programs_for_txs = None; + let mut program_to_store = None; + loop { + let (program_to_load, task_cookie, task_waiter) = { + // Lock the global cache. + let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); + // Initialize our local cache. + if loaded_programs_for_txs.is_none() { + loaded_programs_for_txs = Some(LoadedProgramsForTxBatch::new( + self.slot, + loaded_programs_cache + .get_environments_for_epoch(self.epoch) + .clone(), + )); + } + // Submit our last completed loading task. 
+ if let Some((key, program)) = program_to_store.take() { + loaded_programs_cache.finish_cooperative_loading_task( + self.slot(), + key, + program, + ); + } + // Figure out which program needs to be loaded next. + let program_to_load = loaded_programs_cache.extract( + &mut missing_programs, + loaded_programs_for_txs.as_mut().unwrap(), + ); + let task_waiter = Arc::clone(&loaded_programs_cache.loading_task_waiter); + (program_to_load, task_waiter.cookie(), task_waiter) + // Unlock the global cache again. + }; - // Lock the global cache again to replenish the missing programs - let mut loaded_programs_cache = self.loaded_programs_cache.write().unwrap(); - for (key, program) in missing_programs { - let (_was_occupied, entry) = loaded_programs_cache.replenish(key, program); - // Use the returned entry as that might have been deduplicated globally - loaded_programs_for_txs.replenish(key, entry); + if let Some((key, count)) = program_to_load { + // Load, verify and compile one program. + let program = self.load_program(&key, false, None); + program.tx_usage_counter.store(count, Ordering::Relaxed); + program_to_store = Some((key, program)); + } else if missing_programs.is_empty() { + break; + } else { + // Sleep until the next finish_cooperative_loading_task() call. + // Once a task completes we'll wake up and try to load the + // missing programs inside the tx batch again. + let _new_cookie = task_waiter.wait(task_cookie); + } } - loaded_programs_for_txs + + loaded_programs_for_txs.unwrap() } /// Returns a hash map of executable program accounts (program accounts that are not writable @@ -5271,7 +5263,10 @@ impl Bank { self.loaded_programs_cache .write() .unwrap() - .sort_and_unload(Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE)); + .evict_using_2s_random_selection( + Percentage::from(SHRINK_LOADED_PROGRAMS_TO_PERCENTAGE), + self.slot(), + ); debug!( "check: {}us load: {}us execute: {}us txs_len={}", @@ -8237,6 +8232,28 @@ impl Bank { ) } + pub fn new_for_benches(genesis_config: &GenesisConfig) -> Self { + Self::new_with_paths_for_benches(genesis_config, Vec::new()) + } + + /// Intended for use by benches only. + /// create new bank with the given config and paths. + pub fn new_with_paths_for_benches(genesis_config: &GenesisConfig, paths: Vec) -> Self { + Self::new_with_paths( + genesis_config, + Arc::::default(), + paths, + None, + None, + AccountSecondaryIndexes::default(), + AccountShrinkThreshold::default(), + false, + Some(ACCOUNTS_DB_CONFIG_FOR_BENCHMARKS), + None, + Arc::default(), + ) + } + /// Prepare a transaction batch from a list of legacy transactions. Used for tests only. 
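The cooperative loading loop above parks in task_waiter.wait(task_cookie) until some thread calls finish_cooperative_loading_task. A minimal sketch of such a cookie-based waiter, as an illustrative reimplementation rather than the crate's LoadingTaskWaiter:

use std::sync::{Condvar, Mutex};

struct TaskWaiter {
    cookie: Mutex<u64>,
    cond: Condvar,
}

impl TaskWaiter {
    fn new() -> Self {
        Self {
            cookie: Mutex::new(0),
            cond: Condvar::new(),
        }
    }

    // Snapshot the cookie; taken while the global cache lock is still held.
    fn cookie(&self) -> u64 {
        *self.cookie.lock().unwrap()
    }

    // Sleep until at least one task has finished since `seen` was snapshotted,
    // then return the new cookie.
    fn wait(&self, seen: u64) -> u64 {
        let guard = self.cookie.lock().unwrap();
        *self.cond.wait_while(guard, |current| *current == seen).unwrap()
    }

    // A finishing task bumps the cookie and wakes every waiter.
    fn notify(&self) {
        *self.cookie.lock().unwrap() += 1;
        self.cond.notify_all();
    }
}

Snapshotting the cookie before the cache lock is released is what makes the wait race-free: a task that completes between the unlock and the wait bumps the cookie first, so wait_while returns immediately instead of sleeping through the wakeup.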
pub fn prepare_batch_for_tests(&self, txs: Vec) -> TransactionBatch { let transaction_account_lock_limit = self.get_transaction_account_lock_limit(); diff --git a/runtime/src/bank/tests.rs b/runtime/src/bank/tests.rs index a849101fda14a4..14441c77538208 100644 --- a/runtime/src/bank/tests.rs +++ b/runtime/src/bank/tests.rs @@ -2650,8 +2650,6 @@ fn test_bank_tx_compute_unit_fee() { .create_fee_calculator() .lamports_per_signature, &FeeStructure::default(), - false, - true, ); let (expected_fee_collected, expected_fee_burned) = @@ -2831,8 +2829,6 @@ fn test_bank_blockhash_compute_unit_fee_structure() { &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), cheap_lamports_per_signature, &FeeStructure::default(), - false, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -2849,8 +2845,6 @@ fn test_bank_blockhash_compute_unit_fee_structure() { &SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(), expensive_lamports_per_signature, &FeeStructure::default(), - false, - true, ); assert_eq!( bank.get_balance(&mint_keypair.pubkey()), @@ -2962,8 +2956,6 @@ fn test_filter_program_errors_and_collect_compute_unit_fee() { .create_fee_calculator() .lamports_per_signature, &FeeStructure::default(), - false, - true, ) * 2 ) .0 @@ -10001,28 +9993,12 @@ fn calculate_test_fee( message: &SanitizedMessage, lamports_per_signature: u64, fee_structure: &FeeStructure, - support_set_accounts_data_size_limit_ix: bool, - remove_congestion_multiplier: bool, ) -> u64 { - let mut feature_set = FeatureSet::all_enabled(); - - if !support_set_accounts_data_size_limit_ix { - feature_set.deactivate( - &solana_sdk::feature_set::include_loaded_accounts_data_size_in_fee_calculation::id(), - ); - } - let budget_limits = process_compute_budget_instructions(message.program_instructions_iter()) .unwrap_or_default() .into(); - fee_structure.calculate_fee( - message, - lamports_per_signature, - &budget_limits, - remove_congestion_multiplier, - false, - ) + fee_structure.calculate_fee(message, lamports_per_signature, &budget_limits, false) } #[test] @@ -10030,38 +10006,30 @@ fn test_calculate_fee() { // Default: no fee. let message = SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 0, - &FeeStructure { - lamports_per_signature: 0, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 0 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 0, + &FeeStructure { + lamports_per_signature: 0, + ..FeeStructure::default() + }, + ), + 0 + ); // One signature, a fee. - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &FeeStructure { - lamports_per_signature: 1, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 1 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 1, + &FeeStructure { + lamports_per_signature: 1, + ..FeeStructure::default() + }, + ), + 1 + ); // Two signatures, double the fee. 
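With the congestion multiplier removed, the fees these first cases pin down reduce to plain per-signature arithmetic; a sketch of that expectation (signature_fee is an illustrative helper, not FeeStructure::calculate_fee), including the two-signature case built immediately below:

fn signature_fee(lamports_per_signature: u64, num_signatures: u64) -> u64 {
    lamports_per_signature.saturating_mul(num_signatures)
}

fn main() {
    assert_eq!(signature_fee(0, 1), 0); // zero-fee structure
    assert_eq!(signature_fee(1, 1), 1); // one signature, one lamport
    assert_eq!(signature_fee(2, 2), 4); // two transfers signed by two keys
}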
let key0 = Pubkey::new_unique(); @@ -10069,21 +10037,17 @@ fn test_calculate_fee() { let ix0 = system_instruction::transfer(&key0, &key1, 1); let ix1 = system_instruction::transfer(&key1, &key0, 1); let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&key0))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 2, - &FeeStructure { - lamports_per_signature: 2, - ..FeeStructure::default() - }, - support_set_accounts_data_size_limit_ix, - true, - ), - 4 - ); - } + assert_eq!( + calculate_test_fee( + &message, + 2, + &FeeStructure { + lamports_per_signature: 2, + ..FeeStructure::default() + }, + ), + 4 + ); } #[test] @@ -10099,18 +10063,10 @@ fn test_calculate_fee_compute_units() { let message = SanitizedMessage::try_from(Message::new(&[], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - max_fee + lamports_per_signature - ); - } + assert_eq!( + calculate_test_fee(&message, 1, &fee_structure,), + max_fee + lamports_per_signature + ); // Three signatures, two instructions, no unit request @@ -10118,18 +10074,10 @@ fn test_calculate_fee_compute_units() { let ix1 = system_instruction::transfer(&Pubkey::new_unique(), &Pubkey::new_unique(), 1); let message = SanitizedMessage::try_from(Message::new(&[ix0, ix1], Some(&Pubkey::new_unique()))).unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - max_fee + 3 * lamports_per_signature - ); - } + assert_eq!( + calculate_test_fee(&message, 1, &fee_structure,), + max_fee + 3 * lamports_per_signature + ); // Explicit fee schedule @@ -10160,19 +10108,11 @@ fn test_calculate_fee_compute_units() { Some(&Pubkey::new_unique()), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - let fee = calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ); - assert_eq!( - fee, - lamports_per_signature + prioritization_fee_details.get_fee() - ); - } + let fee = calculate_test_fee(&message, 1, &fee_structure); + assert_eq!( + fee, + lamports_per_signature + prioritization_fee_details.get_fee() + ); } } @@ -10204,8 +10144,6 @@ fn test_calculate_prioritization_fee() { &message, fee_structure.lamports_per_signature, &fee_structure, - true, - true, ); assert_eq!( fee, @@ -10243,18 +10181,7 @@ fn test_calculate_fee_secp256k1() { Some(&key0), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - 2 - ); - } + assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 2); secp_instruction1.data = vec![0]; secp_instruction2.data = vec![10]; @@ -10263,18 +10190,7 @@ fn test_calculate_fee_secp256k1() { Some(&key0), )) .unwrap(); - for support_set_accounts_data_size_limit_ix in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - 1, - &fee_structure, - support_set_accounts_data_size_limit_ix, - true, - ), - 11 - ); - } + assert_eq!(calculate_test_fee(&message, 1, &fee_structure,), 11); } #[test] @@ -12015,39 +11931,17 @@ fn test_calculate_fee_with_congestion_multiplier() { // assert when lamports_per_signature is less than 
BASE_LAMPORTS, turning on/off // congestion_multiplier has no effect on fee. - for remove_congestion_multiplier in [true, false] { - assert_eq!( - calculate_test_fee( - &message, - cheap_lamports_per_signature, - &fee_structure, - true, - remove_congestion_multiplier, - ), - signature_fee * signature_count - ); - } + assert_eq!( + calculate_test_fee(&message, cheap_lamports_per_signature, &fee_structure), + signature_fee * signature_count + ); // assert when lamports_per_signature is more than BASE_LAMPORTS, turning on/off // congestion_multiplier will change calculated fee. - for remove_congestion_multiplier in [true, false] { - let denominator: u64 = if remove_congestion_multiplier { - 1 - } else { - lamports_scale - }; - - assert_eq!( - calculate_test_fee( - &message, - expensive_lamports_per_signature, - &fee_structure, - true, - remove_congestion_multiplier, - ), - signature_fee * signature_count / denominator - ); - } + assert_eq!( + calculate_test_fee(&message, expensive_lamports_per_signature, &fee_structure,), + signature_fee * signature_count + ); } #[test] @@ -12076,7 +11970,7 @@ fn test_calculate_fee_with_request_heap_frame_flag() { // assert when request_heap_frame is presented in tx, prioritization fee will be counted // into transaction fee assert_eq!( - calculate_test_fee(&message, lamports_per_signature, &fee_structure, true, true,), + calculate_test_fee(&message, lamports_per_signature, &fee_structure), signature_fee + request_cu * lamports_per_cu ); } diff --git a/runtime/src/installed_scheduler_pool.rs b/runtime/src/installed_scheduler_pool.rs index 35b46e420f0fd8..d39a18d567232a 100644 --- a/runtime/src/installed_scheduler_pool.rs +++ b/runtime/src/installed_scheduler_pool.rs @@ -39,7 +39,7 @@ use { use {mockall::automock, qualifier_attr::qualifiers}; pub trait InstalledSchedulerPool: Send + Sync + Debug { - fn take_scheduler(&self, context: SchedulingContext) -> DefaultInstalledSchedulerBox; + fn take_scheduler(&self, context: SchedulingContext) -> InstalledSchedulerBox; } #[cfg_attr(doc, aquamarine::aquamarine)] @@ -107,28 +107,36 @@ pub trait InstalledScheduler: Send + Sync + Debug + 'static { transaction_with_index: &'a (&'a SanitizedTransaction, usize), ); - /// Wait for a scheduler to terminate after it is notified with the given reason. + /// Wait for a scheduler to terminate after processing. /// - /// Firstly, this function blocks the current thread while waiting for the scheduler to - /// complete all of the executions for the scheduled transactions. This means the scheduler has - /// prepared the finalized `ResultWithTimings` at least internally at the time of existing from - /// this function. If no trsanction is scheduled, the result and timing will be `Ok(())` and - /// `ExecuteTimings::default()` respectively. This is done in the same way regardless of - /// `WaitReason`. + /// This function blocks the current thread while waiting for the scheduler to complete all of + /// the executions for the scheduled transactions and to return the finalized + /// `ResultWithTimings`. Along with the result, this function also uninstalls the scheduler + /// itself from the bank by consuming `self`. /// - /// After that, the scheduler may behave differently depending on the reason, regarding the - /// final bookkeeping. Specifically, this function guaranteed to return - /// `Some(finalized_result_with_timings)` unless the reason is `PausedForRecentBlockhash`.
In - /// the case of `PausedForRecentBlockhash`, the scheduler is responsible to retain the - /// finalized `ResultWithTimings` until it's `wait_for_termination()`-ed with one of the other - /// two reasons later. - #[must_use] - fn wait_for_termination(&mut self, reason: &WaitReason) -> Option<ResultWithTimings>; + /// If no transaction is scheduled, the result and timing will be `Ok(())` and + /// `ExecuteTimings::default()` respectively. + fn wait_for_termination( + self: Box<Self>, + is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox); + + /// Pause a scheduler after processing to update the bank's recent blockhash. + /// + /// This function blocks the current thread like wait_for_termination(). However, the scheduler + /// won't be consumed. This means the scheduler is responsible for retaining the finalized + /// `ResultWithTimings` internally until it's `wait_for_termination()`-ed to collect the result + /// later. + fn pause_for_recent_blockhash(&mut self); +} +#[cfg_attr(feature = "dev-context-only-utils", automock)] +pub trait UninstalledScheduler: Send + Sync + Debug + 'static { fn return_to_pool(self: Box<Self>); } -pub type DefaultInstalledSchedulerBox = Box<dyn InstalledScheduler>; +pub type InstalledSchedulerBox = Box<dyn InstalledScheduler>; +pub type UninstalledSchedulerBox = Box<dyn UninstalledScheduler>; pub type InstalledSchedulerPoolArc = Arc<dyn InstalledSchedulerPool>; @@ -165,9 +173,9 @@ impl SchedulingContext { pub type ResultWithTimings = (Result<()>, ExecuteTimings); -/// A hint from the bank about the reason the caller is waiting on its scheduler termination. +/// A hint from the bank about the reason the caller is waiting on its scheduler. #[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum WaitReason { +enum WaitReason { // The bank wants its scheduler to terminate after the completion of transaction execution, in // order to freeze itself immediately thereafter. This is by far the most normal wait reason. // @@ -178,8 +186,9 @@ pub enum WaitReason { // The bank wants its scheduler to terminate just like `TerminatedToFreeze` and indicate that // Drop::drop() is the caller. DroppedFromBankForks, - // The bank wants its scheduler to pause the scheduler after the completion without being - // returned to the pool to collect scheduler's internally-held `ResultWithTimings` later. + // The bank wants its scheduler to pause after the completion without being returned to the + // pool. This is to update the bank's recent blockhash and to collect the scheduler's + // internally-held `ResultWithTimings` later. PausedForRecentBlockhash, } @@ -192,6 +201,15 @@ impl WaitReason { WaitReason::TerminatedToFreeze | WaitReason::DroppedFromBankForks => false, } } + + pub fn is_dropped(&self) -> bool { + // An exhaustive `match` is preferred here over `matches!()` to trigger an explicit + // decision to be made, should we add new variants like `PausedForFooBar`... + match self { + WaitReason::DroppedFromBankForks => true, + WaitReason::TerminatedToFreeze | WaitReason::PausedForRecentBlockhash => false, + } + } }
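The comment inside `is_dropped()` is the whole point of writing it as it is: with an exhaustive `match`, adding a new variant fails to compile until every predicate takes a stance, whereas a `matches!()`-style check would silently fall through. A standalone sketch with stand-in names:

    #[derive(Debug, Clone, Copy)]
    enum Reason {
        Terminated,
        Dropped,
        Paused,
    }

    fn is_dropped(reason: Reason) -> bool {
        // Adding e.g. Reason::PausedForFooBar later makes this a compile error,
        // forcing an explicit decision; matches!(reason, Reason::Dropped) would not.
        match reason {
            Reason::Dropped => true,
            Reason::Terminated | Reason::Paused => false,
        }
    }

    fn main() {
        assert!(is_dropped(Reason::Dropped));
        assert!(!is_dropped(Reason::Paused));
        assert!(!is_dropped(Reason::Terminated));
    }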
/// Very thin wrapper around Arc<Bank> @@ -221,11 +239,11 @@ pub struct BankWithSchedulerInner { bank: Arc<Bank>, scheduler: InstalledSchedulerRwLock, } -pub type InstalledSchedulerRwLock = RwLock<Option<DefaultInstalledSchedulerBox>>; +pub type InstalledSchedulerRwLock = RwLock<Option<InstalledSchedulerBox>>; impl BankWithScheduler { #[cfg_attr(feature = "dev-context-only-utils", qualifiers(pub))] - pub(crate) fn new(bank: Arc<Bank>, scheduler: Option<DefaultInstalledSchedulerBox>) -> Self { + pub(crate) fn new(bank: Arc<Bank>, scheduler: Option<InstalledSchedulerBox>) -> Self { if let Some(bank_in_context) = scheduler .as_ref() .map(|scheduler| scheduler.context().bank()) @@ -341,18 +359,18 @@ impl BankWithSchedulerInner { ); let mut scheduler = scheduler.write().unwrap(); - let result_with_timings = if scheduler.is_some() { - let result_with_timings = scheduler - .as_mut() - .and_then(|scheduler| scheduler.wait_for_termination(&reason)); - if !reason.is_paused() { - let scheduler = scheduler.take().expect("scheduler after waiting"); - scheduler.return_to_pool(); - } - result_with_timings - } else { - None - }; + let result_with_timings = + if let Some(scheduler) = scheduler.as_mut().filter(|_| reason.is_paused()) { + scheduler.pause_for_recent_blockhash(); + None + } else if let Some(scheduler) = scheduler.take() { + let (result_with_timings, uninstalled_scheduler) = + scheduler.wait_for_termination(reason.is_dropped()); + uninstalled_scheduler.return_to_pool(); + Some(result_with_timings) + } else { + None + }; debug!( "wait_for_scheduler_termination(slot: {}, reason: {:?}): finished with: {:?}...", bank.slot(), @@ -411,39 +429,42 @@ mod tests { assert_matches::assert_matches, mockall::Sequence, solana_sdk::system_transaction, + std::sync::Mutex, }; fn setup_mocked_scheduler_with_extra( bank: Arc<Bank>, - wait_reasons: impl Iterator<Item = WaitReason>, + is_dropped_flags: impl Iterator<Item = bool>, f: Option<impl Fn(&mut MockInstalledScheduler)>, - ) -> DefaultInstalledSchedulerBox { + ) -> InstalledSchedulerBox { let mut mock = MockInstalledScheduler::new(); - let mut seq = Sequence::new(); + let seq = Arc::new(Mutex::new(Sequence::new())); mock.expect_context() .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut seq.lock().unwrap()) .return_const(SchedulingContext::new(bank)); - for wait_reason in wait_reasons { + for wait_reason in is_dropped_flags { + let seq_cloned = seq.clone(); mock.expect_wait_for_termination() .with(mockall::predicate::eq(wait_reason)) .times(1) - .in_sequence(&mut seq) + .in_sequence(&mut seq.lock().unwrap()) .returning(move |_| { - if wait_reason.is_paused() { - None - } else { - Some((Ok(()), ExecuteTimings::default())) - } + let mut mock_uninstalled = MockUninstalledScheduler::new(); + mock_uninstalled + .expect_return_to_pool() + .times(1) + .in_sequence(&mut seq_cloned.lock().unwrap()) + .returning(|| ()); + ( + (Ok(()), ExecuteTimings::default()), + Box::new(mock_uninstalled), + ) }); } - mock.expect_return_to_pool() - .times(1) - .in_sequence(&mut seq) - .returning(|| ()); if let Some(f) = f { f(&mut mock); } @@ -453,11 +474,11 @@ mod tests { fn setup_mocked_scheduler( bank: Arc<Bank>, - wait_reasons: impl Iterator<Item = WaitReason>, - ) -> DefaultInstalledSchedulerBox { + is_dropped_flags: impl Iterator<Item = bool>, + ) -> InstalledSchedulerBox { setup_mocked_scheduler_with_extra( bank, - wait_reasons, + is_dropped_flags, None::<fn(&mut MockInstalledScheduler) -> ()>, ) } @@ -469,10 +490,7 @@ let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( - bank, - [WaitReason::TerminatedToFreeze].into_iter(),
- )), + Some(setup_mocked_scheduler(bank, [false].into_iter())), ); assert!(bank.has_installed_scheduler()); assert_matches!(bank.wait_for_completed_scheduler(), Some(_)); @@ -502,10 +520,7 @@ mod tests { let bank = Arc::new(Bank::default_for_tests()); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( - bank, - [WaitReason::DroppedFromBankForks].into_iter(), - )), + Some(setup_mocked_scheduler(bank, [true].into_iter())), ); drop(bank); } @@ -517,13 +532,15 @@ mod tests { let bank = Arc::new(crate::bank::tests::create_simple_test_bank(42)); let bank = BankWithScheduler::new( bank.clone(), - Some(setup_mocked_scheduler( + Some(setup_mocked_scheduler_with_extra( bank, - [ - WaitReason::PausedForRecentBlockhash, - WaitReason::TerminatedToFreeze, - ] - .into_iter(), + [false].into_iter(), + Some(|mocked: &mut MockInstalledScheduler| { + mocked + .expect_pause_for_recent_blockhash() + .times(1) + .returning(|| ()); + }), )), ); goto_end_of_slot_with_scheduler(&bank); @@ -548,7 +565,7 @@ mod tests { let bank = Arc::new(Bank::new_for_tests(&genesis_config)); let mocked_scheduler = setup_mocked_scheduler_with_extra( bank.clone(), - [WaitReason::DroppedFromBankForks].into_iter(), + [true].into_iter(), Some(|mocked: &mut MockInstalledScheduler| { mocked .expect_schedule_execution() diff --git a/runtime/src/prioritization_fee_cache.rs b/runtime/src/prioritization_fee_cache.rs index c41d5a72bd397f..ece749387a9147 100644 --- a/runtime/src/prioritization_fee_cache.rs +++ b/runtime/src/prioritization_fee_cache.rs @@ -142,6 +142,7 @@ type SlotPrioritizationFee = DashMap; /// Stores up to MAX_NUM_RECENT_BLOCKS recent block's prioritization fee, /// A separate internal thread `service_thread` handles additional tasks when a bank is frozen, /// and collecting stats and reporting metrics. 
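The mocked-scheduler tests above wrap mockall's `Sequence` in `Arc<Mutex<...>>` so that expectations registered on two different mocks (the installed scheduler and the uninstalled scheduler it returns) participate in one shared ordering. A reduced sketch of that pattern, assuming the mockall crate and a hypothetical trait:

    use mockall::{automock, Sequence};
    use std::sync::{Arc, Mutex};

    #[automock]
    trait Step {
        fn run(&self);
    }

    fn main() {
        let seq = Arc::new(Mutex::new(Sequence::new()));
        let mut first = MockStep::new();
        let mut second = MockStep::new();
        // Both mocks register into the same shared Sequence, so second.run()
        // is only allowed to happen after first.run().
        first
            .expect_run()
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .returning(|| ());
        second
            .expect_run()
            .times(1)
            .in_sequence(&mut seq.lock().unwrap())
            .returning(|| ());
        first.run();
        second.run();
    }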
+#[derive(Debug)] pub struct PrioritizationFeeCache { cache: Arc<RwLock<BTreeMap<Slot, Arc<SlotPrioritizationFee>>>>, service_thread: Option<JoinHandle<()>>, diff --git a/runtime/src/serde_snapshot/tests.rs b/runtime/src/serde_snapshot/tests.rs index 391617b5ebe0f2..f9d45b372f5fc4 100644 --- a/runtime/src/serde_snapshot/tests.rs +++ b/runtime/src/serde_snapshot/tests.rs @@ -223,12 +223,7 @@ mod serde_snapshot_tests { fn test_accounts_serialize_style(serde_style: SerdeStyle) { solana_logger::setup(); let (_accounts_dir, paths) = get_temp_accounts_paths(4).unwrap(); - let accounts_db = AccountsDb::new_with_config_for_tests( - paths, - &ClusterType::Development, - AccountSecondaryIndexes::default(), - AccountShrinkThreshold::default(), - ); + let accounts_db = AccountsDb::new_for_tests(paths, &ClusterType::Development); let accounts = Accounts::new(Arc::new(accounts_db)); let slot = 0; diff --git a/scripts/cargo-install-all.sh b/scripts/cargo-install-all.sh index e48ce4e02deeb8..549aa15550b0eb 100755 --- a/scripts/cargo-install-all.sh +++ b/scripts/cargo-install-all.sh @@ -17,7 +17,8 @@ if [[ $OSTYPE == darwin* ]]; then fi fi -cargo="$("${readlink_cmd}" -f "${here}/../cargo")" +SOLANA_ROOT="$("${readlink_cmd}" -f "${here}/..")" +cargo="${SOLANA_ROOT}/cargo" set -e @@ -150,15 +151,14 @@ mkdir -p "$installDir/bin" # Exclude `spl-token` binary for net.sh builds if [[ -z "$validatorOnly" ]]; then # shellcheck source=scripts/spl-token-cli-version.sh - source "$here"/spl-token-cli-version.sh + source "$SOLANA_ROOT"/scripts/spl-token-cli-version.sh # the patch-related configs are needed for rust 1.69+ on Windows; see Cargo.toml # shellcheck disable=SC2086 # Don't want to double quote $rust_version "$cargo" $maybeRustVersion \ --config 'patch.crates-io.ntapi.git="https://github.com/solana-labs/ntapi"' \ --config 'patch.crates-io.ntapi.rev="97ede981a1777883ff86d142b75024b023f04fad"' \ - $maybeSplTokenCliVersionArg \ - install --locked spl-token-cli --root "$installDir" + install --locked spl-token-cli --root "$installDir" $maybeSplTokenCliVersionArg fi ) diff --git a/sdk/src/feature_set.rs b/sdk/src/feature_set.rs index 2373eebf7f6d09..fc886c7fefd582 100644 --- a/sdk/src/feature_set.rs +++ b/sdk/src/feature_set.rs @@ -399,7 +399,7 @@ pub mod stake_raise_minimum_delegation_to_1_sol { } pub mod stake_minimum_delegation_for_rewards { - solana_sdk::declare_id!("ELjxSXwNsyXGfAh8TqX8ih22xeT8huF6UngQirbLKYKH"); + solana_sdk::declare_id!("G6ANXD6ptCSyNd9znZm7j4dEczAJCfx7Cy43oBx3rKHJ"); } pub mod add_set_compute_unit_price_ix { @@ -740,6 +740,14 @@ pub mod allow_commission_decrease_at_any_time { solana_sdk::declare_id!("decoMktMcnmiq6t3u7g5BfgcQu91nKZr6RvMYf9z1Jb"); } +pub mod consume_blockstore_duplicate_proofs { + solana_sdk::declare_id!("6YsBCejwK96GZCkJ6mkZ4b68oP63z2PLoQmWjC7ggTqZ"); +} + +pub mod index_erasure_conflict_duplicate_proofs { + solana_sdk::declare_id!("dupPajaLy2SSn8ko42aZz4mHANDNrLe8Nw8VQgFecLa"); +} + lazy_static! { /// Map of feature identifiers to user-visible description pub static ref FEATURE_NAMES: HashMap<Pubkey, &'static str> = [ @@ -920,6 +928,8 @@ lazy_static!
{ (enable_zk_transfer_with_fee::id(), "enable Zk Token proof program transfer with fee"), (drop_legacy_shreds::id(), "drops legacy shreds #34328"), (allow_commission_decrease_at_any_time::id(), "Allow commission decrease at any time in epoch #33843"), + (consume_blockstore_duplicate_proofs::id(), "consume duplicate proofs from blockstore in consensus #34372"), + (index_erasure_conflict_duplicate_proofs::id(), "generate duplicate proofs for index and erasure conflicts #34360"), /*************** ADD NEW FEATURES HERE ***************/ ] .iter() diff --git a/sdk/src/fee.rs b/sdk/src/fee.rs index 0a883c531957b5..f3377b5254f0a6 100644 --- a/sdk/src/fee.rs +++ b/sdk/src/fee.rs @@ -82,18 +82,13 @@ impl FeeStructure { message: &SanitizedMessage, lamports_per_signature: u64, budget_limits: &FeeBudgetLimits, - remove_congestion_multiplier: bool, include_loaded_account_data_size_in_fee: bool, ) -> u64 { // Fee based on compute units and signatures let congestion_multiplier = if lamports_per_signature == 0 { 0.0 // test only - } else if remove_congestion_multiplier { - 1.0 // multiplier that has no effect } else { - const BASE_CONGESTION: f64 = 5_000.0; - let current_congestion = BASE_CONGESTION.max(lamports_per_signature as f64); - BASE_CONGESTION / current_congestion + 1.0 // multiplier that has no effect }; let signature_fee = message diff --git a/storage-bigtable/src/access_token.rs b/storage-bigtable/src/access_token.rs index 8881f594acedcd..c2cd53057d6538 100644 --- a/storage-bigtable/src/access_token.rs +++ b/storage-bigtable/src/access_token.rs @@ -34,12 +34,24 @@ fn load_stringified_credentials(credential: String) -> Result<Credentials, String> { -#[derive(Clone)] -pub struct AccessToken { +pub struct AccessTokenInner { credentials: Credentials, scope: Scope, - refresh_active: Arc<AtomicBool>, - token: Arc<RwLock<(Token, Instant)>>, + token: RwLock<(Token, Instant)>, + refresh_active: AtomicBool, +} + +#[derive(Clone)] +pub struct AccessToken { + inner: Arc<AccessTokenInner>, +} + +impl std::ops::Deref for AccessToken { + type Target = AccessTokenInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } } impl AccessToken { @@ -52,12 +64,14 @@ impl AccessToken { if let Err(err) = credentials.rsa_key() { Err(format!("Invalid rsa key: {err}")) } else { - let token = Arc::new(RwLock::new(Self::get_token(&credentials, &scope).await?)); + let token = RwLock::new(Self::get_token(&credentials, &scope).await?); let access_token = Self { - credentials, - scope, - token, - refresh_active: Arc::new(AtomicBool::new(false)), + inner: Arc::new(AccessTokenInner { + credentials, + scope, + token, + refresh_active: AtomicBool::new(false), + }), }; Ok(access_token) } @@ -109,20 +123,17 @@ impl AccessToken { return; } - let credentials = self.credentials.clone(); - let scope = self.scope.clone(); - let refresh_active = Arc::clone(&self.refresh_active); - let token = Arc::clone(&self.token); + let this = self.clone(); tokio::spawn(async move { match time::timeout( time::Duration::from_secs(5), - Self::get_token(&credentials, &scope), + Self::get_token(&this.credentials, &this.scope), ) .await { Ok(new_token) => match new_token { Ok(new_token) => { - let mut token_w = token.write().unwrap(); + let mut token_w = this.token.write().unwrap(); *token_w = new_token; } Err(err) => error!("Failed to fetch new token: {}", err), @@ -131,7 +142,7 @@ impl AccessToken { warn!("Token refresh timeout") } } - refresh_active.store(false, Ordering::Relaxed); + this.refresh_active.store(false, Ordering::Relaxed); info!("Token refreshed"); }); } diff --git a/turbine/src/cluster_nodes.rs b/turbine/src/cluster_nodes.rs index 3e227b66964d15..8079178cf415b9 100644 --- a/turbine/src/cluster_nodes.rs +++ b/turbine/src/cluster_nodes.rs @@
-513,7 +513,7 @@ fn enable_turbine_fanout_experiments(shred_slot: Slot, root_bank: &Bank) -> bool // Returns true if the feature is effective for the shred slot. #[must_use] -fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &Bank) -> bool { +pub fn check_feature_activation(feature: &Pubkey, shred_slot: Slot, root_bank: &Bank) -> bool { match root_bank.feature_set.activated_slot(feature) { None => false, Some(feature_slot) => { diff --git a/unified-scheduler-logic/Cargo.toml b/unified-scheduler-logic/Cargo.toml new file mode 100644 index 00000000000000..764bb0192f5632 --- /dev/null +++ b/unified-scheduler-logic/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "solana-unified-scheduler-logic" +description = "The Solana unified scheduler logic" +documentation = "https://docs.rs/solana-unified-scheduler-logic" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } diff --git a/unified-scheduler-logic/src/lib.rs b/unified-scheduler-logic/src/lib.rs new file mode 100644 index 00000000000000..73a5a82f6d3a7b --- /dev/null +++ b/unified-scheduler-logic/src/lib.rs @@ -0,0 +1 @@ +// This file will be populated with actual implementation later. diff --git a/unified-scheduler-pool/Cargo.toml b/unified-scheduler-pool/Cargo.toml new file mode 100644 index 00000000000000..213bc5bb86c0ef --- /dev/null +++ b/unified-scheduler-pool/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "solana-unified-scheduler-pool" +description = "The Solana unified scheduler pool" +documentation = "https://docs.rs/solana-unified-scheduler-pool" +version = { workspace = true } +authors = { workspace = true } +repository = { workspace = true } +homepage = { workspace = true } +license = { workspace = true } +edition = { workspace = true } + +[dependencies] +solana-ledger = { workspace = true } +solana-program-runtime = { workspace = true } +solana-runtime = { workspace = true } +solana-sdk = { workspace = true } +solana-unified-scheduler-logic = { workspace = true } +solana-vote = { workspace = true } + +[dev-dependencies] +assert_matches = { workspace = true } +solana-logger = { workspace = true } +solana-runtime = { workspace = true, features = ["dev-context-only-utils"] } diff --git a/unified-scheduler-pool/src/lib.rs b/unified-scheduler-pool/src/lib.rs new file mode 100644 index 00000000000000..10cb5309e5e01d --- /dev/null +++ b/unified-scheduler-pool/src/lib.rs @@ -0,0 +1,761 @@ +//! Transaction scheduling code. +//! +//! This crate implements 3 solana-runtime traits (`InstalledScheduler`, `UninstalledScheduler` and +//! `InstalledSchedulerPool`) to provide a concrete transaction scheduling implementation +//! (including executing txes and committing tx results). +//! +//! At the highest level, this crate takes `SanitizedTransaction`s via its `schedule_execution()` +//! and commits any side-effects (i.e. on-chain state changes) into the associated `Bank` via +//! `solana-ledger`'s helper function called `execute_batch()`. 
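Taken together with solana-runtime's `BankWithScheduler`, the intended call flow through the pool is: lease a scheduler bound to a bank's context, feed it transactions, then wait for completion, which uninstalls the scheduler and returns it to the pool. A hedged sketch of that flow, mirroring the crate's own tests further down (bank and transaction construction elided):

    use solana_runtime::{
        bank::Bank,
        installed_scheduler_pool::{BankWithScheduler, SchedulingContext},
        prioritization_fee_cache::PrioritizationFeeCache,
    };
    use solana_sdk::transaction::SanitizedTransaction;
    use std::sync::Arc;

    // DefaultSchedulerPool is defined below in this file.
    fn run_one_transaction(bank: Arc<Bank>, tx: &SanitizedTransaction) {
        let fee_cache = Arc::new(PrioritizationFeeCache::new(0u64));
        // 1. Build a pool and take a scheduler for this bank's context.
        let pool = DefaultSchedulerPool::new_dyn(None, None, None, fee_cache);
        let scheduler = pool.take_scheduler(SchedulingContext::new(bank.clone()));
        // 2. Feed transactions (with their in-block indexes) to the scheduler.
        scheduler.schedule_execution(&(tx, 0));
        // 3. Waiting for completion uninstalls the scheduler and recycles it.
        let bank = BankWithScheduler::new(bank, Some(scheduler));
        assert!(bank.wait_for_completed_scheduler().is_some());
    }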
+ +use { + solana_ledger::blockstore_processor::{ + execute_batch, TransactionBatchWithIndexes, TransactionStatusSender, + }, + solana_program_runtime::timings::ExecuteTimings, + solana_runtime::{ + bank::Bank, + installed_scheduler_pool::{ + InstalledScheduler, InstalledSchedulerBox, InstalledSchedulerPool, + InstalledSchedulerPoolArc, ResultWithTimings, SchedulerId, SchedulingContext, + UninstalledScheduler, UninstalledSchedulerBox, + }, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::transaction::{Result, SanitizedTransaction}, + solana_vote::vote_sender_types::ReplayVoteSender, + std::{ + fmt::Debug, + marker::PhantomData, + sync::{ + atomic::{AtomicU64, Ordering::Relaxed}, + Arc, Mutex, Weak, + }, + }, +}; + +type AtomicSchedulerId = AtomicU64; + +// SchedulerPool must be accessed as a dyn trait from solana-runtime, because SchedulerPool +// contains some internal fields, whose types aren't available in solana-runtime (currently +// TransactionStatusSender; also, PohRecorder in the future)... +#[derive(Debug)] +pub struct SchedulerPool<S: SpawnableScheduler<TH>, TH: TaskHandler> { + scheduler_inners: Mutex<Vec<S::Inner>>, + handler_context: HandlerContext, + // weak_self could be elided by changing InstalledScheduler::take_scheduler()'s receiver to + // Arc<Self> from &Self, because SchedulerPool is used in the form of Arc<SchedulerPool> + // almost always. But this would cause wasted and noisy Arc::clone()'s at every call site. + // + // Alternatively, an `impl InstalledScheduler for Arc<SchedulerPool>` approach could be + // explored, but it entails its own problems due to rustc's coherence rules and the newtype it + // would necessitate, with the type graph of InstalledScheduler being quite elaborate. + // + // After these considerations, this weak_self approach is chosen at the cost of some additional + // memory. + weak_self: Weak<Self>, + next_scheduler_id: AtomicSchedulerId, + _phantom: PhantomData<TH>, +} + +#[derive(Debug)] +pub struct HandlerContext { + log_messages_bytes_limit: Option<usize>, + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: Option<ReplayVoteSender>, + prioritization_fee_cache: Arc<PrioritizationFeeCache>, +} + +pub type DefaultSchedulerPool = + SchedulerPool<PooledScheduler<DefaultTaskHandler>, DefaultTaskHandler>; + +impl<S, TH> SchedulerPool<S, TH> +where + S: SpawnableScheduler<TH>, + TH: TaskHandler, +{ + // Some internal impl and test code want an actual concrete type, NOT the + // `dyn InstalledSchedulerPool`. So don't merge this into `Self::new_dyn()`. + fn new( + log_messages_bytes_limit: Option<usize>, + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: Option<ReplayVoteSender>, + prioritization_fee_cache: Arc<PrioritizationFeeCache>, + ) -> Arc<Self> { + Arc::new_cyclic(|weak_self| Self { + scheduler_inners: Mutex::default(), + handler_context: HandlerContext { + log_messages_bytes_limit, + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + }, + weak_self: weak_self.clone(), + next_scheduler_id: AtomicSchedulerId::default(), + _phantom: PhantomData, + }) + } + + // This apparently-meaningless wrapper is handy, because some callers explicitly want + // `dyn InstalledSchedulerPool` to be returned for type inference convenience. + pub fn new_dyn( + log_messages_bytes_limit: Option<usize>, + transaction_status_sender: Option<TransactionStatusSender>, + replay_vote_sender: Option<ReplayVoteSender>, + prioritization_fee_cache: Arc<PrioritizationFeeCache>, + ) -> InstalledSchedulerPoolArc { + Self::new( + log_messages_bytes_limit, + transaction_status_sender, + replay_vote_sender, + prioritization_fee_cache, + ) + } +
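The `weak_self` trade-off described in the comments above boils down to one pattern: build the pool with `Arc::new_cyclic` and keep a `Weak` back-reference, so `&self` methods can mint an owning `Arc` when spawning schedulers. A minimal self-contained sketch:

    use std::sync::{Arc, Weak};

    struct Pool {
        weak_self: Weak<Pool>,
    }

    impl Pool {
        fn new() -> Arc<Self> {
            // new_cyclic hands us the Weak before the Arc exists, letting the
            // pool store a reference to itself without leaking a strong count.
            Arc::new_cyclic(|weak_self| Self {
                weak_self: weak_self.clone(),
            })
        }

        fn self_arc(&self) -> Arc<Self> {
            // Safe to expect: the pool is always owned by at least one Arc.
            self.weak_self.upgrade().expect("self-referencing Arc-ed pool")
        }
    }

    fn main() {
        let pool = Pool::new();
        assert_eq!(Arc::strong_count(&pool), 1);
        let leased = pool.self_arc();
        assert_eq!(Arc::strong_count(&leased), 2);
    }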
+ // See a comment at the weak_self field for justification of this method's existence. + fn self_arc(&self) -> Arc<Self> { + self.weak_self + .upgrade() + .expect("self-referencing Arc-ed pool") + } + + fn new_scheduler_id(&self) -> SchedulerId { + self.next_scheduler_id.fetch_add(1, Relaxed) + } + + fn return_scheduler(&self, scheduler: S::Inner) { + self.scheduler_inners + .lock() + .expect("not poisoned") + .push(scheduler); + } + + fn do_take_scheduler(&self, context: SchedulingContext) -> S { + // pop is intentional for FILO, expecting a relatively warmed-up scheduler due to having + // been returned recently + if let Some(inner) = self.scheduler_inners.lock().expect("not poisoned").pop() { + S::from_inner(inner, context) + } else { + S::spawn(self.self_arc(), context) + } + } +} + +impl<S, TH> InstalledSchedulerPool for SchedulerPool<S, TH> +where + S: SpawnableScheduler<TH>, + TH: TaskHandler, +{ + fn take_scheduler(&self, context: SchedulingContext) -> InstalledSchedulerBox { + Box::new(self.do_take_scheduler(context)) + } +} + +pub trait TaskHandler: Send + Sync + Debug + Sized + 'static { + fn handle( + result: &mut Result<()>, + timings: &mut ExecuteTimings, + bank: &Arc<Bank>, + transaction: &SanitizedTransaction, + index: usize, + handler_context: &HandlerContext, + ); +} + +#[derive(Debug)] +pub struct DefaultTaskHandler; + +impl TaskHandler for DefaultTaskHandler { + fn handle( + result: &mut Result<()>, + timings: &mut ExecuteTimings, + bank: &Arc<Bank>, + transaction: &SanitizedTransaction, + index: usize, + handler_context: &HandlerContext, + ) { + // The scheduler must properly prevent conflicting tx executions, so the task handler + // isn't responsible for locking. + let batch = bank.prepare_unlocked_batch_from_single_tx(transaction); + let batch_with_indexes = TransactionBatchWithIndexes { + batch, + transaction_indexes: vec![index], + }; + + *result = execute_batch( + &batch_with_indexes, + bank, + handler_context.transaction_status_sender.as_ref(), + handler_context.replay_vote_sender.as_ref(), + timings, + handler_context.log_messages_bytes_limit, + &handler_context.prioritization_fee_cache, + ); + } +} +
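The FILO choice in `do_take_scheduler()` above is deliberate: `Vec::pop()` hands back the most recently returned, and therefore most likely still warmed-up, scheduler. A generic sketch of that checkout/check-in shape:

    use std::sync::Mutex;

    struct IdlePool<T> {
        idle: Mutex<Vec<T>>,
    }

    impl<T> IdlePool<T> {
        fn new() -> Self {
            Self { idle: Mutex::new(Vec::new()) }
        }

        // FILO: pop returns the most recently checked-in item, which is the
        // one most likely to still be warm.
        fn checkout(&self, spawn: impl FnOnce() -> T) -> T {
            self.idle.lock().unwrap().pop().unwrap_or_else(spawn)
        }

        fn check_in(&self, item: T) {
            self.idle.lock().unwrap().push(item);
        }
    }

    fn main() {
        let pool = IdlePool::new();
        pool.check_in(1);
        pool.check_in(2);
        assert_eq!(pool.checkout(|| 0), 2); // last returned comes out first
    }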
+// Currently this is the simplest possible implementation (i.e. single-threaded); +// it will be replaced with a more proper implementation... +// not usable at all, especially for mainnet-beta +#[derive(Debug)] +pub struct PooledScheduler<TH: TaskHandler> { + inner: PooledSchedulerInner<Self, TH>, + context: SchedulingContext, + result_with_timings: Mutex<ResultWithTimings>, +} + +#[derive(Debug)] +pub struct PooledSchedulerInner<S: SpawnableScheduler<TH>, TH: TaskHandler> { + id: SchedulerId, + pool: Arc<SchedulerPool<S, TH>>, +} + +impl<TH: TaskHandler> PooledScheduler<TH> { + fn do_spawn(pool: Arc<SchedulerPool<Self, TH>>, initial_context: SchedulingContext) -> Self { + Self::from_inner( + PooledSchedulerInner::<Self, TH> { + id: pool.new_scheduler_id(), + pool, + }, + initial_context, + ) + } +} + +pub trait SpawnableScheduler<TH: TaskHandler>: InstalledScheduler { + type Inner: Debug + Send + Sync; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner); + + fn from_inner(inner: Self::Inner, context: SchedulingContext) -> Self; + + fn spawn(pool: Arc<SchedulerPool<Self, TH>>, initial_context: SchedulingContext) -> Self + where + Self: Sized; +} + +impl<TH: TaskHandler> SpawnableScheduler<TH> for PooledScheduler<TH> { + type Inner = PooledSchedulerInner<Self, TH>; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner) { + ( + self.result_with_timings.into_inner().expect("not poisoned"), + self.inner, + ) + } + + fn from_inner(inner: Self::Inner, context: SchedulingContext) -> Self { + Self { + inner, + context, + result_with_timings: Mutex::new((Ok(()), ExecuteTimings::default())), + } + } + + fn spawn(pool: Arc<SchedulerPool<Self, TH>>, initial_context: SchedulingContext) -> Self { + Self::do_spawn(pool, initial_context) + } +} + +impl<TH: TaskHandler> InstalledScheduler for PooledScheduler<TH> { + fn id(&self) -> SchedulerId { + self.inner.id + } + + fn context(&self) -> &SchedulingContext { + &self.context + } + + fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { + let (result, timings) = &mut *self.result_with_timings.lock().expect("not poisoned"); + if result.is_err() { + // just bail out early to short-circuit the processing altogether + return; + } + + // ... so, we're NOT scheduling at all here; rather, just execute tx straight off. The + // inter-tx locking deps don't need to be resolved in the case of single-threaded FIFO + // like this. + TH::handle( + result, + timings, + self.context().bank(), + transaction, + index, + &self.inner.pool.handler_context, + ); + } + + fn wait_for_termination( + self: Box<Self>, + _is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox) { + let (result_with_timings, uninstalled_scheduler) = self.into_inner(); + (result_with_timings, Box::new(uninstalled_scheduler)) + } + + fn pause_for_recent_blockhash(&mut self) { + // not surprisingly, there's nothing to do for this min impl! + } +} +
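Because `schedule_execution()` above keeps the error inside `result_with_timings` and returns early once it is an `Err`, a whole batch degrades to a no-op after the first failure (a test for exactly this behavior appears further down). The shape of that fail-fast accumulation, in isolation:

    fn run_all<T>(
        tasks: impl IntoIterator<Item = T>,
        mut handle: impl FnMut(T) -> Result<(), String>,
    ) -> Result<(), String> {
        let mut result = Ok(());
        for task in tasks {
            if result.is_err() {
                break; // just bail out early to short-circuit the processing
            }
            result = handle(task);
        }
        result
    }

    fn main() {
        let outcome = run_all(1..=3, |n| {
            if n == 2 { Err(format!("task {n} failed")) } else { Ok(()) }
        });
        // Task 3 never ran; the first error is the batch result.
        assert_eq!(outcome, Err("task 2 failed".to_string()));
    }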
+impl<S, TH> UninstalledScheduler for PooledSchedulerInner<S, TH> +where + S: SpawnableScheduler<TH, Inner = PooledSchedulerInner<S, TH>>, + TH: TaskHandler, +{ + fn return_to_pool(self: Box<Self>) { + self.pool.clone().return_scheduler(*self) + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + assert_matches::assert_matches, + solana_runtime::{ + bank::Bank, + bank_forks::BankForks, + genesis_utils::{create_genesis_config, GenesisConfigInfo}, + installed_scheduler_pool::{BankWithScheduler, SchedulingContext}, + prioritization_fee_cache::PrioritizationFeeCache, + }, + solana_sdk::{ + clock::MAX_PROCESSING_AGE, + pubkey::Pubkey, + signer::keypair::Keypair, + system_transaction, + transaction::{SanitizedTransaction, TransactionError}, + }, + std::{sync::Arc, thread::JoinHandle}, + }; + + #[test] + fn test_scheduler_pool_new() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + + // this indirectly proves that there should be a circular link, because there's only one + // Arc at this moment now + assert_eq!((Arc::strong_count(&pool), Arc::weak_count(&pool)), (1, 1)); + let debug = format!("{pool:#?}"); + assert!(!debug.is_empty()); + } + + #[test] + fn test_scheduler_spawn() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = SchedulingContext::new(bank); + let scheduler = pool.take_scheduler(context); + + let debug = format!("{scheduler:#?}"); + assert!(!debug.is_empty()); + } + + #[test] + fn test_scheduler_pool_filo() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = &SchedulingContext::new(bank); + + let scheduler1 = pool.do_take_scheduler(context.clone()); + let scheduler_id1 = scheduler1.id(); + let scheduler2 = pool.do_take_scheduler(context.clone()); + let scheduler_id2 = scheduler2.id(); + assert_ne!(scheduler_id1, scheduler_id2); + + let (result_with_timings, scheduler1) = scheduler1.into_inner(); + assert_matches!(result_with_timings, (Ok(()), _)); + pool.return_scheduler(scheduler1); + let (result_with_timings, scheduler2) = scheduler2.into_inner(); + assert_matches!(result_with_timings, (Ok(()), _)); + pool.return_scheduler(scheduler2); + + let scheduler3 = pool.do_take_scheduler(context.clone()); + assert_eq!(scheduler_id2, scheduler3.id()); + let scheduler4 = pool.do_take_scheduler(context.clone()); + assert_eq!(scheduler_id1, scheduler4.id()); + } + + #[test] + fn test_scheduler_pool_context_drop_unless_reinitialized() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let bank = Arc::new(Bank::default_for_tests()); + let context = &SchedulingContext::new(bank); + let mut scheduler = pool.do_take_scheduler(context.clone()); + + // should never panic.
+ scheduler.pause_for_recent_blockhash(); + assert_matches!( + Box::new(scheduler).wait_for_termination(false), + ((Ok(()), _), _) + ); + } + + #[test] + fn test_scheduler_pool_context_replace() { + solana_logger::setup(); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = DefaultSchedulerPool::new(None, None, None, ignored_prioritization_fee_cache); + let old_bank = &Arc::new(Bank::default_for_tests()); + let new_bank = &Arc::new(Bank::default_for_tests()); + assert!(!Arc::ptr_eq(old_bank, new_bank)); + + let old_context = &SchedulingContext::new(old_bank.clone()); + let new_context = &SchedulingContext::new(new_bank.clone()); + + let scheduler = pool.do_take_scheduler(old_context.clone()); + let scheduler_id = scheduler.id(); + pool.return_scheduler(scheduler.into_inner().1); + + let scheduler = pool.take_scheduler(new_context.clone()); + assert_eq!(scheduler_id, scheduler.id()); + assert!(Arc::ptr_eq(scheduler.context().bank(), new_bank)); + } + + #[test] + fn test_scheduler_pool_install_into_bank_forks() { + solana_logger::setup(); + + let bank = Bank::default_for_tests(); + let bank_forks = BankForks::new_rw_arc(bank); + let mut bank_forks = bank_forks.write().unwrap(); + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + bank_forks.install_scheduler_pool(pool); + } + + #[test] + fn test_scheduler_install_into_bank() { + solana_logger::setup(); + + let GenesisConfigInfo { genesis_config, .. } = create_genesis_config(10_000); + let bank = Arc::new(Bank::new_for_tests(&genesis_config)); + let child_bank = Bank::new_from_parent(bank, &Pubkey::default(), 1); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + + let bank = Bank::default_for_tests(); + let bank_forks = BankForks::new_rw_arc(bank); + let mut bank_forks = bank_forks.write().unwrap(); + + // existing banks in bank_forks shouldn't process transactions anymore in general, so + // shouldn't be touched + assert!(!bank_forks + .working_bank_with_scheduler() + .has_installed_scheduler()); + bank_forks.install_scheduler_pool(pool); + assert!(!bank_forks + .working_bank_with_scheduler() + .has_installed_scheduler()); + + let mut child_bank = bank_forks.insert(child_bank); + assert!(child_bank.has_installed_scheduler()); + bank_forks.remove(child_bank.slot()); + child_bank.drop_scheduler(); + assert!(!child_bank.has_installed_scheduler()); + } + + fn setup_dummy_fork_graph(bank: Bank) -> Arc<Bank> { + let slot = bank.slot(); + let bank_fork = BankForks::new_rw_arc(bank); + let bank = bank_fork.read().unwrap().get(slot).unwrap(); + bank.loaded_programs_cache + .write() + .unwrap() + .set_fork_graph(bank_fork); + bank + } + + #[test] + fn test_scheduler_schedule_execution_success() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + ..
+ } = create_genesis_config(10_000); + let tx0 = &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let bank = Bank::new_for_tests(&genesis_config); + let bank = setup_dummy_fork_graph(bank); + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let context = SchedulingContext::new(bank.clone()); + + assert_eq!(bank.transaction_count(), 0); + let scheduler = pool.take_scheduler(context); + scheduler.schedule_execution(&(tx0, 0)); + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_matches!(bank.wait_for_completed_scheduler(), Some((Ok(()), _))); + assert_eq!(bank.transaction_count(), 1); + } + + #[test] + fn test_scheduler_schedule_execution_failure() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + .. + } = create_genesis_config(10_000); + let bank = Bank::new_for_tests(&genesis_config); + let bank = setup_dummy_fork_graph(bank); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + DefaultSchedulerPool::new_dyn(None, None, None, ignored_prioritization_fee_cache); + let context = SchedulingContext::new(bank.clone()); + let mut scheduler = pool.take_scheduler(context); + + let unfunded_keypair = Keypair::new(); + let bad_tx = + &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &unfunded_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + assert_eq!(bank.transaction_count(), 0); + scheduler.schedule_execution(&(bad_tx, 0)); + scheduler.pause_for_recent_blockhash(); + assert_eq!(bank.transaction_count(), 0); + + let good_tx_after_bad_tx = + &SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 3, + genesis_config.hash(), + )); + // make sure this tx is really a good one to execute. + assert_matches!( + bank.simulate_transaction_unchecked(good_tx_after_bad_tx, false) + .result, + Ok(_) + ); + scheduler.schedule_execution(&(good_tx_after_bad_tx, 0)); + scheduler.pause_for_recent_blockhash(); + // transaction_count should remain the same as the scheduler should be bailing out. + assert_eq!(bank.transaction_count(), 0); + + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_matches!( + bank.wait_for_completed_scheduler(), + Some(( + Err(solana_sdk::transaction::TransactionError::AccountNotFound), + _timings + )) + ); + } + + #[derive(Debug)] + struct AsyncScheduler<const TRIGGER_RACE_CONDITION: bool>( + PooledScheduler<DefaultTaskHandler>, + Mutex<Vec<JoinHandle<ResultWithTimings>>>, + ); +
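The `do_wait()` helper defined just below drains the stored `JoinHandle`s, folds every worker's timings into one total, and keeps an error (if any) as the overall result. A reduced sketch of that aggregation, with timings replaced by a simple counter:

    use std::thread::JoinHandle;

    fn wait_all(handles: Vec<JoinHandle<(Result<(), String>, u64)>>) -> (Result<(), String>, u64) {
        let mut overall_result = Ok(());
        let mut overall_units = 0u64;
        for handle in handles {
            let (result, units) = handle.join().unwrap();
            if let Err(e) = result {
                overall_result = Err(e); // keep (the last) error as the batch result
            }
            overall_units += units; // analogous to ExecuteTimings::accumulate()
        }
        (overall_result, overall_units)
    }

    fn main() {
        let handles = (1..=3u64)
            .map(|n| std::thread::spawn(move || (Ok(()), n)))
            .collect();
        assert_eq!(wait_all(handles), (Ok(()), 6));
    }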
+ impl<const TRIGGER_RACE_CONDITION: bool> AsyncScheduler<TRIGGER_RACE_CONDITION> { + fn do_wait(&self) { + let mut overall_result = Ok(()); + let mut overall_timings = ExecuteTimings::default(); + for handle in self.1.lock().unwrap().drain(..) { + let (result, timings) = handle.join().unwrap(); + match result { + Ok(()) => {} + Err(e) => overall_result = Err(e), + } + overall_timings.accumulate(&timings); + } + *self.0.result_with_timings.lock().unwrap() = (overall_result, overall_timings); + } + } + + impl<const TRIGGER_RACE_CONDITION: bool> InstalledScheduler + for AsyncScheduler<TRIGGER_RACE_CONDITION> + { + fn id(&self) -> SchedulerId { + self.0.id() + } + + fn context(&self) -> &SchedulingContext { + self.0.context() + } + + fn schedule_execution(&self, &(transaction, index): &(&SanitizedTransaction, usize)) { + let transaction_and_index = (transaction.clone(), index); + let context = self.context().clone(); + let pool = self.0.inner.pool.clone(); + + self.1.lock().unwrap().push(std::thread::spawn(move || { + // intentionally sleep to simulate race condition where register_recent_blockhash + // is handled before finishing executing scheduled transactions + std::thread::sleep(std::time::Duration::from_secs(1)); + + let mut result = Ok(()); + let mut timings = ExecuteTimings::default(); + + <DefaultTaskHandler as TaskHandler>::handle( + &mut result, + &mut timings, + context.bank(), + &transaction_and_index.0, + transaction_and_index.1, + &pool.handler_context, + ); + (result, timings) + })); + } + + fn wait_for_termination( + self: Box<Self>, + is_dropped: bool, + ) -> (ResultWithTimings, UninstalledSchedulerBox) { + self.do_wait(); + Box::new(self.0).wait_for_termination(is_dropped) + } + + fn pause_for_recent_blockhash(&mut self) { + if TRIGGER_RACE_CONDITION { + // this is equivalent to NOT calling wait_for_paused_scheduler() in + // register_recent_blockhash(). + return; + } + self.do_wait(); + } + } + + impl<const TRIGGER_RACE_CONDITION: bool> SpawnableScheduler<DefaultTaskHandler> + for AsyncScheduler<TRIGGER_RACE_CONDITION> + { + // well, I wish I could use ! (the never type)..... + type Inner = Self; + + fn into_inner(self) -> (ResultWithTimings, Self::Inner) { + todo!(); + } + + fn from_inner(_inner: Self::Inner, _context: SchedulingContext) -> Self { + todo!(); + } + + fn spawn( + pool: Arc<SchedulerPool<Self, DefaultTaskHandler>>, + initial_context: SchedulingContext, + ) -> Self { + AsyncScheduler::<TRIGGER_RACE_CONDITION>( + PooledScheduler::<DefaultTaskHandler>::from_inner( + PooledSchedulerInner { + id: pool.new_scheduler_id(), + pool: SchedulerPool::new( + pool.handler_context.log_messages_bytes_limit, + pool.handler_context.transaction_status_sender.clone(), + pool.handler_context.replay_vote_sender.clone(), + pool.handler_context.prioritization_fee_cache.clone(), + ), + }, + initial_context, + ), + Mutex::new(vec![]), + ) + } + } + + fn do_test_scheduler_schedule_execution_recent_blockhash_edge_case< + const TRIGGER_RACE_CONDITION: bool, + >() { + solana_logger::setup(); + + let GenesisConfigInfo { + genesis_config, + mint_keypair, + ..
+ } = create_genesis_config(10_000); + let very_old_valid_tx = + SanitizedTransaction::from_transaction_for_tests(system_transaction::transfer( + &mint_keypair, + &solana_sdk::pubkey::new_rand(), + 2, + genesis_config.hash(), + )); + let mut bank = Bank::new_for_tests(&genesis_config); + for _ in 0..MAX_PROCESSING_AGE { + bank.fill_bank_with_ticks_for_tests(); + bank.freeze(); + let slot = bank.slot(); + bank = Bank::new_from_parent( + Arc::new(bank), + &Pubkey::default(), + slot.checked_add(1).unwrap(), + ); + } + let bank = setup_dummy_fork_graph(bank); + let context = SchedulingContext::new(bank.clone()); + + let ignored_prioritization_fee_cache = Arc::new(PrioritizationFeeCache::new(0u64)); + let pool = + SchedulerPool::<AsyncScheduler<TRIGGER_RACE_CONDITION>, DefaultTaskHandler>::new_dyn( + None, + None, + None, + ignored_prioritization_fee_cache, + ); + let scheduler = pool.take_scheduler(context); + + let bank = BankWithScheduler::new(bank, Some(scheduler)); + assert_eq!(bank.transaction_count(), 0); + + // schedule but not immediately execute transaction + bank.schedule_transaction_executions([(&very_old_valid_tx, &0)].into_iter()); + // this calls register_recent_blockhash internally + bank.fill_bank_with_ticks_for_tests(); + + if TRIGGER_RACE_CONDITION { + // very_old_valid_tx is wrongly handled as expired! + assert_matches!( + bank.wait_for_completed_scheduler(), + Some((Err(TransactionError::BlockhashNotFound), _)) + ); + assert_eq!(bank.transaction_count(), 0); + } else { + assert_matches!(bank.wait_for_completed_scheduler(), Some((Ok(()), _))); + assert_eq!(bank.transaction_count(), 1); + } + } + + #[test] + fn test_scheduler_schedule_execution_recent_blockhash_edge_case_with_race() { + do_test_scheduler_schedule_execution_recent_blockhash_edge_case::<true>(); + } + + #[test] + fn test_scheduler_schedule_execution_recent_blockhash_edge_case_without_race() { + do_test_scheduler_schedule_execution_recent_blockhash_edge_case::<false>(); + } +}
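The two `#[test]` wrappers above instantiate one shared body at `::<true>()` and `::<false>()`; the const generic keeps both variants monomorphized and individually filterable by test name. The pattern in isolation, with hypothetical names:

    fn do_test_edge_case<const TRIGGER_RACE_CONDITION: bool>() {
        if TRIGGER_RACE_CONDITION {
            // exercise the buggy interleaving
        } else {
            // exercise the correct interleaving
        }
    }

    #[test]
    fn edge_case_with_race() {
        do_test_edge_case::<true>();
    }

    #[test]
    fn edge_case_without_race() {
        do_test_edge_case::<false>();
    }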