diff --git a/Cargo.lock b/Cargo.lock index 18276b3ea3f..76786ada712 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -125,7 +125,7 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e8b47f52ea9bae42228d07ec09eb676433d7c4ed1ebdf0f1d1c29ed446f1ab8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher 0.3.0", "cpufeatures", "ctr 0.8.0", @@ -138,7 +138,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher 0.4.4", "cpufeatures", ] @@ -199,7 +199,7 @@ checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.9", "once_cell", - "version_check", + "version_check 0.9.4", ] [[package]] @@ -208,9 +208,9 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", - "version_check", + "version_check 0.9.4", ] [[package]] @@ -225,7 +225,7 @@ dependencies = [ [[package]] name = "amcl" version = "0.3.0" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" [[package]] name = "android_system_properties" @@ -265,6 +265,15 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +[[package]] +name = "archery" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a8da9bc4c4053ee067669762bcaeea6e241841295a2b6c948312dad6ef4cc02" +dependencies = [ + "static_assertions", +] + [[package]] name = "arrayref" version = "0.3.7" @@ -358,7 +367,7 @@ checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" dependencies = [ "async-lock", "autocfg 1.1.0", - "cfg-if", + "cfg-if 1.0.0", "concurrent-queue", "futures-lite", "log", @@ -547,7 +556,7 @@ checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", "miniz_oxide 0.6.2", "object", @@ -614,6 +623,7 @@ version = "0.2.0" dependencies = [ "bitvec 0.20.4", "bls", + "crossbeam-channel", "derivative", "environment", "eth1", @@ -655,7 +665,7 @@ dependencies = [ "state_processing", "store", "strum", - "superstruct 0.5.0", + "superstruct 0.7.0", "task_executor", "tempfile", "tokio", @@ -710,21 +720,26 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "f1c85344eb535a31b62f0af37be84441ba9e7f0f4111eb0530f43d15e513fe57" dependencies = [ "bitflags", "cexpr", + "cfg-if 0.1.10", "clang-sys", + "clap", + "env_logger 0.7.1", "lazy_static", "lazycell", + "log", "peeking_take_while", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", + "which 3.1.1", ] [[package]] @@ -807,13 +822,14 @@ version = "0.2.0" dependencies = [ "arbitrary", "blst", + "criterion", "ethereum-types 0.14.1", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "hex", "milagro_bls", - "rand 0.7.3", + "rand 0.8.5", "serde", "serde_derive", "tree_hash", @@ -829,7 +845,7 @@ dependencies
= [ "cc", "glob", "threadpool", - "which", + "which 4.4.0", "zeroize", ] @@ -1004,6 +1020,9 @@ name = "cc" version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f" +dependencies = [ + "jobserver", +] [[package]] name = "ccm" @@ -1018,13 +1037,19 @@ dependencies = [ [[package]] name = "cexpr" -version = "0.6.0" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +checksum = "fce5b5fb86b0c57c20c834c1b412fd09c77c8a59b9473f86272709e78874cd1d" dependencies = [ - "nom 7.1.3", + "nom 4.2.3", ] +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + [[package]] name = "cfg-if" version = "1.0.0" @@ -1037,7 +1062,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c80e5460aa66fe3b91d40bcbdab953a597b60053e34d684ac6903f863b680a6" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cipher 0.3.0", "cpufeatures", "zeroize", @@ -1102,9 +1127,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853" dependencies = [ "glob", "libc", @@ -1183,6 +1208,15 @@ dependencies = [ "types", ] +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + [[package]] name = "cmake" version = "0.1.50" @@ -1222,12 +1256,6 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "core-foundation" version = "0.9.3" @@ -1283,7 +1311,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1328,7 +1356,7 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-utils", ] @@ -1338,7 +1366,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "crossbeam-epoch", "crossbeam-utils", ] @@ -1350,7 +1378,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695" dependencies = [ "autocfg 1.1.0", - "cfg-if", + "cfg-if 1.0.0", "crossbeam-utils", "memoffset 0.8.0", "scopeguard", @@ -1362,7 +1390,7 @@ version = "0.8.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -1494,7 +1522,7 @@ version = "4.0.0-rc.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03d928d978dbec61a1167414f5ec534f24bea0d7a0d24dd9b6233d3d8223e585" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "digest 0.10.7", "fiat-crypto", "packed_simd_2", @@ -1628,6 +1656,8 @@ dependencies = [ "clap", "clap_utils", "environment", + "ethereum_ssz", + "hex", "logging", "slog", "sloggers", @@ -1774,10 +1804,8 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case", "proc-macro2", "quote", - "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1863,7 +1891,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "dirs-sys-next", ] @@ -2034,6 +2062,8 @@ dependencies = [ "fork_choice", "fs2", "hex", + "logging", + "malloc_utils", "rayon", "serde", "serde_derive", @@ -2101,7 +2131,7 @@ version = "0.8.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "071a31f4ee85403370b58aca746f01041ede6f0da2730960ad001edc2b71b394" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -2160,8 +2190,11 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ + "atty", + "humantime 1.3.0", "log", "regex", + "termcolor", ] [[package]] @@ -2171,7 +2204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", - "humantime", + "humantime 2.1.0", "log", "regex", "termcolor", @@ -2227,7 +2260,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc" dependencies = [ "backtrace", - "version_check", + "version_check 0.9.4", ] [[package]] @@ -2254,7 +2287,7 @@ dependencies = [ "slog", "sloggers", "state_processing", - "superstruct 0.5.0", + "superstruct 0.7.0", "task_executor", "tokio", "tree_hash", @@ -2586,7 +2619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" dependencies = [ "Inflector", - "cfg-if", + "cfg-if 1.0.0", "dunce", "ethers-core", "eyre", @@ -2752,7 +2785,7 @@ dependencies = [ "ssz_types", "state_processing", "strum", - "superstruct 0.6.0", + "superstruct 0.7.0", "task_executor", "tempfile", "tokio", @@ -2952,6 +2985,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "1.1.0" @@ -3102,7 +3141,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", - "version_check", + "version_check 0.9.4", "zeroize", ] @@ -3133,7 +3172,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", @@ -3146,7 +3185,7 @@ version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3557,6 +3596,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" +[[package]] +name = "humantime" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" +dependencies = [ + "quick-error", +] + [[package]] name = "humantime" version = "2.1.0" @@ -3813,7 +3861,7 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -3931,6 +3979,15 @@ dependencies = [ "libc", ] +[[package]] +name = "jobserver" +version = "0.1.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.63" @@ -3960,7 +4017,7 @@ version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ecdsa 0.14.8", "elliptic-curve 0.12.3", "sha2 0.10.6", @@ -3973,7 +4030,7 @@ version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cadb76004ed8e97623117f3df85b17aaa6626ab0b0831e6573f104df16cd1bcc" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "ecdsa 0.16.7", "elliptic-curve 0.13.5", "once_cell", @@ -4104,11 +4161,11 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.4" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "f2b111a074963af1d37a139918ac6d49ad1d0d5e47f72fd55388619691a7d753" dependencies = [ - "cfg-if", + "cc", "winapi", ] @@ -4124,21 +4181,6 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7012b1bbb0719e1097c47611d3898568c546d597c2e74d66f6087edd5233ff4" -[[package]] -name = "libmdbx" -version = "0.1.4" -source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" -dependencies = [ - "bitflags", - "byteorder", - "derive_more", - "indexmap", - "libc", - "mdbx-sys", - "parking_lot 0.12.1", - "thiserror", -] - [[package]] name = "libp2p" version = "0.50.1" @@ -4699,6 +4741,7 @@ dependencies = [ "slashing_protection", "slog", "sloggers", + "store", "task_executor", "tempfile", "types", @@ -4752,7 +4795,7 @@ dependencies = [ "snap", "ssz_types", "strum", - "superstruct 0.5.0", + "superstruct 0.7.0", "task_executor", "tempfile", "tiny-keccak", @@ -4833,7 +4876,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", ] [[package]] @@ -4872,6 
+4915,15 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "lru" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03f1160296536f10c833a82dca22267d5486734230d47bf00bf435885814ba1e" +dependencies = [ + "hashbrown 0.13.2", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -4951,17 +5003,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "mdbx-sys" -version = "0.11.6-4" -source = "git+https://github.com/sigp/libmdbx-rs?tag=v0.1.4#096da80a83d14343f8df833006483f48075cd135" -dependencies = [ - "bindgen", - "cc", - "cmake", - "libc", -] - [[package]] name = "mediatype" version = "0.19.13" @@ -5067,16 +5108,38 @@ dependencies = [ [[package]] name = "milagro_bls" -version = "1.4.2" -source = "git+https://github.com/sigp/milagro_bls?tag=v1.4.2#16655aa033175a90c10ef02aa144e2835de23aec" +version = "1.5.1" +source = "git+https://github.com/sigp/milagro_bls?tag=v1.5.1#d3fc0a40cfe8b72ccda46ba050ee6786a59ce753" dependencies = [ "amcl", "hex", "lazy_static", - "rand 0.7.3", + "rand 0.8.5", "zeroize", ] +[[package]] +name = "milhouse" +version = "0.1.0" +source = "git+https://github.com/sigp/milhouse?branch=main#4035d254ad538dd642fe031fbecfae55d9a4f31d" +dependencies = [ + "arbitrary", + "derivative", + "ethereum-types 0.14.1", + "ethereum_hashing", + "ethereum_ssz", + "ethereum_ssz_derive", + "itertools", + "parking_lot 0.11.2", + "rayon", + "serde", + "smallvec", + "tree_hash", + "triomphe", + "typenum", + "vec_map", +] + [[package]] name = "mime" version = "0.3.17" @@ -5449,7 +5512,7 @@ checksum = "8f3790c00a0150112de0f4cd161e3d7fc4b2d8a5542ffc35f099a2562aecb35c" dependencies = [ "bitflags", "cc", - "cfg-if", + "cfg-if 1.0.0", "libc", "memoffset 0.6.5", ] @@ -5461,7 +5524,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 1.0.0", "libc", "memoffset 0.6.5", ] @@ -5473,7 +5536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfdda3d196821d6af13126e40375cdf7da646a96114af134d5f417a9a1dc8e1a" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 1.0.0", "libc", "static_assertions", ] @@ -5505,6 +5568,16 @@ version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" +[[package]] +name = "nom" +version = "4.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +dependencies = [ + "memchr", + "version_check 0.1.5", +] + [[package]] name = "nom" version = "7.1.3" @@ -5697,7 +5770,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56" dependencies = [ "bitflags", - "cfg-if", + "cfg-if 1.0.0", "foreign-types", "libc", "once_cell", @@ -5802,7 +5875,7 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1914cd452d8fccd6f9db48147b29fd4ae05bea9dc5d9ad578509f72415de282" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libm 0.1.4", ] @@ -5891,7 +5964,7 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "instant", "libc", "redox_syscall 0.2.16", @@ -5905,7 +5978,7 @@ version = 
"0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "libc", "redox_syscall 0.2.16", "smallvec", @@ -6116,7 +6189,7 @@ checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" dependencies = [ "autocfg 1.1.0", "bitflags", - "cfg-if", + "cfg-if 1.0.0", "concurrent-queue", "libc", "log", @@ -6141,7 +6214,7 @@ version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8419d2b623c7c0896ff2d5d96e2cb4ede590fed28fcc34934f4c33c036e620a1" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "opaque-debug", "universal-hash 0.4.1", @@ -6153,7 +6226,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "opaque-debug", "universal-hash 0.5.1", @@ -6260,7 +6333,7 @@ dependencies = [ "proc-macro2", "quote", "syn 1.0.109", - "version_check", + "version_check 0.9.4", ] [[package]] @@ -6271,7 +6344,7 @@ checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", - "version_check", + "version_check 0.9.4", ] [[package]] @@ -6307,7 +6380,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "fnv", "lazy_static", "memchr", @@ -6368,7 +6441,7 @@ dependencies = [ "regex", "syn 1.0.109", "tempfile", - "which", + "which 4.4.0", ] [[package]] @@ -6432,7 +6505,7 @@ version = "3.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f866af2b0f8e4b0d2d00aad8a9c5fc48fad33466cd99a64cbb3a4c1505f1a62d" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "darwin-libproc", "derive_more", "glob", @@ -6554,6 +6627,25 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.8", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift 0.1.1", + "winapi", +] + [[package]] name = "rand" version = "0.7.3" @@ -6564,7 +6656,7 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc", + "rand_hc 0.2.0", ] [[package]] @@ -6578,6 +6670,16 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_chacha" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.8", + "rand_core 0.3.1", +] + [[package]] name = "rand_chacha" version = "0.2.2" @@ -6598,6 +6700,21 @@ dependencies = [ "rand_core 0.6.4", ] +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.5.1" @@ -6616,6 +6733,15 @@ dependencies = [ "getrandom 0.2.9", ] +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_hc" version = "0.2.0" @@ -6625,6 +6751,59 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi", +] + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.8", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "rand_xorshift" version = "0.3.0" @@ -6681,6 +6860,15 @@ dependencies = [ "yasna", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.2.16" @@ -6869,6 +7057,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "rpds" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ef5140bcb576bfd6d56cd2de709a7d17851ac1f3805e67fe9d99e42a11821f" +dependencies = [ + "archery", +] + [[package]] name = "rtcp" version = "0.7.2" @@ -7095,7 +7292,7 @@ version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b569c32c806ec3abdf3b5869fb8bf1e0d275a7c1c9b0b05603d9464632649edf" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "derive_more", "parity-scale-codec 3.5.0", "scale-info-derive", @@ -7391,7 +7588,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -7403,7 +7600,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5058ada175748e33390e40e872bd0fe59a19f265d0158daa551c5a88a76009c" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -7414,7 +7611,7 @@ version = "0.10.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -7426,7 +7623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -7438,7 +7635,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "cpufeatures", "digest 0.10.7", ] @@ -7476,9 +7673,9 @@ dependencies = [ [[package]] name = "shlex" -version = "1.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" +checksum = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2" [[package]] name = "signal-hook-registry" @@ -7565,7 +7762,6 @@ dependencies = [ "filesystem", "flate2", "lazy_static", - "libmdbx", "lighthouse_metrics", "lmdb-rkv", "lmdb-rkv-sys", @@ -7865,6 +8061,12 @@ dependencies = [ "typenum", ] +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + [[package]] name = "state_processing" version = "0.2.0" @@ -7884,12 +8086,14 @@ dependencies = [ "lighthouse_metrics", "merkle_proof", "rayon", + "rustc-hash", "safe_arith", "smallvec", "ssz_types", "tokio", "tree_hash", "types", + "vec_map", ] [[package]] @@ -7915,6 +8119,7 @@ name = "store" version = "0.2.0" dependencies = [ "beacon_chain", + "bls", "db-key", "directory", "ethereum_ssz", @@ -7923,16 +8128,23 @@ dependencies = [ "lazy_static", "leveldb", "lighthouse_metrics", - "lru 0.7.8", + "logging", + "lru 0.10.0", "parking_lot 0.12.1", + "safe_arith", "serde", "serde_derive", "slog", "sloggers", + "smallvec", "state_processing", "strum", + "take-until", "tempfile", + "tree_hash", "types", + "xdelta3", + "zstd", ] [[package]] @@ -8029,9 +8241,9 @@ dependencies = [ [[package]] name = "superstruct" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b9e5728aa1a87141cefd4e7509903fc01fa0dcb108022b1e841a67c5159fc5" +checksum = "6f4e1f478a7728f8855d7e620e9a152cf8932c6614f86564c886f9b8141f3201" dependencies = [ "darling 0.13.4", "itertools", @@ -8096,7 +8308,7 @@ version = "0.26.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c18a6156d1f27a9592ee18c1a846ca8dd5c258b7179fc193ae87c74ebb666f5" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "core-foundation-sys", "libc", "ntapi", @@ -8139,6 +8351,12 @@ dependencies = [ "types", ] +[[package]] +name = "take-until" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4e17d8598067a8c134af59cd33c1c263470e089924a11ab61cf61690919fe3b" + [[package]] name = "take_mut" version = "0.2.2" @@ -8172,6 +8390,7 @@ dependencies = [ "futures", "lazy_static", "lighthouse_metrics", + "logging", "slog", "sloggers", "tokio", @@ -8183,7 +8402,7 @@ version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" dependencies = [ - 
"cfg-if", + "cfg-if 1.0.0", "fastrand", "redox_syscall 0.3.5", "rustix", @@ -8278,7 +8497,7 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "once_cell", ] @@ -8636,7 +8855,7 @@ version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "log", "pin-project-lite 0.2.9", "tracing-attributes", @@ -8754,6 +8973,16 @@ dependencies = [ "rlp", ] +[[package]] +name = "triomphe" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1ee9bd9239c339d714d657fac840c6d2a4f9c45f4f9ec7b0975113458be78db" +dependencies = [ + "serde", + "stable_deref_trait", +] + [[package]] name = "trust-dns-proto" version = "0.22.0" @@ -8761,7 +8990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if", + "cfg-if 1.0.0", "data-encoding", "enum-as-inner", "futures-channel", @@ -8786,7 +9015,7 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "futures-util", "ipconfig", "lazy_static", @@ -8906,11 +9135,13 @@ dependencies = [ "maplit", "merkle_proof", "metastruct", + "milhouse", "parking_lot 0.12.1", "rand 0.8.5", - "rand_xorshift", + "rand_xorshift 0.3.0", "rayon", "regex", + "rpds", "rusqlite", "safe_arith", "serde", @@ -8922,7 +9153,7 @@ dependencies = [ "smallvec", "ssz_types", "state_processing", - "superstruct 0.6.0", + "superstruct 0.7.0", "swap_or_not_shuffle", "tempfile", "test_random_derive", @@ -8956,7 +9187,7 @@ version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies = [ - "version_check", + "version_check 0.9.4", ] [[package]] @@ -9174,6 +9405,12 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" +[[package]] +name = "version_check" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" + [[package]] name = "version_check" version = "0.9.4" @@ -9293,7 +9530,7 @@ version = "0.2.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "wasm-bindgen-macro", ] @@ -9318,7 +9555,7 @@ version = "0.4.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" dependencies = [ - "cfg-if", + "cfg-if 1.0.0", "js-sys", "wasm-bindgen", "web-sys", @@ -9688,6 +9925,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "which" +version = "3.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" +dependencies = [ + "libc", +] + [[package]] name = "which" version = "4.4.0" @@ -10062,6 +10308,18 @@ dependencies = [ "time 0.3.21", ] 
+[[package]] +name = "xdelta3" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f14e45aed717051e07e25ca49eb35222aacbd6bea4961c1db68274413c8e6e79" +dependencies = [ + "bindgen", + "cc", + "libc", + "rand 0.6.5", +] + [[package]] name = "xml-rs" version = "0.8.11" @@ -10142,3 +10400,32 @@ dependencies = [ "thiserror", "time 0.1.45", ] + +[[package]] +name = "zstd" +version = "0.11.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "5.0.2+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d2a5585e04f9eea4b2a3d1eca508c4dee9592a89ef6f450c11719da0726f4db" +dependencies = [ + "libc", + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.4+zstd.1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" +dependencies = [ + "cc", + "libc", +] diff --git a/account_manager/src/validator/exit.rs b/account_manager/src/validator/exit.rs index 5755a355f31..79ee8aeaee4 100644 --- a/account_manager/src/validator/exit.rs +++ b/account_manager/src/validator/exit.rs @@ -210,8 +210,8 @@ async fn publish_voluntary_exit<E: EthSpec>( let validator_data = get_validator_data(client, &keypair.pk).await?; match validator_data.status { ValidatorStatus::ActiveExiting => { - let exit_epoch = validator_data.validator.exit_epoch; - let withdrawal_epoch = validator_data.validator.withdrawable_epoch; + let exit_epoch = validator_data.validator.exit_epoch(); + let withdrawal_epoch = validator_data.validator.withdrawable_epoch(); let current_epoch = get_current_epoch::<E>(genesis_data.genesis_time, spec) .ok_or("Failed to get current epoch. Please check your system time")?; eprintln!("Voluntary exit has been accepted into the beacon chain, but not yet finalized.
\ @@ -231,7 +231,7 @@ async fn publish_voluntary_exit<E: EthSpec>( ValidatorStatus::ExitedSlashed | ValidatorStatus::ExitedUnslashed => { eprintln!( "Validator has exited on epoch: {}", - validator_data.validator.exit_epoch + validator_data.validator.exit_epoch() ); break; } @@ -257,7 +257,7 @@ async fn get_validator_index_for_exit( ValidatorStatus::ActiveOngoing => { let eligible_epoch = validator_data .validator - .activation_epoch + .activation_epoch() .safe_add(spec.shard_committee_period) .map_err(|e| format!("Failed to calculate eligible epoch, validator activation epoch too high: {:?}", e))?; diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 27d07e33385..ee5687a99b5 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -60,11 +60,12 @@ strum = { version = "0.24.0", features = ["derive"] } logging = { path = "../../common/logging" } execution_layer = { path = "../execution_layer" } sensitive_url = { path = "../../common/sensitive_url" } -superstruct = "0.5.0" +superstruct = "0.7.0" hex = "0.4.2" exit-future = "0.2.0" unused_port = {path = "../../common/unused_port"} oneshot_broadcast = { path = "../../common/oneshot_broadcast" } +crossbeam-channel = "0.5.5" [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index a4a661197f7..e5302cf54b2 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -42,7 +42,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .ok_or(BeaconChainError::MissingBeaconState(state_root))?; // Calculate ideal_rewards - let participation_cache = ParticipationCache::new(&state, spec)?; + let participation_cache = ParticipationCache::new(&state, spec) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; let previous_epoch = state.previous_epoch(); @@ -52,13 +53,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let weight = get_flag_weight(flag_index) .map_err(|_| BeaconChainError::AttestationRewardsError)?; - let unslashed_participating_indices = participation_cache - .get_unslashed_participating_indices(flag_index, previous_epoch)?; - - let unslashed_participating_balance = - unslashed_participating_indices - .total_balance() - .map_err(|_| BeaconChainError::AttestationRewardsError)?; + let unslashed_participating_balance = participation_cache + .previous_epoch_flag_attesting_balance(flag_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; let unslashed_participating_increments = unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; @@ -112,23 +109,24 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .collect::<Result<Vec<_>, _>>()?
}; - for validator_index in &validators { - let eligible = state.is_eligible_validator(previous_epoch, *validator_index)?; + for &validator_index in &validators { + let validator = participation_cache + .get_validator(validator_index) + .map_err(|_| BeaconChainError::AttestationRewardsError)?; + let eligible = validator.is_eligible; let mut head_reward = 0u64; let mut target_reward = 0i64; let mut source_reward = 0i64; if eligible { - let effective_balance = state.get_effective_balance(*validator_index)?; + let effective_balance = validator.effective_balance; for flag_index in 0..PARTICIPATION_FLAG_WEIGHTS.len() { let (ideal_reward, penalty) = ideal_rewards_hashmap .get(&(flag_index, effective_balance)) .ok_or(BeaconChainError::AttestationRewardsError)?; - let voted_correctly = participation_cache - .get_unslashed_participating_indices(flag_index, previous_epoch) - .map_err(|_| BeaconChainError::AttestationRewardsError)? - .contains(*validator_index) + let voted_correctly = validator + .is_unslashed_participating_index(flag_index) .map_err(|_| BeaconChainError::AttestationRewardsError)?; if voted_correctly { if flag_index == TIMELY_HEAD_FLAG_INDEX { @@ -148,7 +146,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } } total_rewards.push(TotalAttestationRewards { - validator_index: *validator_index as u64, + validator_index: validator_index as u64, head: head_reward, target: target_reward, source: source_reward, diff --git a/beacon_node/beacon_chain/src/beacon_block_reward.rs b/beacon_node/beacon_chain/src/beacon_block_reward.rs index 786402c9978..0d2ab8ab3d5 100644 --- a/beacon_node/beacon_chain/src/beacon_block_reward.rs +++ b/beacon_node/beacon_chain/src/beacon_block_reward.rs @@ -4,12 +4,11 @@ use operation_pool::RewardCache; use safe_arith::SafeArith; use slog::error; use state_processing::{ - common::{ - altair, get_attestation_participation_flag_indices, get_attesting_indices_from_state, - }, + common::{get_attestation_participation_flag_indices, get_attesting_indices_from_state}, per_block_processing::{ altair::sync_committee::compute_sync_aggregate_rewards, get_slashable_indices, }, + ConsensusContext, }; use store::{ consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}, @@ -124,7 +123,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { proposer_slashing_reward.safe_add_assign( state .get_validator(proposer_slashing.proposer_index() as usize)? - .effective_balance + .effective_balance() .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } @@ -146,7 +145,7 @@ attester_slashing_reward.safe_add_assign( state .get_validator(attester_index as usize)? - .effective_balance + .effective_balance() .safe_div(self.spec.whistleblower_reward_quotient)?, )?; } @@ -178,9 +177,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { block: BeaconBlockRef<'_, T::EthSpec, Payload>, state: &mut BeaconState<T::EthSpec>, ) -> Result<BeaconBlockSubRewardValue, BeaconChainError> { - let total_active_balance = state.get_total_active_balance()?; - let base_reward_per_increment = - altair::BaseRewardPerIncrement::new(total_active_balance, &self.spec)?; + let mut ctxt = ConsensusContext::new(block.slot()); let mut total_proposer_reward = 0; @@ -216,13 +213,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { { validator_participation.add_flag(flag_index)?; proposer_reward_numerator.safe_add_assign( - altair::get_base_reward( - state, - index, - base_reward_per_increment, - &self.spec, - )? - .safe_mul(weight)?, + ctxt.get_base_reward(state, index, &self.spec) .map_err(|_| BeaconChainError::BlockRewardAttestationError)?
+ .safe_mul(weight)?, )?; } } } diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index e43f2a8dd81..d461f60fb83 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -429,7 +429,7 @@ impl<T: BeaconChainTypes> BeaconBlockStreamer<T> { continue; } - match self.beacon_chain.store.try_get_full_block(&root) { + match self.beacon_chain.store.try_get_full_block(&root, None) { Err(e) => db_blocks.push((root, Err(e.into()))), Ok(opt_block) => db_blocks.push(( root, diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ceda7222e69..f78acaa88f7 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -47,7 +47,6 @@ use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_B use crate::persisted_fork_choice::PersistedForkChoice; use crate::pre_finalization_cache::PreFinalizationBlockCache; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{BlockProductionPreState, SnapshotCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; @@ -105,7 +104,6 @@ use store::{ use task_executor::{ShutdownReason, TaskExecutor}; use tokio_stream::Stream; use tree_hash::TreeHash; -use types::beacon_state::CloneConfig; use types::*; pub type ForkChoiceError = fork_choice::Error<proto_array::Error>; @@ -113,9 +111,6 @@ pub type ForkChoiceError = fork_choice::Error<proto_array::Error>; /// Alias to appease clippy. type HashBlockTuple<E> = (Hash256, Arc<SignedBeaconBlock<E>>); -/// The time-out before failure during an operation to take a read/write RwLock on the block -/// processing cache. -pub const BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); /// The time-out before failure during an operation to take a read/write RwLock on the /// attestation cache. pub const ATTESTATION_CACHE_LOCK_TIMEOUT: Duration = Duration::from_secs(1); @@ -391,8 +386,6 @@ pub struct BeaconChain<T: BeaconChainTypes> { pub event_handler: Option<ServerSentEventHandler<T::EthSpec>>, /// Used to track the heads of the beacon chain. pub(crate) head_tracker: Arc<HeadTracker>, - /// A cache dedicated to block processing. - pub(crate) snapshot_cache: TimeoutRwLock<SnapshotCache<T::EthSpec>>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: TimeoutRwLock<ShufflingCache>, /// A cache of eth1 deposit data at epoch boundaries for deposit finalization @@ -400,7 +393,7 @@ pub struct BeaconChain<T: BeaconChainTypes> { /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Mutex<BeaconProposerCache>, /// Caches a map of `validator_index -> validator_pubkey`. - pub(crate) validator_pubkey_cache: TimeoutRwLock<ValidatorPubkeyCache<T>>, + pub(crate) validator_pubkey_cache: Arc<RwLock<ValidatorPubkeyCache<T>>>, /// A cache used when producing attestations. pub(crate) attester_cache: Arc<AttesterCache>, /// A cache used when producing attestations whilst the head block is still being imported. @@ -475,10 +468,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let mut batch = vec![]; let _head_timer = metrics::start_timer(&metrics::PERSIST_HEAD); - batch.push(self.persist_head_in_batch()); + batch.push(self.persist_head_in_batch()?); let _fork_choice_timer = metrics::start_timer(&metrics::PERSIST_FORK_CHOICE); - batch.push(self.persist_fork_choice_in_batch()); + batch.push(self.persist_fork_choice_in_batch()?); self.store.hot_db.do_atomically(batch)?; @@ -498,14 +491,14 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } /// Return a database operation for writing the beacon chain head to disk.
- pub fn persist_head_in_batch(&self) -> KeyValueStoreOp { + pub fn persist_head_in_batch(&self) -> Result<KeyValueStoreOp, Error> { Self::persist_head_in_batch_standalone(self.genesis_block_root, &self.head_tracker) } pub fn persist_head_in_batch_standalone( genesis_block_root: Hash256, head_tracker: &HeadTracker, - ) -> KeyValueStoreOp { + ) -> Result<KeyValueStoreOp, Error> { Self::make_persisted_head(genesis_block_root, head_tracker) .as_kv_store_op(BEACON_CHAIN_DB_KEY) } @@ -613,9 +606,8 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let iter = self.store.forwards_block_roots_iterator( start_slot, - local_head.beacon_state.clone_with(CloneConfig::none()), + local_head.beacon_state.clone(), local_head.beacon_block_root, - &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -640,17 +632,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> { } self.with_head(move |head| { - let iter = self.store.forwards_block_roots_iterator_until( - start_slot, - end_slot, - || { - ( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_block_root, - ) - }, - &self.spec, - )?; + let iter = + self.store + .forwards_block_roots_iterator_until(start_slot, end_slot, || { + (head.beacon_state.clone(), head.beacon_block_root) + })?; Ok(iter .map(|result| result.map_err(Into::into)) .take_while(move |result| { @@ -719,8 +705,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let iter = self.store.forwards_state_roots_iterator( start_slot, local_head.beacon_state_root(), - local_head.beacon_state.clone_with(CloneConfig::none()), + local_head.beacon_state.clone(), - &self.spec, )?; Ok(iter.map(|result| result.map_err(Into::into))) @@ -737,17 +722,11 @@ impl<T: BeaconChainTypes> BeaconChain<T> { end_slot: Slot, ) -> Result<impl Iterator<Item = Result<(Hash256, Slot), Error>> + '_, Error> { self.with_head(move |head| { - let iter = self.store.forwards_state_roots_iterator_until( - start_slot, - end_slot, - || { - ( - head.beacon_state.clone_with_only_committee_caches(), - head.beacon_state_root(), - ) - }, - &self.spec, - )?; + let iter = + self.store + .forwards_state_roots_iterator_until(start_slot, end_slot, || { + (head.beacon_state.clone(), head.beacon_state_root()) + })?; Ok(iter .map(|result| result.map_err(Into::into)) .take_while(move |result| { @@ -771,7 +750,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let root = self.block_root_at_slot(request_slot, skips)?; if let Some(block_root) = root { - Ok(self.store.get_blinded_block(&block_root)?) + Ok(self + .store + .get_blinded_block(&block_root, Some(request_slot))?) } else { Ok(None) } @@ -1025,7 +1006,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { ) -> Result<Option<SignedBeaconBlock<T::EthSpec>>, Error> { // Load block from database, returning immediately if we have the full block w payload // stored. - let blinded_block = match self.store.try_get_full_block(block_root)? { + let blinded_block = match self.store.try_get_full_block(block_root, None)? { Some(DatabaseBlock::Full(block)) => return Ok(Some(block)), Some(DatabaseBlock::Blinded(block)) => block, None => return Ok(None), }; @@ -1082,7 +1063,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { &self, block_root: &Hash256, ) -> Result<Option<SignedBlindedBeaconBlock<T::EthSpec>>, Error> { - Ok(self.store.get_blinded_block(block_root)?) + Ok(self.store.get_blinded_block(block_root, None)?) } /// Returns the state at the given root, if any. @@ -1290,10 +1271,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out.
pub fn validator_index(&self, pubkey: &PublicKeyBytes) -> Result<Option<usize>, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_index(pubkey)) } @@ -1306,10 +1284,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { &self, validator_pubkeys: impl Iterator<Item = &'a PublicKeyBytes>, ) -> Result<Vec<u64>, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); validator_pubkeys .map(|pubkey| { @@ -1334,10 +1309,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { /// /// May return an error if acquiring a read-lock on the `validator_pubkey_cache` times out. pub fn validator_pubkey(&self, validator_index: usize) -> Result<Option<PublicKey>, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get(validator_index).cloned()) } @@ -1347,11 +1319,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { &self, validator_index: usize, ) -> Result<Option<PublicKeyBytes>, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; - + let pubkey_cache = self.validator_pubkey_cache.read(); Ok(pubkey_cache.get_pubkey_bytes(validator_index).copied()) } @@ -1364,10 +1332,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { &self, validator_indices: &[usize], ) -> Result<HashMap<usize, PublicKeyBytes>, Error> { - let pubkey_cache = self - .validator_pubkey_cache - .try_read_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)?; + let pubkey_cache = self.validator_pubkey_cache.read(); let mut map = HashMap::with_capacity(validator_indices.len()); for &validator_index in validator_indices { @@ -2856,8 +2821,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // would be difficult to check that they all lock fork choice first. let mut ops = self .validator_pubkey_cache - .try_write_for(VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT) - .ok_or(Error::ValidatorPubkeyCacheLockTimeout)? + .write() .import_new_pubkeys(&state)?; // Apply the state to the attester cache, only if it is from the previous epoch or later. @@ -2931,6 +2895,13 @@ impl<T: BeaconChainTypes> BeaconChain<T> { "Early attester cache insert failed"; "error" => ?e ); + } else { + // Success, record the block as capable of being attested to. + self.block_times_cache.write().set_time_attestable( + block_root, + block.slot(), + timestamp_now(), + ); } } else { warn!( @@ -3048,29 +3019,6 @@ impl<T: BeaconChainTypes> BeaconChain<T> { }; let current_finalized_checkpoint = state.finalized_checkpoint(); - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout) - .map(|mut snapshot_cache| { - snapshot_cache.insert( - BeaconSnapshot { - beacon_state: state, - beacon_block: signed_block.clone(), - beacon_block_root: block_root, - }, - None, - &self.spec, - ) - }) - .unwrap_or_else(|e| { - error!( - self.log, - "Failed to insert snapshot"; - "error" => ?e, - "task" => "process block" - ); - }); - self.head_tracker .register_block(block_root, parent_root, slot); @@ -3648,22 +3596,22 @@ impl<T: BeaconChainTypes> BeaconChain<T> { self.wait_for_fork_choice_before_block_production(slot)?; drop(fork_choice_timer); - // Producing a block requires the tree hash cache, so clone a full state corresponding to - // the head from the snapshot cache.
Unfortunately we can't move the snapshot out of the - // cache (which would be fast), because we need to re-process the block after it has been - // signed. If we miss the cache or we're producing a block that conflicts with the head, - // fall back to getting the head from `slot - 1`. let state_load_timer = metrics::start_timer(&metrics::BLOCK_PRODUCTION_STATE_LOAD_TIMES); // Atomically read some values from the head whilst avoiding holding cached head `Arc` any // longer than necessary. - let (head_slot, head_block_root) = { + let (head_slot, head_block_root, head_state_root) = { let head = self.canonical_head.cached_head(); - (head.head_slot(), head.head_block_root()) + ( + head.head_slot(), + head.head_block_root(), + head.head_state_root(), + ) }; let (state, state_root_opt) = if head_slot < slot { // Attempt an aggressive re-org if configured and the conditions are right. - if let Some(re_org_state) = self.get_state_for_re_org(slot, head_slot, head_block_root) + if let Some((re_org_state, re_org_state_root)) = + self.get_state_for_re_org(slot, head_slot, head_block_root) { info!( self.log, "slot" => slot, "head_to_reorg" => %head_block_root, ); - (re_org_state.pre_state, re_org_state.state_root) - } - // Normal case: proposing a block atop the current head. Use the snapshot cache. - else if let Some(pre_state) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(head_block_root) - }) - { - (pre_state.pre_state, pre_state.state_root) + (re_org_state, Some(re_org_state_root)) } else { - warn!( - self.log, - "Block production cache miss"; - "message" => "this block is more likely to be orphaned", - "slot" => slot, - ); - let state = self - .state_at_slot(slot - 1, StateSkipConfig::WithStateRoots) - .map_err(|_| BlockProductionError::UnableToProduceAtSlot(slot))?; - - (state, None) + // Fetch the head state advanced through to `slot`, which should be present in the + // state cache thanks to the state advance timer. + let (state_root, state) = self + .store + .get_advanced_state(head_block_root, slot, head_state_root) + .map_err(BlockProductionError::FailedToLoadState)? + .ok_or(BlockProductionError::UnableToProduceAtSlot(slot))?; + (state, Some(state_root)) } } else { warn!( @@ -3722,7 +3657,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { slot: Slot, head_slot: Slot, canonical_head: Hash256, - ) -> Option<BlockProductionPreState<T::EthSpec>> { + ) -> Option<(BeaconState<T::EthSpec>, Hash256)> { let re_org_threshold = self.config.re_org_threshold?; if self.spec.proposer_score_boost.is_none() { @@ -3803,23 +3738,20 @@ impl<T: BeaconChainTypes> BeaconChain<T> { .ok()?; drop(proposer_head_timer); let re_org_parent_block = proposer_head.parent_node.root; + let re_org_parent_state_root = proposer_head.parent_node.state_root; - // Only attempt a re-org if we hit the snapshot cache.
- let pre_state = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_state_for_block_production(re_org_parent_block) - }) - .or_else(|| { - debug!( + // FIXME(sproul): consider not re-orging if we miss the cache + let (state_root, state) = self + .store + .get_advanced_state(re_org_parent_block, slot, re_org_parent_state_root) + .map_err(|e| { + warn!( self.log, - "Not attempting re-org"; - "reason" => "missed snapshot cache", - "parent_block" => ?re_org_parent_block, + "Error loading block production state"; + "error" => ?e, ); - None - })?; + }) + .ok()??; info!( self.log, @@ -3830,7 +3762,7 @@ "weak_head" => ?canonical_head, "parent" => ?re_org_parent_block, "head_weight" => proposer_head.head_node.weight, "threshold_weight" => proposer_head.re_org_weight_threshold ); - Some(pre_state) + Some((state, state_root)) } /// Get the proposer index and `prev_randao` value for a proposal at slot `proposal_slot`. @@ -3954,23 +3886,10 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let parent_block_root = forkchoice_update_params.head_root; + // FIXME(sproul): optimise this for tree-states let (unadvanced_state, unadvanced_state_root) = if cached_head.head_block_root() == parent_block_root { (Cow::Borrowed(head_state), cached_head.head_state_root()) - } else if let Some(snapshot) = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(Error::SnapshotCacheLockTimeout)? - .get_cloned(parent_block_root, CloneConfig::none()) - { - debug!( - self.log, - "Hit snapshot cache during withdrawals calculation"; - "slot" => proposal_slot, - "parent_block_root" => ?parent_block_root, - ); - let state_root = snapshot.beacon_state_root(); - (Cow::Owned(snapshot.beacon_state), state_root) } else { info!( self.log, @@ -4291,6 +4210,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { drop(slot_timer); state.build_committee_cache(RelativeEpoch::Current, &self.spec)?; + state.apply_pending_mutations()?; let parent_root = if state.slot() > 0 { *state @@ -4305,7 +4225,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let pubkey = state .validators() .get(proposer_index as usize) - .map(|v| v.pubkey) + .map(|v| *v.pubkey()) .ok_or(BlockProductionError::BeaconChain( BeaconChainError::ValidatorIndexUnknown(proposer_index as usize), ))?; @@ -5515,6 +5435,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> { "head_block_root" => head_block_root.to_string(), ); + // If the block's state will be so far ahead of `shuffling_epoch` that even its + // previous epoch committee cache will be too new, then error. Callers of this function + // shouldn't be requesting such old shufflings for this `head_block_root`. + let head_block_epoch = head_block.slot.epoch(T::EthSpec::slots_per_epoch()); + if head_block_epoch > shuffling_epoch + 1 { + return Err(Error::InvalidStateForShuffling { + state_epoch: head_block_epoch, + shuffling_epoch, + }); + } + let state_read_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_READ_TIMES); @@ -5525,69 +5456,52 @@ impl<T: BeaconChainTypes> BeaconChain<T> { // to copy the head is liable to race-conditions. let head_state_opt = self.with_head(|head| { if head.beacon_block_root == head_block_root { - Ok(Some(( head.beacon_state .clone_with(CloneConfig::committee_caches_only()), head.beacon_state_root(), ))) + Ok(Some((head.beacon_state.clone(), head.beacon_state_root()))) } else { Ok::<_, Error>(None) } })?; + // Compute the `target_slot` to advance the block's state to. + // + // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to + // only advance into the first slot of the epoch prior to `shuffling_epoch`.
+ // + // If the `head_block` is already ahead of that slot, then we should load the state + // at that slot, as we've determined above that the `shuffling_epoch` cache will + // not be too far in the past. + let target_slot = std::cmp::max( + shuffling_epoch + .saturating_sub(1_u64) + .start_slot(T::EthSpec::slots_per_epoch()), + head_block.slot, + ); + // If the head state is useful for this request, use it. Otherwise, read a state from - // disk. + // disk that is advanced as close as possible to `target_slot`. let (mut state, state_root) = if let Some((state, state_root)) = head_state_opt { (state, state_root) } else { - let state_root = head_block.state_root; - let state = self + let (state_root, state) = self .store - .get_inconsistent_state_for_attestation_verification_only( - &state_root, - Some(head_block.slot), - )? + .get_advanced_state(head_block_root, target_slot, head_block.state_root)? .ok_or(Error::MissingBeaconState(head_block.state_root))?; (state, state_root) }; - /* - * IMPORTANT - * - * Since it's possible that - * `Store::get_inconsistent_state_for_attestation_verification_only` was used to obtain - * the state, we cannot rely upon the following fields: - * - * - `state.state_roots` - * - `state.block_roots` - * - * These fields should not be used for the rest of this function. - */ - metrics::stop_timer(state_read_timer); let state_skip_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_STATE_SKIP_TIMES); - // If the state is in an earlier epoch, advance it. If it's from a later epoch, reject - // it. + // If the state is still in an earlier epoch, advance it to the `target_slot` so + // that its next epoch committee cache matches the `shuffling_epoch`. if state.current_epoch() + 1 < shuffling_epoch { - // Since there's a one-epoch look-ahead on the attester shuffling, it suffices to - // only advance into the slot prior to the `shuffling_epoch`. - let target_slot = shuffling_epoch .saturating_sub(1_u64) .start_slot(T::EthSpec::slots_per_epoch()); - - // Advance the state into the required slot, using the "partial" method since the state - // roots are not relevant for the shuffling. + // Advance the state into the required slot, using the "partial" method since the + // state roots are not relevant for the shuffling. partial_state_advance(&mut state, Some(state_root), target_slot, &self.spec)?; - } else if state.current_epoch() > shuffling_epoch { - return Err(Error::InvalidStateForShuffling { - state_epoch: state.current_epoch(), - shuffling_epoch, - }); } - metrics::stop_timer(state_skip_timer); + + metrics::stop_timer(state_skip_timer); + let committee_building_timer = metrics::start_timer(&metrics::ATTESTATION_PROCESSING_COMMITTEE_BUILDING_TIMES); state.build_committee_cache(relative_epoch, &self.spec)?; - let committee_cache = state.take_committee_cache(relative_epoch)?; - let committee_cache = Arc::new(committee_cache); + let committee_cache = state.committee_cache(relative_epoch)?.clone(); let shuffling_decision_block = shuffling_id.shuffling_decision_block; self.shuffling_cache @@ -5643,7 +5556,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> { let beacon_block = self .store - .get_blinded_block(&beacon_block_root)? + .get_blinded_block(&beacon_block_root, None)?
.ok_or_else(|| {
Error::DBInconsistent(format!("Missing block {}", beacon_block_root))
})?;
diff --git a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
index 9b2edbd8b5d..f0df5a8dbbe 100644
--- a/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
+++ b/beacon_node/beacon_chain/src/beacon_fork_choice_store.rs
@@ -315,7 +315,7 @@ where
metrics::inc_counter(&metrics::BALANCES_CACHE_MISSES);
let justified_block = self
.store
- .get_blinded_block(&self.justified_checkpoint.root)
+ .get_blinded_block(&self.justified_checkpoint.root, None)
.map_err(Error::FailedToReadBlock)?
.ok_or(Error::MissingBlock(self.justified_checkpoint.root))?
.deconstruct()
diff --git a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
index e76a5a80588..6a7de1da9e6 100644
--- a/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
+++ b/beacon_node/beacon_chain/src/beacon_proposer_cache.rs
@@ -15,8 +15,7 @@ use smallvec::SmallVec;
use state_processing::state_advance::partial_state_advance;
use std::cmp::Ordering;
use types::{
- BeaconState, BeaconStateError, ChainSpec, CloneConfig, Epoch, EthSpec, Fork, Hash256, Slot,
- Unsigned,
+ BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Fork, Hash256, Slot, Unsigned,
};
/// The number of sets of proposer indices that should be cached.
@@ -143,10 +142,7 @@ pub fn compute_proposer_duties_from_head<T: BeaconChainTypes>(
let (mut state, head_state_root, head_block_root) = {
let head = chain.canonical_head.cached_head();
// Take a copy of the head state.
- let head_state = head
- .snapshot
- .beacon_state
- .clone_with(CloneConfig::committee_caches_only());
+ let head_state = head.snapshot.beacon_state.clone();
let head_state_root = head.head_state_root();
let head_block_root = head.head_block_root();
(head_state, head_state_root, head_block_root)
diff --git a/beacon_node/beacon_chain/src/beacon_snapshot.rs b/beacon_node/beacon_chain/src/beacon_snapshot.rs
index 7d89df98293..095354149b8 100644
--- a/beacon_node/beacon_chain/src/beacon_snapshot.rs
+++ b/beacon_node/beacon_chain/src/beacon_snapshot.rs
@@ -1,8 +1,8 @@
use serde_derive::Serialize;
use std::sync::Arc;
use types::{
- beacon_state::CloneConfig, AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256,
- SignedBeaconBlock,
+ AbstractExecPayload, BeaconState, EthSpec, FullPayload, Hash256, SignedBeaconBlock,
+ SignedBlindedBeaconBlock,
};
/// Represents some block and its associated state. Generally, this will be used for tracking the
@@ -14,6 +14,19 @@ pub struct BeaconSnapshot<E: EthSpec, Payload: AbstractExecPayload<E> = FullPayload<E>> {
pub beacon_state: BeaconState<E>,
}
+/// This snapshot is to be used for verifying a child of `self.beacon_block`.
+#[derive(Debug)]
+pub struct PreProcessingSnapshot<T: EthSpec> {
+ /// This state is equivalent to the `self.beacon_block.state_root()` state that has been
+ /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for
+ /// the application of another block.
+ pub pre_state: BeaconState<T>,
+ /// This value is only set to `Some` if the `pre_state` was *not* advanced forward.
+ pub beacon_state_root: Option<Hash256>,
+ pub beacon_block: SignedBlindedBeaconBlock<T>,
+ pub beacon_block_root: Hash256,
+}
+
impl<E: EthSpec, Payload: AbstractExecPayload<E>> BeaconSnapshot<E, Payload> {
/// Create a new checkpoint.
pub fn new( @@ -48,12 +61,4 @@ impl> BeaconSnapshot { self.beacon_block_root = beacon_block_root; self.beacon_state = beacon_state; } - - pub fn clone_with(&self, clone_config: CloneConfig) -> Self { - Self { - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - beacon_state: self.beacon_state.clone_with(clone_config), - } - } } diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index 484de841de5..6623932f596 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -18,6 +18,7 @@ type BlockRoot = Hash256; #[derive(Clone, Default)] pub struct Timestamps { pub observed: Option, + pub attestable: Option, pub imported: Option, pub set_as_head: Option, } @@ -26,6 +27,7 @@ pub struct Timestamps { #[derive(Default)] pub struct BlockDelays { pub observed: Option, + pub attestable: Option, pub imported: Option, pub set_as_head: Option, } @@ -35,6 +37,9 @@ impl BlockDelays { let observed = times .observed .and_then(|observed_time| observed_time.checked_sub(slot_start_time)); + let attestable = times + .attestable + .and_then(|attestable_time| attestable_time.checked_sub(slot_start_time)); let imported = times .imported .and_then(|imported_time| imported_time.checked_sub(times.observed?)); @@ -43,6 +48,7 @@ impl BlockDelays { .and_then(|set_as_head_time| set_as_head_time.checked_sub(times.imported?)); BlockDelays { observed, + attestable, imported, set_as_head, } @@ -99,6 +105,14 @@ impl BlockTimesCache { }; } + pub fn set_time_attestable(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { + let block_times = self + .cache + .entry(block_root) + .or_insert_with(|| BlockTimesCacheValue::new(slot)); + block_times.timestamps.attestable = Some(timestamp); + } + pub fn set_time_imported(&mut self, block_root: BlockRoot, slot: Slot, timestamp: Duration) { let block_times = self .cache diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index dba38af9bdd..bc044c22775 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -47,18 +47,18 @@ // returned alongside. 
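Before the `block_verification.rs` changes below, a note on the `attestable` timestamp added to `BlockTimesCache` above: `observed` and `attestable` are measured from the start of the slot, while `imported` and `set_as_head` are deltas from the previous stage. A self-contained sketch of that chaining; the `Timestamps`/`BlockDelays` shapes mirror the diff, everything else is illustrative:

```rust
use std::time::Duration;

#[derive(Clone, Default)]
struct Timestamps {
    observed: Option<Duration>,
    attestable: Option<Duration>,
    imported: Option<Duration>,
    set_as_head: Option<Duration>,
}

#[derive(Default, Debug)]
struct BlockDelays {
    observed: Option<Duration>,    // relative to the slot start
    attestable: Option<Duration>,  // relative to the slot start
    imported: Option<Duration>,    // relative to `observed`
    set_as_head: Option<Duration>, // relative to `imported`
}

fn delays(times: &Timestamps, slot_start: Duration) -> BlockDelays {
    BlockDelays {
        observed: times.observed.and_then(|t| t.checked_sub(slot_start)),
        attestable: times.attestable.and_then(|t| t.checked_sub(slot_start)),
        // `?` inside the closure yields `None` for a later delay whenever the
        // earlier timestamp was never recorded.
        imported: times.imported.and_then(|t| t.checked_sub(times.observed?)),
        set_as_head: times
            .set_as_head
            .and_then(|t| t.checked_sub(times.imported?)),
    }
}

fn main() {
    let times = Timestamps {
        observed: Some(Duration::from_secs(101)),
        attestable: Some(Duration::from_secs(102)),
        imported: Some(Duration::from_secs(103)),
        set_as_head: Some(Duration::from_secs(104)),
    };
    // Slot started at t = 100s: each stage took one second.
    println!("{:?}", delays(&times, Duration::from_secs(100)));
}
```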
#![allow(clippy::result_large_err)] +use crate::beacon_snapshot::PreProcessingSnapshot; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ is_optimistic_candidate_block, validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, }; -use crate::snapshot_cache::PreProcessingSnapshot; use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::{ beacon_chain::{ - BeaconForkChoice, ForkChoiceError, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, - MAXIMUM_GOSSIP_CLOCK_DISPARITY, VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, + BeaconForkChoice, ForkChoiceError, MAXIMUM_GOSSIP_CLOCK_DISPARITY, + VALIDATOR_PUBKEY_CACHE_LOCK_TIMEOUT, }, metrics, BeaconChain, BeaconChainError, BeaconChainTypes, }; @@ -84,15 +84,14 @@ use std::borrow::Cow; use std::fs; use std::io::Write; use std::sync::Arc; -use std::time::Duration; -use store::{Error as DBError, HotStateSummary, KeyValueStore, StoreOp}; +use store::{Error as DBError, KeyValueStore, StoreOp}; use task_executor::JoinHandle; use tree_hash::TreeHash; use types::ExecPayload; use types::{ - BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, CloneConfig, Epoch, - EthSpec, ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, - RelativeEpoch, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, + ExecutionBlockHash, Hash256, InconsistentFork, PublicKey, PublicKeyBytes, RelativeEpoch, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; pub const POS_PANDA_BANNER: &str = r#" @@ -537,7 +536,7 @@ pub fn signature_verify_chain_segment( } let (first_root, first_block) = chain_segment.remove(0); - let (mut parent, first_block) = load_parent(first_root, first_block, chain)?; + let (mut parent, first_block) = load_parent(first_block, chain)?; let slot = first_block.slot(); chain_segment.insert(0, (first_root, first_block)); @@ -554,7 +553,7 @@ pub fn signature_verify_chain_segment( )?; let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = get_signature_verifier::(&state, &pubkey_cache, &chain.spec); let mut signature_verified_blocks = Vec::with_capacity(chain_segment.len()); @@ -802,7 +801,7 @@ impl GossipVerifiedBlock { } else { // The proposer index was *not* cached and we must load the parent in order to determine // the proposer index. - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; debug!( chain.log, @@ -940,7 +939,7 @@ impl SignatureVerifiedBlock { // Check the anchor slot before loading the parent, to avoid spurious lookups. check_block_against_anchor_slot(block.message(), chain)?; - let (mut parent, block) = load_parent(block_root, block, chain)?; + let (mut parent, block) = load_parent(block, chain)?; // Reject any block that exceeds our limit on skipped slots. 
check_block_skip_slots(chain, parent.beacon_block.slot(), block.message())?; @@ -954,7 +953,8 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = + get_signature_verifier::(&state, &pubkey_cache, &chain.spec); let mut consensus_context = ConsensusContext::new(block.slot()).set_current_block_root(block_root); @@ -992,7 +992,7 @@ impl SignatureVerifiedBlock { let (mut parent, block) = if let Some(parent) = from.parent { (parent, from.block) } else { - load_parent(from.block_root, from.block, chain)? + load_parent(from.block, chain)? }; let state = cheap_state_advance_to_obtain_committees( @@ -1004,7 +1004,8 @@ impl SignatureVerifiedBlock { let pubkey_cache = get_validator_pubkey_cache(chain)?; - let mut signature_verifier = get_signature_verifier(&state, &pubkey_cache, &chain.spec); + let mut signature_verifier = + get_signature_verifier::(&state, &pubkey_cache, &chain.spec); // Gossip verification has already checked the proposer index. Use it to check the RANDAO // signature. @@ -1051,7 +1052,7 @@ impl IntoExecutionPendingBlock for SignatureVerifiedBloc let (parent, block) = if let Some(parent) = self.parent { (parent, self.block) } else { - load_parent(self.block_root, self.block, chain) + load_parent(self.block, chain) .map_err(|e| BlockSlashInfo::SignatureValid(header.clone(), e))? }; @@ -1245,7 +1246,7 @@ impl ExecutionPendingBlock { // Perform a sanity check on the pre-state. let parent_slot = parent.beacon_block.slot(); - if state.slot() < parent_slot || state.slot() > parent_slot + 1 { + if state.slot() < parent_slot { return Err(BeaconChainError::BadPreState { parent_root: parent.beacon_block_root, parent_slot, @@ -1278,29 +1279,9 @@ impl ExecutionPendingBlock { // Store the state immediately, marking it as temporary, and staging the deletion // of its temporary status as part of the larger atomic operation. let txn_lock = chain.store.hot_db.begin_rw_transaction(); - let state_already_exists = - chain.store.load_hot_state_summary(&state_root)?.is_some(); - - let state_batch = if state_already_exists { - // If the state exists, it could be temporary or permanent, but in neither case - // should we rewrite it or store a new temporary flag for it. We *will* stage - // the temporary flag for deletion because it's OK to double-delete the flag, - // and we don't mind if another thread gets there first. - vec![] - } else { - vec![ - if state.slot() % T::EthSpec::slots_per_epoch() == 0 { - StoreOp::PutState(state_root, &state) - } else { - StoreOp::PutStateSummary( - state_root, - HotStateSummary::new(&state_root, &state)?, - ) - }, - StoreOp::PutStateTemporaryFlag(state_root), - ] - }; - chain.store.do_atomically(state_batch)?; + chain + .store + .do_atomically(vec![StoreOp::PutState(state_root, &state)])?; drop(txn_lock); confirmed_state_roots.push(state_root); @@ -1358,8 +1339,7 @@ impl ExecutionPendingBlock { let committee_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_COMMITTEE); - state.build_committee_cache(RelativeEpoch::Previous, &chain.spec)?; - state.build_committee_cache(RelativeEpoch::Current, &chain.spec)?; + state.build_all_committee_caches(&chain.spec)?; metrics::stop_timer(committee_timer); @@ -1681,7 +1661,6 @@ fn verify_parent_block_is_known( /// whilst attempting the operation. 
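The `load_parent` implementation that follows now fetches the parent's state via `get_advanced_state`, which may hand back a state already advanced past the parent block's own slot. `PreProcessingSnapshot::beacon_state_root` is therefore only populated when the returned state is still the parent's exact post-state; an advanced state has a different root that has not been computed, and callers are presumably expected to recompute it only if needed. A distilled sketch of that rule, with a stand-in root type:

```rust
/// Stand-in for a 32-byte state root.
type Hash256 = [u8; 32];

/// Only report a known state root when the store handed back the exact
/// (unadvanced) parent state.
fn known_state_root(
    parent_state_root: Hash256,
    advanced_state_root: Hash256,
) -> Option<Hash256> {
    (parent_state_root == advanced_state_root).then_some(parent_state_root)
}

fn main() {
    let root = [1u8; 32];
    assert_eq!(known_state_root(root, root), Some(root));
    assert_eq!(known_state_root(root, [2u8; 32]), None);
}
```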
#[allow(clippy::type_complexity)] fn load_parent( - block_root: Hash256, block: Arc>, chain: &BeaconChain, ) -> Result< @@ -1691,8 +1670,6 @@ fn load_parent( ), BlockError, > { - let spec = &chain.spec; - // Reject any block if its parent is not known to fork choice. // // A block that is not in fork choice is either: @@ -1711,44 +1688,9 @@ fn load_parent( return Err(BlockError::ParentUnknown(block)); } - let block_delay = chain - .block_times_cache - .read() - .get_block_delays( - block_root, - chain - .slot_clock - .start_of(block.slot()) - .unwrap_or_else(|| Duration::from_secs(0)), - ) - .observed; - let db_read_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DB_READ); - let result = if let Some((snapshot, cloned)) = chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|mut snapshot_cache| { - snapshot_cache.get_state_for_block_processing( - block.parent_root(), - block.slot(), - block_delay, - spec, - ) - }) { - if cloned { - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_CLONES); - debug!( - chain.log, - "Cloned snapshot for late block/skipped slot"; - "slot" => %block.slot(), - "parent_slot" => %snapshot.beacon_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); - } - Ok((snapshot, block)) - } else { + let result = { // Load the blocks parent block from the database, returning invalid if that block is not // found. // @@ -1771,28 +1713,34 @@ fn load_parent( // Load the parent blocks state from the database, returning an error if it is not found. // It is an error because if we know the parent block we should also know the parent state. let parent_state_root = parent_block.state_root(); - let parent_state = chain - .get_state(&parent_state_root, Some(parent_block.slot()))? + let (advanced_state_root, state) = chain + .store + .get_advanced_state(block.parent_root(), block.slot(), parent_state_root)? .ok_or_else(|| { BeaconChainError::DBInconsistent(format!("Missing state {:?}", parent_state_root)) })?; - metrics::inc_counter(&metrics::BLOCK_PROCESSING_SNAPSHOT_CACHE_MISSES); - debug!( - chain.log, - "Missed snapshot cache"; - "slot" => block.slot(), - "parent_slot" => parent_block.slot(), - "parent_root" => ?block.parent_root(), - "block_delay" => ?block_delay, - ); + if block.slot() != state.slot() { + slog::warn!( + chain.log, + "Parent state is not advanced"; + "block_slot" => block.slot(), + "state_slot" => state.slot(), + ); + } + + let beacon_state_root = if parent_state_root == advanced_state_root { + Some(parent_state_root) + } else { + None + }; Ok(( PreProcessingSnapshot { beacon_block: parent_block, beacon_block_root: root, - pre_state: parent_state, - beacon_state_root: Some(parent_state_root), + pre_state: state, + beacon_state_root, }, block, )) @@ -1835,7 +1783,7 @@ fn cheap_state_advance_to_obtain_committees<'a, E: EthSpec>( parent_slot: state.slot(), }) } else { - let mut state = state.clone_with(CloneConfig::committee_caches_only()); + let mut state = state.clone(); let target_slot = block_epoch.start_slot(E::slots_per_epoch()); // Advance the state into the same epoch as the block. 
Use the "partial" method since state diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 84148fbfb18..c49af13ee7d 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -7,10 +7,8 @@ use crate::head_tracker::HeadTracker; use crate::migrate::{BackgroundMigrator, MigratorConfig}; use crate::persisted_beacon_chain::PersistedBeaconChain; use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; -use crate::snapshot_cache::{SnapshotCache, DEFAULT_SNAPSHOT_CACHE_SIZE}; use crate::timeout_rw_lock::TimeoutRwLock; use crate::validator_monitor::ValidatorMonitor; -use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::{ BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, @@ -84,7 +82,6 @@ pub struct BeaconChainBuilder { slot_clock: Option, shutdown_sender: Option>, head_tracker: Option, - validator_pubkey_cache: Option>, spec: ChainSpec, chain_config: ChainConfig, log: Option, @@ -125,7 +122,6 @@ where slot_clock: None, shutdown_sender: None, head_tracker: None, - validator_pubkey_cache: None, spec: TEthSpec::default_spec(), chain_config: ChainConfig::default(), log: None, @@ -281,7 +277,7 @@ where .ok_or("Fork choice not found in store")?; let genesis_block = store - .get_blinded_block(&chain.genesis_block_root) + .get_blinded_block(&chain.genesis_block_root, Some(Slot::new(0))) .map_err(|e| descriptive_db_error("genesis block", &e))? .ok_or("Genesis block not found in store")?; let genesis_state = store @@ -306,16 +302,12 @@ where .unwrap_or_else(OperationPool::new), ); - let pubkey_cache = ValidatorPubkeyCache::load_from_store(store) - .map_err(|e| format!("Unable to open persisted pubkey cache: {:?}", e))?; - self.genesis_block_root = Some(chain.genesis_block_root); self.genesis_state_root = Some(genesis_block.state_root()); self.head_tracker = Some( HeadTracker::from_ssz_container(&chain.ssz_head_tracker) .map_err(|e| format!("Failed to decode head tracker for database: {:?}", e))?, ); - self.validator_pubkey_cache = Some(pubkey_cache); self.fork_choice = Some(fork_choice); Ok(self) @@ -336,6 +328,7 @@ where .ok_or("set_genesis_state requires a store")?; let beacon_block = genesis_block(&mut beacon_state, &self.spec)?; + let blinded_block = beacon_block.clone_as_blinded(); beacon_state .build_all_caches(&self.spec) @@ -344,16 +337,19 @@ where let beacon_state_root = beacon_block.message().state_root(); let beacon_block_root = beacon_block.canonical_root(); + store + .update_finalized_state(beacon_state_root, beacon_block_root, beacon_state.clone()) + .map_err(|e| format!("Failed to set genesis state as finalized state: {:?}", e))?; store .put_state(&beacon_state_root, &beacon_state) .map_err(|e| format!("Failed to store genesis state: {:?}", e))?; store - .put_block(&beacon_block_root, beacon_block.clone()) + .put_cold_blinded_block(&beacon_block_root, &blinded_block) .map_err(|e| format!("Failed to store genesis block: {:?}", e))?; // Store the genesis block under the `ZERO_HASH` key. store - .put_block(&Hash256::zero(), beacon_block.clone()) + .put_cold_blinded_block(&Hash256::zero(), &blinded_block) .map_err(|e| { format!( "Failed to store genesis block under 0x00..00 alias: {:?}", @@ -468,8 +464,21 @@ where let (_, updated_builder) = self.set_genesis_state(genesis_state)?; self = updated_builder; + // Build the committee caches before storing. The database assumes that states have + // committee caches built before storing. 
+ weak_subj_state + .build_all_committee_caches(&self.spec) + .map_err(|e| format!("Error building caches on checkpoint state: {:?}", e))?; + // Write the state and block non-atomically, it doesn't matter if they're forgotten // about on a crash restart. + store + .update_finalized_state( + weak_subj_state_root, + weak_subj_block_root, + weak_subj_state.clone(), + ) + .map_err(|e| format!("Failed to set checkpoint state as finalized state: {:?}", e))?; store .put_state(&weak_subj_state_root, &weak_subj_state) .map_err(|e| format!("Failed to store weak subjectivity state: {:?}", e))?; @@ -480,7 +489,11 @@ where // Stage the database's metadata fields for atomic storage when `build` is called. // This prevents the database from restarting in an inconsistent state if the anchor // info or split point is written before the `PersistedBeaconChain`. - self.pending_io_batch.push(store.store_split_in_batch()); + self.pending_io_batch.push( + store + .store_split_in_batch() + .map_err(|e| format!("Failed to store split: {:?}", e))?, + ); self.pending_io_batch.push( store .init_anchor_info(weak_subj_block.message()) @@ -488,11 +501,14 @@ where ); // Store pruning checkpoint to prevent attempting to prune before the anchor state. - self.pending_io_batch - .push(store.pruning_checkpoint_store_op(Checkpoint { - root: weak_subj_block_root, - epoch: weak_subj_state.slot().epoch(TEthSpec::slots_per_epoch()), - })); + self.pending_io_batch.push( + store + .pruning_checkpoint_store_op(Checkpoint { + root: weak_subj_block_root, + epoch: weak_subj_state.slot().epoch(TEthSpec::slots_per_epoch()), + }) + .map_err(|e| format!("{:?}", e))?, + ); let snapshot = BeaconSnapshot { beacon_block_root: weak_subj_block_root, @@ -645,7 +661,7 @@ where // Try to decode the head block according to the current fork, if that fails, try // to backtrack to before the most recent fork. let (head_block_root, head_block, head_reverted) = - match store.get_full_block(&initial_head_block_root) { + match store.get_full_block(&initial_head_block_root, None) { Ok(Some(block)) => (initial_head_block_root, block, false), Ok(None) => return Err("Head block not found in store".into()), Err(StoreError::SszDecodeError(_)) => { @@ -717,10 +733,16 @@ where )); } - let validator_pubkey_cache = self.validator_pubkey_cache.map(Ok).unwrap_or_else(|| { - ValidatorPubkeyCache::new(&head_snapshot.beacon_state, store.clone()) - .map_err(|e| format!("Unable to init validator pubkey cache: {:?}", e)) - })?; + let validator_pubkey_cache = store.immutable_validators.clone(); + + // Update pubkey cache on first start in case we have started from genesis. 
+ let store_ops = validator_pubkey_cache + .write() + .import_new_pubkeys(&head_snapshot.beacon_state) + .map_err(|e| format!("error initializing pubkey cache: {e:?}"))?; + store + .do_atomically(store_ops) + .map_err(|e| format!("error writing validator store: {e:?}"))?; let migrator_config = self.store_migrator_config.unwrap_or_default(); let store_migrator = BackgroundMigrator::new( @@ -756,12 +778,12 @@ where Witness, >::persist_head_in_batch_standalone( genesis_block_root, &head_tracker - )); + ).map_err(|e| format!("{:?}", e))?); self.pending_io_batch.push(BeaconChain::< Witness, >::persist_fork_choice_in_batch_standalone( &fork_choice - )); + ).map_err(|e| format!("{:?}", e))?); store .hot_db .do_atomically(self.pending_io_batch) @@ -769,7 +791,6 @@ where let genesis_validators_root = head_snapshot.beacon_state.genesis_validators_root(); let genesis_time = head_snapshot.beacon_state.genesis_time(); - let head_for_snapshot_cache = head_snapshot.clone(); let canonical_head = CanonicalHead::new(fork_choice, Arc::new(head_snapshot)); let shuffling_cache_size = self.chain_config.shuffling_cache_size; @@ -844,10 +865,6 @@ where fork_choice_signal_rx, event_handler: self.event_handler, head_tracker, - snapshot_cache: TimeoutRwLock::new(SnapshotCache::new( - DEFAULT_SNAPSHOT_CACHE_SIZE, - head_for_snapshot_cache, - )), shuffling_cache: TimeoutRwLock::new(ShufflingCache::new( shuffling_cache_size, head_shuffling_ids, @@ -857,7 +874,7 @@ where beacon_proposer_cache: <_>::default(), block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), - validator_pubkey_cache: TimeoutRwLock::new(validator_pubkey_cache), + validator_pubkey_cache, attester_cache: <_>::default(), early_attester_cache: <_>::default(), shutdown_sender: self @@ -1112,7 +1129,7 @@ mod test { assert_eq!( chain .store - .get_blinded_block(&Hash256::zero()) + .get_blinded_block(&Hash256::zero(), None) .expect("should read db") .expect("should find genesis block"), block.clone_as_blinded(), @@ -1167,14 +1184,15 @@ mod test { } for v in state.validators() { - let creds = v.withdrawal_credentials.as_bytes(); + let creds = v.withdrawal_credentials(); + let creds = creds.as_bytes(); assert_eq!( creds[0], spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( &creds[1..], - &hash(&v.pubkey.as_ssz_bytes())[1..], + &hash(&v.pubkey().as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2b1f714362f..a1c2be20915 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -35,10 +35,7 @@ use crate::beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT; use crate::persisted_fork_choice::PersistedForkChoice; use crate::shuffling_cache::BlockShufflingIds; use crate::{ - beacon_chain::{ - BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, - BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT, FORK_CHOICE_DB_KEY, - }, + beacon_chain::{BeaconForkChoice, BeaconStore, OverrideForkchoiceUpdate, FORK_CHOICE_DB_KEY}, block_times_cache::BlockTimesCache, events::ServerSentEventHandler, metrics, @@ -296,7 +293,7 @@ impl CanonicalHead { let fork_choice_view = fork_choice.cached_fork_choice_view(); let beacon_block_root = fork_choice_view.head_block_root; let beacon_block = store - .get_full_block(&beacon_block_root)? + .get_full_block(&beacon_block_root, None)? 
.ok_or(Error::MissingBeaconBlock(beacon_block_root))?; let beacon_state_root = beacon_block.state_root(); let beacon_state = store @@ -465,9 +462,7 @@ impl BeaconChain { pub fn head_beacon_state_cloned(&self) -> BeaconState { // Don't clone whilst holding the read-lock, take an Arc-clone to reduce lock contention. let snapshot: Arc<_> = self.head_snapshot(); - snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()) + snapshot.beacon_state.clone() } /// Execute the fork choice algorithm and enthrone the result as the canonical head. @@ -651,44 +646,27 @@ impl BeaconChain { let new_cached_head = if new_view.head_block_root != old_view.head_block_root { metrics::inc_counter(&metrics::FORK_CHOICE_CHANGED_HEAD); - // Try and obtain the snapshot for `beacon_block_root` from the snapshot cache, falling - // back to a database read if that fails. - let new_snapshot = self - .snapshot_cache - .try_read_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .and_then(|snapshot_cache| { - snapshot_cache.get_cloned( - new_view.head_block_root, - CloneConfig::committee_caches_only(), - ) - }) - .map::, _>(Ok) - .unwrap_or_else(|| { - let beacon_block = self - .store - .get_full_block(&new_view.head_block_root)? - .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; - - let beacon_state_root = beacon_block.state_root(); - let beacon_state: BeaconState = self - .get_state(&beacon_state_root, Some(beacon_block.slot()))? - .ok_or(Error::MissingBeaconState(beacon_state_root))?; - - Ok(BeaconSnapshot { - beacon_block: Arc::new(beacon_block), - beacon_block_root: new_view.head_block_root, - beacon_state, - }) - }) - .and_then(|mut snapshot| { - // Regardless of where we got the state from, attempt to build the committee - // caches. - snapshot - .beacon_state - .build_all_committee_caches(&self.spec) - .map_err(Into::into) - .map(|()| snapshot) - })?; + let mut new_snapshot = { + let beacon_block = self + .store + .get_full_block(&new_view.head_block_root, None)? + .ok_or(Error::MissingBeaconBlock(new_view.head_block_root))?; + + let beacon_state_root = beacon_block.state_root(); + let beacon_state: BeaconState = self + .get_state(&beacon_state_root, Some(beacon_block.slot()))? + .ok_or(Error::MissingBeaconState(beacon_state_root))?; + + BeaconSnapshot { + beacon_block: Arc::new(beacon_block), + beacon_block_root: new_view.head_block_root, + beacon_state, + } + }; + + // Regardless of where we got the state from, attempt to build all the + // caches except the tree hash cache. + new_snapshot.beacon_state.build_all_caches(&self.spec)?; let new_cached_head = CachedHead { snapshot: Arc::new(new_snapshot), @@ -829,25 +807,6 @@ impl BeaconChain { .beacon_state .attester_shuffling_decision_root(self.genesis_block_root, RelativeEpoch::Current); - // Update the snapshot cache with the latest head value. - // - // This *could* be done inside `recompute_head`, however updating the head on the snapshot - // cache is not critical so we avoid placing it on a critical path. Note that this function - // will not return an error if the update fails, it will just log an error. 
- self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.update_head(new_snapshot.beacon_block_root); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "update head" - ); - }); - match BlockShufflingIds::try_from_head( new_snapshot.beacon_block_root, &new_snapshot.beacon_state, @@ -979,26 +938,6 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()), ); - self.snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .map(|mut snapshot_cache| { - snapshot_cache.prune(new_view.finalized_checkpoint.epoch); - debug!( - self.log, - "Snapshot cache pruned"; - "new_len" => snapshot_cache.len(), - "remaining_roots" => ?snapshot_cache.beacon_block_roots(), - ); - }) - .unwrap_or_else(|| { - error!( - self.log, - "Failed to obtain cache write lock"; - "lock" => "snapshot_cache", - "task" => "prune" - ); - }); - self.attester_cache .prune_below(new_view.finalized_checkpoint.epoch); @@ -1053,14 +992,14 @@ impl BeaconChain { } /// Return a database operation for writing fork choice to disk. - pub fn persist_fork_choice_in_batch(&self) -> KeyValueStoreOp { + pub fn persist_fork_choice_in_batch(&self) -> Result { Self::persist_fork_choice_in_batch_standalone(&self.canonical_head.fork_choice_read_lock()) } /// Return a database operation for writing fork choice to disk. pub fn persist_fork_choice_in_batch_standalone( fork_choice: &BeaconForkChoice, - ) -> KeyValueStoreOp { + ) -> Result { let persisted_fork_choice = PersistedForkChoice { fork_choice: fork_choice.to_persisted(), fork_choice_store: fork_choice.fc_store().to_persisted(), @@ -1447,6 +1386,7 @@ fn observe_head_block_delays( block_delay: block_delay_total, observed_delay: block_delays.observed, imported_delay: block_delays.imported, + attestable_delay: block_delays.attestable, set_as_head_delay: block_delays.set_as_head, execution_optimistic: head_block_is_optimistic, })); diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index e789b54a21b..fc98229a1b9 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -87,13 +87,10 @@ pub enum BeaconChainError { ValidatorPubkeyCacheLockTimeout, SnapshotCacheLockTimeout, IncorrectStateForAttestation(RelativeEpochError), - InvalidValidatorPubkeyBytes(bls::Error), ValidatorPubkeyCacheIncomplete(usize), SignatureSetError(SignatureSetError), BlockSignatureVerifierError(state_processing::block_signature_verifier::Error), BlockReplayError(BlockReplayError), - DuplicateValidatorPublicKey, - ValidatorPubkeyCacheError(String), ValidatorIndexUnknown(usize), ValidatorPubkeyUnknown(PublicKeyBytes), OpPoolError(OpPoolError), @@ -262,6 +259,7 @@ pub enum BlockProductionError { TerminalPoWBlockLookupFailed(execution_layer::Error), GetPayloadFailed(execution_layer::Error), FailedToReadFinalizedBlock(store::Error), + FailedToLoadState(store::Error), MissingFinalizedBlock(Hash256), BlockTooLarge(usize), ShuttingDown, diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index 8b6c6b37409..9d57c673d08 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -178,8 +178,8 @@ impl StoreItem for SszEth1 { DBColumn::Eth1Cache } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn 
from_store_bytes(bytes: &[u8]) -> Result { @@ -1021,6 +1021,7 @@ mod test { mod collect_valid_votes { use super::*; + use types::VList; fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { (0..n) @@ -1068,12 +1069,14 @@ mod test { let votes_to_consider = get_eth1_data_vec(slots, 0); - *state.eth1_data_votes_mut() = votes_to_consider[0..slots as usize / 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = VList::new( + votes_to_consider[0..slots as usize / 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect()); @@ -1097,12 +1100,14 @@ mod test { .expect("should have some eth1 data") .clone(); - *state.eth1_data_votes_mut() = vec![duplicate_eth1_data.clone(); 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>() - .into(); + *state.eth1_data_votes_mut() = VList::new( + vec![duplicate_eth1_data.clone(); 4] + .iter() + .map(|(eth1_data, _)| eth1_data) + .cloned() + .collect::>(), + ) + .unwrap(); let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); assert_votes!( diff --git a/beacon_node/beacon_chain/src/fork_revert.rs b/beacon_node/beacon_chain/src/fork_revert.rs index 084ae95e096..d25cecda55a 100644 --- a/beacon_node/beacon_chain/src/fork_revert.rs +++ b/beacon_node/beacon_chain/src/fork_revert.rs @@ -105,7 +105,7 @@ pub fn reset_fork_choice_to_finalization, Cold: It let finalized_checkpoint = head_state.finalized_checkpoint(); let finalized_block_root = finalized_checkpoint.root; let finalized_block = store - .get_full_block(&finalized_block_root) + .get_full_block(&finalized_block_root, None) .map_err(|e| format!("Error loading finalized block: {:?}", e))? .ok_or_else(|| { format!( diff --git a/beacon_node/beacon_chain/src/head_tracker.rs b/beacon_node/beacon_chain/src/head_tracker.rs index 3fa577ff93d..64e43ea99d6 100644 --- a/beacon_node/beacon_chain/src/head_tracker.rs +++ b/beacon_node/beacon_chain/src/head_tracker.rs @@ -83,8 +83,8 @@ impl PartialEq for HeadTracker { /// This is used when persisting the state of the `BeaconChain` to disk. #[derive(Encode, Decode, Clone)] pub struct SszHeadTracker { - roots: Vec, - slots: Vec, + pub roots: Vec, + pub slots: Vec, } impl SszHeadTracker { diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index 5f590735004..a5045224cdf 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -9,7 +9,7 @@ use std::borrow::Cow; use std::iter; use std::sync::Arc; use std::time::Duration; -use store::{chunked_vector::BlockRoots, AnchorInfo, ChunkWriter, KeyValueStore}; +use store::{get_key_for_col, AnchorInfo, DBColumn, KeyValueStore, KeyValueStoreOp}; use types::{Hash256, SignedBlindedBeaconBlock, Slot}; /// Use a longer timeout on the pubkey cache. @@ -89,11 +89,8 @@ impl BeaconChain { let mut expected_block_root = anchor_info.oldest_block_parent; let mut prev_block_slot = anchor_info.oldest_block_slot; - let mut chunk_writer = - ChunkWriter::::new(&self.store.cold_db, prev_block_slot.as_usize())?; let mut cold_batch = Vec::with_capacity(blocks.len()); - let mut hot_batch = Vec::with_capacity(blocks.len()); for block in blocks_to_import.iter().rev() { // Check chain integrity. 
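The next `historical_blocks.rs` hunk replaces the chunked `block_roots` writer with one freezer-DB entry per slot, keyed by the slot's big-endian bytes so that lexicographic key order matches numeric slot order. A sketch of the keying scheme, with a `BTreeMap` standing in for the on-disk column:

```rust
use std::collections::BTreeMap;

type Hash256 = [u8; 32];

/// Big-endian encoding keeps byte-wise ordering identical to numeric slot
/// ordering, so range scans over the column walk slots in order.
fn block_root_key(slot: u64) -> [u8; 8] {
    slot.to_be_bytes()
}

fn main() {
    let mut column: BTreeMap<[u8; 8], Hash256> = BTreeMap::new();
    // A block at slot 5 followed by skip slots 6 and 7: the same root is
    // stored at every slot, mirroring how the diff fills skip slots.
    let root = [0xab; 32];
    for slot in 5..8 {
        column.insert(block_root_key(slot), root);
    }
    // Iteration order is slot order.
    let keys: Vec<_> = column.keys().cloned().collect();
    let mut sorted = keys.clone();
    sorted.sort();
    assert_eq!(keys, sorted);
}
```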
@@ -109,11 +106,14 @@ impl BeaconChain { // Store block in the hot database without payload. self.store - .blinded_block_as_kv_store_ops(&block_root, block, &mut hot_batch); + .blinded_block_as_cold_kv_store_ops(&block_root, block, &mut cold_batch)?; // Store block roots, including at all skip slots in the freezer DB. - for slot in (block.slot().as_usize()..prev_block_slot.as_usize()).rev() { - chunk_writer.set(slot, block_root, &mut cold_batch)?; + for slot in (block.slot().as_usize() + 1..prev_block_slot.as_usize()).rev() { + cold_batch.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col(DBColumn::BeaconBlockRoots.into(), &slot.to_be_bytes()), + block_root.as_bytes().to_vec(), + )); } prev_block_slot = block.slot(); @@ -123,17 +123,18 @@ impl BeaconChain { // anchor slot to 0 to indicate completion. if expected_block_root == self.genesis_block_root { let genesis_slot = self.spec.genesis_slot; - chunk_writer.set( - genesis_slot.as_usize(), - self.genesis_block_root, - &mut cold_batch, - )?; + cold_batch.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col( + DBColumn::BeaconBlockRoots.into(), + &genesis_slot.as_u64().to_be_bytes(), + ), + self.genesis_block_root.as_bytes().to_vec(), + )); prev_block_slot = genesis_slot; expected_block_root = Hash256::zero(); break; } } - chunk_writer.write(&mut cold_batch)?; // Verify signatures in one batch, holding the pubkey cache lock for the shortest duration // possible. For each block fetch the parent root from its successor. Slicing from index 1 @@ -177,10 +178,7 @@ impl BeaconChain { drop(verify_timer); drop(sig_timer); - // Write the I/O batches to disk, writing the blocks themselves first, as it's better - // for the hot DB to contain extra blocks than for the cold DB to point to blocks that - // do not exist. - self.store.hot_db.do_atomically(hot_batch)?; + // Write the I/O batch to disk. self.store.cold_db.do_atomically(cold_batch)?; // Update the anchor. diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d672c168288..21114de1d09 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -41,14 +41,12 @@ mod pre_finalization_cache; pub mod proposer_prep_service; pub mod schema_change; pub mod shuffling_cache; -mod snapshot_cache; pub mod state_advance_timer; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; mod timeout_rw_lock; pub mod validator_monitor; -pub mod validator_pubkey_cache; pub use self::beacon_chain::{ AttestationProcessingOutcome, BeaconChain, BeaconChainTypes, BeaconStore, ChainSegmentResult, @@ -82,3 +80,13 @@ pub use state_processing::per_block_processing::errors::{ pub use store; pub use timeout_rw_lock::TimeoutRwLock; pub use types; + +pub mod validator_pubkey_cache { + use crate::BeaconChainTypes; + + pub type ValidatorPubkeyCache = store::ValidatorPubkeyCache< + ::EthSpec, + ::HotStore, + ::ColdStore, + >; +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index d0f695062f3..1e90dcb7096 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -4,12 +4,8 @@ use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; use lazy_static::lazy_static; pub use lighthouse_metrics::*; use slot_clock::SlotClock; -use std::time::Duration; use types::{BeaconState, Epoch, EthSpec, Hash256, Slot}; -/// The maximum time to wait for the snapshot cache lock during a metrics scrape. 
-const SNAPSHOT_CACHE_TIMEOUT: Duration = Duration::from_millis(100); - lazy_static! { /* * Block Processing @@ -813,7 +809,10 @@ lazy_static! { "Number of attester slashings seen", &["src", "validator"] ); +} +// Fourth lazy-static block is used to account for macro recursion limit. +lazy_static! { /* * Block Delay Metrics */ @@ -834,7 +833,11 @@ lazy_static! { "Duration between the time the block was imported and the time when it was set as head.", // [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5] decimal_buckets(-2,-1) - ); + ); + pub static ref BEACON_BLOCK_HEAD_ATTESTABLE_DELAY_TIME: Result = try_create_histogram( + "beacon_block_head_attestable_delay_time", + "Duration between the start of the slot and the time at which the block could be attested to.", + ); pub static ref BEACON_BLOCK_HEAD_SLOT_START_DELAY_TIME: Result = try_create_histogram_with_buckets( "beacon_block_head_slot_start_delay_time", "Duration between the start of the block's slot and the time when it was set as head.", @@ -846,6 +849,22 @@ lazy_static! { "Triggered when the duration between the start of the block's slot and the current time \ will result in failed attestations.", ); + pub static ref BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_LATE: Result = try_create_int_counter( + "beacon_block_head_missed_att_deadline_late", + "Total number of delayed head blocks that arrived late" + ); + pub static ref BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_BORDERLINE: Result = try_create_int_counter( + "beacon_block_head_missed_att_deadline_borderline", + "Total number of delayed head blocks that arrived very close to the deadline" + ); + pub static ref BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_SLOW: Result = try_create_int_counter( + "beacon_block_head_missed_att_deadline_slow", + "Total number of delayed head blocks that arrived on time but not processed in time" + ); + pub static ref BEACON_BLOCK_HEAD_MISSED_ATT_DEADLINE_OTHER: Result = try_create_int_counter( + "beacon_block_head_missed_att_deadline_other", + "Total number of delayed head blocks that were not late and not slow to process" + ); /* * General block metrics @@ -855,10 +874,7 @@ lazy_static! { "gossip_beacon_block_skipped_slots", "For each gossip blocks, the number of skip slots between it and its parent" ); -} -// Fourth lazy-static block is used to account for macro recursion limit. -lazy_static! 
{ /* * Sync Committee Message Verification */ @@ -1015,15 +1031,10 @@ pub fn scrape_for_metrics(beacon_chain: &BeaconChain) { let attestation_stats = beacon_chain.op_pool.attestation_stats(); - if let Some(snapshot_cache) = beacon_chain - .snapshot_cache - .try_write_for(SNAPSHOT_CACHE_TIMEOUT) - { - set_gauge( - &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, - snapshot_cache.len() as i64, - ) - } + set_gauge_by_usize( + &BLOCK_PROCESSING_SNAPSHOT_CACHE_SIZE, + beacon_chain.store.state_cache_len(), + ); if let Some((size, num_lookups)) = beacon_chain.pre_finalization_block_cache.metrics() { set_gauge_by_usize(&PRE_FINALIZATION_BLOCK_CACHE_SIZE, size); @@ -1135,7 +1146,7 @@ fn scrape_head_state(state: &BeaconState, state_root: Hash256) { num_active += 1; } - if v.slashed { + if v.slashed() { num_slashed += 1; } diff --git a/beacon_node/beacon_chain/src/migrate.rs b/beacon_node/beacon_chain/src/migrate.rs index 66f082742eb..527e0c51de2 100644 --- a/beacon_node/beacon_chain/src/migrate.rs +++ b/beacon_node/beacon_chain/src/migrate.rs @@ -3,10 +3,11 @@ use crate::errors::BeaconChainError; use crate::head_tracker::{HeadTracker, SszHeadTracker}; use crate::persisted_beacon_chain::{PersistedBeaconChain, DUMMY_CANONICAL_HEAD_BLOCK_ROOT}; use parking_lot::Mutex; -use slog::{debug, error, info, warn, Logger}; +use serde::{Deserialize, Serialize}; +use slog::{debug, error, info, trace, warn, Logger}; use std::collections::{HashMap, HashSet}; use std::mem; -use std::sync::{mpsc, Arc}; +use std::sync::Arc; use std::thread; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use store::hot_cold_store::{migrate_database, HotColdDBError}; @@ -24,21 +25,49 @@ const MAX_COMPACTION_PERIOD_SECONDS: u64 = 604800; const MIN_COMPACTION_PERIOD_SECONDS: u64 = 7200; /// Compact after a large finality gap, if we respect `MIN_COMPACTION_PERIOD_SECONDS`. const COMPACTION_FINALITY_DISTANCE: u64 = 1024; +const BLOCKS_PER_RECONSTRUCTION: usize = 8192 * 4; + +/// Default number of epochs to wait between finalization migrations. +pub const DEFAULT_EPOCHS_PER_RUN: u64 = 4; /// The background migrator runs a thread to perform pruning and migrate state from the hot /// to the cold database. pub struct BackgroundMigrator, Cold: ItemStore> { db: Arc>, - #[allow(clippy::type_complexity)] - tx_thread: Option, thread::JoinHandle<()>)>>, + /// Record of when the last migration ran, for enforcing `epochs_per_run`. + prev_migration: Arc>, + tx_thread: Option< + Mutex<( + crossbeam_channel::Sender, + thread::JoinHandle<()>, + )>, + >, /// Genesis block root, for persisting the `PersistedBeaconChain`. genesis_block_root: Hash256, log: Logger, } -#[derive(Debug, Default, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct MigratorConfig { pub blocking: bool, + /// Run migrations at most once per `epochs_per_run`. + /// + /// If set to 0, then run every finalization. + pub epochs_per_run: u64, +} + +impl Default for MigratorConfig { + fn default() -> Self { + Self { + blocking: false, + epochs_per_run: DEFAULT_EPOCHS_PER_RUN, + } + } +} + +pub struct PrevMigration { + epoch: Option, + epochs_per_run: u64, } impl MigratorConfig { @@ -46,6 +75,11 @@ impl MigratorConfig { self.blocking = true; self } + + pub fn epochs_per_run(mut self, epochs_per_run: u64) -> Self { + self.epochs_per_run = epochs_per_run; + self + } } /// Pruning can be successful, or in rare cases deferred to a later point. 
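The `MigratorConfig`/`PrevMigration` additions above throttle finalization migrations: a migration runs at most once per `epochs_per_run`, with the last run recorded only in memory. A compact sketch of the check the worker thread applies further down, simplified to plain `u64` epochs:

```rust
/// In-memory record of the last finalization migration, as in the diff.
/// Not persisted, so the first finalization after startup always runs.
struct PrevMigration {
    epoch: Option<u64>,
    epochs_per_run: u64,
}

impl PrevMigration {
    /// Returns true (and records the epoch) if enough epochs have passed
    /// since the previous run. `epochs_per_run == 0` runs every time.
    fn should_run(&mut self, epoch: u64) -> bool {
        if let Some(prev) = self.epoch {
            if epoch < prev + self.epochs_per_run {
                return false;
            }
        }
        self.epoch = Some(epoch);
        true
    }
}

fn main() {
    let mut prev = PrevMigration { epoch: None, epochs_per_run: 4 };
    assert!(prev.should_run(10));  // first run always goes ahead
    assert!(!prev.should_run(12)); // deferred: 12 < 10 + 4
    assert!(prev.should_run(14));  // 14 >= 10 + 4
}
```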
@@ -83,11 +117,13 @@ pub enum PruningError { } /// Message sent to the migration thread containing the information it needs to run. +#[derive(Debug)] pub enum Notification { Finalization(FinalizationNotification), Reconstruction, } +#[derive(Clone, Debug)] pub struct FinalizationNotification { finalized_state_root: BeaconStateHash, finalized_checkpoint: Checkpoint, @@ -95,6 +131,18 @@ pub struct FinalizationNotification { genesis_block_root: Hash256, } +impl Notification { + pub fn epoch(&self) -> Option { + match self { + Notification::Finalization(FinalizationNotification { + finalized_checkpoint, + .. + }) => Some(finalized_checkpoint.epoch), + Notification::Reconstruction => None, + } + } +} + impl, Cold: ItemStore> BackgroundMigrator { /// Create a new `BackgroundMigrator` and spawn its thread if necessary. pub fn new( @@ -103,14 +151,23 @@ impl, Cold: ItemStore> BackgroundMigrator Self { + let prev_migration = Arc::new(Mutex::new(PrevMigration { + epoch: None, + epochs_per_run: config.epochs_per_run, + })); let tx_thread = if config.blocking { None } else { - Some(Mutex::new(Self::spawn_thread(db.clone(), log.clone()))) + Some(Mutex::new(Self::spawn_thread( + db.clone(), + prev_migration.clone(), + log.clone(), + ))) }; Self { db, tx_thread, + prev_migration, genesis_block_root, log, } @@ -153,7 +210,7 @@ impl, Cold: ItemStore> BackgroundMigrator>, log: &Logger) { - if let Err(e) = db.reconstruct_historic_states() { + if let Err(e) = db.reconstruct_historic_states(None) { error!( log, "State reconstruction failed"; @@ -173,7 +230,11 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator state, @@ -260,7 +322,12 @@ impl, Cold: ItemStore> BackgroundMigrator {} Err(Error::HotColdDBError(HotColdDBError::FreezeSlotUnaligned(slot))) => { debug!( @@ -273,7 +340,7 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", e) + "error" => ?e ); return; } @@ -297,34 +364,105 @@ impl, Cold: ItemStore> BackgroundMigrator>, + prev_migration: Arc>, log: Logger, - ) -> (mpsc::Sender, thread::JoinHandle<()>) { - let (tx, rx) = mpsc::channel(); + ) -> ( + crossbeam_channel::Sender, + thread::JoinHandle<()>, + ) { + let (tx, rx) = crossbeam_channel::unbounded(); + let tx_thread = tx.clone(); let thread = thread::spawn(move || { - while let Ok(notif) = rx.recv() { - // Read the rest of the messages in the channel, preferring any reconstruction - // notification, or the finalization notification with the greatest finalized epoch. - let notif = - rx.try_iter() - .fold(notif, |best, other: Notification| match (&best, &other) { - (Notification::Reconstruction, _) - | (_, Notification::Reconstruction) => Notification::Reconstruction, - ( - Notification::Finalization(fin1), - Notification::Finalization(fin2), - ) => { - if fin2.finalized_checkpoint.epoch > fin1.finalized_checkpoint.epoch - { - other - } else { - best - } - } - }); + let mut sel = crossbeam_channel::Select::new(); + sel.recv(&rx); + + loop { + // Block until sth is in queue + let _queue_size = sel.ready(); + let queue: Vec = rx.try_iter().collect(); + debug!( + log, + "New worker thread poll"; + "queue" => ?queue + ); + + // Find a reconstruction notification and best finalization notification. 
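The rewritten worker loop below drains the channel and coalesces the backlog: any `Reconstruction` notification is handled once, and of the `Finalization` notifications only the one with the highest finalized epoch survives. A stand-alone sketch of the same selection over a drained queue; the enum shape mirrors the diff, with the payload simplified to an epoch number:

```rust
#[derive(Debug, PartialEq)]
enum Notification {
    Reconstruction,
    Finalization { finalized_epoch: u64 },
}

/// Coalesce a drained queue: report whether any reconstruction work is
/// pending, plus the single most-final finalization notification.
fn coalesce(queue: &[Notification]) -> (bool, Option<u64>) {
    let reconstruct = queue
        .iter()
        .any(|n| matches!(n, Notification::Reconstruction));
    let best_finalized = queue
        .iter()
        .filter_map(|n| match n {
            Notification::Reconstruction => None,
            Notification::Finalization { finalized_epoch } => Some(*finalized_epoch),
        })
        .max();
    (reconstruct, best_finalized)
}

fn main() {
    use Notification::*;
    let queue = [
        Finalization { finalized_epoch: 8 },
        Reconstruction,
        Finalization { finalized_epoch: 9 },
    ];
    assert_eq!(coalesce(&queue), (true, Some(9)));
}
```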
+ let reconstruction_notif = queue + .iter() + .find(|n| matches!(n, Notification::Reconstruction)); + let migrate_notif = queue + .iter() + .filter_map(|n| match n { + // should not be present anymore + Notification::Reconstruction => None, + Notification::Finalization(f) => Some(f), + }) + .max_by_key(|f| f.finalized_checkpoint.epoch); + + // Do a bit of state reconstruction first if required. + if let Some(_) = reconstruction_notif { + let timer = std::time::Instant::now(); + + match db.reconstruct_historic_states(Some(BLOCKS_PER_RECONSTRUCTION)) { + Err(Error::StateReconstructionDidNotComplete) => { + info!( + log, + "Finished reconstruction batch"; + "batch_time_ms" => timer.elapsed().as_millis() + ); + // Handle send error + let _ = tx_thread.send(Notification::Reconstruction); + } + Err(e) => { + error!( + log, + "State reconstruction failed"; + "error" => ?e, + ); + } + Ok(()) => { + info!( + log, + "Finished state reconstruction"; + "batch_time_ms" => timer.elapsed().as_millis() + ); + } + } + } + + // Do the finalization migration. + if let Some(notif) = migrate_notif { + let timer = std::time::Instant::now(); + + let mut prev_migration = prev_migration.lock(); - match notif { - Notification::Reconstruction => Self::run_reconstruction(db.clone(), &log), - Notification::Finalization(fin) => Self::run_migration(db.clone(), fin, &log), + // Do not run too frequently. + let epoch = notif.finalized_checkpoint.epoch; + if let Some(prev_epoch) = prev_migration.epoch { + if epoch < prev_epoch + prev_migration.epochs_per_run { + debug!( + log, + "Finalization migration deferred"; + "last_finalized_epoch" => prev_epoch, + "new_finalized_epoch" => epoch, + "epochs_per_run" => prev_migration.epochs_per_run, + ); + continue; + } + } + + // We intend to run at this epoch, update the in-memory record of the last epoch + // at which we ran. This value isn't tracked on disk so we will always migrate + // on the first finalization after startup. 
+ prev_migration.epoch = Some(epoch); + + Self::run_migration(db.clone(), notif.to_owned(), &log); + + info!( + log, + "Finished finalization migration"; + "running_time_ms" => timer.elapsed().as_millis() + ); } } }); @@ -407,15 +545,14 @@ impl, Cold: ItemStore> BackgroundMigrator = HashSet::new(); - let mut abandoned_states: HashSet<(Slot, BeaconStateHash)> = HashSet::new(); let mut abandoned_heads: HashSet = HashSet::new(); let heads = head_tracker.heads(); debug!( log, "Extra pruning information"; - "old_finalized_root" => format!("{:?}", old_finalized_checkpoint.root), - "new_finalized_root" => format!("{:?}", new_finalized_checkpoint.root), + "old_finalized_root" => ?old_finalized_checkpoint.root, + "new_finalized_root" => ?new_finalized_checkpoint.root, "head_count" => heads.len(), ); @@ -424,7 +561,7 @@ impl, Cold: ItemStore> BackgroundMigrator block.state_root(), Ok(None) => { return Err(BeaconStateError::MissingBeaconBlock(head_hash.into()).into()) @@ -443,14 +580,15 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator { if slot > new_finalized_slot { - potentially_abandoned_blocks.push(( - slot, - Some(block_root), - Some(state_root), - )); + potentially_abandoned_blocks.insert(block_root); } else if slot >= old_finalized_slot { return Err(PruningError::MissingInfoForCanonicalChain { slot }.into()); } else { @@ -474,7 +608,7 @@ impl, Cold: ItemStore> BackgroundMigrator format!("{:?}", head_hash), + "head_block_root" => ?head_hash, "head_slot" => head_slot, ); potentially_abandoned_head.take(); @@ -502,26 +636,14 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator format!("{:?}", abandoned_head), + "head_block_root" => ?abandoned_head, "head_slot" => head_slot, ); abandoned_heads.insert(abandoned_head); - abandoned_blocks.extend( - potentially_abandoned_blocks - .iter() - .filter_map(|(_, maybe_block_hash, _)| *maybe_block_hash), - ); - abandoned_states.extend(potentially_abandoned_blocks.iter().filter_map( - |(slot, _, maybe_state_hash)| maybe_state_hash.map(|sr| (*slot, sr)), - )); + abandoned_blocks.extend(potentially_abandoned_blocks); } } @@ -565,7 +680,8 @@ impl, Cold: ItemStore> BackgroundMigrator> = abandoned_blocks + let num_deleted_blocks = abandoned_blocks.len(); + let mut batch: Vec> = abandoned_blocks .into_iter() .map(Into::into) .flat_map(|block_root: Hash256| { @@ -574,15 +690,8 @@ impl, Cold: ItemStore> BackgroundMigrator, Cold: ItemStore> BackgroundMigrator num_deleted_blocks, + ); - store.hot_db.do_atomically(kv_batch)?; - debug!(log, "Database pruning complete"); + // Do a separate pass to clean up irrelevant states. + let mut state_delete_batch = vec![]; + for res in store.iter_hot_state_summaries() { + let (state_root, summary) = res?; + + if summary.slot <= new_finalized_slot { + // If state root doesn't match state root from canonical chain, or this slot + // is not part of the recently finalized chain, then delete. 
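The condition spelled out in the comment above is applied by the code that follows: every hot state summary at or below the new finalized slot is deleted unless its root is the canonical state root recorded for that slot. A sketch of the decision rule, with a plain map standing in for the store iterator (the diff's map values are tuples; this simplifies them to the state root):

```rust
use std::collections::HashMap;

type Hash256 = [u8; 32];

/// Keep a finalized-range state only if it is the canonical state for its
/// slot; states at slots not on the newly finalized chain are also deleted.
fn should_delete(
    newly_finalized_chain: &HashMap<u64, Hash256>, // slot -> canonical state root
    slot: u64,
    state_root: Hash256,
    new_finalized_slot: u64,
) -> bool {
    slot <= new_finalized_slot
        && newly_finalized_chain
            .get(&slot)
            .map_or(true, |canonical| *canonical != state_root)
}

fn main() {
    let mut chain = HashMap::new();
    chain.insert(100, [0xaa; 32]);
    assert!(!should_delete(&chain, 100, [0xaa; 32], 128)); // canonical: keep
    assert!(should_delete(&chain, 100, [0xbb; 32], 128));  // forked state: delete
    assert!(should_delete(&chain, 99, [0xcc; 32], 128));   // unknown slot: delete
    assert!(!should_delete(&chain, 200, [0xdd; 32], 128)); // above split: keep
}
```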
+ if newly_finalized_chain + .get(&summary.slot) + .map_or(true, |(_, canonical_state_root)| { + state_root != Hash256::from(*canonical_state_root) + }) + { + trace!( + log, + "Deleting state"; + "state_root" => ?state_root, + "slot" => summary.slot, + ); + state_delete_batch.push(StoreOp::DeleteState(state_root, Some(summary.slot))); + } + } + } + let num_deleted_states = state_delete_batch.len(); + store.do_atomically(state_delete_batch)?; + debug!( + log, + "Database state pruning complete"; + "num_deleted_states" => num_deleted_states, + ); Ok(PruningOutcome::Successful { old_finalized_checkpoint, diff --git a/beacon_node/beacon_chain/src/otb_verification_service.rs b/beacon_node/beacon_chain/src/otb_verification_service.rs index 805b61dd9c0..8b3e28ee1c4 100644 --- a/beacon_node/beacon_chain/src/otb_verification_service.rs +++ b/beacon_node/beacon_chain/src/otb_verification_service.rs @@ -85,8 +85,8 @@ impl StoreItem for OptimisticTransitionBlock { OTBColumn } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -119,10 +119,13 @@ pub fn start_otb_verification_service( pub fn load_optimistic_transition_blocks( chain: &BeaconChain, ) -> Result, StoreError> { - process_results(chain.store.hot_db.iter_column(OTBColumn), |iter| { - iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) - .collect() - })? + process_results( + chain.store.hot_db.iter_column::(OTBColumn), + |iter| { + iter.map(|(_, bytes)| OptimisticTransitionBlock::from_store_bytes(&bytes)) + .collect() + }, + )? } #[derive(Debug)] diff --git a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs index adb68def0df..6bfc09c1f76 100644 --- a/beacon_node/beacon_chain/src/persisted_beacon_chain.rs +++ b/beacon_node/beacon_chain/src/persisted_beacon_chain.rs @@ -26,8 +26,8 @@ impl StoreItem for PersistedBeaconChain { DBColumn::BeaconChain } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { diff --git a/beacon_node/beacon_chain/src/persisted_fork_choice.rs b/beacon_node/beacon_chain/src/persisted_fork_choice.rs index 8297ea93457..a182cb358d6 100644 --- a/beacon_node/beacon_chain/src/persisted_fork_choice.rs +++ b/beacon_node/beacon_chain/src/persisted_fork_choice.rs @@ -45,11 +45,11 @@ macro_rules! impl_store_item { DBColumn::ForkChoice } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } - fn from_store_bytes(bytes: &[u8]) -> std::result::Result { + fn from_store_bytes(bytes: &[u8]) -> Result { Self::from_ssz_bytes(bytes).map_err(Into::into) } } diff --git a/beacon_node/beacon_chain/src/pre_finalization_cache.rs b/beacon_node/beacon_chain/src/pre_finalization_cache.rs index 112394bb184..ca957af2135 100644 --- a/beacon_node/beacon_chain/src/pre_finalization_cache.rs +++ b/beacon_node/beacon_chain/src/pre_finalization_cache.rs @@ -71,7 +71,7 @@ impl BeaconChain { } // 2. Check on disk. 
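One recurring change worth calling out before the pre-finalization cache check continues below: the hunks just above (`otb_verification_service.rs`, `persisted_beacon_chain.rs`, `persisted_fork_choice.rs`) all switch `StoreItem::as_store_bytes` from an infallible `Vec<u8>` to `Result<Vec<u8>, Error>`, so serialization failures can propagate instead of panicking. A minimal sketch of the revised trait shape, with a toy error and item type; the names are stand-ins, not the real `store` API:

```rust
#[derive(Debug)]
enum StoreError {
    Ssz(String),
}

/// Sketch of the fallible trait shape after this PR: both directions of the
/// (de)serialization can now fail.
trait StoreItem: Sized {
    fn as_store_bytes(&self) -> Result<Vec<u8>, StoreError>;
    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError>;
}

struct Counter(u64);

impl StoreItem for Counter {
    fn as_store_bytes(&self) -> Result<Vec<u8>, StoreError> {
        Ok(self.0.to_le_bytes().to_vec())
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
        let arr: [u8; 8] = bytes
            .try_into()
            .map_err(|_| StoreError::Ssz("wrong length".into()))?;
        Ok(Counter(u64::from_le_bytes(arr)))
    }
}

fn main() -> Result<(), StoreError> {
    let bytes = Counter(7).as_store_bytes()?;
    assert_eq!(Counter::from_store_bytes(&bytes)?.0, 7);
    Ok(())
}
```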
- if self.store.get_blinded_block(&block_root)?.is_some() { + if self.store.get_blinded_block(&block_root, None)?.is_some() { cache.block_roots.put(block_root, ()); return Ok(true); } diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index 7b398db2f5b..334fdf6a009 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ b/beacon_node/beacon_chain/src/schema_change.rs @@ -5,6 +5,7 @@ mod migration_schema_v14; mod migration_schema_v15; mod migration_schema_v16; mod migration_schema_v17; +mod migration_schema_v20; use crate::beacon_chain::{BeaconChainTypes, ETH1_CACHE_DB_KEY}; use crate::eth1_chain::SszEth1; @@ -28,6 +29,14 @@ pub fn migrate_schema( match (from, to) { // Migrating from the current schema version to itself is always OK, a no-op. (_, _) if from == to && to == CURRENT_SCHEMA_VERSION => Ok(()), + // Upgrade for tree-states database changes. + (SchemaVersion(12), SchemaVersion(20)) => { + migration_schema_v20::upgrade_to_v20::(db, log) + } + // Downgrade for tree-states database changes. + (SchemaVersion(20), SchemaVersion(12)) => { + migration_schema_v20::downgrade_from_v20::(db, log) + } // Upgrade across multiple versions by recursively migrating one step at a time. (_, _) if from.as_u64() + 1 < to.as_u64() => { let next = SchemaVersion(from.as_u64() + 1); @@ -83,7 +92,7 @@ pub fn migrate_schema( ) } }; - ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + ops.push(upgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)?); } db.store_schema_version_atomically(to, ops)?; @@ -111,7 +120,7 @@ pub fn migrate_schema( ) } }; - ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)); + ops.push(downgraded_eth1_cache.as_kv_store_op(ETH1_CACHE_DB_KEY)?); } db.store_schema_version_atomically(to, ops)?; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs index c9aa2097f8a..90283bdddd5 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v12.rs @@ -39,7 +39,7 @@ pub fn upgrade_to_v12( .unrealized_justified_checkpoint .root; let justified_block = db - .get_blinded_block(&justified_block_root)? + .get_blinded_block(&justified_block_root, None)? .ok_or_else(|| { Error::SchemaMigrationError(format!( "unrealized justified block missing for migration: {justified_block_root:?}", @@ -160,7 +160,7 @@ pub fn upgrade_to_v12( proposer_slashings, voluntary_exits, }); - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)?]) } pub fn downgrade_from_v12( @@ -220,5 +220,5 @@ pub fn downgrade_from_v12( proposer_slashings_v5, voluntary_exits_v5, }; - Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v5.as_kv_store_op(OP_POOL_DB_KEY)?]) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs index be913d8cc5f..3368907c8c2 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v14.rs @@ -18,12 +18,13 @@ fn get_slot_clock( log: &Logger, ) -> Result, Error> { let spec = db.get_chain_spec(); - let genesis_block = if let Some(block) = db.get_blinded_block(&Hash256::zero())? 
{ - block - } else { - error!(log, "Missing genesis block"); - return Ok(None); - }; + let genesis_block = + if let Some(block) = db.get_blinded_block(&Hash256::zero(), Some(Slot::new(0)))? { + block + } else { + error!(log, "Missing genesis block"); + return Ok(None); + }; let genesis_state = if let Some(state) = db.get_state(&genesis_block.state_root(), Some(Slot::new(0)))? { state @@ -66,7 +67,7 @@ pub fn upgrade_to_v14( voluntary_exits, bls_to_execution_changes, }); - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)?]) } pub fn downgrade_from_v14( @@ -121,5 +122,5 @@ pub fn downgrade_from_v14( proposer_slashings, voluntary_exits, }; - Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v12.as_kv_store_op(OP_POOL_DB_KEY)?]) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs index 07c86bd931f..a2117e184ed 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v15.rs @@ -35,7 +35,7 @@ pub fn upgrade_to_v15( // Initialize with empty set capella_bls_change_broadcast_indices: <_>::default(), }); - Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v15.as_kv_store_op(OP_POOL_DB_KEY)?]) } pub fn downgrade_from_v15( @@ -72,5 +72,5 @@ pub fn downgrade_from_v15( voluntary_exits, bls_to_execution_changes, }; - Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)]) + Ok(vec![v14.as_kv_store_op(OP_POOL_DB_KEY)?]) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs index 230573b0288..7e2d51ccabd 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v16.rs @@ -40,7 +40,7 @@ pub fn drop_balances_cache( // Drop all items in the balances cache. persisted_fork_choice.fork_choice_store.balances_cache = <_>::default(); - let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY); + let kv_op = persisted_fork_choice.as_kv_store_op(FORK_CHOICE_DB_KEY)?; Ok(vec![kv_op]) } diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs index 770cbb8ab55..cb5a76255ed 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v17.rs @@ -65,7 +65,7 @@ pub fn upgrade_to_v17( "Removing unused best_justified_checkpoint from fork choice store." ); - Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)]) + Ok(vec![v17.as_kv_store_op(FORK_CHOICE_DB_KEY)?]) } pub fn downgrade_from_v17( @@ -84,5 +84,5 @@ pub fn downgrade_from_v17( "Adding junk best_justified_checkpoint to fork choice store." 
     );

-    Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)])
+    Ok(vec![v11.as_kv_store_op(FORK_CHOICE_DB_KEY)?])
 }
diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs
new file mode 100644
index 00000000000..8a6f93f535b
--- /dev/null
+++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v20.rs
@@ -0,0 +1,273 @@
+// FIXME(sproul): implement migration
+#![allow(unused)]
+
+use crate::{
+    beacon_chain::{BeaconChainTypes, BEACON_CHAIN_DB_KEY},
+    persisted_beacon_chain::PersistedBeaconChain,
+};
+use slog::{debug, info, Logger};
+use std::collections::HashMap;
+use std::sync::Arc;
+use store::{
+    get_key_for_col,
+    hot_cold_store::{HotColdDBError, HotStateSummaryV1, HotStateSummaryV10},
+    metadata::SchemaVersion,
+    DBColumn, Error, HotColdDB, KeyValueStoreOp, StoreItem,
+};
+use types::{milhouse::Diff, BeaconState, EthSpec, Hash256, Slot};
+
+fn get_summary_v1<T: BeaconChainTypes>(
+    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
+    state_root: Hash256,
+) -> Result<HotStateSummaryV1, Error> {
+    db.get_item(&state_root)?
+        .ok_or_else(|| HotColdDBError::MissingHotStateSummary(state_root).into())
+}
+
+fn get_state_by_replay<T: BeaconChainTypes>(
+    db: &HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>,
+    state_root: Hash256,
+) -> Result<BeaconState<T::EthSpec>, Error> {
+    /* FIXME(sproul): fix migration
+    // Load state summary.
+    let HotStateSummaryV1 {
+        slot,
+        latest_block_root,
+        epoch_boundary_state_root,
+    } = get_summary_v1::<T>(db, state_root)?;
+
+    // Load full state from the epoch boundary.
+    let (epoch_boundary_state, _) = db.load_hot_state_full(&epoch_boundary_state_root)?;
+
+    // Replay blocks to reach the target state.
+    let blocks = db.load_blocks_to_replay(epoch_boundary_state.slot(), slot, latest_block_root)?;
+
+    db.replay_blocks(epoch_boundary_state, blocks, slot, std::iter::empty(), None)
+    */
+    panic!()
+}
+
+pub fn upgrade_to_v20<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<(), Error> {
+    /* FIXME(sproul): fix this
+    let mut ops = vec![];
+
+    // Translate hot state summaries to new format:
+    // - Rewrite epoch boundary root to previous epoch boundary root.
+    // - Add previous state root.
+    //
+    // Replace most epoch boundary states by diffs.
+    let split = db.get_split_info();
+    let finalized_slot = split.slot;
+    let finalized_state_root = split.state_root;
+    let slots_per_epoch = T::EthSpec::slots_per_epoch();
+
+    let ssz_head_tracker = db
+        .get_item::<PersistedBeaconChain>(&BEACON_CHAIN_DB_KEY)?
+        .ok_or(Error::MissingPersistedBeaconChain)?
+        .ssz_head_tracker;
+
+    let mut new_summaries = HashMap::new();
+
+    for (head_block_root, head_state_slot) in ssz_head_tracker
+        .roots
+        .into_iter()
+        .zip(ssz_head_tracker.slots)
+    {
+        let block = db
+            .get_blinded_block(&head_block_root, Some(head_state_slot))?
+            .ok_or(Error::BlockNotFound(head_block_root))?;
+        let head_state_root = block.state_root();
+
+        debug!(
+            log,
+            "Re-writing state summaries for head";
+            "block_root" => ?head_block_root,
+            "state_root" => ?head_state_root,
+            "slot" => head_state_slot
+        );
+        let mut current_state = get_state_by_replay::<T>(&db, head_state_root)?;
+        let mut current_state_root = head_state_root;
+
+        new_summaries.insert(
+            head_state_root,
+            HotStateSummaryV10::new(&head_state_root, &current_state)?,
+        );
+
+        for slot in (finalized_slot.as_u64()..current_state.slot().as_u64())
+            .rev()
+            .map(Slot::new)
+        {
+            let epoch_boundary_slot = (slot - 1) / slots_per_epoch * slots_per_epoch;
+
+            let state_root = *current_state.get_state_root(slot)?;
+            let latest_block_root = *current_state.get_block_root(slot)?;
+            let prev_state_root = *current_state.get_state_root(slot - 1)?;
+            let epoch_boundary_state_root = *current_state.get_state_root(epoch_boundary_slot)?;
+
+            // FIXME(sproul): rename V10 variant
+            let summary = HotStateSummaryV10 {
+                slot,
+                latest_block_root,
+                epoch_boundary_state_root,
+                prev_state_root,
+            };
+
+            // Stage the updated state summary for storage.
+            // If we've reached a known segment of chain then we can stop and continue to the next
+            // head.
+            if new_summaries.insert(state_root, summary).is_some() {
+                debug!(
+                    log,
+                    "Finished migrating chain tip";
+                    "head_block_root" => ?head_block_root,
+                    "reason" => format!("reached common state {:?}", state_root),
+                );
+                break;
+            } else {
+                debug!(
+                    log,
+                    "Rewriting hot state summary";
+                    "state_root" => ?state_root,
+                    "slot" => slot,
+                    "epoch_boundary_state_root" => ?epoch_boundary_state_root,
+                    "prev_state_root" => ?prev_state_root,
+                );
+            }
+
+            // If the state reached is an epoch boundary state, then load it so that we can continue
+            // backtracking from it and storing diffs.
+            if slot % slots_per_epoch == 0 {
+                debug!(
+                    log,
+                    "Loading epoch boundary state";
+                    "state_root" => ?state_root,
+                    "slot" => slot,
+                );
+                let backtrack_state = get_state_by_replay::<T>(&db, state_root)?;
+
+                // If the current state is an epoch boundary state too then we might need to convert
+                // it to a diff relative to the backtrack state.
+                if current_state.slot() % slots_per_epoch == 0
+                    && !db.is_stored_as_full_state(current_state_root, current_state.slot())?
+                {
+                    debug!(
+                        log,
+                        "Converting full state to diff";
+                        "prev_state_root" => ?state_root,
+                        "state_root" => ?current_state_root,
+                        "slot" => current_state.slot(),
+                    );
+
+                    let diff = BeaconStateDiff::compute_diff(&backtrack_state, &current_state)?;
+
+                    // Store diff.
+                    ops.push(db.state_diff_as_kv_store_op(&current_state_root, &diff)?);
+
+                    // Delete full state.
+                    let state_key = get_key_for_col(
+                        DBColumn::BeaconState.into(),
+                        current_state_root.as_bytes(),
+                    );
+                    ops.push(KeyValueStoreOp::DeleteKey(state_key));
+                }
+
+                current_state = backtrack_state;
+                current_state_root = state_root;
+            }
+
+            if slot == finalized_slot {
+                // FIXME(sproul): remove assert
+                assert_eq!(finalized_state_root, state_root);
+                debug!(
+                    log,
+                    "Finished migrating chain tip";
+                    "head_block_root" => ?head_block_root,
+                    "reason" => format!("reached finalized state {:?}", finalized_state_root),
+                );
+                break;
+            }
+        }
+    }
+
+    ops.reserve(new_summaries.len());
+    for (state_root, summary) in new_summaries {
+        ops.push(summary.as_kv_store_op(state_root)?);
+    }
+
+    db.store_schema_version_atomically(SchemaVersion(20), ops)
+    */
+    panic!()
+}
+
+pub fn downgrade_from_v20<T: BeaconChainTypes>(
+    db: Arc<HotColdDB<T::EthSpec, T::HotStore, T::ColdStore>>,
+    log: Logger,
+) -> Result<(), Error> {
+    /* FIXME(sproul): broken
+    let slots_per_epoch = T::EthSpec::slots_per_epoch();
+
+    // Iterate hot state summaries and re-write them so that:
+    //
+    // - The previous state root is removed.
+    // - The epoch boundary root points to the most recent epoch boundary root rather than the
+    //   previous epoch boundary root. We exploit the fact that they are the same except when the
+    //   slot of the summary itself lies on an epoch boundary.
+    let mut summaries = db
+        .iter_hot_state_summaries()
+        .collect::<Result<Vec<_>, _>>()?;
+
+    // Sort by slot ascending so that the state cache has a better chance of hitting.
+    summaries.sort_unstable_by(|(_, summ1), (_, summ2)| summ1.slot.cmp(&summ2.slot));
+
+    info!(log, "Rewriting {} state summaries", summaries.len());
+
+    let mut ops = Vec::with_capacity(summaries.len());
+
+    for (state_root, summary) in summaries {
+        let epoch_boundary_state_root = if summary.slot % slots_per_epoch == 0 {
+            info!(
+                log,
+                "Ensuring state is stored as full state";
+                "state_root" => ?state_root,
+                "slot" => summary.slot
+            );
+            let state = db
+                .get_hot_state(&state_root)?
+                .ok_or(Error::MissingState(state_root))?;
+
+            // Delete state diff.
+            let state_key =
+                get_key_for_col(DBColumn::BeaconStateDiff.into(), state_root.as_bytes());
+            ops.push(KeyValueStoreOp::DeleteKey(state_key));
+
+            // Store full state.
+            db.store_full_state_in_batch(&state_root, &state, &mut ops)?;
+
+            // This state root is its own most recent epoch boundary root.
+            state_root
+        } else {
+            summary.epoch_boundary_state_root
+        };
+        let summary_v1 = HotStateSummaryV1 {
+            slot: summary.slot,
+            latest_block_root: summary.latest_block_root,
+            epoch_boundary_state_root,
+        };
+        debug!(
+            log,
+            "Rewriting state summary";
+            "slot" => summary_v1.slot,
+            "latest_block_root" => ?summary_v1.latest_block_root,
+            "epoch_boundary_state_root" => ?summary_v1.epoch_boundary_state_root,
+        );
+
+        ops.push(summary_v1.as_kv_store_op(state_root)?);
+    }
+
+    // Downgrades from v20 target schema v12 (see `migrate_schema`), not v8.
+    db.store_schema_version_atomically(SchemaVersion(12), ops)
+    */
+    panic!()
+}
diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs
index 086e1c09498..956520cdcfc 100644
--- a/beacon_node/beacon_chain/src/shuffling_cache.rs
+++ b/beacon_node/beacon_chain/src/shuffling_cache.rs
@@ -339,7 +339,7 @@ mod test {
             .clone();
         let committee_b = state.committee_cache(RelativeEpoch::Next).unwrap().clone();
         assert!(committee_a != committee_b);
-        (Arc::new(committee_a), Arc::new(committee_b))
+        (committee_a, committee_b)
     }

     /// Builds a deterministic but incoherent shuffling ID from a `u64`.
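Note on the v20 migration above (illustration, not part of the diff): the summary rewrite hinges on each summary at `slot` pointing its `epoch_boundary_state_root` at the boundary strictly before it, so a state that sits exactly on an epoch boundary is diffed against the previous boundary rather than itself. A minimal sketch of that calculation, assuming Lighthouse's `types::Slot` integer arithmetic (`Slot` minus/divided-by/times `u64` yields `Slot`):

use types::Slot;

/// Boundary slot used as the diff base for a summary at `slot`; mirrors the
/// `(slot - 1) / slots_per_epoch * slots_per_epoch` expression in `upgrade_to_v20`.
fn previous_epoch_boundary_slot(slot: Slot, slots_per_epoch: u64) -> Slot {
    (slot - 1) / slots_per_epoch * slots_per_epoch
}

// With 32 slots per epoch: slot 33 maps to 32, slot 64 maps to 32, and the
// boundary slot 32 maps to 0 (the *previous* boundary, never itself).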
diff --git a/beacon_node/beacon_chain/src/snapshot_cache.rs b/beacon_node/beacon_chain/src/snapshot_cache.rs deleted file mode 100644 index d2846c08569..00000000000 --- a/beacon_node/beacon_chain/src/snapshot_cache.rs +++ /dev/null @@ -1,523 +0,0 @@ -use crate::BeaconSnapshot; -use itertools::process_results; -use std::cmp; -use std::sync::Arc; -use std::time::Duration; -use types::{ - beacon_state::CloneConfig, BeaconState, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, - SignedBeaconBlock, Slot, -}; - -/// The default size of the cache. -pub const DEFAULT_SNAPSHOT_CACHE_SIZE: usize = 4; - -/// The minimum block delay to clone the state in the cache instead of removing it. -/// This helps keep block processing fast during re-orgs from late blocks. -fn minimum_block_delay_for_clone(seconds_per_slot: u64) -> Duration { - // If the block arrived at the attestation deadline or later, it might get re-orged. - Duration::from_secs(seconds_per_slot) / 3 -} - -/// This snapshot is to be used for verifying a child of `self.beacon_block`. -#[derive(Debug)] -pub struct PreProcessingSnapshot { - /// This state is equivalent to the `self.beacon_block.state_root()` state that has been - /// advanced forward one slot using `per_slot_processing`. This state is "primed and ready" for - /// the application of another block. - pub pre_state: BeaconState, - /// This value is only set to `Some` if the `pre_state` was *not* advanced forward. - pub beacon_state_root: Option, - pub beacon_block: SignedBeaconBlock>, - pub beacon_block_root: Hash256, -} - -impl From> for PreProcessingSnapshot { - fn from(snapshot: BeaconSnapshot) -> Self { - let beacon_state_root = Some(snapshot.beacon_state_root()); - Self { - pre_state: snapshot.beacon_state, - beacon_state_root, - beacon_block: snapshot.beacon_block.clone_as_blinded(), - beacon_block_root: snapshot.beacon_block_root, - } - } -} - -impl CacheItem { - pub fn new_without_pre_state(snapshot: BeaconSnapshot) -> Self { - Self { - beacon_block: snapshot.beacon_block, - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state: None, - } - } - - fn clone_to_snapshot_with(&self, clone_config: CloneConfig) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state.clone_with(clone_config), - beacon_block: self.beacon_block.clone(), - beacon_block_root: self.beacon_block_root, - } - } - - pub fn into_pre_state(self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. - let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self.pre_state.unwrap_or(self.beacon_state), - beacon_state_root, - } - } - - pub fn clone_as_pre_state(&self) -> PreProcessingSnapshot { - // Do not include the beacon state root if the state has been advanced. - let beacon_state_root = - Some(self.beacon_block.state_root()).filter(|_| self.pre_state.is_none()); - - PreProcessingSnapshot { - beacon_block: self.beacon_block.clone_as_blinded(), - beacon_block_root: self.beacon_block_root, - pre_state: self - .pre_state - .as_ref() - .map_or_else(|| self.beacon_state.clone(), |pre_state| pre_state.clone()), - beacon_state_root, - } - } -} - -/// The information required for block production. -pub struct BlockProductionPreState { - /// This state may or may not have been advanced forward a single slot. 
- /// - /// See the documentation in the `crate::state_advance_timer` module for more information. - pub pre_state: BeaconState, - /// This value will only be `Some` if `self.pre_state` was **not** advanced forward a single - /// slot. - /// - /// This value can be used to avoid tree-hashing the state during the first call to - /// `per_slot_processing`. - pub state_root: Option, -} - -pub enum StateAdvance { - /// The cache does not contain the supplied block root. - BlockNotFound, - /// The cache contains the supplied block root but the state has already been advanced. - AlreadyAdvanced, - /// The cache contains the supplied block root and the state has not yet been advanced. - State { - state: Box>, - state_root: Hash256, - block_slot: Slot, - }, -} - -/// The item stored in the `SnapshotCache`. -pub struct CacheItem { - beacon_block: Arc>, - beacon_block_root: Hash256, - /// This state is equivalent to `self.beacon_block.state_root()`. - beacon_state: BeaconState, - /// This state is equivalent to `self.beacon_state` that has had `per_slot_processing` applied - /// to it. This state assists in optimizing block processing. - pre_state: Option>, -} - -impl Into> for CacheItem { - fn into(self) -> BeaconSnapshot { - BeaconSnapshot { - beacon_state: self.beacon_state, - beacon_block: self.beacon_block, - beacon_block_root: self.beacon_block_root, - } - } -} - -/// Provides a cache of `BeaconSnapshot` that is intended primarily for block processing. -/// -/// ## Cache Queuing -/// -/// The cache has a non-standard queue mechanism (specifically, it is not LRU). -/// -/// The cache has a max number of elements (`max_len`). Until `max_len` is achieved, all snapshots -/// are simply added to the queue. Once `max_len` is achieved, adding a new snapshot will cause an -/// existing snapshot to be ejected. The ejected snapshot will: -/// -/// - Never be the `head_block_root`. -/// - Be the snapshot with the lowest `state.slot` (ties broken arbitrarily). -pub struct SnapshotCache { - max_len: usize, - head_block_root: Hash256, - snapshots: Vec>, -} - -impl SnapshotCache { - /// Instantiate a new cache which contains the `head` snapshot. - /// - /// Setting `max_len = 0` is equivalent to setting `max_len = 1`. - pub fn new(max_len: usize, head: BeaconSnapshot) -> Self { - Self { - max_len: cmp::max(max_len, 1), - head_block_root: head.beacon_block_root, - snapshots: vec![CacheItem::new_without_pre_state(head)], - } - } - - /// The block roots of all snapshots contained in `self`. - pub fn beacon_block_roots(&self) -> Vec { - self.snapshots.iter().map(|s| s.beacon_block_root).collect() - } - - /// The number of snapshots contained in `self`. - pub fn len(&self) -> usize { - self.snapshots.len() - } - - /// Insert a snapshot, potentially removing an existing snapshot if `self` is at capacity (see - /// struct-level documentation for more info). - pub fn insert( - &mut self, - snapshot: BeaconSnapshot, - pre_state: Option>, - spec: &ChainSpec, - ) { - let parent_root = snapshot.beacon_block.message().parent_root(); - let item = CacheItem { - beacon_block: snapshot.beacon_block.clone(), - beacon_block_root: snapshot.beacon_block_root, - beacon_state: snapshot.beacon_state, - pre_state, - }; - - // Remove the grandparent of the block that was just inserted. - // - // Assuming it's unlikely to see re-orgs deeper than one block, this method helps keep the - // cache small by removing any states that already have more than one descendant. - // - // Remove the grandparent first to free up room in the cache. 
- let grandparent_result = - process_results(item.beacon_state.rev_iter_block_roots(spec), |iter| { - iter.map(|(_slot, root)| root) - .find(|root| *root != item.beacon_block_root && *root != parent_root) - }); - if let Ok(Some(grandparent_root)) = grandparent_result { - let head_block_root = self.head_block_root; - self.snapshots.retain(|snapshot| { - let root = snapshot.beacon_block_root; - root == head_block_root || root != grandparent_root - }); - } - - if self.snapshots.len() < self.max_len { - self.snapshots.push(item); - } else { - let insert_at = self - .snapshots - .iter() - .enumerate() - .filter_map(|(i, snapshot)| { - if snapshot.beacon_block_root != self.head_block_root { - Some((i, snapshot.beacon_state.slot())) - } else { - None - } - }) - .min_by_key(|(_i, slot)| *slot) - .map(|(i, _slot)| i); - - if let Some(i) = insert_at { - self.snapshots[i] = item; - } - } - } - - /// If available, returns a `CacheItem` that should be used for importing/processing a block. - /// The method will remove the block from `self`, carrying across any caches that may or may not - /// be built. - /// - /// In the event the block being processed was observed late, clone the cache instead of - /// moving it. This allows us to process the next block quickly in the case of a re-org. - /// Additionally, if the slot was skipped, clone the cache. This ensures blocks that are - /// later than 1 slot still have access to the cache and can be processed quickly. - pub fn get_state_for_block_processing( - &mut self, - block_root: Hash256, - block_slot: Slot, - block_delay: Option, - spec: &ChainSpec, - ) -> Option<(PreProcessingSnapshot, bool)> { - self.snapshots - .iter() - .position(|snapshot| snapshot.beacon_block_root == block_root) - .map(|i| { - if let Some(cache) = self.snapshots.get(i) { - // Avoid cloning the block during sync (when the `block_delay` is `None`). - if let Some(delay) = block_delay { - if delay >= minimum_block_delay_for_clone(spec.seconds_per_slot) - && delay <= Duration::from_secs(spec.seconds_per_slot) * 4 - || block_slot > cache.beacon_block.slot() + 1 - { - return (cache.clone_as_pre_state(), true); - } - } - } - (self.snapshots.remove(i).into_pre_state(), false) - }) - } - - /// If available, obtains a clone of a `BeaconState` that should be used for block production. - /// The clone will use `CloneConfig:all()`, ensuring any tree-hash cache is cloned too. - /// - /// ## Note - /// - /// This method clones the `BeaconState` (instead of removing it) since we assume that any block - /// we produce will soon be pushed to the `BeaconChain` for importing/processing. Keeping a copy - /// of that `BeaconState` in `self` will greatly help with import times. - pub fn get_state_for_block_production( - &self, - block_root: Hash256, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - if let Some(pre_state) = &snapshot.pre_state { - BlockProductionPreState { - pre_state: pre_state.clone_with(CloneConfig::all()), - state_root: None, - } - } else { - BlockProductionPreState { - pre_state: snapshot.beacon_state.clone_with(CloneConfig::all()), - state_root: Some(snapshot.beacon_block.state_root()), - } - } - }) - } - - /// If there is a snapshot with `block_root`, clone it and return the clone. 
- pub fn get_cloned( - &self, - block_root: Hash256, - clone_config: CloneConfig, - ) -> Option> { - self.snapshots - .iter() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| snapshot.clone_to_snapshot_with(clone_config)) - } - - pub fn get_for_state_advance(&mut self, block_root: Hash256) -> StateAdvance { - if let Some(snapshot) = self - .snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - { - if snapshot.pre_state.is_some() { - StateAdvance::AlreadyAdvanced - } else { - let cloned = snapshot - .beacon_state - .clone_with(CloneConfig::committee_caches_only()); - - StateAdvance::State { - state: Box::new(std::mem::replace(&mut snapshot.beacon_state, cloned)), - state_root: snapshot.beacon_block.state_root(), - block_slot: snapshot.beacon_block.slot(), - } - } - } else { - StateAdvance::BlockNotFound - } - } - - pub fn update_pre_state(&mut self, block_root: Hash256, state: BeaconState) -> Option<()> { - self.snapshots - .iter_mut() - .find(|snapshot| snapshot.beacon_block_root == block_root) - .map(|snapshot| { - snapshot.pre_state = Some(state); - }) - } - - /// Removes all snapshots from the queue that are less than or equal to the finalized epoch. - pub fn prune(&mut self, finalized_epoch: Epoch) { - self.snapshots.retain(|snapshot| { - snapshot.beacon_state.slot() > finalized_epoch.start_slot(T::slots_per_epoch()) - }) - } - - /// Inform the cache that the head of the beacon chain has changed. - /// - /// The snapshot that matches this `head_block_root` will never be ejected from the cache - /// during `Self::insert`. - pub fn update_head(&mut self, head_block_root: Hash256) { - self.head_block_root = head_block_root - } -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; - use types::{ - test_utils::generate_deterministic_keypair, BeaconBlock, Epoch, MainnetEthSpec, - SignedBeaconBlock, Slot, - }; - - fn get_harness() -> BeaconChainHarness> { - let harness = BeaconChainHarness::builder(MainnetEthSpec) - .default_spec() - .deterministic_keypairs(1) - .fresh_ephemeral_store() - .build(); - - harness.advance_slot(); - - harness - } - - const CACHE_SIZE: usize = 4; - - fn get_snapshot(i: u64) -> BeaconSnapshot { - let spec = MainnetEthSpec::default_spec(); - - let beacon_state = get_harness().chain.head_beacon_state_cloned(); - - let signed_beacon_block = SignedBeaconBlock::from_block( - BeaconBlock::empty(&spec), - generate_deterministic_keypair(0) - .sk - .sign(Hash256::from_low_u64_be(42)), - ); - - BeaconSnapshot { - beacon_state, - beacon_block: Arc::new(signed_beacon_block), - beacon_block_root: Hash256::from_low_u64_be(i), - } - } - - #[test] - fn insert_get_prune_update() { - let spec = MainnetEthSpec::default_spec(); - let mut cache = SnapshotCache::new(CACHE_SIZE, get_snapshot(0)); - - // Insert a bunch of entries in the cache. It should look like this: - // - // Index Root - // 0 0 <--head - // 1 1 - // 2 2 - // 3 3 - for i in 1..CACHE_SIZE as u64 { - let mut snapshot = get_snapshot(i); - - // Each snapshot should be one slot into an epoch, with each snapshot one epoch apart. - *snapshot.beacon_state.slot_mut() = - Slot::from(i * MainnetEthSpec::slots_per_epoch() + 1); - - cache.insert(snapshot, None, &spec); - - assert_eq!( - cache.snapshots.len(), - i as usize + 1, - "cache length should be as expected" - ); - assert_eq!(cache.head_block_root, Hash256::from_low_u64_be(0)); - } - - // Insert a new value in the cache. 
Afterwards it should look like: - // - // Index Root - // 0 0 <--head - // 1 42 - // 2 2 - // 3 3 - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - cache.insert(get_snapshot(42), None, &spec); - assert_eq!(cache.snapshots.len(), CACHE_SIZE); - - assert!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(1), - Slot::new(0), - None, - &spec - ) - .is_none(), - "the snapshot with the lowest slot should have been removed during the insert function" - ); - assert!(cache - .get_cloned(Hash256::from_low_u64_be(1), CloneConfig::none()) - .is_none()); - - assert_eq!( - cache - .get_cloned(Hash256::from_low_u64_be(0), CloneConfig::none()) - .expect("the head should still be in the cache") - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_cloned should get the correct snapshot" - ); - assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(0), - Slot::new(0), - None, - &spec - ) - .expect("the head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(0), - "get_state_for_block_processing should get the correct snapshot" - ); - - assert_eq!( - cache.snapshots.len(), - CACHE_SIZE - 1, - "get_state_for_block_processing should shorten the cache" - ); - - // Prune the cache. Afterwards it should look like: - // - // Index Root - // 0 2 - // 1 3 - cache.prune(Epoch::new(2)); - - assert_eq!(cache.snapshots.len(), 2); - - cache.update_head(Hash256::from_low_u64_be(2)); - - // Over-fill the cache so it needs to eject some old values on insert. - for i in 0..CACHE_SIZE as u64 { - cache.insert(get_snapshot(u64::max_value() - i), None, &spec); - } - - // Ensure that the new head value was not removed from the cache. - assert_eq!( - cache - .get_state_for_block_processing( - Hash256::from_low_u64_be(2), - Slot::new(0), - None, - &spec - ) - .expect("the new head should still be in the cache") - .0 - .beacon_block_root, - Hash256::from_low_u64_be(2), - "get_state_for_block_processing should get the correct snapshot" - ); - } -} diff --git a/beacon_node/beacon_chain/src/state_advance_timer.rs b/beacon_node/beacon_chain/src/state_advance_timer.rs index f73223fa540..ba722ef8c31 100644 --- a/beacon_node/beacon_chain/src/state_advance_timer.rs +++ b/beacon_node/beacon_chain/src/state_advance_timer.rs @@ -15,9 +15,7 @@ //! 2. There's a possibility that the head block is never built upon, causing wasted CPU cycles. use crate::validator_monitor::HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS; use crate::{ - beacon_chain::{ATTESTATION_CACHE_LOCK_TIMEOUT, BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT}, - chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, - snapshot_cache::StateAdvance, + beacon_chain::ATTESTATION_CACHE_LOCK_TIMEOUT, chain_config::FORK_CHOICE_LOOKAHEAD_FACTOR, BeaconChain, BeaconChainError, BeaconChainTypes, }; use slog::{debug, error, warn, Logger}; @@ -29,7 +27,7 @@ use std::sync::{ }; use task_executor::TaskExecutor; use tokio::time::{sleep, sleep_until, Instant}; -use types::{AttestationShufflingId, EthSpec, Hash256, RelativeEpoch, Slot}; +use types::{AttestationShufflingId, BeaconStateError, EthSpec, Hash256, RelativeEpoch, Slot}; /// If the head slot is more than `MAX_ADVANCE_DISTANCE` from the current slot, then don't perform /// the state advancement. 
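The hunks below widen the state-advance timer's `Error` enum and add `From` conversions; this is what lets the rewritten `advance_head` call fallible store and state-hashing methods with `?` instead of explicit `map_err`. A standalone, runnable illustration of the pattern using stand-in error types (not the real Lighthouse ones):

#[derive(Debug)]
struct BeaconStateError(String);
#[derive(Debug)]
struct StoreError(String);

#[derive(Debug)]
enum Error {
    BeaconState(BeaconStateError),
    Store(StoreError),
}

impl From<BeaconStateError> for Error {
    fn from(e: BeaconStateError) -> Self {
        Self::BeaconState(e)
    }
}

impl From<StoreError> for Error {
    fn from(e: StoreError) -> Self {
        Self::Store(e)
    }
}

fn hash_state() -> Result<u64, BeaconStateError> {
    Ok(42)
}

fn store_write(_root: u64) -> Result<(), StoreError> {
    Ok(())
}

fn advance() -> Result<(), Error> {
    // `?` applies the `From` impls automatically, lifting each sub-error.
    let root = hash_state()?;
    store_write(root)?;
    Ok(())
}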
@@ -48,6 +46,8 @@ const MAX_FORK_CHOICE_DISTANCE: u64 = 256;
 #[derive(Debug)]
 enum Error {
     BeaconChain(BeaconChainError),
+    BeaconState(BeaconStateError),
+    Store(store::Error),
     HeadMissingFromSnapshotCache(Hash256),
     MaxDistanceExceeded {
         current_slot: Slot,
@@ -58,7 +58,7 @@ enum Error {
     },
     BadStateSlot {
         _state_slot: Slot,
-        _block_slot: Slot,
+        _current_slot: Slot,
     },
 }
@@ -68,6 +68,18 @@ impl From<BeaconChainError> for Error {
     }
 }

+impl From<BeaconStateError> for Error {
+    fn from(e: BeaconStateError) -> Self {
+        Self::BeaconState(e)
+    }
+}
+
+impl From<store::Error> for Error {
+    fn from(e: store::Error) -> Self {
+        Self::Store(e)
+    }
+}
+
 /// Provides a simple thread-safe lock to be used for task co-ordination. Practically equivalent to
 /// `Mutex<()>`.
 #[derive(Clone)]
@@ -263,11 +275,6 @@ async fn state_advance_timer(
         }
     }

-/// Reads the `snapshot_cache` from the `beacon_chain` and attempts to take a clone of the
-/// `BeaconState` of the head block. If it obtains this clone, the state will be advanced a single
-/// slot then placed back in the `snapshot_cache` to be used for block verification.
-///
-/// See the module-level documentation for rationale.
 fn advance_head<T: BeaconChainTypes>(
     beacon_chain: &Arc<BeaconChain<T>>,
     log: &Logger,
@@ -290,46 +297,38 @@ fn advance_head<T: BeaconChainTypes>(
         }
     }

-    let head_root = beacon_chain.head_beacon_block_root();
-
-    let (head_slot, head_state_root, mut state) = match beacon_chain
-        .snapshot_cache
-        .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT)
-        .ok_or(BeaconChainError::SnapshotCacheLockTimeout)?
-        .get_for_state_advance(head_root)
-    {
-        StateAdvance::AlreadyAdvanced => {
-            return Err(Error::StateAlreadyAdvanced {
-                block_root: head_root,
-            })
-        }
-        StateAdvance::BlockNotFound => return Err(Error::HeadMissingFromSnapshotCache(head_root)),
-        StateAdvance::State {
-            state,
-            state_root,
-            block_slot,
-        } => (block_slot, state_root, *state),
+    let (head_block_root, head_block_state_root) = {
+        let snapshot = beacon_chain.head_snapshot();
+        (snapshot.beacon_block_root, snapshot.beacon_state_root())
     };

-    let initial_slot = state.slot();
-    let initial_epoch = state.current_epoch();
+    let (head_state_root, mut state) = beacon_chain
+        .store
+        .get_advanced_state(head_block_root, current_slot, head_block_state_root)?
+        .ok_or(Error::HeadMissingFromSnapshotCache(head_block_root))?;

-    let state_root = if state.slot() == head_slot {
-        Some(head_state_root)
-    } else {
+    if state.slot() == current_slot + 1 {
+        return Err(Error::StateAlreadyAdvanced {
+            block_root: head_block_root,
+        });
+    } else if state.slot() != current_slot {
         // Protect against advancing a state more than a single slot.
         //
         // Advancing more than one slot without storing the intermediate state would corrupt the
         // database. Future works might store temporary, intermediate states inside this function.
         return Err(Error::BadStateSlot {
-            _block_slot: head_slot,
             _state_slot: state.slot(),
+            _current_slot: current_slot,
         });
-    };
+    }
+
+    let initial_slot = state.slot();
+    let initial_epoch = state.current_epoch();

     // Advance the state a single slot.
-    if let Some(summary) = per_slot_processing(&mut state, state_root, &beacon_chain.spec)
-        .map_err(BeaconChainError::from)?
+    if let Some(summary) =
+        per_slot_processing(&mut state, Some(head_state_root), &beacon_chain.spec)
+            .map_err(BeaconChainError::from)?
     {
         // Expose Prometheus metrics.
if let Err(e) = summary.observe_metrics() { @@ -363,7 +362,7 @@ fn advance_head( debug!( log, "Advanced head state one slot"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "state_slot" => state.slot(), "current_slot" => current_slot, ); @@ -382,14 +381,14 @@ fn advance_head( if initial_epoch < state.current_epoch() { // Update the proposer cache. // - // We supply the `head_root` as the decision block since the prior `if` statement guarantees + // We supply the `head_block_root` as the decision block since the prior `if` statement guarantees // the head root is the latest block from the prior epoch. beacon_chain .beacon_proposer_cache .lock() .insert( state.current_epoch(), - head_root, + head_block_root, state .get_beacon_proposer_indices(&beacon_chain.spec) .map_err(BeaconChainError::from)?, @@ -398,8 +397,9 @@ fn advance_head( .map_err(BeaconChainError::from)?; // Update the attester cache. - let shuffling_id = AttestationShufflingId::new(head_root, &state, RelativeEpoch::Next) - .map_err(BeaconChainError::from)?; + let shuffling_id = + AttestationShufflingId::new(head_block_root, &state, RelativeEpoch::Next) + .map_err(BeaconChainError::from)?; let committee_cache = state .committee_cache(RelativeEpoch::Next) .map_err(BeaconChainError::from)?; @@ -412,7 +412,7 @@ fn advance_head( debug!( log, "Primed proposer and attester caches"; - "head_root" => ?head_root, + "head_block_root" => ?head_block_root, "next_epoch_shuffling_root" => ?shuffling_id.shuffling_decision_block, "state_epoch" => state.current_epoch(), "current_epoch" => current_slot.epoch(T::EthSpec::slots_per_epoch()), @@ -422,44 +422,19 @@ fn advance_head( // Apply the state to the attester cache, if the cache deems it interesting. beacon_chain .attester_cache - .maybe_cache_state(&state, head_root, &beacon_chain.spec) + .maybe_cache_state(&state, head_block_root, &beacon_chain.spec) .map_err(BeaconChainError::from)?; let final_slot = state.slot(); - // Insert the advanced state back into the snapshot cache. - beacon_chain - .snapshot_cache - .try_write_for(BLOCK_PROCESSING_CACHE_LOCK_TIMEOUT) - .ok_or(BeaconChainError::SnapshotCacheLockTimeout)? - .update_pre_state(head_root, state) - .ok_or(Error::HeadMissingFromSnapshotCache(head_root))?; - - // If we have moved into the next slot whilst processing the state then this function is going - // to become ineffective and likely become a hindrance as we're stealing the tree hash cache - // from the snapshot cache (which may force the next block to rebuild a new one). - // - // If this warning occurs very frequently on well-resourced machines then we should consider - // starting it earlier in the slot. Otherwise, it's a good indication that the machine is too - // slow/overloaded and will be useful information for the user. - let starting_slot = current_slot; - let current_slot = beacon_chain.slot()?; - if starting_slot < current_slot { - warn!( - log, - "State advance too slow"; - "head_root" => %head_root, - "advanced_slot" => final_slot, - "current_slot" => current_slot, - "starting_slot" => starting_slot, - "msg" => "system resources may be overloaded", - ); - } + // Write the advanced state to the database. 
+    let advanced_state_root = state.update_tree_hash_cache()?;
+    beacon_chain.store.put_state(&advanced_state_root, &state)?;

     debug!(
         log,
         "Completed state advance";
-        "head_root" => ?head_root,
+        "head_block_root" => ?head_block_root,
         "advanced_slot" => final_slot,
         "initial_slot" => initial_slot,
     );
diff --git a/beacon_node/beacon_chain/src/sync_committee_rewards.rs b/beacon_node/beacon_chain/src/sync_committee_rewards.rs
index 2221aa1d5eb..e67c947f0d8 100644
--- a/beacon_node/beacon_chain/src/sync_committee_rewards.rs
+++ b/beacon_node/beacon_chain/src/sync_committee_rewards.rs
@@ -38,9 +38,26 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             })?;

         let mut balances = HashMap::<usize, u64>::new();
+        for &validator_index in &sync_committee_indices {
+            balances.insert(
+                validator_index,
+                *state
+                    .balances()
+                    .get(validator_index)
+                    .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?,
+            );
+        }
+
+        let proposer_index = block.proposer_index() as usize;
+        balances.insert(
+            proposer_index,
+            *state
+                .balances()
+                .get(proposer_index)
+                .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?,
+        );

         let mut total_proposer_rewards = 0;
-        let proposer_index = state.get_beacon_proposer_index(block.slot(), spec)?;

         // Apply rewards to participant balances. Keep track of proposer rewards
         for (validator_index, participant_bit) in sync_committee_indices
@@ -48,15 +65,15 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .zip(sync_aggregate.sync_committee_bits.iter())
         {
             let participant_balance = balances
-                .entry(*validator_index)
-                .or_insert_with(|| state.balances()[*validator_index]);
+                .get_mut(&validator_index)
+                .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?;

             if participant_bit {
                 participant_balance.safe_add_assign(participant_reward_value)?;

                 balances
-                    .entry(proposer_index)
-                    .or_insert_with(|| state.balances()[proposer_index])
+                    .get_mut(&proposer_index)
+                    .ok_or(BeaconChainError::SyncCommitteeRewardsSyncError)?
                     .safe_add_assign(proposer_reward_per_bit)?;

                 total_proposer_rewards.safe_add_assign(proposer_reward_per_bit)?;
@@ -67,18 +84,17 @@ impl<T: BeaconChainTypes> BeaconChain<T> {

         Ok(balances
             .iter()
-            .filter_map(|(i, new_balance)| {
-                let reward = if *i != proposer_index {
-                    *new_balance as i64 - state.balances()[*i] as i64
-                } else if sync_committee_indices.contains(i) {
-                    *new_balance as i64
-                        - state.balances()[*i] as i64
-                        - total_proposer_rewards as i64
+            .filter_map(|(&i, &new_balance)| {
+                let initial_balance = *state.balances().get(i)?
as i64; + let reward = if i != proposer_index { + new_balance as i64 - initial_balance + } else if sync_committee_indices.contains(&i) { + new_balance as i64 - initial_balance - total_proposer_rewards as i64 } else { return None; }; Some(SyncCommitteeReward { - validator_index: *i as u64, + validator_index: i as u64, reward, }) }) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 55ea016fbda..2cdd81e782a 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -37,10 +37,7 @@ use sensitive_url::SensitiveUrl; use slog::Logger; use slot_clock::{SlotClock, TestingSlotClock}; use state_processing::per_block_processing::compute_timestamp_at_slot; -use state_processing::{ - state_advance::{complete_state_advance, partial_state_advance}, - StateProcessingStrategy, -}; +use state_processing::state_advance::complete_state_advance; use std::borrow::Cow; use std::collections::{HashMap, HashSet}; use std::fmt; @@ -664,10 +661,7 @@ where pub fn get_current_state_and_root(&self) -> (BeaconState, Hash256) { let head = self.chain.head_snapshot(); let state_root = head.beacon_state_root(); - ( - head.beacon_state.clone_with_only_committee_caches(), - state_root, - ) + (head.beacon_state.clone(), state_root) } pub fn head_slot(&self) -> Slot { @@ -710,8 +704,9 @@ where pub fn get_hot_state(&self, state_hash: BeaconStateHash) -> Option> { self.chain .store - .load_hot_state(&state_hash.into(), StateProcessingStrategy::Accurate) + .load_hot_state(&state_hash.into()) .unwrap() + .map(|(state, _)| state) } pub fn get_cold_state(&self, state_hash: BeaconStateHash) -> Option> { @@ -891,9 +886,7 @@ where return Err(BeaconChainError::CannotAttestToFutureState); } else if state.current_epoch() < epoch { let mut_state = state.to_mut(); - // Only perform a "partial" state advance since we do not require the state roots to be - // accurate. - partial_state_advance( + complete_state_advance( mut_state, Some(state_root), epoch.start_slot(E::slots_per_epoch()), diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index 396aac71b07..334cee0e45f 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -405,10 +405,10 @@ impl ValidatorMonitor { .skip(self.indices.len()) .for_each(|(i, validator)| { let i = i as u64; - if let Some(validator) = self.validators.get_mut(&validator.pubkey) { + if let Some(validator) = self.validators.get_mut(validator.pubkey()) { validator.set_index(i) } - self.indices.insert(i, validator.pubkey); + self.indices.insert(i, *validator.pubkey()); }); // Update metrics for individual validators. 
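The validator monitor hunks here swap direct field access (`validator.effective_balance`, `validator.slashed`) for accessor methods (`validator.effective_balance()`, `validator.slashed()`), consistent with the rest of the branch where `Validator` fields are read through getters rather than exposed publicly. A simplified, hypothetical sketch of that accessor style (not the real consensus type):

/// Stand-in for a `Validator` whose fields are private to its module.
pub struct Validator {
    pubkey: [u8; 48],
    effective_balance: u64,
    slashed: bool,
}

impl Validator {
    pub fn pubkey(&self) -> &[u8; 48] {
        &self.pubkey
    }
    pub fn effective_balance(&self) -> u64 {
        self.effective_balance
    }
    pub fn slashed(&self) -> bool {
        self.slashed
    }
}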
@@ -444,12 +444,12 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_EFFECTIVE_BALANCE_GWEI, &[id], - u64_to_i64(validator.effective_balance), + u64_to_i64(validator.effective_balance()), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_SLASHED, &[id], - i64::from(validator.slashed), + i64::from(validator.slashed()), ); metrics::set_int_gauge( &metrics::VALIDATOR_MONITOR_ACTIVE, @@ -469,22 +469,22 @@ impl ValidatorMonitor { metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_ELIGIBILITY_EPOCH, &[id], - u64_to_i64(validator.activation_eligibility_epoch), + u64_to_i64(validator.activation_eligibility_epoch()), ); metrics::set_int_gauge( &metrics::VALIDATOR_ACTIVATION_EPOCH, &[id], - u64_to_i64(validator.activation_epoch), + u64_to_i64(validator.activation_epoch()), ); metrics::set_int_gauge( &metrics::VALIDATOR_EXIT_EPOCH, &[id], - u64_to_i64(validator.exit_epoch), + u64_to_i64(validator.exit_epoch()), ); metrics::set_int_gauge( &metrics::VALIDATOR_WITHDRAWABLE_EPOCH, &[id], - u64_to_i64(validator.withdrawable_epoch), + u64_to_i64(validator.withdrawable_epoch()), ); } } diff --git a/beacon_node/beacon_chain/tests/attestation_verification.rs b/beacon_node/beacon_chain/tests/attestation_verification.rs index 1040521e5a7..276d1c0cfa2 100644 --- a/beacon_node/beacon_chain/tests/attestation_verification.rs +++ b/beacon_node/beacon_chain/tests/attestation_verification.rs @@ -1032,7 +1032,7 @@ async fn attestation_that_skips_epochs() { let block_slot = harness .chain .store - .get_blinded_block(&block_root) + .get_blinded_block(&block_root, None) .expect("should not error getting block") .expect("should find attestation block") .message() diff --git a/beacon_node/beacon_chain/tests/payload_invalidation.rs b/beacon_node/beacon_chain/tests/payload_invalidation.rs index c39bdeaf366..f5dde29c2fa 100644 --- a/beacon_node/beacon_chain/tests/payload_invalidation.rs +++ b/beacon_node/beacon_chain/tests/payload_invalidation.rs @@ -219,7 +219,7 @@ impl InvalidPayloadRig { let mock_execution_layer = self.harness.mock_execution_layer.as_ref().unwrap(); let head = self.harness.chain.head_snapshot(); - let state = head.beacon_state.clone_with_only_committee_caches(); + let state = head.beacon_state.clone(); let slot = slot_override.unwrap_or(state.slot() + 1); let (block, post_state) = self.harness.make_block(state, slot).await; let block_root = block.canonical_root(); @@ -312,7 +312,7 @@ impl InvalidPayloadRig { self.harness .chain .store - .get_full_block(&block_root) + .get_full_block(&block_root, None) .unwrap() .unwrap(), block, @@ -2013,7 +2013,7 @@ async fn weights_after_resetting_optimistic_status() { .fork_choice_read_lock() .get_block_weight(&head.head_block_root()) .unwrap(), - head.snapshot.beacon_state.validators()[0].effective_balance, + head.snapshot.beacon_state.validators().get(0).unwrap().effective_balance(), "proposer boost should be removed from the head block and the vote of a single validator applied" ); diff --git a/beacon_node/beacon_chain/tests/rewards.rs b/beacon_node/beacon_chain/tests/rewards.rs index b61bea12429..8cc1b69962c 100644 --- a/beacon_node/beacon_chain/tests/rewards.rs +++ b/beacon_node/beacon_chain/tests/rewards.rs @@ -99,8 +99,8 @@ async fn test_sync_committee_rewards() { .get_validator_index(&validator.pubkey) .unwrap() .unwrap(); - let pre_state_balance = parent_state.balances()[validator_index]; - let post_state_balance = state.balances()[validator_index]; + let pre_state_balance = 
*parent_state.balances().get(validator_index).unwrap(); + let post_state_balance = *state.balances().get(validator_index).unwrap(); let sync_committee_reward = rewards.get(&(validator_index as u64)).unwrap_or(&0); if validator_index == proposer_index { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 0bc7798a7ff..08e6d9d6ec2 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -66,12 +66,21 @@ fn get_harness( store: Arc, LevelDB>>, validator_count: usize, ) -> TestHarness { + // Most tests were written expecting instant migration on finalization. + let migrator_config = MigratorConfig::default().blocking().epochs_per_run(0); + + let log = store.log.clone(); + let harness = BeaconChainHarness::builder(MinimalEthSpec) + .logger(log) .default_spec() .keypairs(KEYPAIRS[0..validator_count].to_vec()) .logger(store.logger().clone()) .fresh_disk_store(store) .mock_execution_layer() + .initial_mutator(Box::new(|builder: BeaconChainBuilder<_>| { + builder.store_migrator_config(migrator_config) + })) .build(); harness.advance_slot(); harness @@ -275,6 +284,9 @@ async fn split_slot_restore() { ) .await; + // Uhmm. FIXME(sproul) + // tokio::time::sleep(std::time::Duration::from_secs(10)).await; + store.get_split_slot() }; assert_ne!(split_slot, Slot::new(0)); @@ -329,22 +341,6 @@ async fn epoch_boundary_state_attestation_processing() { let mut checked_pre_fin = false; for (attestation, subnet_id) in late_attestations.into_iter().flatten() { - // load_epoch_boundary_state is idempotent! - let block_root = attestation.data.beacon_block_root; - let block = store - .get_blinded_block(&block_root) - .unwrap() - .expect("block exists"); - let epoch_boundary_state = store - .load_epoch_boundary_state(&block.state_root()) - .expect("no error") - .expect("epoch boundary state exists"); - let ebs_of_ebs = store - .load_epoch_boundary_state(&epoch_boundary_state.canonical_root()) - .expect("no error") - .expect("ebs of ebs exists"); - assert_eq!(epoch_boundary_state, ebs_of_ebs); - // If the attestation is pre-finalization it should be rejected. let finalized_epoch = harness.finalized_checkpoint().epoch; @@ -443,50 +439,6 @@ async fn forwards_iter_block_and_state_roots_until() { test_range(Slot::new(0), head_state.slot()); } -#[tokio::test] -async fn block_replay_with_inaccurate_state_roots() { - let num_blocks_produced = E::slots_per_epoch() * 3 + 31; - let db_path = tempdir().unwrap(); - let store = get_store(&db_path); - let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); - let chain = &harness.chain; - - harness - .extend_chain( - num_blocks_produced as usize, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - // Slot must not be 0 mod 32 or else no blocks will be replayed. 
- let (mut head_state, head_root) = harness.get_current_state_and_root(); - assert_ne!(head_state.slot() % 32, 0); - - let mut fast_head_state = store - .get_inconsistent_state_for_attestation_verification_only( - &head_root, - Some(head_state.slot()), - ) - .unwrap() - .unwrap(); - assert_eq!(head_state.validators(), fast_head_state.validators()); - - head_state.build_all_committee_caches(&chain.spec).unwrap(); - fast_head_state - .build_all_committee_caches(&chain.spec) - .unwrap(); - - assert_eq!( - head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap(), - fast_head_state - .get_cached_active_validator_indices(RelativeEpoch::Current) - .unwrap() - ); -} - #[tokio::test] async fn block_replayer_hooks() { let db_path = tempdir().unwrap(); @@ -517,7 +469,7 @@ async fn block_replayer_hooks() { let mut post_block_slots = vec![]; let mut replay_state = BlockReplayer::::new(state, &chain.spec) - .pre_slot_hook(Box::new(|state| { + .pre_slot_hook(Box::new(|_, state| { pre_slots.push(state.slot()); Ok(()) })) @@ -556,6 +508,8 @@ async fn block_replayer_hooks() { assert_eq!(post_block_slots, block_slots); // States match. + end_state.apply_pending_mutations().unwrap(); + replay_state.apply_pending_mutations().unwrap(); end_state.drop_all_caches().unwrap(); replay_state.drop_all_caches().unwrap(); assert_eq!(end_state, replay_state); @@ -622,7 +576,7 @@ async fn delete_blocks_and_states() { ); let faulty_head_block = store - .get_blinded_block(&faulty_head.into()) + .get_blinded_block(&faulty_head.into(), None) .expect("no errors") .expect("faulty head block exists"); @@ -664,7 +618,7 @@ async fn delete_blocks_and_states() { break; } store.delete_block(&block_root).unwrap(); - assert_eq!(store.get_blinded_block(&block_root).unwrap(), None); + assert_eq!(store.get_blinded_block(&block_root, None).unwrap(), None); } // Deleting frozen states should do nothing @@ -908,7 +862,7 @@ fn get_state_for_block(harness: &TestHarness, block_root: Hash256) -> BeaconStat let head_block = harness .chain .store - .get_blinded_block(&block_root) + .get_blinded_block(&block_root, None) .unwrap() .unwrap(); harness @@ -948,9 +902,17 @@ fn check_shuffling_compatible( |committee_cache, _| { let state_cache = head_state.committee_cache(RelativeEpoch::Current).unwrap(); if current_epoch_shuffling_is_compatible { - assert_eq!(committee_cache, state_cache, "block at slot {slot}"); + assert_eq!( + committee_cache, + state_cache.as_ref(), + "block at slot {slot}" + ); } else { - assert_ne!(committee_cache, state_cache, "block at slot {slot}"); + assert_ne!( + committee_cache, + state_cache.as_ref(), + "block at slot {slot}" + ); } Ok(()) }, @@ -980,9 +942,9 @@ fn check_shuffling_compatible( |committee_cache, _| { let state_cache = head_state.committee_cache(RelativeEpoch::Previous).unwrap(); if previous_epoch_shuffling_is_compatible { - assert_eq!(committee_cache, state_cache); + assert_eq!(committee_cache, state_cache.as_ref()); } else { - assert_ne!(committee_cache, state_cache); + assert_ne!(committee_cache, state_cache.as_ref()); } Ok(()) }, @@ -1997,6 +1959,7 @@ async fn pruning_test( check_no_blocks_exist(&harness, stray_blocks.values()); } +/* FIXME(sproul): adapt this test for new paradigm #[tokio::test] async fn garbage_collect_temp_states_from_failed_block() { let db_path = tempdir().unwrap(); @@ -2051,6 +2014,7 @@ async fn garbage_collect_temp_states_from_failed_block() { let store = get_store(&db_path); assert_eq!(store.iter_temporary_state_roots().count(), 0); } +*/ #[tokio::test] 
async fn weak_subjectivity_sync() { @@ -2078,7 +2042,7 @@ async fn weak_subjectivity_sync() { let wss_block = harness .chain .store - .get_full_block(&wss_checkpoint.root) + .get_full_block(&wss_checkpoint.root, None) .unwrap() .unwrap(); let wss_state = full_store @@ -2223,7 +2187,7 @@ async fn weak_subjectivity_sync() { .unwrap() .map(Result::unwrap) { - let block = store.get_blinded_block(&block_root).unwrap().unwrap(); + let block = store.get_blinded_block(&block_root, None).unwrap().unwrap(); assert_eq!(block.slot(), slot); } @@ -2243,7 +2207,7 @@ async fn weak_subjectivity_sync() { assert_eq!(store.get_anchor_slot(), Some(wss_slot)); // Reconstruct states. - store.clone().reconstruct_historic_states().unwrap(); + store.clone().reconstruct_historic_states(None).unwrap(); assert_eq!(store.get_anchor_slot(), None); } @@ -2634,8 +2598,8 @@ fn assert_chains_pretty_much_the_same(a: &BeaconChain, b // Clone with committee caches only to prevent other caches from messing with the equality // check. assert_eq!( - a_head.beacon_state.clone_with_only_committee_caches(), - b_head.beacon_state.clone_with_only_committee_caches(), + a_head.beacon_state.clone(), + b_head.beacon_state.clone(), "head states should be equal" ); assert_eq!(a.heads(), b.heads(), "heads() should be equal"); @@ -2706,16 +2670,16 @@ fn check_split_slot(harness: &TestHarness, store: Arc, L /// Check that all the states in a chain dump have the correct tree hash. fn check_chain_dump(harness: &TestHarness, expected_len: u64) { - let chain_dump = harness.chain.chain_dump().unwrap(); + let mut chain_dump = harness.chain.chain_dump().unwrap(); let split_slot = harness.chain.store.get_split_slot(); assert_eq!(chain_dump.len() as u64, expected_len); - for checkpoint in &chain_dump { + for checkpoint in &mut chain_dump { // Check that the tree hash of the stored state is as expected assert_eq!( checkpoint.beacon_state_root(), - checkpoint.beacon_state.tree_hash_root(), + checkpoint.beacon_state.update_tree_hash_cache().unwrap(), "tree hash of stored state is incorrect" ); diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index e05b92a2779..e556f9ab598 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -168,6 +168,7 @@ where .task_executor(context.executor.clone()) .custom_spec(spec.clone()) .chain_config(chain_config) + .store_migrator_config(config.store_migrator.clone()) .graffiti(graffiti) .event_handler(event_handler) .execution_layer(execution_layer) diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index 95a00b37492..5701d428abd 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -1,3 +1,4 @@ +use beacon_chain::migrate::MigratorConfig; use beacon_chain::validator_monitor::DEFAULT_INDIVIDUAL_TRACKING_THRESHOLD; use directory::DEFAULT_ROOT_DIR; use environment::LoggerConfig; @@ -70,6 +71,7 @@ pub struct Config { /// via the CLI at runtime, instead of from a configuration file saved to disk. 
pub genesis: ClientGenesis, pub store: store::StoreConfig, + pub store_migrator: MigratorConfig, pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, pub eth1: eth1::Config, @@ -91,6 +93,7 @@ impl Default for Config { log_file: PathBuf::from(""), genesis: <_>::default(), store: <_>::default(), + store_migrator: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), dummy_eth1_backend: false, diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml index cc982aee089..856ffe26f83 100644 --- a/beacon_node/eth1/Cargo.toml +++ b/beacon_node/eth1/Cargo.toml @@ -24,7 +24,7 @@ ethereum_ssz_derive = "0.5.0" tree_hash = "0.5.0" parking_lot = "0.12.0" slog = "2.5.2" -superstruct = "0.5.0" +superstruct = "0.7.0" tokio = { version = "1.14.0", features = ["full"] } state_processing = { path = "../../consensus/state_processing" } lighthouse_metrics = { path = "../../common/lighthouse_metrics"} diff --git a/beacon_node/execution_layer/Cargo.toml b/beacon_node/execution_layer/Cargo.toml index 3ed7ba65d6a..1e1d9a282b6 100644 --- a/beacon_node/execution_layer/Cargo.toml +++ b/beacon_node/execution_layer/Cargo.toml @@ -26,7 +26,7 @@ ethereum_ssz = "0.5.0" ssz_types = "0.5.0" eth2 = { path = "../../common/eth2" } state_processing = { path = "../../consensus/state_processing" } -superstruct = "0.6.0" +superstruct = "0.7.0" lru = "0.7.1" exit-future = "0.2.0" tree_hash = "0.5.0" diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 4d2eb565e1c..b142e008c80 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -12,13 +12,14 @@ use http::deposit_methods::RpcError; pub use json_structures::{JsonWithdrawal, TransitionConfigurationV1}; use reqwest::StatusCode; use serde::{Deserialize, Serialize}; +use ssz_types::FixedVector; use std::convert::TryFrom; use strum::IntoStaticStr; use superstruct::superstruct; pub use types::{ Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadHeader, - ExecutionPayloadRef, FixedVector, ForkName, Hash256, Transactions, Uint256, VariableList, - Withdrawal, Withdrawals, + ExecutionPayloadRef, ForkName, Hash256, Transactions, Uint256, VariableList, Withdrawal, + Withdrawals, }; use types::{ExecutionPayloadCapella, ExecutionPayloadMerge}; diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index d85d294c836..e02b2ad94d0 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -1,11 +1,12 @@ use super::*; use serde::{Deserialize, Serialize}; +use ssz_types::FixedVector; use strum::EnumString; use superstruct::superstruct; use types::{ - EthSpec, ExecutionBlockHash, FixedVector, Transactions, Unsigned, VariableList, Withdrawal, + EthSpec, ExecutionBlockHash, ExecutionPayloadCapella, ExecutionPayloadMerge, Transactions, + Unsigned, VariableList, Withdrawal, }; -use types::{ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge}; #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] diff --git a/beacon_node/genesis/src/interop.rs b/beacon_node/genesis/src/interop.rs index d0129834300..eb21ad76c73 100644 --- a/beacon_node/genesis/src/interop.rs +++ b/beacon_node/genesis/src/interop.rs @@ -178,14 +178,15 @@ mod test { } for v in state.validators() { - let creds = 
v.withdrawal_credentials.as_bytes(); + let creds = v.withdrawal_credentials(); assert_eq!( - creds[0], spec.bls_withdrawal_prefix_byte, + creds.as_bytes()[0], + spec.bls_withdrawal_prefix_byte, "first byte of withdrawal creds should be bls prefix" ); assert_eq!( - &creds[1..], - &hash(&v.pubkey.as_ssz_bytes())[1..], + &creds.as_bytes()[1..], + &hash(&v.pubkey().as_ssz_bytes())[1..], "rest of withdrawal creds should be pubkey hash" ) } diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 5c3e420839d..f75c63ba38f 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -7,9 +7,7 @@ use beacon_chain::{ use eth2::types::{self as api_types}; use slot_clock::SlotClock; use state_processing::state_advance::partial_state_advance; -use types::{ - AttestationDuty, BeaconState, ChainSpec, CloneConfig, Epoch, EthSpec, Hash256, RelativeEpoch, -}; +use types::{AttestationDuty, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, RelativeEpoch}; /// The struct that is returned to the requesting HTTP client. type ApiDuties = api_types::DutiesResponse<Vec<api_types::AttesterData>>; @@ -93,8 +91,7 @@ fn compute_historic_attester_duties<T: BeaconChainTypes>( if head.beacon_state.current_epoch() <= request_epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else {
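Replacing `clone_with(CloneConfig::committee_caches_only())` with a plain `clone()` leans on the tree-states `BeaconState` being backed by structurally shared containers (note the `milhouse` types referenced later in `store/src/errors.rs`), which makes a full clone cheap. A toy sketch of structural sharing, assuming that property rather than showing the actual milhouse implementation:

use std::sync::Arc;

// Toy persistent list: a clone shares all nodes via `Arc`, so it is O(1)
// regardless of length. This is the property that makes `.clone()` viable
// where a trimmed-down clone used to be required.
#[derive(Clone)]
struct PersistentList<T> {
    head: Option<Arc<Node<T>>>,
}

struct Node<T> {
    value: T,
    next: Option<Arc<Node<T>>>,
}

impl<T> PersistentList<T> {
    fn new() -> Self {
        Self { head: None }
    }

    // Returns a new list sharing the entire old list as its tail.
    fn push_front(&self, value: T) -> Self {
        Self {
            head: Some(Arc::new(Node {
                value,
                next: self.head.clone(),
            })),
        }
    }
}

fn main() {
    let a = PersistentList::new().push_front(1).push_front(2);
    let b = a.clone(); // shares every node with `a`; no deep copy
    let _ = (a, b);
}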
diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 1b924f38288..e2295fc2022 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -277,7 +277,7 @@ pub fn get_block_packing_efficiency( )); let pre_slot_hook = - |state: &mut BeaconState<T>| -> Result<(), PackingEfficiencyError> { + |_, state: &mut BeaconState<T>| -> Result<(), PackingEfficiencyError> { // Add attestations to `available_attestations`. handler.lock().add_attestations(state.slot())?; Ok(()) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 55e00bab340..e26b25b02f5 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -46,7 +46,6 @@ use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use ssz::Encode; pub use state_id::StateId; -use std::borrow::Cow; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::path::PathBuf; @@ -613,7 +612,7 @@ pub fn serve( query.id.as_ref().map_or(true, |ids| { ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey + validator.pubkey() == pubkey } ValidatorId::Index(param_index) => { *param_index == *index as u64 @@ -673,7 +672,7 @@ pub fn serve( query.id.as_ref().map_or(true, |ids| { ids.iter().any(|id| match id { ValidatorId::PublicKey(pubkey) => { - &validator.pubkey == pubkey + validator.pubkey() == pubkey } ValidatorId::Index(param_index) => { *param_index == *index as u64 @@ -731,9 +730,13 @@ pub fn serve( "Invalid validator ID".to_string(), )) })) + .and(log_filter.clone()) .and(warp::path::end()) .and_then( - |state_id: StateId, chain: Arc<BeaconChain<T>>, validator_id: ValidatorId| { + |state_id: StateId, + chain: Arc<BeaconChain<T>>, + validator_id: ValidatorId, + log: Logger| { blocking_json_task(move || { let (data, execution_optimistic, finalized) = state_id .map_state_and_execution_optimistic_and_finalized( &chain, |state, execution_optimistic, finalized| { let index_opt = match &validator_id { ValidatorId::PublicKey(pubkey) => { - state.validators().iter().position(|v| v.pubkey == *pubkey) + // Fast path: use the pubkey cache which is probably + // initialised at the head. + match state.get_validator_index_read_only(pubkey) { + Ok(result) => result, + Err(e) => { + // Slow path, fall back to iteration. + debug!( + log, + "Validator look-up cache miss"; + "reason" => ?e, + ); + state + .validators() + .iter() + .position(|v| v.pubkey() == pubkey) + } + } } ValidatorId::Index(index) => Some(*index as usize), }; @@ -832,10 +851,10 @@ pub fn serve( None }; - let committee_cache = if let Some(ref shuffling) = + let committee_cache = if let Some(shuffling) = maybe_cached_shuffling { - Cow::Borrowed(&**shuffling) + shuffling } else { let possibly_built_cache = match RelativeEpoch::from_epoch(current_epoch, epoch) { Ok(relative_epoch) if state.committee_cache_is_initialized(relative_epoch) => { state .committee_cache(relative_epoch) - .map(Cow::Borrowed) + .map(Arc::clone) } _ => CommitteeCache::initialized( state, epoch, &chain.spec, - ) - .map(Cow::Owned), + ), } .map_err(|e| { match e { @@ -901,7 +919,7 @@ pub fn serve( { cache_write.insert_committee_cache( shuffling_id, - &*possibly_built_cache, + &possibly_built_cache, ); } } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 7e946b89e72..fd84d76b034 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -10,7 +10,7 @@ use safe_arith::SafeArith; use slog::{debug, Logger}; use slot_clock::SlotClock; use std::cmp::Ordering; -use types::{CloneConfig, Epoch, EthSpec, Hash256, Slot}; +use types::{Epoch, EthSpec, Hash256, Slot};
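Handing out `Arc<CommitteeCache>` instead of `Cow<CommitteeCache>` turns a shuffling-cache hit into a reference-count bump rather than a borrow (or a deep copy on the owned path). A minimal sketch of the pattern with simplified, hypothetical types:

use std::collections::HashMap;
use std::sync::Arc;

struct ShufflingCache {
    // epoch -> committee data; values are shared, not copied, on a hit
    map: HashMap<u64, Arc<Vec<usize>>>,
}

impl ShufflingCache {
    fn get_or_build(&mut self, epoch: u64) -> Arc<Vec<usize>> {
        if let Some(cached) = self.map.get(&epoch) {
            return Arc::clone(cached); // O(1): no rebuild, no deep copy
        }
        let built = Arc::new(expensive_build(epoch));
        self.map.insert(epoch, Arc::clone(&built));
        built
    }
}

// Stand-in for committee cache construction.
fn expensive_build(epoch: u64) -> Vec<usize> {
    (0..8).map(|i| (epoch as usize) * 8 + i).collect()
}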
/// The struct that is returned to the requesting HTTP client. type ApiDuties = api_types::DutiesResponse<Vec<api_types::ProposerData>>; @@ -192,8 +192,7 @@ fn compute_historic_proposer_duties<T: BeaconChainTypes>( if head.beacon_state.current_epoch() <= epoch { Some(( head.beacon_state_root(), - head.beacon_state - .clone_with(CloneConfig::committee_caches_only()), + head.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), )) } else { diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index 9e4aadef17e..d5e7d3df0f9 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -163,10 +163,7 @@ impl StateId { .head_and_execution_status() .map_err(warp_utils::reject::beacon_chain_error)?; return Ok(( - cached_head - .snapshot - .beacon_state - .clone_with_only_committee_caches(), + cached_head.snapshot.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), false, )); diff --git a/beacon_node/http_api/src/validator_inclusion.rs b/beacon_node/http_api/src/validator_inclusion.rs index f22ced1e693..0e078e7d035 100644 --- a/beacon_node/http_api/src/validator_inclusion.rs +++ b/beacon_node/http_api/src/validator_inclusion.rs @@ -99,13 +99,13 @@ pub fn validator_inclusion_data<T: BeaconChainTypes>( let summary = get_epoch_processing_summary(&mut state, &chain.spec)?; Ok(Some(ValidatorInclusionData { - is_slashed: validator.slashed, + is_slashed: validator.slashed(), is_withdrawable_in_current_epoch: validator.is_withdrawable_at(epoch), is_active_unslashed_in_current_epoch: summary .is_active_unslashed_in_current_epoch(validator_index), is_active_unslashed_in_previous_epoch: summary .is_active_unslashed_in_previous_epoch(validator_index), - current_epoch_effective_balance_gwei: validator.effective_balance, + current_epoch_effective_balance_gwei: validator.effective_balance(), is_current_epoch_target_attester: summary .is_current_epoch_target_attester(validator_index) .map_err(convert_cache_error)?, diff --git a/beacon_node/lighthouse_network/Cargo.toml b/beacon_node/lighthouse_network/Cargo.toml index ca15b5ef2c3..adda2d4c4db 100644 --- a/beacon_node/lighthouse_network/Cargo.toml +++ b/beacon_node/lighthouse_network/Cargo.toml @@ -39,7 +39,7 @@ rand = "0.8.5" directory = { path = "../../common/directory" } regex = "1.5.5" strum = { version = "0.24.0", features = ["derive"] } -superstruct = "0.5.0" +superstruct = "0.7.0" prometheus-client = "0.18.0" unused_port = { path = "../../common/unused_port" } delay_map = "0.3.0" diff --git a/beacon_node/network/src/persisted_dht.rs b/beacon_node/network/src/persisted_dht.rs index e69230c50c0..6176791164f 100644 --- a/beacon_node/network/src/persisted_dht.rs +++ b/beacon_node/network/src/persisted_dht.rs @@ -44,8 +44,8 @@ impl StoreItem for PersistedDht { DBColumn::DhtEnrs } - fn as_store_bytes(&self) -> Vec<u8> { - rlp::encode_list(&self.enrs).to_vec() + fn as_store_bytes(&self) -> Result<Vec<u8>, StoreError> { + Ok(rlp::encode_list(&self.enrs).to_vec()) } fn from_store_bytes(bytes: &[u8]) -> Result<Self, StoreError> {
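In the `attestation.rs` hunk below, `SqrtTotalActiveBalance` is computed once per packing run and reused for every attesting validator, because the integer square root is the expensive, validator-independent part of the phase 0 base reward. A standalone sketch of that arithmetic (mainnet constants; names illustrative, not Lighthouse's exact API):

/// Integer square root via Newton's method, as the spec uses for balances.
fn integer_sqrt(n: u64) -> u64 {
    let mut x = n;
    let mut y = x.saturating_add(1) / 2;
    while y < x {
        x = y;
        y = (x + n / x) / 2;
    }
    x
}

const BASE_REWARD_FACTOR: u64 = 64;
const BASE_REWARDS_PER_EPOCH: u64 = 4;

/// Computed once, shared across all validators in the loop.
struct SqrtTotalActiveBalance(u64);

impl SqrtTotalActiveBalance {
    fn new(total_active_balance: u64) -> Self {
        // `.max(1)` guards this toy example against division by zero.
        Self(integer_sqrt(total_active_balance).max(1))
    }
}

/// Phase 0 formula: eb * factor / sqrt(total) / rewards_per_epoch.
fn base_reward(effective_balance: u64, sqrt_total: &SqrtTotalActiveBalance) -> u64 {
    effective_balance * BASE_REWARD_FACTOR / sqrt_total.0 / BASE_REWARDS_PER_EPOCH
}

fn main() {
    // 10M ETH total active balance (in Gwei), 32 ETH effective balance.
    let sqrt_total = SqrtTotalActiveBalance::new(10_000_000 * 1_000_000_000);
    assert_eq!(base_reward(32_000_000_000, &sqrt_total), 5_120);
}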
diff --git a/beacon_node/operation_pool/src/attestation.rs b/beacon_node/operation_pool/src/attestation.rs index fbbd5d7ddcf..ab24ea17119 100644 --- a/beacon_node/operation_pool/src/attestation.rs +++ b/beacon_node/operation_pool/src/attestation.rs @@ -47,18 +47,17 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { .get_beacon_committee(att.data.slot, att.data.index) .ok()?; let indices = get_attesting_indices::<T>(committee.committee, &fresh_validators).ok()?; + let sqrt_total_active_balance = base::SqrtTotalActiveBalance::new(total_active_balance); let fresh_validators_rewards: HashMap<u64, u64> = indices .iter() .copied() .flat_map(|validator_index| { - let reward = base::get_base_reward( - state, - validator_index as usize, - total_active_balance, - spec, - ) - .ok()? - .checked_div(spec.proposer_reward_quotient)?; + let effective_balance = + state.get_effective_balance(validator_index as usize).ok()?; + let reward = + base::get_base_reward(effective_balance, sqrt_total_active_balance, spec) + .ok()? + .checked_div(spec.proposer_reward_quotient)?; Some((validator_index, reward)) }) .collect(); @@ -99,8 +98,11 @@ impl<'a, T: EthSpec> AttMaxCover<'a, T> { let mut proposer_reward_numerator = 0; + // FIXME(sproul): store base_reward in reward cache + // let effective_balance = reward_cache.get_effective_balance(index)?; + let effective_balance = state.get_effective_balance(index as usize).ok()?; let base_reward = - altair::get_base_reward(state, index as usize, base_reward_per_increment, spec) + altair::get_base_reward(effective_balance, base_reward_per_increment, spec) .ok()?; for (flag_index, weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 24c0623f5c3..cded6b081d8 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -379,7 +379,7 @@ impl<T: EthSpec> OperationPool<T> { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + .map_or(false, |validator| !validator.slashed()) }, |slashing| slashing.as_inner().clone(), T::MaxProposerSlashings::to_usize(), @@ -438,7 +438,7 @@ impl<T: EthSpec> OperationPool<T> { pub fn prune_proposer_slashings(&self, head_state: &BeaconState<T>) { prune_validator_hash_map( &mut self.proposer_slashings.write(), - |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, head_state, ); } @@ -457,7 +457,7 @@ impl<T: EthSpec> OperationPool<T> { // // We cannot check the `slashed` field since the `head` is not finalized and // a fork could un-slash someone. - validator.exit_epoch > head_state.finalized_checkpoint().epoch + validator.exit_epoch() > head_state.finalized_checkpoint().epoch }) .map_or(false, |indices| !indices.is_empty()); @@ -514,7 +514,7 @@ impl<T: EthSpec> OperationPool<T> { // // We choose simplicity over the gain of pruning more exits since they are small and // should not be seen frequently.
- |_, validator| validator.exit_epoch <= head_state.finalized_checkpoint().epoch, + |_, validator| validator.exit_epoch() <= head_state.finalized_checkpoint().epoch, head_state, ); } diff --git a/beacon_node/operation_pool/src/persistence.rs b/beacon_node/operation_pool/src/persistence.rs index 35d2b4ce7ee..e21cfdf4777 100644 --- a/beacon_node/operation_pool/src/persistence.rs +++ b/beacon_node/operation_pool/src/persistence.rs @@ -205,8 +205,8 @@ impl StoreItem for PersistedOperationPoolV5 { DBColumn::OpPool } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -219,8 +219,8 @@ impl StoreItem for PersistedOperationPoolV12 { DBColumn::OpPool } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -233,8 +233,8 @@ impl StoreItem for PersistedOperationPoolV14 { DBColumn::OpPool } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -247,8 +247,8 @@ impl StoreItem for PersistedOperationPoolV15 { DBColumn::OpPool } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -262,8 +262,8 @@ impl StoreItem for PersistedOperationPool { DBColumn::OpPool } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, StoreError> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index e763d93f82a..928faf64101 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -638,7 +638,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) ) /* - * Database purging and compaction. + * Database. */ .arg( Arg::with_name("purge-db") @@ -658,6 +658,21 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) + .arg( + Arg::with_name("state-cache-size") + .long("state-cache-size") + .value_name("SIZE") + .help("Specifies how many states the database should cache in memory [default: 128]") + .takes_value(true) + ) + .arg( + Arg::with_name("compression-level") + .long("compression-level") + .value_name("LEVEL") + .help("Compression level (-99 to 22) for zstd compression applied to states on disk \ + [default: 1]. You may change the compression level freely without re-syncing.") + .takes_value(true) + ) .arg( Arg::with_name("prune-payloads") .long("prune-payloads") @@ -667,7 +682,26 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .takes_value(true) .default_value("true") ) - + .arg( + Arg::with_name("db-migration-period") + .long("db-migration-period") + .value_name("EPOCHS") + .help("Specifies the number of epochs to wait between applying each finalization \ + migration to the database. Applying migrations less frequently can lead to \ + less total disk writes.") + .default_value("4") + .takes_value(true) + ) + .arg( + Arg::with_name("epochs-per-state-diff") + .long("epochs-per-state-diff") + .value_name("EPOCHS") + .help("Number of epochs between state diffs stored in the database. 
Lower values \ + result in more writes and more data stored, while higher values result in \ + more block replaying and longer load times in case of cache miss.") + .default_value("4") + .takes_value(true) + ) /* * Misc. */ @@ -1117,4 +1151,10 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { developers. This directory is not pruned, users should be careful to avoid \ filling up their disks.") ) + .arg( + Arg::with_name("unsafe-and-dangerous-mode") + .long("unsafe-and-dangerous-mode") + .help("Don't use this flag unless you know what you're doing. Go back and download a \ + stable Lighthouse release") + ) } diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index c59b297c1b2..1d7b71b42c9 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -13,7 +13,7 @@ use http_api::TlsConfig; use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; -use slog::{info, warn, Logger}; +use slog::{crit, info, warn, Logger}; use std::cmp; use std::cmp::max; use std::fmt::Debug; @@ -373,14 +373,25 @@ pub fn get_config( client_config.freezer_db_path = Some(PathBuf::from(freezer_dir)); } - let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; - client_config.store.slots_per_restore_point = sprp; - client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; + if !cli_args.is_present("unsafe-and-dangerous-mode") { + crit!( + log, + "This is an EXPERIMENTAL build of Lighthouse. If you are seeing this message you may \ + have downloaded the wrong version by mistake. If so, go back and download the latest \ + stable release. If you are certain that you want to continue, read the docs for the \ + latest experimental release and continue at your own risk." + ); + return Err("FATAL ERROR, YOU HAVE THE WRONG LIGHTHOUSE BINARY".into()); + } - if let Some(block_cache_size) = cli_args.value_of("block-cache-size") { - client_config.store.block_cache_size = block_cache_size - .parse() - .map_err(|_| "block-cache-size is not a valid integer".to_string())?; + if let Some(block_cache_size) = clap_utils::parse_optional(cli_args, "block-cache-size")? { + client_config.store.block_cache_size = block_cache_size; + } + if let Some(state_cache_size) = clap_utils::parse_optional(cli_args, "state-cache-size")? { + client_config.store.state_cache_size = state_cache_size; + } + if let Some(compression_level) = clap_utils::parse_optional(cli_args, "compression-level")? { + client_config.store.compression_level = compression_level; } if let Some(historic_state_cache_size) = cli_args.value_of("historic-state-cache-size") { @@ -400,6 +411,17 @@ pub fn get_config( client_config.store.prune_payloads = prune_payloads; } + if let Some(epochs_per_migration) = clap_utils::parse_optional(cli_args, "db-migration-period")? + { + client_config.store_migrator.epochs_per_run = epochs_per_migration; + } + + if let Some(epochs_per_state_diff) = + clap_utils::parse_optional(cli_args, "epochs-per-state-diff")? + { + client_config.store.epochs_per_state_diff = epochs_per_state_diff; + } + /* * Zero-ports * @@ -1294,25 +1316,6 @@ pub fn get_data_dir(cli_args: &ArgMatches) -> PathBuf { .unwrap_or_else(|| PathBuf::from(".")) } -/// Get the `slots_per_restore_point` value to use for the database. -/// -/// Return `(sprp, set_explicitly)` where `set_explicitly` is `true` if the user provided the value. 
-pub fn get_slots_per_restore_point( - cli_args: &ArgMatches, -) -> Result<(u64, bool), String> { - if let Some(slots_per_restore_point) = - clap_utils::parse_optional(cli_args, "slots-per-restore-point")? - { - Ok((slots_per_restore_point, true)) - } else { - let default = std::cmp::min( - E::slots_per_historical_root() as u64, - store::config::DEFAULT_SLOTS_PER_RESTORE_POINT, - ); - Ok((default, false)) - } -} - /// Parses the `cli_value` as a comma-separated string of values to be parsed with `parser`. /// /// If there is more than one value, log a warning. If there are no values, return an error. diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index 47694825ca7..5735ec2a11a 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -13,7 +13,7 @@ use beacon_chain::{ use clap::ArgMatches; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; -pub use config::{get_config, get_data_dir, get_slots_per_restore_point, set_network_config}; +pub use config::{get_config, get_data_dir, set_network_config}; use environment::RuntimeContext; pub use eth2_config::Eth2Config; use slasher::{DatabaseBackendOverride, Slasher}; diff --git a/beacon_node/store/Cargo.toml b/beacon_node/store/Cargo.toml index a952f1b2ffb..3c02a51fb56 100644 --- a/beacon_node/store/Cargo.toml +++ b/beacon_node/store/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" [dev-dependencies] tempfile = "3.1.0" beacon_chain = {path = "../beacon_chain"} +logging = { path = "../../common/logging" } [dependencies] db-key = "0.0.5" @@ -16,13 +17,20 @@ itertools = "0.10.0" ethereum_ssz = "0.5.0" ethereum_ssz_derive = "0.5.0" types = { path = "../../consensus/types" } +safe_arith = { path = "../../consensus/safe_arith" } state_processing = { path = "../../consensus/state_processing" } slog = "2.5.2" serde = "1.0.116" serde_derive = "1.0.116" lazy_static = "1.4.0" lighthouse_metrics = { path = "../../common/lighthouse_metrics" } -lru = "0.7.1" +lru = "0.10.0" sloggers = { version = "2.1.1", features = ["json"] } directory = { path = "../../common/directory" } -strum = { version = "0.24.0", features = ["derive"] } \ No newline at end of file +tree_hash = "0.5.0" +take-until = "0.1.0" +zstd = "0.11.0" +strum = { version = "0.24.0", features = ["derive"] } +bls = { path = "../../crypto/bls" } +smallvec = "1.0.0" +xdelta3 = "0.1.5" # FIXME(sproul): fix bindgen version issues diff --git a/beacon_node/store/src/chunk_writer.rs b/beacon_node/store/src/chunk_writer.rs deleted file mode 100644 index 059b812e74c..00000000000 --- a/beacon_node/store/src/chunk_writer.rs +++ /dev/null @@ -1,75 +0,0 @@ -use crate::chunked_vector::{chunk_key, Chunk, ChunkError, Field}; -use crate::{Error, KeyValueStore, KeyValueStoreOp}; -use types::EthSpec; - -/// Buffered writer for chunked vectors (block roots mainly). -pub struct ChunkWriter<'a, F, E, S> -where - F: Field, - E: EthSpec, - S: KeyValueStore, -{ - /// Buffered chunk awaiting writing to disk (always dirty). - chunk: Chunk, - /// Chunk index of `chunk`. - index: usize, - store: &'a S, -} - -impl<'a, F, E, S> ChunkWriter<'a, F, E, S> -where - F: Field, - E: EthSpec, - S: KeyValueStore, -{ - pub fn new(store: &'a S, vindex: usize) -> Result { - let chunk_index = F::chunk_index(vindex); - let chunk = Chunk::load(store, F::column(), &chunk_key(chunk_index))? 
- .unwrap_or_else(|| Chunk::new(vec![F::Value::default(); F::chunk_size()])); - - Ok(Self { - chunk, - index: chunk_index, - store, - }) - } - - /// Set the value at a given vector index, writing the current chunk and moving on if necessary. - pub fn set( - &mut self, - vindex: usize, - value: F::Value, - batch: &mut Vec, - ) -> Result<(), Error> { - let chunk_index = F::chunk_index(vindex); - - // Advance to the next chunk. - if chunk_index != self.index { - self.write(batch)?; - *self = Self::new(self.store, vindex)?; - } - - let i = vindex % F::chunk_size(); - let existing_value = &self.chunk.values[i]; - - if existing_value == &value || existing_value == &F::Value::default() { - self.chunk.values[i] = value; - Ok(()) - } else { - Err(ChunkError::Inconsistent { - field: F::column(), - chunk_index, - existing_value: format!("{:?}", existing_value), - new_value: format!("{:?}", value), - } - .into()) - } - } - - /// Write the current chunk to disk. - /// - /// Should be called before the writer is dropped, in order to write the final chunk to disk. - pub fn write(&self, batch: &mut Vec) -> Result<(), Error> { - self.chunk.store(F::column(), &chunk_key(self.index), batch) - } -} diff --git a/beacon_node/store/src/chunked_iter.rs b/beacon_node/store/src/chunked_iter.rs deleted file mode 100644 index 8ef0b6d201d..00000000000 --- a/beacon_node/store/src/chunked_iter.rs +++ /dev/null @@ -1,123 +0,0 @@ -use crate::chunked_vector::{chunk_key, Chunk, Field}; -use crate::{HotColdDB, ItemStore}; -use slog::error; -use types::{ChainSpec, EthSpec, Slot}; - -/// Iterator over the values of a `BeaconState` vector field (like `block_roots`). -/// -/// Uses the freezer DB's separate table to load the values. -pub struct ChunkedVectorIter<'a, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - pub(crate) store: &'a HotColdDB, - current_vindex: usize, - pub(crate) end_vindex: usize, - next_cindex: usize, - current_chunk: Chunk, -} - -impl<'a, F, E, Hot, Cold> ChunkedVectorIter<'a, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - /// Create a new iterator which can yield elements from `start_vindex` up to the last - /// index stored by the restore point at `last_restore_point_slot`. - /// - /// The `last_restore_point` slot should be the slot of a recent restore point as obtained from - /// `HotColdDB::get_latest_restore_point_slot`. We pass it as a parameter so that the caller can - /// maintain a stable view of the database (see `HybridForwardsBlockRootsIterator`). - pub fn new( - store: &'a HotColdDB, - start_vindex: usize, - last_restore_point_slot: Slot, - spec: &ChainSpec, - ) -> Self { - let (_, end_vindex) = F::start_and_end_vindex(last_restore_point_slot, spec); - - // Set the next chunk to the one containing `start_vindex`. - let next_cindex = start_vindex / F::chunk_size(); - // Set the current chunk to the empty chunk, it will never be read. - let current_chunk = Chunk::default(); - - Self { - store, - current_vindex: start_vindex, - end_vindex, - next_cindex, - current_chunk, - } - } -} - -impl<'a, F, E, Hot, Cold> Iterator for ChunkedVectorIter<'a, F, E, Hot, Cold> -where - F: Field, - E: EthSpec, - Hot: ItemStore, - Cold: ItemStore, -{ - type Item = (usize, F::Value); - - fn next(&mut self) -> Option { - let chunk_size = F::chunk_size(); - - // Range exhausted, return `None` forever. - if self.current_vindex >= self.end_vindex { - None - } - // Value lies in the current chunk, return it. 
- else if self.current_vindex < self.next_cindex * chunk_size { - let vindex = self.current_vindex; - let val = self - .current_chunk - .values - .get(vindex % chunk_size) - .cloned() - .or_else(|| { - error!( - self.store.log, - "Missing chunk value in forwards iterator"; - "vector index" => vindex - ); - None - })?; - self.current_vindex += 1; - Some((vindex, val)) - } - // Need to load the next chunk, load it and recurse back into the in-range case. - else { - self.current_chunk = Chunk::load( - &self.store.cold_db, - F::column(), - &chunk_key(self.next_cindex), - ) - .map_err(|e| { - error!( - self.store.log, - "Database error in forwards iterator"; - "chunk index" => self.next_cindex, - "error" => format!("{:?}", e) - ); - e - }) - .ok()? - .or_else(|| { - error!( - self.store.log, - "Missing chunk in forwards iterator"; - "chunk index" => self.next_cindex - ); - None - })?; - self.next_cindex += 1; - self.next() - } - } -} diff --git a/beacon_node/store/src/chunked_vector.rs b/beacon_node/store/src/chunked_vector.rs deleted file mode 100644 index 73edfbb0744..00000000000 --- a/beacon_node/store/src/chunked_vector.rs +++ /dev/null @@ -1,883 +0,0 @@ -//! Space-efficient storage for `BeaconState` vector fields. -//! -//! This module provides logic for splitting the `FixedVector` fields of a `BeaconState` into -//! chunks, and storing those chunks in contiguous ranges in the on-disk database. The motivation -//! for doing this is avoiding massive duplication in every on-disk state. For example, rather than -//! storing the whole `historical_roots` vector, which is updated once every couple of thousand -//! slots, at every slot, we instead store all the historical values as a chunked vector on-disk, -//! and fetch only the slice we need when reconstructing the `historical_roots` of a state. -//! -//! ## Terminology -//! -//! * **Chunk size**: the number of vector values stored per on-disk chunk. -//! * **Vector index** (vindex): index into all the historical values, identifying a single element -//! of the vector being stored. -//! * **Chunk index** (cindex): index into the keyspace of the on-disk database, identifying a chunk -//! of elements. To find the chunk index of a vector index: `cindex = vindex / chunk_size`. -use self::UpdatePattern::*; -use crate::*; -use ssz::{Decode, Encode}; -use typenum::Unsigned; -use types::historical_summary::HistoricalSummary; - -/// Description of how a `BeaconState` field is updated during state processing. -/// -/// When storing a state, this allows us to efficiently store only those entries -/// which are not present in the DB already. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum UpdatePattern { - /// The value is updated once per `n` slots. - OncePerNSlots { - n: u64, - /// The slot at which the field begins to accumulate values. - /// - /// The field should not be read or written until `activation_slot` is reached, and the - /// activation slot should act as an offset when converting slots to vector indices. - activation_slot: Option<Slot>, - /// The slot at which the field ceases to accumulate values. - /// - /// If this is `None` then the field is continually updated. - deactivation_slot: Option<Slot>, - }, - /// The value is updated once per epoch, for the epoch `current_epoch - lag`. - OncePerEpoch { lag: u64 }, -} - -/// Map a chunk index to bytes that can be used to key the NoSQL database. -/// -/// We shift chunks up by 1 to make room for a genesis chunk that is handled separately.
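-/// For example, with the default chunk size of 128, vector index 300 falls in chunk 300 / 128 = 2, which is stored under the big-endian key for 2 + 1 = 3, while key 0 is reserved for the genesis value.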
-pub fn chunk_key(cindex: usize) -> [u8; 8] { - (cindex as u64 + 1).to_be_bytes() -} - -/// Return the database key for the genesis value. -fn genesis_value_key() -> [u8; 8] { - 0u64.to_be_bytes() -} - -/// Trait for types representing fields of the `BeaconState`. -/// -/// All of the required methods are type-level, because we do most things with fields at the -/// type-level. We require their value-level witnesses to be `Copy` so that we can avoid the -/// turbofish when calling functions like `store_updated_vector`. -pub trait Field: Copy { - /// The type of value stored in this field: the `T` from `FixedVector`. - /// - /// The `Default` impl will be used to fill extra vector entries. - type Value: Decode + Encode + Default + Clone + PartialEq + std::fmt::Debug; - - /// The length of this field: the `N` from `FixedVector`. - type Length: Unsigned; - - /// The database column where the integer-indexed chunks for this field should be stored. - /// - /// Each field's column **must** be unique. - fn column() -> DBColumn; - - /// Update pattern for this field, so that we can do differential updates. - fn update_pattern(spec: &ChainSpec) -> UpdatePattern; - - /// The number of values to store per chunk on disk. - /// - /// Default is 128 so that we read/write 4K pages when the values are 32 bytes. - // TODO: benchmark and optimise this parameter - fn chunk_size() -> usize { - 128 - } - - /// Convert a v-index (vector index) to a chunk index. - fn chunk_index(vindex: usize) -> usize { - vindex / Self::chunk_size() - } - - /// Get the value of this field at the given vector index, from the state. - fn get_value( - state: &BeaconState, - vindex: u64, - spec: &ChainSpec, - ) -> Result; - - /// True if this is a `FixedLengthField`, false otherwise. - fn is_fixed_length() -> bool; - - /// Compute the start and end vector indices of the slice of history required at `current_slot`. - /// - /// ## Example - /// - /// If we have a field that is updated once per epoch, then the end vindex will be - /// `current_epoch + 1`, because we want to include the value for the current epoch, and the - /// start vindex will be `end_vindex - Self::Length`, because that's how far back we can look. - fn start_and_end_vindex(current_slot: Slot, spec: &ChainSpec) -> (usize, usize) { - // We take advantage of saturating subtraction on slots and epochs - match Self::update_pattern(spec) { - OncePerNSlots { - n, - activation_slot, - deactivation_slot, - } => { - // Per-slot changes exclude the index for the current slot, because - // it won't be set until the slot completes (think of `state_roots`, `block_roots`). - // This also works for the `historical_roots` because at the `n`th slot, the 0th - // entry of the list is created, and before that the list is empty. - // - // To account for the switch from historical roots to historical summaries at - // Capella we also modify the current slot by the activation and deactivation slots. - // The activation slot acts as an offset (subtraction) while the deactivation slot - // acts as a clamp (min). - let slot_with_clamp = deactivation_slot.map_or(current_slot, |deactivation_slot| { - std::cmp::min(current_slot, deactivation_slot) - }); - let slot_with_clamp_and_offset = if let Some(activation_slot) = activation_slot { - slot_with_clamp - activation_slot - } else { - // Return (0, 0) to indicate that the field should not be read/written. 
- return (0, 0); - }; - let end_vindex = slot_with_clamp_and_offset / n; - let start_vindex = end_vindex - Self::Length::to_u64(); - (start_vindex.as_usize(), end_vindex.as_usize()) - } - OncePerEpoch { lag } => { - // Per-epoch changes include the index for the current epoch, because it - // will have been set at the most recent epoch boundary. - let current_epoch = current_slot.epoch(E::slots_per_epoch()); - let end_epoch = current_epoch + 1 - lag; - let start_epoch = end_epoch + lag - Self::Length::to_u64(); - (start_epoch.as_usize(), end_epoch.as_usize()) - } - } - } - - /// Given an `existing_chunk` stored in the DB, construct an updated chunk to replace it. - fn get_updated_chunk( - existing_chunk: &Chunk, - chunk_index: usize, - start_vindex: usize, - end_vindex: usize, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result, Error> { - let chunk_size = Self::chunk_size(); - let mut new_chunk = Chunk::new(vec![Self::Value::default(); chunk_size]); - - for i in 0..chunk_size { - let vindex = chunk_index * chunk_size + i; - if vindex >= start_vindex && vindex < end_vindex { - let vector_value = Self::get_value(state, vindex as u64, spec)?; - - if let Some(existing_value) = existing_chunk.values.get(i) { - if *existing_value != vector_value && *existing_value != Self::Value::default() - { - return Err(ChunkError::Inconsistent { - field: Self::column(), - chunk_index, - existing_value: format!("{:?}", existing_value), - new_value: format!("{:?}", vector_value), - } - .into()); - } - } - - new_chunk.values[i] = vector_value; - } else { - new_chunk.values[i] = existing_chunk.values.get(i).cloned().unwrap_or_default(); - } - } - - Ok(new_chunk) - } - - /// Determine whether a state at `slot` possesses (or requires) the genesis value. - fn slot_needs_genesis_value(slot: Slot, spec: &ChainSpec) -> bool { - let (_, end_vindex) = Self::start_and_end_vindex(slot, spec); - match Self::update_pattern(spec) { - // If the end_vindex is less than the length of the vector, then the vector - // has not yet been completely filled with non-genesis values, and so the genesis - // value is still required. - OncePerNSlots { .. } => { - Self::is_fixed_length() && end_vindex < Self::Length::to_usize() - } - // If the field has lag, then it takes an extra `lag` vindices beyond the - // `end_vindex` before the vector has been filled with non-genesis values. - OncePerEpoch { lag } => { - Self::is_fixed_length() && end_vindex + (lag as usize) < Self::Length::to_usize() - } - } - } - - /// Load the genesis value for a fixed length field from the store. - /// - /// This genesis value should be used to fill the initial state of the vector. - fn load_genesis_value>(store: &S) -> Result { - let key = &genesis_value_key()[..]; - let chunk = - Chunk::load(store, Self::column(), key)?.ok_or(ChunkError::MissingGenesisValue)?; - chunk - .values - .first() - .cloned() - .ok_or_else(|| ChunkError::MissingGenesisValue.into()) - } - - /// Store the given `value` as the genesis value for this field, unless stored already. - /// - /// Check the existing value (if any) for consistency with the value we intend to store, and - /// return an error if they are inconsistent. - fn check_and_store_genesis_value>( - store: &S, - value: Self::Value, - ops: &mut Vec, - ) -> Result<(), Error> { - let key = &genesis_value_key()[..]; - - if let Some(existing_chunk) = Chunk::::load(store, Self::column(), key)? 
{ - if existing_chunk.values.len() != 1 { - Err(ChunkError::InvalidGenesisChunk { - field: Self::column(), - expected_len: 1, - observed_len: existing_chunk.values.len(), - } - .into()) - } else if existing_chunk.values[0] != value { - Err(ChunkError::InconsistentGenesisValue { - field: Self::column(), - existing_value: format!("{:?}", existing_chunk.values[0]), - new_value: format!("{:?}", value), - } - .into()) - } else { - Ok(()) - } - } else { - let chunk = Chunk::new(vec![value]); - chunk.store(Self::column(), &genesis_value_key()[..], ops)?; - Ok(()) - } - } - - /// Extract the genesis value for a fixed length field from an - /// - /// Will only return a correct value if `slot_needs_genesis_value(state.slot(), spec) == true`. - fn extract_genesis_value( - state: &BeaconState, - spec: &ChainSpec, - ) -> Result { - let (_, end_vindex) = Self::start_and_end_vindex(state.slot(), spec); - match Self::update_pattern(spec) { - // Genesis value is guaranteed to exist at `end_vindex`, as it won't yet have been - // updated - OncePerNSlots { .. } => Ok(Self::get_value(state, end_vindex as u64, spec)?), - // If there's lag, the value of the field at the vindex *without the lag* - // should still be set to the genesis value. - OncePerEpoch { lag } => Ok(Self::get_value(state, end_vindex as u64 + lag, spec)?), - } - } -} - -/// Marker trait for fixed-length fields (`FixedVector`). -pub trait FixedLengthField: Field {} - -/// Marker trait for variable-length fields (`VariableList`). -pub trait VariableLengthField: Field {} - -/// Macro to implement the `Field` trait on a new unit struct type. -macro_rules! field { - ($struct_name:ident, $marker_trait:ident, $value_ty:ty, $length_ty:ty, $column:expr, - $update_pattern:expr, $get_value:expr) => { - #[derive(Clone, Copy)] - pub struct $struct_name; - - impl Field for $struct_name - where - T: EthSpec, - { - type Value = $value_ty; - type Length = $length_ty; - - fn column() -> DBColumn { - $column - } - - fn update_pattern(spec: &ChainSpec) -> UpdatePattern { - $update_pattern(spec) - } - - fn get_value( - state: &BeaconState, - vindex: u64, - spec: &ChainSpec, - ) -> Result { - $get_value(state, vindex, spec) - } - - fn is_fixed_length() -> bool { - stringify!($marker_trait) == "FixedLengthField" - } - } - - impl $marker_trait for $struct_name {} - }; -} - -field!( - BlockRoots, - FixedLengthField, - Hash256, - T::SlotsPerHistoricalRoot, - DBColumn::BeaconBlockRoots, - |_| OncePerNSlots { - n: 1, - activation_slot: Some(Slot::new(0)), - deactivation_slot: None - }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.block_roots(), index) -); - -field!( - StateRoots, - FixedLengthField, - Hash256, - T::SlotsPerHistoricalRoot, - DBColumn::BeaconStateRoots, - |_| OncePerNSlots { - n: 1, - activation_slot: Some(Slot::new(0)), - deactivation_slot: None, - }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.state_roots(), index) -); - -field!( - HistoricalRoots, - VariableLengthField, - Hash256, - T::HistoricalRootsLimit, - DBColumn::BeaconHistoricalRoots, - |spec: &ChainSpec| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64(), - activation_slot: Some(Slot::new(0)), - deactivation_slot: spec - .capella_fork_epoch - .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), - }, - |state: &BeaconState<_>, index, _| safe_modulo_index(state.historical_roots(), index) -); - -field!( - RandaoMixes, - FixedLengthField, - Hash256, - T::EpochsPerHistoricalVector, - DBColumn::BeaconRandaoMixes, - |_| OncePerEpoch { lag: 1 }, - 
|state: &BeaconState<_>, index, _| safe_modulo_index(state.randao_mixes(), index) -); - -field!( - HistoricalSummaries, - VariableLengthField, - HistoricalSummary, - T::HistoricalRootsLimit, - DBColumn::BeaconHistoricalSummaries, - |spec: &ChainSpec| OncePerNSlots { - n: T::SlotsPerHistoricalRoot::to_u64(), - activation_slot: spec - .capella_fork_epoch - .map(|fork_epoch| fork_epoch.start_slot(T::slots_per_epoch())), - deactivation_slot: None, - }, - |state: &BeaconState<_>, index, _| safe_modulo_index( - state - .historical_summaries() - .map_err(|_| ChunkError::InvalidFork)?, - index - ) -); - -pub fn store_updated_vector, E: EthSpec, S: KeyValueStore>( - field: F, - store: &S, - state: &BeaconState, - spec: &ChainSpec, - ops: &mut Vec, -) -> Result<(), Error> { - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(state.slot(), spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - // Store the genesis value if we have access to it, and it hasn't been stored already. - if F::slot_needs_genesis_value(state.slot(), spec) { - let genesis_value = F::extract_genesis_value(state, spec)?; - F::check_and_store_genesis_value(store, genesis_value, ops)?; - } - - // Start by iterating backwards from the last chunk, storing new chunks in the database. - // Stop once a chunk in the database matches what we were about to store, this indicates - // that a previously stored state has already filled-in a portion of the indices covered. - let full_range_checked = store_range( - field, - (start_cindex..=end_cindex).rev(), - start_vindex, - end_vindex, - store, - state, - spec, - ops, - )?; - - // If the previous `store_range` did not check the entire range, it may be the case that the - // state's vector includes elements at low vector indices that are not yet stored in the - // database, so run another `store_range` to ensure these values are also stored. - if !full_range_checked { - store_range( - field, - start_cindex..end_cindex, - start_vindex, - end_vindex, - store, - state, - spec, - ops, - )?; - } - - Ok(()) -} - -#[allow(clippy::too_many_arguments)] -fn store_range( - _: F, - range: I, - start_vindex: usize, - end_vindex: usize, - store: &S, - state: &BeaconState, - spec: &ChainSpec, - ops: &mut Vec, -) -> Result -where - F: Field, - E: EthSpec, - S: KeyValueStore, - I: Iterator, -{ - for chunk_index in range { - let chunk_key = &chunk_key(chunk_index)[..]; - - let existing_chunk = - Chunk::::load(store, F::column(), chunk_key)?.unwrap_or_default(); - - let new_chunk = F::get_updated_chunk( - &existing_chunk, - chunk_index, - start_vindex, - end_vindex, - state, - spec, - )?; - - if new_chunk == existing_chunk { - return Ok(false); - } - - new_chunk.store(F::column(), chunk_key, ops)?; - } - - Ok(true) -} - -// Chunks at the end index are included. -// TODO: could be more efficient with a real range query (perhaps RocksDB) -fn range_query, E: EthSpec, T: Decode + Encode>( - store: &S, - column: DBColumn, - start_index: usize, - end_index: usize, -) -> Result>, Error> { - let range = start_index..=end_index; - let len = range - .end() - // Add one to account for inclusive range. 
- .saturating_add(1) - .saturating_sub(*range.start()); - let mut result = Vec::with_capacity(len); - - for chunk_index in range { - let key = &chunk_key(chunk_index)[..]; - let chunk = Chunk::load(store, column, key)?.ok_or(ChunkError::Missing { chunk_index })?; - result.push(chunk); - } - - Ok(result) -} - -/// Combine chunks to form a list or vector of all values with vindex in `start_vindex..end_vindex`. -/// -/// The `length` parameter is the length of the vec to construct, with entries set to `default` if -/// they lie outside the vindex range. -fn stitch( - chunks: Vec>, - start_vindex: usize, - end_vindex: usize, - chunk_size: usize, - length: usize, - default: T, -) -> Result, ChunkError> { - if start_vindex + length < end_vindex { - return Err(ChunkError::OversizedRange { - start_vindex, - end_vindex, - length, - }); - } - - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let mut result = vec![default; length]; - - for (chunk_index, chunk) in (start_cindex..=end_cindex).zip(chunks.into_iter()) { - // All chunks but the last chunk must be full-sized - if chunk_index != end_cindex && chunk.values.len() != chunk_size { - return Err(ChunkError::InvalidSize { - chunk_index, - expected: chunk_size, - actual: chunk.values.len(), - }); - } - - // Copy the chunk entries into the result vector - for (i, value) in chunk.values.into_iter().enumerate() { - let vindex = chunk_index * chunk_size + i; - - if vindex >= start_vindex && vindex < end_vindex { - result[vindex % length] = value; - } - } - } - - Ok(result) -} - -pub fn load_vector_from_db, E: EthSpec, S: KeyValueStore>( - store: &S, - slot: Slot, - spec: &ChainSpec, -) -> Result, Error> { - // Do a range query - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let chunks = range_query(store, F::column(), start_cindex, end_cindex)?; - - let default = if F::slot_needs_genesis_value(slot, spec) { - F::load_genesis_value(store)? - } else { - F::Value::default() - }; - - let result = stitch( - chunks, - start_vindex, - end_vindex, - chunk_size, - F::Length::to_usize(), - default, - )?; - - Ok(result.into()) -} - -/// The historical roots are stored in vector chunks, despite not actually being a vector. -pub fn load_variable_list_from_db, E: EthSpec, S: KeyValueStore>( - store: &S, - slot: Slot, - spec: &ChainSpec, -) -> Result, Error> { - let chunk_size = F::chunk_size(); - let (start_vindex, end_vindex) = F::start_and_end_vindex(slot, spec); - let start_cindex = start_vindex / chunk_size; - let end_cindex = end_vindex / chunk_size; - - let chunks: Vec> = range_query(store, F::column(), start_cindex, end_cindex)?; - - let mut result = Vec::with_capacity(chunk_size * chunks.len()); - - for (chunk_index, chunk) in chunks.into_iter().enumerate() { - for (i, value) in chunk.values.into_iter().enumerate() { - let vindex = chunk_index * chunk_size + i; - - if vindex >= start_vindex && vindex < end_vindex { - result.push(value); - } - } - } - - Ok(result.into()) -} - -/// Index into a field of the state, avoiding out of bounds and division by 0. -fn safe_modulo_index(values: &[T], index: u64) -> Result { - if values.is_empty() { - Err(ChunkError::ZeroLengthVector) - } else { - Ok(values[index as usize % values.len()]) - } -} - -/// A chunk of a fixed-size vector from the `BeaconState`, stored in the database. 
-#[derive(Debug, Clone, PartialEq)] -pub struct Chunk { - /// A vector of up-to `chunk_size` values. - pub values: Vec, -} - -impl Default for Chunk -where - T: Decode + Encode, -{ - fn default() -> Self { - Chunk { values: vec![] } - } -} - -impl Chunk -where - T: Decode + Encode, -{ - pub fn new(values: Vec) -> Self { - Chunk { values } - } - - pub fn load, E: EthSpec>( - store: &S, - column: DBColumn, - key: &[u8], - ) -> Result, Error> { - store - .get_bytes(column.into(), key)? - .map(|bytes| Self::decode(&bytes)) - .transpose() - } - - pub fn store( - &self, - column: DBColumn, - key: &[u8], - ops: &mut Vec, - ) -> Result<(), Error> { - let db_key = get_key_for_col(column.into(), key); - ops.push(KeyValueStoreOp::PutKeyValue(db_key, self.encode()?)); - Ok(()) - } - - /// Attempt to decode a single chunk. - pub fn decode(bytes: &[u8]) -> Result { - if !::is_ssz_fixed_len() { - return Err(Error::from(ChunkError::InvalidType)); - } - - let value_size = ::ssz_fixed_len(); - - if value_size == 0 { - return Err(Error::from(ChunkError::InvalidType)); - } - - let values = bytes - .chunks(value_size) - .map(T::from_ssz_bytes) - .collect::>()?; - - Ok(Chunk { values }) - } - - pub fn encoded_size(&self) -> usize { - self.values.len() * ::ssz_fixed_len() - } - - /// Encode a single chunk as bytes. - pub fn encode(&self) -> Result, Error> { - if !::is_ssz_fixed_len() { - return Err(Error::from(ChunkError::InvalidType)); - } - - Ok(self.values.iter().flat_map(T::as_ssz_bytes).collect()) - } -} - -#[derive(Debug, PartialEq)] -pub enum ChunkError { - ZeroLengthVector, - InvalidSize { - chunk_index: usize, - expected: usize, - actual: usize, - }, - Missing { - chunk_index: usize, - }, - MissingGenesisValue, - Inconsistent { - field: DBColumn, - chunk_index: usize, - existing_value: String, - new_value: String, - }, - InconsistentGenesisValue { - field: DBColumn, - existing_value: String, - new_value: String, - }, - InvalidGenesisChunk { - field: DBColumn, - expected_len: usize, - observed_len: usize, - }, - InvalidType, - OversizedRange { - start_vindex: usize, - end_vindex: usize, - length: usize, - }, - InvalidFork, -} - -#[cfg(test)] -mod test { - use super::*; - use types::MainnetEthSpec as TestSpec; - use types::*; - - fn v(i: u64) -> Hash256 { - Hash256::from_low_u64_be(i) - } - - #[test] - fn stitch_default() { - let chunk_size = 4; - - let chunks = vec![ - Chunk::new(vec![0u64, 1, 2, 3]), - Chunk::new(vec![4, 5, 0, 0]), - ]; - - assert_eq!( - stitch(chunks, 2, 6, chunk_size, 12, 99).unwrap(), - vec![99, 99, 2, 3, 4, 5, 99, 99, 99, 99, 99, 99] - ); - } - - #[test] - fn stitch_basic() { - let chunk_size = 4; - let default = v(0); - - let chunks = vec![ - Chunk::new(vec![v(0), v(1), v(2), v(3)]), - Chunk::new(vec![v(4), v(5), v(6), v(7)]), - Chunk::new(vec![v(8), v(9), v(10), v(11)]), - ]; - - assert_eq!( - stitch(chunks.clone(), 0, 12, chunk_size, 12, default).unwrap(), - (0..12).map(v).collect::>() - ); - - assert_eq!( - stitch(chunks, 2, 10, chunk_size, 8, default).unwrap(), - vec![v(8), v(9), v(2), v(3), v(4), v(5), v(6), v(7)] - ); - } - - #[test] - fn stitch_oversized_range() { - let chunk_size = 4; - let default = 0; - - let chunks = vec![Chunk::new(vec![20u64, 21, 22, 23])]; - - // Args (start_vindex, end_vindex, length) - let args = vec![(0, 21, 20), (0, 2048, 1024), (0, 2, 1)]; - - for (start_vindex, end_vindex, length) in args { - assert_eq!( - stitch( - chunks.clone(), - start_vindex, - end_vindex, - chunk_size, - length, - default - ), - Err(ChunkError::OversizedRange { - 
start_vindex, - end_vindex, - length, - }) - ); - } - } - - #[test] - fn fixed_length_fields() { - fn test_fixed_length>(_: F, expected: bool) { - assert_eq!(F::is_fixed_length(), expected); - } - test_fixed_length(BlockRoots, true); - test_fixed_length(StateRoots, true); - test_fixed_length(HistoricalRoots, false); - test_fixed_length(RandaoMixes, true); - } - - fn needs_genesis_value_once_per_slot>(_: F) { - let spec = &TestSpec::default_spec(); - let max = F::Length::to_u64(); - for i in 0..max { - assert!( - F::slot_needs_genesis_value(Slot::new(i), spec), - "slot {}", - i - ); - } - assert!(!F::slot_needs_genesis_value(Slot::new(max), spec)); - } - - #[test] - fn needs_genesis_value_block_roots() { - needs_genesis_value_once_per_slot(BlockRoots); - } - - #[test] - fn needs_genesis_value_state_roots() { - needs_genesis_value_once_per_slot(StateRoots); - } - - #[test] - fn needs_genesis_value_historical_roots() { - let spec = &TestSpec::default_spec(); - assert!( - !>::slot_needs_genesis_value(Slot::new(0), spec) - ); - } - - fn needs_genesis_value_test_randao>(_: F) { - let spec = &TestSpec::default_spec(); - let max = TestSpec::slots_per_epoch() * (F::Length::to_u64() - 1); - for i in 0..max { - assert!( - F::slot_needs_genesis_value(Slot::new(i), spec), - "slot {}", - i - ); - } - assert!(!F::slot_needs_genesis_value(Slot::new(max), spec)); - } - - #[test] - fn needs_genesis_value_randao() { - needs_genesis_value_test_randao(RandaoMixes); - } -} diff --git a/beacon_node/store/src/config.rs b/beacon_node/store/src/config.rs index 581003b4fae..3f091fda718 100644 --- a/beacon_node/store/src/config.rs +++ b/beacon_node/store/src/config.rs @@ -1,23 +1,29 @@ +use crate::hdiff::HierarchyConfig; use crate::{DBColumn, Error, StoreItem}; use serde_derive::{Deserialize, Serialize}; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; -use types::{EthSpec, MinimalEthSpec}; +use std::io::Write; +use zstd::Encoder; -pub const PREV_DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 2048; -pub const DEFAULT_SLOTS_PER_RESTORE_POINT: u64 = 8192; -pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 5; +pub const DEFAULT_EPOCHS_PER_STATE_DIFF: u64 = 4; +pub const DEFAULT_BLOCK_CACHE_SIZE: usize = 64; +pub const DEFAULT_STATE_CACHE_SIZE: usize = 128; +pub const DEFAULT_COMPRESSION_LEVEL: i32 = 1; +const EST_COMPRESSION_FACTOR: usize = 2; pub const DEFAULT_HISTORIC_STATE_CACHE_SIZE: usize = 1; /// Database configuration parameters. #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub struct StoreConfig { - /// Number of slots to wait between storing restore points in the freezer database. - pub slots_per_restore_point: u64, - /// Flag indicating whether the `slots_per_restore_point` was set explicitly by the user. - pub slots_per_restore_point_set_explicitly: bool, + /// Number of epochs between state diffs in the hot database. + pub epochs_per_state_diff: u64, /// Maximum number of blocks to store in the in-memory block cache. pub block_cache_size: usize, + /// Maximum number of states to store in the in-memory state cache. + pub state_cache_size: usize, + /// Compression level for `BeaconStateDiff`s. + pub compression_level: i32, /// Maximum number of states from freezer database to store in the in-memory state cache. pub historic_state_cache_size: usize, /// Whether to compact the database on initialization. @@ -26,30 +32,42 @@ pub struct StoreConfig { pub compact_on_prune: bool, /// Whether to prune payloads on initialization and finalization. 
pub prune_payloads: bool, + /// Whether to store finalized blocks compressed and linearised in the freezer database. + pub linear_blocks: bool, + /// Whether to store finalized states compressed and linearised in the freezer database. + pub linear_restore_points: bool, + /// State diff hierarchy. + pub hierarchy_config: HierarchyConfig, } /// Variant of `StoreConfig` that gets written to disk. Contains immutable configuration params. #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] +// FIXME(sproul): schema migration, add hdiff pub struct OnDiskStoreConfig { - pub slots_per_restore_point: u64, + pub linear_blocks: bool, + pub linear_restore_points: bool, } #[derive(Debug, Clone)] pub enum StoreConfigError { MismatchedSlotsPerRestorePoint { config: u64, on_disk: u64 }, + InvalidCompressionLevel { level: i32 }, } impl Default for StoreConfig { fn default() -> Self { Self { - // Safe default for tests, shouldn't ever be read by a CLI node. - slots_per_restore_point: MinimalEthSpec::slots_per_historical_root() as u64, - slots_per_restore_point_set_explicitly: false, + epochs_per_state_diff: DEFAULT_EPOCHS_PER_STATE_DIFF, block_cache_size: DEFAULT_BLOCK_CACHE_SIZE, + state_cache_size: DEFAULT_STATE_CACHE_SIZE, + compression_level: DEFAULT_COMPRESSION_LEVEL, historic_state_cache_size: DEFAULT_HISTORIC_STATE_CACHE_SIZE, compact_on_init: false, compact_on_prune: true, prune_payloads: true, + linear_blocks: true, + linear_restore_points: true, + hierarchy_config: HierarchyConfig::default(), } } } @@ -57,22 +75,58 @@ impl Default for StoreConfig { impl StoreConfig { pub fn as_disk_config(&self) -> OnDiskStoreConfig { OnDiskStoreConfig { - slots_per_restore_point: self.slots_per_restore_point, + linear_blocks: self.linear_blocks, + linear_restore_points: self.linear_restore_points, } } pub fn check_compatibility( &self, - on_disk_config: &OnDiskStoreConfig, + _on_disk_config: &OnDiskStoreConfig, ) -> Result<(), StoreConfigError> { - if self.slots_per_restore_point != on_disk_config.slots_per_restore_point { - return Err(StoreConfigError::MismatchedSlotsPerRestorePoint { - config: self.slots_per_restore_point, - on_disk: on_disk_config.slots_per_restore_point, - }); - } + // FIXME(sproul): TODO Ok(()) } + + /// Check that the compression level is valid. + pub fn verify_compression_level(&self) -> Result<(), StoreConfigError> { + if zstd::compression_level_range().contains(&self.compression_level) { + Ok(()) + } else { + Err(StoreConfigError::InvalidCompressionLevel { + level: self.compression_level, + }) + } + } + + /// Estimate the size of `len` bytes after compression at the current compression level. + pub fn estimate_compressed_size(&self, len: usize) -> usize { + if self.compression_level == 0 { + len + } else { + len / EST_COMPRESSION_FACTOR + } + } + + /// Estimate the size of `len` compressed bytes after decompression at the current compression + /// level. 
+ pub fn estimate_decompressed_size(&self, len: usize) -> usize { + if self.compression_level == 0 { + len + } else { + len * EST_COMPRESSION_FACTOR + } + } + + pub fn compress_bytes(&self, ssz_bytes: &[u8]) -> Result<Vec<u8>, Error> { + let mut compressed_value = + Vec::with_capacity(self.estimate_compressed_size(ssz_bytes.len())); + let mut encoder = Encoder::new(&mut compressed_value, self.compression_level) + .map_err(Error::Compression)?; + encoder.write_all(ssz_bytes).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + Ok(compressed_value) + } } impl StoreItem for OnDiskStoreConfig { @@ -80,8 +134,8 @@ DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec<u8> { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result<Vec<u8>, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> { diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs index fcc40706b30..cd4f9895b75 100644 --- a/beacon_node/store/src/errors.rs +++ b/beacon_node/store/src/errors.rs @@ -1,16 +1,15 @@ -use crate::chunked_vector::ChunkError; use crate::config::StoreConfigError; +use crate::hdiff; use crate::hot_cold_store::HotColdDBError; use ssz::DecodeError; use state_processing::BlockReplayError; -use types::{BeaconStateError, Hash256, InconsistentFork, Slot}; +use types::{milhouse, BeaconStateError, Epoch, Hash256, InconsistentFork, Slot}; pub type Result<T, E = Error> = std::result::Result<T, E>; #[derive(Debug)] pub enum Error { SszDecodeError(DecodeError), - VectorChunkError(ChunkError), BeaconStateError(BeaconStateError), PartialBeaconStateError, HotColdDBError(HotColdDBError), @@ -40,11 +39,38 @@ pub enum Error { expected: Hash256, computed: Hash256, }, + MissingStateRoot(Slot), + MissingState(Hash256), + MissingSnapshot(Epoch), + MissingDiff(Epoch), + NoBaseStateFound(Hash256), BlockReplayError(BlockReplayError), + MilhouseError(milhouse::Error), + Compression(std::io::Error), + MissingPersistedBeaconChain, + SlotIsBeforeSplit { + slot: Slot, + }, + FinalizedStateDecreasingSlot, + FinalizedStateUnaligned, + StateForCacheHasPendingUpdates { + state_root: Hash256, + slot: Slot, + }, AddPayloadLogicError, SlotClockUnavailableForMigration, + MissingImmutableValidator(usize), + MissingValidator(usize), + V9MigrationFailure(Hash256), + ValidatorPubkeyCacheError(String), + DuplicateValidatorPublicKey, + InvalidValidatorPubkeyBytes(bls::Error), + ValidatorPubkeyCacheUninitialized, + InvalidKey, UnableToDowngrade, + Hdiff(hdiff::Error), InconsistentFork(InconsistentFork), + ZeroCacheSize, } pub trait HandleUnavailable<T> { @@ -67,12 +93,6 @@ impl From for Error { } } -impl From<ChunkError> for Error { - fn from(e: ChunkError) -> Error { - Error::VectorChunkError(e) - } -} - impl From<HotColdDBError> for Error { fn from(e: HotColdDBError) -> Error { Error::HotColdDBError(e) @@ -97,6 +117,18 @@ impl From for Error { } } +impl From<milhouse::Error> for Error { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + +impl From<hdiff::Error> for Error { + fn from(e: hdiff::Error) -> Self { + Self::Hdiff(e) + } +} + impl From<BlockReplayError> for Error { fn from(e: BlockReplayError) -> Error { Error::BlockReplayError(e)
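`compress_bytes` above has no inverse in this hunk; a matching decompression helper would look roughly like the following, assuming stored values are plain zstd frames and pre-sizing the buffer with `estimate_decompressed_size` (a sketch, not the actual tree-states code):

use std::io::Read;
use zstd::Decoder;

/// Hypothetical inverse of `StoreConfig::compress_bytes`.
pub fn decompress_bytes(config: &StoreConfig, compressed: &[u8]) -> std::io::Result<Vec<u8>> {
    // Pre-size using the configured estimate to avoid reallocations.
    let mut out = Vec::with_capacity(config.estimate_decompressed_size(compressed.len()));
    let mut decoder = Decoder::new(compressed)?;
    decoder.read_to_end(&mut out)?;
    Ok(out)
}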
diff --git a/beacon_node/store/src/errors.rs b/beacon_node/store/src/errors.rs
index fcc40706b30..cd4f9895b75 100644
--- a/beacon_node/store/src/errors.rs
+++ b/beacon_node/store/src/errors.rs
@@ -1,16 +1,15 @@
-use crate::chunked_vector::ChunkError;
 use crate::config::StoreConfigError;
+use crate::hdiff;
 use crate::hot_cold_store::HotColdDBError;
 use ssz::DecodeError;
 use state_processing::BlockReplayError;
-use types::{BeaconStateError, Hash256, InconsistentFork, Slot};
+use types::{milhouse, BeaconStateError, Epoch, Hash256, InconsistentFork, Slot};
 
 pub type Result<T> = std::result::Result<T, Error>;
 
 #[derive(Debug)]
 pub enum Error {
     SszDecodeError(DecodeError),
-    VectorChunkError(ChunkError),
     BeaconStateError(BeaconStateError),
     PartialBeaconStateError,
     HotColdDBError(HotColdDBError),
@@ -40,11 +39,38 @@ pub enum Error {
         expected: Hash256,
         computed: Hash256,
     },
+    MissingStateRoot(Slot),
+    MissingState(Hash256),
+    MissingSnapshot(Epoch),
+    MissingDiff(Epoch),
+    NoBaseStateFound(Hash256),
     BlockReplayError(BlockReplayError),
+    MilhouseError(milhouse::Error),
+    Compression(std::io::Error),
+    MissingPersistedBeaconChain,
+    SlotIsBeforeSplit {
+        slot: Slot,
+    },
+    FinalizedStateDecreasingSlot,
+    FinalizedStateUnaligned,
+    StateForCacheHasPendingUpdates {
+        state_root: Hash256,
+        slot: Slot,
+    },
     AddPayloadLogicError,
     SlotClockUnavailableForMigration,
+    MissingImmutableValidator(usize),
+    MissingValidator(usize),
+    V9MigrationFailure(Hash256),
+    ValidatorPubkeyCacheError(String),
+    DuplicateValidatorPublicKey,
+    InvalidValidatorPubkeyBytes(bls::Error),
+    ValidatorPubkeyCacheUninitialized,
+    InvalidKey,
     UnableToDowngrade,
+    Hdiff(hdiff::Error),
     InconsistentFork(InconsistentFork),
+    ZeroCacheSize,
 }
 
 pub trait HandleUnavailable<T> {
@@ -67,12 +93,6 @@ impl From<DecodeError> for Error {
     }
 }
 
-impl From<ChunkError> for Error {
-    fn from(e: ChunkError) -> Error {
-        Error::VectorChunkError(e)
-    }
-}
-
 impl From<HotColdDBError> for Error {
     fn from(e: HotColdDBError) -> Error {
         Error::HotColdDBError(e)
@@ -97,6 +117,18 @@ impl From<BeaconStateError> for Error {
     }
 }
 
+impl From<milhouse::Error> for Error {
+    fn from(e: milhouse::Error) -> Self {
+        Self::MilhouseError(e)
+    }
+}
+
+impl From<hdiff::Error> for Error {
+    fn from(e: hdiff::Error) -> Self {
+        Self::Hdiff(e)
+    }
+}
+
 impl From<BlockReplayError> for Error {
     fn from(e: BlockReplayError) -> Error {
         Error::BlockReplayError(e)
diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs
index 353be6bf058..fb1bb3a3f93 100644
--- a/beacon_node/store/src/forwards_iter.rs
+++ b/beacon_node/store/src/forwards_iter.rs
@@ -1,29 +1,34 @@
-use crate::chunked_iter::ChunkedVectorIter;
-use crate::chunked_vector::{BlockRoots, Field, StateRoots};
 use crate::errors::{Error, Result};
 use crate::iter::{BlockRootsIterator, StateRootsIterator};
-use crate::{HotColdDB, ItemStore};
+use crate::{ColumnIter, DBColumn, HotColdDB, ItemStore};
 use itertools::process_results;
-use types::{BeaconState, ChainSpec, EthSpec, Hash256, Slot};
+use std::marker::PhantomData;
+use types::{BeaconState, EthSpec, Hash256, Slot};
 
 pub type HybridForwardsBlockRootsIterator<'a, E, Hot, Cold> =
-    HybridForwardsIterator<'a, E, BlockRoots, Hot, Cold>;
+    HybridForwardsIterator<'a, E, Hot, Cold>;
 pub type HybridForwardsStateRootsIterator<'a, E, Hot, Cold> =
-    HybridForwardsIterator<'a, E, StateRoots, Hot, Cold>;
+    HybridForwardsIterator<'a, E, Hot, Cold>;
 
-/// Trait unifying `BlockRoots` and `StateRoots` for forward iteration.
-pub trait Root<E: EthSpec>: Field<E> {
-    fn simple_forwards_iterator<Hot: ItemStore<E>, Cold: ItemStore<E>>(
-        store: &HotColdDB<E, Hot, Cold>,
+impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> {
+    pub fn simple_forwards_iterator(
+        &self,
+        column: DBColumn,
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_root: Hash256,
-    ) -> Result<SimpleForwardsIterator>;
-}
+    ) -> Result<SimpleForwardsIterator> {
+        if column == DBColumn::BeaconBlockRoots {
+            self.forwards_iter_block_roots_using_state(start_slot, end_state, end_root)
+        } else if column == DBColumn::BeaconStateRoots {
+            self.forwards_iter_state_roots_using_state(start_slot, end_state, end_root)
+        } else {
+            panic!("FIXME(sproul): better error")
+        }
+    }
 
-impl<E: EthSpec> Root<E> for BlockRoots {
-    fn simple_forwards_iterator<Hot: ItemStore<E>, Cold: ItemStore<E>>(
-        store: &HotColdDB<E, Hot, Cold>,
+    pub fn forwards_iter_block_roots_using_state(
+        &self,
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_block_root: Hash256,
@@ -31,7 +36,7 @@ impl<E: EthSpec> Root<E> for BlockRoots {
         // Iterate backwards from the end state, stopping at the start slot.
         let values = process_results(
             std::iter::once(Ok((end_block_root, end_state.slot())))
-                .chain(BlockRootsIterator::owned(store, end_state)),
+                .chain(BlockRootsIterator::owned(self, end_state)),
             |iter| {
                 iter.take_while(|(_, slot)| *slot >= start_slot)
                     .collect::<Vec<_>>()
@@ -39,11 +44,9 @@ impl<E: EthSpec> Root<E> for BlockRoots {
         )?;
         Ok(SimpleForwardsIterator { values })
     }
-}
 
-impl<E: EthSpec> Root<E> for StateRoots {
-    fn simple_forwards_iterator<Hot: ItemStore<E>, Cold: ItemStore<E>>(
-        store: &HotColdDB<E, Hot, Cold>,
+    pub fn forwards_iter_state_roots_using_state(
+        &self,
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_state_root: Hash256,
@@ -51,7 +54,7 @@ impl<E: EthSpec> Root<E> for StateRoots {
         // Iterate backwards from the end state, stopping at the start slot.
         let values = process_results(
             std::iter::once(Ok((end_state_root, end_state.slot())))
-                .chain(StateRootsIterator::owned(store, end_state)),
+                .chain(StateRootsIterator::owned(self, end_state)),
             |iter| {
                 iter.take_while(|(_, slot)| *slot >= start_slot)
                     .collect::<Vec<_>>()
@@ -62,40 +65,62 @@ impl<E: EthSpec> Root<E> for StateRoots {
 }
 
 /// Forwards root iterator that makes use of a flat field table in the freezer DB.
-pub struct FrozenForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
-{
-    inner: ChunkedVectorIter<'a, F, E, Hot, Cold>,
+pub struct FrozenForwardsIterator<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
+    inner: ColumnIter<'a, Vec<u8>>,
+    limit: Slot,
+    finished: bool,
+    _phantom: PhantomData<(E, Hot, Cold)>,
 }
 
-impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
-    FrozenForwardsIterator<'a, E, F, Hot, Cold>
+impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
    FrozenForwardsIterator<'a, E, Hot, Cold>
 {
+    /// `end_slot` is EXCLUSIVE here.
     pub fn new(
         store: &'a HotColdDB<E, Hot, Cold>,
+        column: DBColumn,
         start_slot: Slot,
-        last_restore_point_slot: Slot,
-        spec: &ChainSpec,
+        end_slot: Slot,
     ) -> Self {
+        if column != DBColumn::BeaconBlockRoots && column != DBColumn::BeaconStateRoots {
+            panic!("FIXME(sproul): bad column error");
+        }
+        let start = start_slot.as_u64().to_be_bytes();
         Self {
-            inner: ChunkedVectorIter::new(
-                store,
-                start_slot.as_usize(),
-                last_restore_point_slot,
-                spec,
-            ),
+            inner: store.cold_db.iter_column_from(column, &start),
+            limit: end_slot,
+            finished: false,
+            _phantom: PhantomData,
         }
     }
 }
 
-impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
-    for FrozenForwardsIterator<'a, E, F, Hot, Cold>
+impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
+    for FrozenForwardsIterator<'a, E, Hot, Cold>
 {
-    type Item = (Hash256, Slot);
+    type Item = Result<(Hash256, Slot)>;
 
     fn next(&mut self) -> Option<Self::Item> {
+        if self.finished {
+            return None;
+        }
+
         self.inner
-            .next()
-            .map(|(slot, root)| (root, Slot::from(slot)))
+            .next()?
+            .and_then(|(slot_bytes, root_bytes)| {
+                if slot_bytes.len() != 8 || root_bytes.len() != 32 {
+                    panic!("FIXME(sproul): put an error here")
+                } else {
+                    let slot = Slot::new(u64::from_be_bytes(slot_bytes.try_into().unwrap()));
+                    let root = Hash256::from_slice(&root_bytes);
+
+                    if slot + 1 == self.limit {
+                        self.finished = true;
+                    }
+                    Ok(Some((root, slot)))
+                }
+            })
+            .transpose()
     }
 }
 
@@ -115,24 +140,27 @@ impl Iterator for SimpleForwardsIterator {
 }
 
 /// Fusion of the above two approaches to forwards iteration. Fast and efficient.
-pub enum HybridForwardsIterator<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> {
+pub enum HybridForwardsIterator<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     PreFinalization {
-        iter: Box<FrozenForwardsIterator<'a, E, F, Hot, Cold>>,
+        iter: Box<FrozenForwardsIterator<'a, E, Hot, Cold>>,
+        store: &'a HotColdDB<E, Hot, Cold>,
         /// Data required by the `PostFinalization` iterator when we get to it.
         continuation_data: Option<Box<(BeaconState<E>, Hash256)>>,
+        column: DBColumn,
     },
     PostFinalizationLazy {
         continuation_data: Option<Box<(BeaconState<E>, Hash256)>>,
         store: &'a HotColdDB<E, Hot, Cold>,
         start_slot: Slot,
+        column: DBColumn,
     },
     PostFinalization {
         iter: SimpleForwardsIterator,
     },
 }
 
-impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
-    HybridForwardsIterator<'a, E, F, Hot, Cold>
+impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>>
+    HybridForwardsIterator<'a, E, Hot, Cold>
 {
     /// Construct a new hybrid iterator.
     ///
@@ -148,41 +176,41 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
     /// function may block for some time while `get_state` runs.
     pub fn new(
         store: &'a HotColdDB<E, Hot, Cold>,
+        column: DBColumn,
         start_slot: Slot,
         end_slot: Option<Slot>,
         get_state: impl FnOnce() -> (BeaconState<E>, Hash256),
-        spec: &ChainSpec,
     ) -> Result<Self> {
         use HybridForwardsIterator::*;
 
-        let latest_restore_point_slot = store.get_latest_restore_point_slot();
+        // FIXME(sproul): consider whether this is 100% correct
+        let split_slot = store.get_split_slot();
 
-        let result = if start_slot < latest_restore_point_slot {
+        let result = if start_slot < split_slot {
             let iter = Box::new(FrozenForwardsIterator::new(
-                store,
-                start_slot,
-                latest_restore_point_slot,
-                spec,
+                store, column, start_slot, split_slot,
             ));
 
             // No continuation data is needed if the forwards iterator plans to halt before
             // `end_slot`. If it tries to continue further a `NoContinuationData` error will be
             // returned.
-            let continuation_data =
-                if end_slot.map_or(false, |end_slot| end_slot < latest_restore_point_slot) {
-                    None
-                } else {
-                    Some(Box::new(get_state()))
-                };
+            let continuation_data = if end_slot.map_or(false, |end_slot| end_slot < split_slot) {
+                None
+            } else {
+                Some(Box::new(get_state()))
+            };
 
             PreFinalization {
                 iter,
+                store,
                 continuation_data,
+                column,
             }
         } else {
             PostFinalizationLazy {
                 continuation_data: Some(Box::new(get_state())),
                 store,
                 start_slot,
+                column,
             }
         };
 
@@ -195,22 +223,24 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
         match self {
             PreFinalization {
                 iter,
+                store,
                 continuation_data,
+                column,
             } => {
                 match iter.next() {
-                    Some(x) => Ok(Some(x)),
+                    Some(x) => x.map(Some),
                     // Once the pre-finalization iterator is consumed, transition
                     // to a post-finalization iterator beginning from the last slot
                     // of the pre iterator.
                     None => {
                         let continuation_data = continuation_data.take();
-                        let store = iter.inner.store;
-                        let start_slot = Slot::from(iter.inner.end_vindex);
+                        let start_slot = Slot::from(iter.limit);
 
                         *self = PostFinalizationLazy {
                             continuation_data,
                             store,
                             start_slot,
+                            column: *column,
                         };
 
                         self.do_next()
@@ -221,11 +251,17 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
                 continuation_data,
                 store,
                 start_slot,
+                column,
             } => {
                 let (end_state, end_root) =
                     *continuation_data.take().ok_or(Error::NoContinuationData)?;
 
                 *self = PostFinalization {
-                    iter: F::simple_forwards_iterator(store, *start_slot, end_state, end_root)?,
+                    iter: store.simple_forwards_iterator(
+                        *column,
+                        *start_slot,
+                        end_state,
+                        end_root,
+                    )?,
                 };
                 self.do_next()
             }
@@ -234,8 +270,8 @@ impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>>
     }
 }
 
-impl<'a, E: EthSpec, F: Root<E>, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
-    for HybridForwardsIterator<'a, E, F, Hot, Cold>
+impl<'a, E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> Iterator
+    for HybridForwardsIterator<'a, E, Hot, Cold>
 {
     type Item = Result<(Hash256, Slot)>;
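`FrozenForwardsIterator` now drives a raw `ColumnIter` seeded at `start_slot.as_u64().to_be_bytes()` and stops once `slot + 1 == limit`. This relies on LevelDB iterating keys in lexicographic byte order, which coincides with numeric slot order only for big-endian encodings. A self-contained check of that assumption (illustrative, not part of the diff):

fn main() {
    let slots: [u64; 3] = [255, 256, 65536];

    // Big-endian keys: lexicographic order == numeric order.
    let mut be: Vec<[u8; 8]> = slots.iter().map(|s| s.to_be_bytes()).collect();
    be.sort();
    assert_eq!(be, slots.map(u64::to_be_bytes));

    // Little-endian keys would sort 65536 before 256 before 255.
    let mut le: Vec<[u8; 8]> = slots.iter().map(|s| s.to_le_bytes()).collect();
    le.sort();
    assert_ne!(le, slots.map(u64::to_le_bytes));
}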
diff --git a/beacon_node/store/src/hdiff.rs b/beacon_node/store/src/hdiff.rs
new file mode 100644
index 00000000000..0c8d7ceccae
--- /dev/null
+++ b/beacon_node/store/src/hdiff.rs
@@ -0,0 +1,349 @@
+//! Hierarchical diff implementation.
+use crate::{DBColumn, StoreItem};
+use itertools::Itertools;
+use serde::{Deserialize, Serialize};
+use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
+use std::io::{Read, Write};
+use types::{BeaconState, ChainSpec, Epoch, EthSpec, VList};
+use zstd::{Decoder, Encoder};
+
+#[derive(Debug)]
+pub enum Error {
+    InvalidHierarchy,
+    XorDeletionsNotSupported,
+    UnableToComputeDiff,
+    UnableToApplyDiff,
+    Compression(std::io::Error),
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct HierarchyConfig {
+    exponents: Vec<u8>,
+}
+
+#[derive(Debug)]
+pub struct HierarchyModuli {
+    moduli: Vec<u64>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum StorageStrategy {
+    Nothing,
+    DiffFrom(Epoch),
+    Snapshot,
+}
+
+/// Hierarchical diff output and working buffer.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct HDiffBuffer {
+    state: Vec<u8>,
+    balances: Vec<u64>,
+}
+
+/// Hierarchical state diff.
+#[derive(Debug, Encode, Decode)]
+pub struct HDiff {
+    state_diff: BytesDiff,
+    balances_diff: XorDiff,
+}
+
+#[derive(Debug, Encode, Decode)]
+pub struct BytesDiff {
+    bytes: Vec<u8>,
+}
+
+#[derive(Debug, Encode, Decode)]
+pub struct XorDiff {
+    bytes: Vec<u8>,
+}
+
+impl HDiffBuffer {
+    pub fn from_state<E: EthSpec>(mut beacon_state: BeaconState<E>) -> Self {
+        let balances_list = std::mem::take(beacon_state.balances_mut());
+
+        let state = beacon_state.as_ssz_bytes();
+        let balances = balances_list.to_vec();
+
+        HDiffBuffer { state, balances }
+    }
+
+    pub fn into_state<E: EthSpec>(self, spec: &ChainSpec) -> Result<BeaconState<E>, Error> {
+        let mut state = BeaconState::from_ssz_bytes(&self.state, spec).unwrap();
+        *state.balances_mut() = VList::new(self.balances).unwrap();
+        Ok(state)
+    }
+}
+
+impl HDiff {
+    pub fn compute(source: &HDiffBuffer, target: &HDiffBuffer) -> Result<Self, Error> {
+        let state_diff = BytesDiff::compute(&source.state, &target.state)?;
+        let balances_diff = XorDiff::compute(&source.balances, &target.balances)?;
+
+        Ok(Self {
+            state_diff,
+            balances_diff,
+        })
+    }
+
+    pub fn apply(&self, source: &mut HDiffBuffer) -> Result<(), Error> {
+        let source_state = std::mem::take(&mut source.state);
+        self.state_diff.apply(&source_state, &mut source.state)?;
+
+        self.balances_diff.apply(&mut source.balances)?;
+        Ok(())
+    }
+
+    pub fn state_diff_len(&self) -> usize {
+        self.state_diff.bytes.len()
+    }
+
+    pub fn balances_diff_len(&self) -> usize {
+        self.balances_diff.bytes.len()
+    }
+}
+
+impl StoreItem for HDiff {
+    fn db_column() -> DBColumn {
+        DBColumn::BeaconStateDiff
+    }
+
+    fn as_store_bytes(&self) -> Result<Vec<u8>, crate::Error> {
+        Ok(self.as_ssz_bytes())
+    }
+
+    fn from_store_bytes(bytes: &[u8]) -> Result<Self, crate::Error> {
+        Ok(Self::from_ssz_bytes(bytes)?)
+    }
+}
+
+impl BytesDiff {
+    pub fn compute(source: &[u8], target: &[u8]) -> Result<Self, Error> {
+        Self::compute_xdelta(source, target)
+    }
+
+    pub fn compute_xdelta(source_bytes: &[u8], target_bytes: &[u8]) -> Result<Self, Error> {
+        let bytes =
+            xdelta3::encode(target_bytes, source_bytes).ok_or(Error::UnableToComputeDiff)?;
+        Ok(Self { bytes })
+    }
+
+    pub fn apply(&self, source: &[u8], target: &mut Vec<u8>) -> Result<(), Error> {
+        self.apply_xdelta(source, target)
+    }
+
+    pub fn apply_xdelta(&self, source: &[u8], target: &mut Vec<u8>) -> Result<(), Error> {
+        *target = xdelta3::decode(&self.bytes, source).ok_or(Error::UnableToApplyDiff)?;
+        Ok(())
+    }
+}
+
+impl XorDiff {
+    pub fn compute(xs: &[u64], ys: &[u64]) -> Result<Self, Error> {
+        if xs.len() > ys.len() {
+            return Err(Error::XorDeletionsNotSupported);
+        }
+
+        let uncompressed_bytes: Vec<u8> = ys
+            .iter()
+            .enumerate()
+            .flat_map(|(i, y)| {
+                // Diff from 0 if the entry is new.
+                let x = xs.get(i).copied().unwrap_or(0);
+                y.wrapping_sub(x).to_be_bytes()
+            })
+            .collect();
+
+        // FIXME(sproul): reconsider
+        let compression_level = 1;
+        let mut compressed_bytes = Vec::with_capacity(uncompressed_bytes.len() / 2);
+        let mut encoder =
+            Encoder::new(&mut compressed_bytes, compression_level).map_err(Error::Compression)?;
+        encoder
+            .write_all(&uncompressed_bytes)
+            .map_err(Error::Compression)?;
+        encoder.finish().map_err(Error::Compression)?;
+
+        Ok(XorDiff {
+            bytes: compressed_bytes,
+        })
+    }
+
+    pub fn apply(&self, xs: &mut Vec<u64>) -> Result<(), Error> {
+        // Decompress balances diff.
+        let mut balances_diff_bytes = Vec::with_capacity(2 * self.bytes.len());
+        let mut decoder = Decoder::new(&*self.bytes).map_err(Error::Compression)?;
+        decoder
+            .read_to_end(&mut balances_diff_bytes)
+            .map_err(Error::Compression)?;
+
+        for (i, diff_bytes) in balances_diff_bytes
+            .chunks(u64::BITS as usize / 8)
+            .enumerate()
+        {
+            // FIXME(sproul): unwrap
+            let diff = u64::from_be_bytes(diff_bytes.try_into().unwrap());
+
+            if let Some(x) = xs.get_mut(i) {
+                *x = x.wrapping_add(diff);
+            } else {
+                xs.push(diff);
+            }
+        }
+
+        Ok(())
+    }
+}
+
+impl Default for HierarchyConfig {
+    fn default() -> Self {
+        HierarchyConfig {
+            exponents: vec![0, 4, 6, 8, 11, 13, 16],
+        }
+    }
+}
+
+impl HierarchyConfig {
+    pub fn to_moduli(&self) -> Result<HierarchyModuli, Error> {
+        self.validate()?;
+        let moduli = self.exponents.iter().map(|n| 1 << n).collect();
+        Ok(HierarchyModuli { moduli })
+    }
+
+    pub fn validate(&self) -> Result<(), Error> {
+        if self.exponents.len() > 2
+            && self
+                .exponents
+                .iter()
+                .tuple_windows()
+                .all(|(small, big)| small < big && *big < u64::BITS as u8)
+        {
+            Ok(())
+        } else {
+            Err(Error::InvalidHierarchy)
+        }
+    }
+}
+
+impl HierarchyModuli {
+    pub fn storage_strategy(&self, epoch: Epoch) -> Result<StorageStrategy, Error> {
+        let last = self.moduli.last().copied().ok_or(Error::InvalidHierarchy)?;
+
+        if epoch % last == 0 {
+            return Ok(StorageStrategy::Snapshot);
+        }
+
+        let diff_from = self.moduli.iter().rev().find_map(|&n| {
+            (epoch % n == 0).then(|| {
+                // Diff from the previous state.
+                (epoch - 1) / n * n
+            })
+        });
+        Ok(diff_from.map_or(StorageStrategy::Nothing, StorageStrategy::DiffFrom))
+    }
+
+    /// Return the smallest epoch greater than or equal to `epoch` at which a full snapshot should
+    /// be stored.
+    pub fn next_snapshot_epoch(&self, epoch: Epoch) -> Result<Epoch, Error> {
+        let last = self.moduli.last().copied().ok_or(Error::InvalidHierarchy)?;
+        if epoch % last == 0 {
+            Ok(epoch)
+        } else {
+            Ok((epoch / last + 1) * last)
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn default_storage_strategy() {
+        let config = HierarchyConfig::default();
+        config.validate().unwrap();
+
+        let moduli = config.to_moduli().unwrap();
+
+        // Full snapshots at multiples of 2^16.
+        let snapshot_freq = Epoch::new(1 << 16);
+        assert_eq!(
+            moduli.storage_strategy(Epoch::new(0)).unwrap(),
+            StorageStrategy::Snapshot
+        );
+        assert_eq!(
+            moduli.storage_strategy(snapshot_freq).unwrap(),
+            StorageStrategy::Snapshot
+        );
+        assert_eq!(
+            moduli.storage_strategy(snapshot_freq * 3).unwrap(),
+            StorageStrategy::Snapshot
+        );
+
+        // For the first layer of diffs.
+        let first_layer = Epoch::new(1 << 13);
+        assert_eq!(
+            moduli.storage_strategy(first_layer * 2).unwrap(),
+            StorageStrategy::DiffFrom(first_layer)
+        );
+    }
+
+    #[test]
+    fn next_snapshot_epoch() {
+        let config = HierarchyConfig::default();
+        config.validate().unwrap();
+
+        let moduli = config.to_moduli().unwrap();
+        let snapshot_freq = Epoch::new(1 << 16);
+
+        assert_eq!(
+            moduli.next_snapshot_epoch(snapshot_freq).unwrap(),
+            snapshot_freq
+        );
+        assert_eq!(
+            moduli.next_snapshot_epoch(snapshot_freq + 1).unwrap(),
+            snapshot_freq * 2
+        );
+        assert_eq!(
+            moduli.next_snapshot_epoch(snapshot_freq * 2 - 1).unwrap(),
+            snapshot_freq * 2
+        );
+        assert_eq!(
+            moduli.next_snapshot_epoch(snapshot_freq * 2).unwrap(),
+            snapshot_freq * 2
+        );
+        assert_eq!(
+            moduli.next_snapshot_epoch(snapshot_freq * 100).unwrap(),
+            snapshot_freq * 100
+        );
+    }
+
+    #[test]
+    fn xor_vs_bytes_diff() {
+        let x_values = vec![99u64, 55, 123, 6834857, 0, 12];
+        let y_values = vec![98u64, 55, 312, 1, 1, 2, 4, 5];
+
+        let to_bytes =
+            |nums: &[u64]| -> Vec<u8> { nums.iter().flat_map(|x| x.to_be_bytes()).collect() };
+
+        let x_bytes = to_bytes(&x_values);
+        let y_bytes = to_bytes(&y_values);
+
+        let xor_diff = XorDiff::compute(&x_values, &y_values).unwrap();
+
+        let mut y_from_xor = x_values.clone();
+        xor_diff.apply(&mut y_from_xor).unwrap();
+
+        assert_eq!(y_values, y_from_xor);
+
+        let bytes_diff = BytesDiff::compute(&x_bytes, &y_bytes).unwrap();
+
+        let mut y_from_bytes = vec![];
+        bytes_diff.apply(&x_bytes, &mut y_from_bytes).unwrap();
+
+        assert_eq!(y_bytes, y_from_bytes);
+
+        // XOR diff wins by more than a factor of 3
+        assert!(xor_diff.bytes.len() < 3 * bytes_diff.bytes.len());
+    }
+}
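For intuition: the default exponents [0, 4, 6, 8, 11, 13, 16] produce moduli [1, 16, 64, 256, 2048, 8192, 65536]. A few extra worked cases, written as a test in the same style as the ones above (illustrative additions, not present in the diff):

#[test]
fn storage_strategy_worked_examples() {
    let moduli = HierarchyConfig::default().to_moduli().unwrap();

    // Largest modulus dividing 16384 is 8192, so diff from the previous
    // multiple of 8192: (16384 - 1) / 8192 * 8192 == 8192.
    assert_eq!(
        moduli.storage_strategy(Epoch::new(16384)).unwrap(),
        StorageStrategy::DiffFrom(Epoch::new(8192))
    );

    // Largest modulus dividing 48 is 16: diff from 32.
    assert_eq!(
        moduli.storage_strategy(Epoch::new(48)).unwrap(),
        StorageStrategy::DiffFrom(Epoch::new(32))
    );

    // Epochs not divisible by any larger modulus fall through to modulus 1:
    // diff from the immediately preceding epoch.
    assert_eq!(
        moduli.storage_strategy(Epoch::new(5)).unwrap(),
        StorageStrategy::DiffFrom(Epoch::new(4))
    );
}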
diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs
index 7695ea520e8..46101316709 100644
--- a/beacon_node/store/src/hot_cold_store.rs
+++ b/beacon_node/store/src/hot_cold_store.rs
@@ -1,15 +1,13 @@
-use crate::chunked_vector::{
-    store_updated_vector, BlockRoots, HistoricalRoots, HistoricalSummaries, RandaoMixes, StateRoots,
-};
-use crate::config::{
-    OnDiskStoreConfig, StoreConfig, DEFAULT_SLOTS_PER_RESTORE_POINT,
-    PREV_DEFAULT_SLOTS_PER_RESTORE_POINT,
-};
+use crate::config::{OnDiskStoreConfig, StoreConfig};
 use crate::forwards_iter::{HybridForwardsBlockRootsIterator, HybridForwardsStateRootsIterator};
-use crate::impls::beacon_state::{get_full_state, store_full_state};
+use crate::hdiff::{HDiff, HDiffBuffer, HierarchyModuli, StorageStrategy};
+use crate::hot_state_iter::HotStateRootIter;
+use crate::impls::{
+    beacon_state::{get_full_state, store_full_state},
+    frozen_block_slot::FrozenBlockSlot,
+};
 use crate::iter::{BlockRootsIterator, ParentRootBlockIterator, RootsIterator};
-use crate::leveldb_store::BytesKey;
-use crate::leveldb_store::LevelDB;
+use crate::leveldb_store::{BytesKey, LevelDB};
 use crate::memory_store::MemoryStore;
 use crate::metadata::{
     AnchorInfo, CompactionTimestamp, PruningCheckpoint, SchemaVersion, ANCHOR_INFO_KEY,
@@ -17,28 +15,39 @@ use crate::metadata::{
     SCHEMA_VERSION_KEY, SPLIT_KEY,
 };
 use crate::metrics;
+use crate::state_cache::{PutStateOutcome, StateCache};
 use crate::{
-    get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp,
-    PartialBeaconState, StoreItem, StoreOp,
+    get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, KeyValueStoreOp, StoreItem,
+    StoreOp, ValidatorPubkeyCache,
 };
 use itertools::process_results;
 use leveldb::iterator::LevelDBIterator;
 use lru::LruCache;
 use parking_lot::{Mutex, RwLock};
+use safe_arith::SafeArith;
 use serde_derive::{Deserialize, Serialize};
-use slog::{debug, error, info, trace, warn, Logger};
+use slog::{debug, error, info, warn, Logger};
 use ssz::{Decode, Encode};
 use ssz_derive::{Decode, Encode};
 use state_processing::{
-    BlockProcessingError, BlockReplayer, SlotProcessingError, StateProcessingStrategy,
+    block_replayer::PreSlotHook, BlockProcessingError, BlockReplayer, SlotProcessingError,
 };
 use std::cmp::min;
-use std::convert::TryInto;
+use std::collections::VecDeque;
+use std::io::{Read, Write};
 use std::marker::PhantomData;
+use std::num::NonZeroUsize;
 use std::path::Path;
 use std::sync::Arc;
 use std::time::Duration;
+use types::EthSpec;
 use types::*;
+use zstd::{Decoder, Encoder};
+
+// FIXME(sproul): configurable
+const DIFF_BUFFER_CACHE_SIZE: usize = 16;
+
+pub const MAX_PARENT_STATES_TO_CACHE: u64 = 1;
 
 /// On-disk database that stores finalized states efficiently.
 ///
@@ -54,6 +63,7 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     /// The starting slots for the range of blocks & states stored in the database.
     anchor_info: RwLock<Option<AnchorInfo>>,
     pub(crate) config: StoreConfig,
+    pub(crate) hierarchy: HierarchyModuli,
     /// Cold database containing compact historical data.
     pub cold_db: Cold,
     /// Hot database containing duplicated but quick-to-access recent data.
@@ -62,12 +72,22 @@ pub struct HotColdDB<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     pub hot_db: Hot,
     /// LRU cache of deserialized blocks. Updated whenever a block is loaded.
     block_cache: Mutex<LruCache<Hash256, SignedBeaconBlock<E>>>,
+    /// Cache of beacon states.
+    state_cache: Mutex<StateCache<E>>,
+    /// Immutable validator cache.
+    pub immutable_validators: Arc<RwLock<ValidatorPubkeyCache<E, Hot, Cold>>>,
     /// LRU cache of replayed states.
-    state_cache: Mutex<LruCache<Slot, BeaconState<E>>>,
+    // FIXME(sproul): re-enable historic state cache
+    #[allow(dead_code)]
+    historic_state_cache: Mutex<LruCache<Slot, BeaconState<E>>>,
+    /// Cache of hierarchical diff buffers.
+    diff_buffer_cache: Mutex<LruCache<Epoch, HDiffBuffer>>,
+    // Cache of hierarchical diffs.
+    // FIXME(sproul): see if this is necessary
     /// Chain spec.
     pub(crate) spec: ChainSpec,
     /// Logger.
-    pub(crate) log: Logger,
+    pub log: Logger,
     /// Mere vessel for E.
     _phantom: PhantomData<E>,
 }
@@ -87,25 +107,26 @@ pub enum HotColdDBError {
     },
     MissingStateToFreeze(Hash256),
     MissingRestorePointHash(u64),
+    MissingRestorePointState(Slot),
     MissingRestorePoint(Hash256),
     MissingColdStateSummary(Hash256),
     MissingHotStateSummary(Hash256),
     MissingEpochBoundaryState(Hash256),
+    MissingPrevState(Hash256),
     MissingSplitState(Hash256, Slot),
+    MissingStateDiff(Hash256),
+    MissingHDiff(Epoch),
     MissingExecutionPayload(Hash256),
     MissingFullBlockExecutionPayloadPruned(Hash256, Slot),
     MissingAnchorInfo,
+    MissingFrozenBlockSlot(Hash256),
+    MissingFrozenBlock(Slot),
     HotStateSummaryError(BeaconStateError),
     RestorePointDecodeError(ssz::DecodeError),
     BlockReplayBeaconError(BeaconStateError),
     BlockReplaySlotError(SlotProcessingError),
     BlockReplayBlockError(BlockProcessingError),
     MissingLowerLimitState(Slot),
-    InvalidSlotsPerRestorePoint {
-        slots_per_restore_point: u64,
-        slots_per_historical_root: u64,
-        slots_per_epoch: u64,
-    },
     RestorePointBlockHashError(BeaconStateError),
     IterationError {
         unexpected_key: BytesKey,
@@ -123,16 +144,31 @@ impl<E: EthSpec> HotColdDB<E, MemoryStore<E>, MemoryStore<E>> {
         spec: ChainSpec,
         log: Logger,
     ) -> Result<HotColdDB<E, MemoryStore<E>, MemoryStore<E>>, Error> {
-        Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
+        config.verify_compression_level()?;
+
+        let hierarchy = config.hierarchy_config.to_moduli()?;
+
+        let block_cache_size =
+            NonZeroUsize::new(config.block_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let state_cache_size =
+            NonZeroUsize::new(config.state_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let historic_state_cache_size =
+            NonZeroUsize::new(config.historic_state_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let diff_buffer_cache_size =
+            NonZeroUsize::new(DIFF_BUFFER_CACHE_SIZE).ok_or(Error::ZeroCacheSize)?;
 
         let db = HotColdDB {
             split: RwLock::new(Split::default()),
             anchor_info: RwLock::new(None),
             cold_db: MemoryStore::open(),
             hot_db: MemoryStore::open(),
-            block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
-            state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)),
+            block_cache: Mutex::new(LruCache::new(block_cache_size)),
+            state_cache: Mutex::new(StateCache::new(state_cache_size)),
+            immutable_validators: Arc::new(RwLock::new(Default::default())),
+            historic_state_cache: Mutex::new(LruCache::new(historic_state_cache_size)),
+            diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size)),
             config,
+            hierarchy,
             spec,
             log,
             _phantom: PhantomData,
@@ -145,8 +181,6 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
 impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
     /// Open a new or existing database, with the given paths to the hot and cold DBs.
     ///
-    /// The `slots_per_restore_point` parameter must be a divisor of `SLOTS_PER_HISTORICAL_ROOT`.
-    ///
     /// The `migrate_schema` function is passed in so that the parent `BeaconChain` can provide
     /// context and access `BeaconChain`-level code without creating a circular dependency.
     pub fn open(
@@ -157,40 +191,39 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
         spec: ChainSpec,
         log: Logger,
     ) -> Result<Arc<Self>, Error> {
-        Self::verify_slots_per_restore_point(config.slots_per_restore_point)?;
+        config.verify_compression_level()?;
+
+        let hierarchy = config.hierarchy_config.to_moduli()?;
 
-        let mut db = HotColdDB {
+        let block_cache_size =
+            NonZeroUsize::new(config.block_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let state_cache_size =
+            NonZeroUsize::new(config.state_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let historic_state_cache_size =
+            NonZeroUsize::new(config.historic_state_cache_size).ok_or(Error::ZeroCacheSize)?;
+        let diff_buffer_cache_size =
+            NonZeroUsize::new(DIFF_BUFFER_CACHE_SIZE).ok_or(Error::ZeroCacheSize)?;
+
+        let db = HotColdDB {
             split: RwLock::new(Split::default()),
             anchor_info: RwLock::new(None),
             cold_db: LevelDB::open(cold_path)?,
             hot_db: LevelDB::open(hot_path)?,
-            block_cache: Mutex::new(LruCache::new(config.block_cache_size)),
-            state_cache: Mutex::new(LruCache::new(config.historic_state_cache_size)),
+            block_cache: Mutex::new(LruCache::new(block_cache_size)),
+            state_cache: Mutex::new(StateCache::new(state_cache_size)),
+            immutable_validators: Arc::new(RwLock::new(Default::default())),
+            historic_state_cache: Mutex::new(LruCache::new(historic_state_cache_size)),
+            diff_buffer_cache: Mutex::new(LruCache::new(diff_buffer_cache_size)),
             config,
+            hierarchy,
             spec,
             log,
             _phantom: PhantomData,
         };
 
-        // Allow the slots-per-restore-point value to stay at the previous default if the config
-        // uses the new default. Don't error on a failed read because the config itself may need
-        // migrating.
-        if let Ok(Some(disk_config)) = db.load_config() {
-            if !db.config.slots_per_restore_point_set_explicitly
-                && disk_config.slots_per_restore_point == PREV_DEFAULT_SLOTS_PER_RESTORE_POINT
-                && db.config.slots_per_restore_point == DEFAULT_SLOTS_PER_RESTORE_POINT
-            {
-                debug!(
-                    db.log,
-                    "Ignoring slots-per-restore-point config in favour of on-disk value";
-                    "config" => db.config.slots_per_restore_point,
-                    "on_disk" => disk_config.slots_per_restore_point,
-                );
-
-                // Mutate the in-memory config so that it's compatible.
-                db.config.slots_per_restore_point = PREV_DEFAULT_SLOTS_PER_RESTORE_POINT;
-            }
-        }
+        // Load the config from disk but don't error on a failed read because the config itself may
+        // need migrating.
+        let _ = db.load_config();
 
         // Load the previous split slot from the database (if any). This ensures we can
        // stop and restart correctly. This needs to occur *before* running any migrations
@@ -207,6 +240,11 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
             );
         }
 
+        // Load validator pubkey cache.
+        // FIXME(sproul): probably breaks migrations, etc
+        let pubkey_cache = ValidatorPubkeyCache::load_from_store(&db)?;
+        *db.immutable_validators.write() = pubkey_cache;
+
         // Ensure that the schema version of the on-disk database matches the software.
         // If the version is mismatched, an automatic migration will be attempted.
         let db = Arc::new(db);
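Both constructors now validate the configured cache sizes before building any cache. A minimal sketch of the guard in isolation, assuming an `lru` crate version whose `LruCache::new` takes `NonZeroUsize` (the error name mirrors `Error::ZeroCacheSize` above):

use lru::LruCache;
use std::num::NonZeroUsize;

#[derive(Debug)]
enum StoreError {
    ZeroCacheSize, // stand-in for the store's Error::ZeroCacheSize
}

fn new_cache<K: std::hash::Hash + Eq, V>(size: usize) -> Result<LruCache<K, V>, StoreError> {
    // A configured size of 0 is rejected up front instead of surfacing as a
    // panic or a useless cache later on.
    let size = NonZeroUsize::new(size).ok_or(StoreError::ZeroCacheSize)?;
    Ok(LruCache::new(size))
}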
@@ -264,6 +302,21 @@ impl<E: EthSpec> HotColdDB<E, LevelDB<E>, LevelDB<E>> {
 }
 
 impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold> {
+    pub fn update_finalized_state(
+        &self,
+        state_root: Hash256,
+        block_root: Hash256,
+        state: BeaconState<E>,
+    ) -> Result<(), Error> {
+        self.state_cache
+            .lock()
+            .update_finalized_state(state_root, block_root, state)
+    }
+
+    pub fn state_cache_len(&self) -> usize {
+        self.state_cache.lock().len()
+    }
+
     /// Store a block and update the LRU cache.
     pub fn put_block(
         &self,
@@ -297,7 +350,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
 
         // Store execution payload if present.
         if let Some(ref execution_payload) = payload {
-            ops.push(execution_payload.as_kv_store_op(*key));
+            ops.push(execution_payload.as_kv_store_op(*key)?);
         }
 
         // Re-construct block. This should always succeed.
@@ -323,6 +376,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     pub fn try_get_full_block(
         &self,
         block_root: &Hash256,
+        slot: Option<Slot>,
     ) -> Result<Option<DatabaseBlock<E>>, Error> {
         metrics::inc_counter(&metrics::BEACON_BLOCK_GET_COUNT);
 
@@ -333,7 +387,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         }
 
         // Load the blinded block.
-        let blinded_block = match self.get_blinded_block(block_root)? {
+        let blinded_block = match self.get_blinded_block(block_root, slot)? {
             Some(block) => block,
             None => return Ok(None),
         };
@@ -380,8 +434,9 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     pub fn get_full_block(
         &self,
         block_root: &Hash256,
+        slot: Option<Slot>,
     ) -> Result<Option<SignedBeaconBlock<E>>, Error> {
-        match self.try_get_full_block(block_root)? {
+        match self.try_get_full_block(block_root, slot)? {
             Some(DatabaseBlock::Full(block)) => Ok(Some(block)),
             Some(DatabaseBlock::Blinded(block)) => Err(
                 HotColdDBError::MissingFullBlockExecutionPayloadPruned(*block_root, block.slot())
@@ -412,12 +467,115 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     pub fn get_blinded_block(
         &self,
         block_root: &Hash256,
-    ) -> Result<Option<SignedBeaconBlock<E, BlindedPayload<E>>>, Error> {
+        slot: Option<Slot>,
+    ) -> Result<Option<SignedBlindedBeaconBlock<E>>, Error> {
+        if let Some(slot) = slot {
+            if slot < self.get_split_slot() || slot == 0 {
+                // To the freezer DB.
+                self.get_cold_blinded_block_by_slot(slot)
+            } else {
+                self.get_hot_blinded_block(block_root)
+            }
+        } else {
+            match self.get_hot_blinded_block(block_root)? {
+                Some(block) => Ok(Some(block)),
+                None => self.get_cold_blinded_block_by_root(block_root),
+            }
+        }
+    }
+
+    pub fn get_hot_blinded_block(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<Option<SignedBlindedBeaconBlock<E>>, Error> {
         self.get_block_with(block_root, |bytes| {
             SignedBeaconBlock::from_ssz_bytes(bytes, &self.spec)
         })
     }
 
+    pub fn get_cold_blinded_block_by_root(
+        &self,
+        block_root: &Hash256,
+    ) -> Result<Option<SignedBlindedBeaconBlock<E>>, Error> {
+        // Load slot.
+        if let Some(FrozenBlockSlot(block_slot)) = self.cold_db.get(block_root)? {
+            self.get_cold_blinded_block_by_slot(block_slot)
+        } else {
+            Ok(None)
+        }
+    }
+
+    pub fn get_cold_blinded_block_by_slot(
+        &self,
+        slot: Slot,
+    ) -> Result<Option<SignedBlindedBeaconBlock<E>>, Error> {
+        let bytes = if let Some(bytes) = self.cold_db.get_bytes(
+            DBColumn::BeaconBlockFrozen.into(),
+            &slot.as_u64().to_be_bytes(),
+        )? {
+            bytes
+        } else {
+            return Ok(None);
+        };
+
+        let mut ssz_bytes = Vec::with_capacity(self.config.estimate_decompressed_size(bytes.len()));
+        let mut decoder = Decoder::new(&*bytes).map_err(Error::Compression)?;
+        decoder
+            .read_to_end(&mut ssz_bytes)
+            .map_err(Error::Compression)?;
+        Ok(Some(SignedBeaconBlock::from_ssz_bytes(
+            &ssz_bytes, &self.spec,
+        )?))
+    }
+
+    pub fn put_cold_blinded_block(
+        &self,
+        block_root: &Hash256,
+        block: &SignedBlindedBeaconBlock<E>,
+    ) -> Result<(), Error> {
+        let mut ops = Vec::with_capacity(2);
+        self.blinded_block_as_cold_kv_store_ops(block_root, block, &mut ops)?;
+        self.cold_db.do_atomically(ops)
+    }
+
+    pub fn blinded_block_as_cold_kv_store_ops(
+        &self,
+        block_root: &Hash256,
+        block: &SignedBlindedBeaconBlock<E>,
+        kv_store_ops: &mut Vec<KeyValueStoreOp>,
+    ) -> Result<(), Error> {
+        // Write the block root to slot mapping.
+        let slot = block.slot();
+        kv_store_ops.push(FrozenBlockSlot(slot).as_kv_store_op(*block_root)?);
+
+        // Write the slot to block root mapping.
+        kv_store_ops.push(KeyValueStoreOp::PutKeyValue(
+            get_key_for_col(
+                DBColumn::BeaconBlockRoots.into(),
+                &slot.as_u64().to_be_bytes(),
+            ),
+            block_root.as_bytes().to_vec(),
+        ));
+
+        // Write the block keyed by slot.
+        let db_key = get_key_for_col(
+            DBColumn::BeaconBlockFrozen.into(),
+            &slot.as_u64().to_be_bytes(),
+        );
+
+        let ssz_bytes = block.as_ssz_bytes();
+        let mut compressed_value =
+            Vec::with_capacity(self.config.estimate_compressed_size(ssz_bytes.len()));
+        let mut encoder = Encoder::new(&mut compressed_value, self.config.compression_level)
+            .map_err(Error::Compression)?;
+        encoder.write_all(&ssz_bytes).map_err(Error::Compression)?;
+        encoder.finish().map_err(Error::Compression)?;
+
+        kv_store_ops.push(KeyValueStoreOp::PutKeyValue(db_key, compressed_value));
+
+        Ok(())
+    }
+
     /// Fetch a block from the store, ignoring which fork variant it *should* be for.
     pub fn get_block_any_variant<Payload: AbstractExecPayload<E>>(
         &self,
@@ -473,10 +631,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
             .map(|payload| payload.is_some())
     }
 
-    /// Determine whether a block exists in the database.
+    /// Determine whether a block exists in the database (hot *or* cold).
     pub fn block_exists(&self, block_root: &Hash256) -> Result<bool, Error> {
-        self.hot_db
-            .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes())
+        Ok(self
+            .hot_db
+            .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes())?
+            || self
+                .cold_db
+                .key_exists(DBColumn::BeaconBlock.into(), block_root.as_bytes())?)
     }
 
     /// Delete a block from the store and the block cache.
@@ -531,49 +693,32 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                 // chain. This way we avoid returning a state that doesn't match `state_root`.
                 self.load_cold_state(state_root)
             } else {
-                self.load_hot_state(state_root, StateProcessingStrategy::Accurate)
+                self.get_hot_state(state_root)
             }
         } else {
-            match self.load_hot_state(state_root, StateProcessingStrategy::Accurate)? {
+            match self.get_hot_state(state_root)? {
                 Some(state) => Ok(Some(state)),
                 None => self.load_cold_state(state_root),
            }
         }
     }
 
-    /// Fetch a state from the store, but don't compute all of the values when replaying blocks
-    /// upon that state (e.g., state roots). Additionally, only states from the hot store are
-    /// returned.
-    ///
-    /// See `Self::get_state` for information about `slot`.
+    /// Get a state with `latest_block_root == block_root` advanced through to at most `slot`.
     ///
-    /// ## Warning
-    ///
-    /// The returned state **is not a valid beacon state**, it can only be used for obtaining
-    /// shuffling to process attestations. At least the following components of the state will be
-    /// broken/invalid:
-    ///
-    /// - `state.state_roots`
-    /// - `state.block_roots`
-    pub fn get_inconsistent_state_for_attestation_verification_only(
+    /// The `state_root` argument is used to look up the block's un-advanced state in case of a
+    /// cache miss.
+    pub fn get_advanced_state(
         &self,
-        state_root: &Hash256,
-        slot: Option<Slot>,
-    ) -> Result<Option<BeaconState<E>>, Error> {
-        metrics::inc_counter(&metrics::BEACON_STATE_GET_COUNT);
-
-        let split_slot = self.get_split_slot();
-
-        if slot.map_or(false, |slot| slot < split_slot) {
-            Err(HotColdDBError::AttestationStateIsFinalized {
-                split_slot,
-                request_slot: slot,
-                state_root: *state_root,
-            }
-            .into())
-        } else {
-            self.load_hot_state(state_root, StateProcessingStrategy::Inconsistent)
+        block_root: Hash256,
+        slot: Slot,
+        state_root: Hash256,
+    ) -> Result<Option<(Hash256, BeaconState<E>)>, Error> {
+        if let Some(cached) = self.state_cache.lock().get_by_block_root(block_root, slot) {
+            return Ok(Some(cached));
         }
+        Ok(self
+            .get_hot_state(&state_root)?
+            .map(|state| (state_root, state)))
     }
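The lookup policy introduced in `get_blinded_block` above reduces to a small routing decision on the (optional) slot hint. Condensed into a free-standing sketch (names hypothetical):

enum BlockLocation {
    HotByRoot,  // recent blocks, keyed by block root in the hot DB
    ColdBySlot, // finalized blocks, keyed by slot in the freezer
    TryBoth,    // slot unknown: try hot first, then fall back to cold
}

fn route_block_lookup(slot: Option<u64>, split_slot: u64) -> BlockLocation {
    match slot {
        Some(s) if s < split_slot || s == 0 => BlockLocation::ColdBySlot,
        Some(_) => BlockLocation::HotByRoot,
        None => BlockLocation::TryBoth,
    }
}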
     /// Delete a state, ensuring it is removed from the LRU cache, as well as from on-disk.
@@ -583,17 +728,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     /// (which are frozen, and won't be deleted), or valid descendants of the finalized checkpoint
     /// (which will be deleted by this function but shouldn't be).
     pub fn delete_state(&self, state_root: &Hash256, slot: Slot) -> Result<(), Error> {
-        // Delete the state summary.
-        self.hot_db
-            .key_delete(DBColumn::BeaconStateSummary.into(), state_root.as_bytes())?;
-
-        // Delete the full state if it lies on an epoch boundary.
-        if slot % E::slots_per_epoch() == 0 {
-            self.hot_db
-                .key_delete(DBColumn::BeaconState.into(), state_root.as_bytes())?;
-        }
-
-        Ok(())
+        self.do_atomically(vec![StoreOp::DeleteState(*state_root, Some(slot))])
     }
 
     pub fn forwards_block_roots_iterator(
@@ -601,14 +736,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         start_slot: Slot,
         end_state: BeaconState<E>,
         end_block_root: Hash256,
-        spec: &ChainSpec,
     ) -> Result<impl Iterator<Item = Result<(Hash256, Slot)>> + '_, Error> {
         HybridForwardsBlockRootsIterator::new(
             self,
+            DBColumn::BeaconBlockRoots,
             start_slot,
             None,
             || (end_state, end_block_root),
-            spec,
         )
     }
 
@@ -617,9 +751,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         start_slot: Slot,
         end_slot: Slot,
         get_state: impl FnOnce() -> (BeaconState<E>, Hash256),
-        spec: &ChainSpec,
     ) -> Result<HybridForwardsBlockRootsIterator<E, Hot, Cold>, Error> {
-        HybridForwardsBlockRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec)
+        HybridForwardsBlockRootsIterator::new(
+            self,
+            DBColumn::BeaconBlockRoots,
+            start_slot,
+            Some(end_slot),
+            get_state,
+        )
     }
 
     pub fn forwards_state_roots_iterator(
@@ -627,14 +766,13 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         start_slot: Slot,
         end_state_root: Hash256,
         end_state: BeaconState<E>,
-        spec: &ChainSpec,
     ) -> Result<impl Iterator<Item = Result<(Hash256, Slot)>> + '_, Error> {
         HybridForwardsStateRootsIterator::new(
             self,
+            DBColumn::BeaconStateRoots,
             start_slot,
             None,
             || (end_state, end_state_root),
-            spec,
         )
     }
 
@@ -643,47 +781,14 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         start_slot: Slot,
         end_slot: Slot,
         get_state: impl FnOnce() -> (BeaconState<E>, Hash256),
-        spec: &ChainSpec,
     ) -> Result<HybridForwardsStateRootsIterator<E, Hot, Cold>, Error> {
-        HybridForwardsStateRootsIterator::new(self, start_slot, Some(end_slot), get_state, spec)
-    }
-
-    /// Load an epoch boundary state by using the hot state summary look-up.
-    ///
-    /// Will fall back to the cold DB if a hot state summary is not found.
-    pub fn load_epoch_boundary_state(
-        &self,
-        state_root: &Hash256,
-    ) -> Result<Option<BeaconState<E>>, Error> {
-        if let Some(HotStateSummary {
-            epoch_boundary_state_root,
-            ..
-        }) = self.load_hot_state_summary(state_root)?
-        {
-            // NOTE: minor inefficiency here because we load an unnecessary hot state summary
-            //
-            // `StateProcessingStrategy` should be irrelevant here since we never replay blocks for an epoch
-            // boundary state in the hot DB.
-            let state = self
-                .load_hot_state(
-                    &epoch_boundary_state_root,
-                    StateProcessingStrategy::Accurate,
-                )?
-                .ok_or(HotColdDBError::MissingEpochBoundaryState(
-                    epoch_boundary_state_root,
-                ))?;
-            Ok(Some(state))
-        } else {
-            // Try the cold DB
-            match self.load_cold_state_slot(state_root)? {
-                Some(state_slot) => {
-                    let epoch_boundary_slot =
-                        state_slot / E::slots_per_epoch() * E::slots_per_epoch();
-                    self.load_cold_state_by_slot(epoch_boundary_slot)
-                }
-                None => Ok(None),
-            }
-        }
+        HybridForwardsStateRootsIterator::new(
+            self,
+            DBColumn::BeaconStateRoots,
+            start_slot,
+            Some(end_slot),
+            get_state,
+        )
     }
 
     pub fn put_item<I: StoreItem>(&self, key: &Hash256, item: &I) -> Result<(), Error> {
@@ -718,12 +823,8 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                     self.store_hot_state(&state_root, state, &mut key_value_batch)?;
                 }
 
-                StoreOp::PutStateSummary(state_root, summary) => {
-                    key_value_batch.push(summary.as_kv_store_op(state_root));
-                }
-
                 StoreOp::PutStateTemporaryFlag(state_root) => {
-                    key_value_batch.push(TemporaryFlag.as_kv_store_op(state_root));
+                    key_value_batch.push(TemporaryFlag.as_kv_store_op(state_root)?);
                 }
 
                 StoreOp::DeleteStateTemporaryFlag(state_root) => {
@@ -743,12 +844,19 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
                     key_value_batch.push(KeyValueStoreOp::DeleteKey(state_summary_key));
 
                     if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) {
+                        // Delete full state if any.
                         let state_key =
                             get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes());
                         key_value_batch.push(KeyValueStoreOp::DeleteKey(state_key));
+
+                        // Delete diff too.
+                        let diff_key = get_key_for_col(
+                            DBColumn::BeaconStateDiff.into(),
+                            state_root.as_bytes(),
+                        );
+                        key_value_batch.push(KeyValueStoreOp::DeleteKey(diff_key));
                     }
                 }
 
                 StoreOp::DeleteExecutionPayload(block_root) => {
                     let key = get_key_for_col(DBColumn::ExecPayload.into(), block_root.as_bytes());
                     key_value_batch.push(KeyValueStoreOp::DeleteKey(key));
@@ -765,27 +873,28 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     pub fn do_atomically(&self, batch: Vec<StoreOp<E>>) -> Result<(), Error> {
         // Update the block cache whilst holding a lock, to ensure that the cache updates atomically
         // with the database.
-        let mut guard = self.block_cache.lock();
+        let mut block_cache = self.block_cache.lock();
 
         for op in &batch {
             match op {
                 StoreOp::PutBlock(block_root, block) => {
-                    guard.put(*block_root, (**block).clone());
+                    block_cache.put(*block_root, (**block).clone());
                 }
 
                 StoreOp::PutState(_, _) => (),
 
-                StoreOp::PutStateSummary(_, _) => (),
-
                 StoreOp::PutStateTemporaryFlag(_) => (),
 
                 StoreOp::DeleteStateTemporaryFlag(_) => (),
 
                 StoreOp::DeleteBlock(block_root) => {
-                    guard.pop(block_root);
+                    block_cache.pop(block_root);
+                    self.state_cache.lock().delete_block_states(block_root);
                 }
 
-                StoreOp::DeleteState(_, _) => (),
+                StoreOp::DeleteState(state_root, _) => {
+                    self.state_cache.lock().delete_state(state_root)
+                }
 
                 StoreOp::DeleteExecutionPayload(_) => (),
@@ -795,7 +904,7 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         self.hot_db
             .do_atomically(self.convert_to_kv_batch(batch)?)?;
-        drop(guard);
+        drop(block_cache);
 
         Ok(())
     }
@@ -810,115 +919,560 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
         state: &BeaconState<E>,
         ops: &mut Vec<KeyValueStoreOp>,
     ) -> Result<(), Error> {
-        // On the epoch boundary, store the full state.
-        if state.slot() % E::slots_per_epoch() == 0 {
-            trace!(
-                self.log,
-                "Storing full state on epoch boundary";
-                "slot" => state.slot().as_u64(),
-                "state_root" => format!("{:?}", state_root)
-            );
-            store_full_state(state_root, state, ops)?;
+        // Put the state in the cache.
+        // FIXME(sproul): could optimise out the block root
+        let block_root = state.get_latest_block_root(*state_root);
+
+        // Avoid storing states in the database if they already exist in the state cache.
+        // The exception to this is the finalized state, which must exist in the cache before it
+        // is stored on disk.
+        if let PutStateOutcome::Duplicate =
+            self.state_cache
+                .lock()
+                .put_state(*state_root, block_root, state)?
+        {
+            return Ok(());
         }
 
         // Store a summary of the state.
         // We store one even for the epoch boundary states, as we may need their slots
         // when doing a look up by state root.
-        let hot_state_summary = HotStateSummary::new(state_root, state)?;
-        let op = hot_state_summary.as_kv_store_op(*state_root);
+        let diff_base_slot = self.state_diff_slot(state.slot());
+
+        let hot_state_summary = HotStateSummary::new(state_root, state, diff_base_slot)?;
+        let op = hot_state_summary.as_kv_store_op(*state_root)?;
         ops.push(op);
 
+        // On an epoch boundary, consider storing:
+        //
+        // 1. A full state, if the state is the split state or a fork boundary state.
+        // 2. A state diff, if the state is a multiple of `epochs_per_state_diff` after the
+        //    split state.
+        if state.slot() % E::slots_per_epoch() == 0 {
+            if self.is_stored_as_full_state(*state_root, state.slot())? {
+                info!(
+                    self.log,
+                    "Storing full state on epoch boundary";
+                    "slot" => state.slot(),
+                    "state_root" => ?state_root,
+                );
+                self.store_full_state_in_batch(state_root, state, ops)?;
+            } else if let Some(base_slot) = diff_base_slot {
+                debug!(
+                    self.log,
+                    "Storing state diff on boundary";
+                    "slot" => state.slot(),
+                    "base_slot" => base_slot,
+                    "state_root" => ?state_root,
+                );
+                let diff_base_state_root = hot_state_summary.diff_base_state_root;
+                let diff_base_state = self.get_hot_state(&diff_base_state_root)?.ok_or(
+                    HotColdDBError::MissingEpochBoundaryState(diff_base_state_root),
+                )?;
+
+                let compute_diff_timer =
+                    metrics::start_timer(&metrics::BEACON_STATE_DIFF_COMPUTE_TIME);
+
+                let base_buffer = HDiffBuffer::from_state(diff_base_state);
+                let target_buffer = HDiffBuffer::from_state(state.clone());
+                let diff = HDiff::compute(&base_buffer, &target_buffer)?;
+                drop(compute_diff_timer);
+                ops.push(diff.as_kv_store_op(*state_root)?);
+            }
+        }
+
         Ok(())
     }
 
+    pub fn store_full_state(
+        &self,
+        state_root: &Hash256,
+        state: &BeaconState<E>,
+    ) -> Result<(), Error> {
+        let mut ops = Vec::with_capacity(4);
+        self.store_full_state_in_batch(state_root, state, &mut ops)?;
+        self.hot_db.do_atomically(ops)
+    }
+
+    pub fn store_full_state_in_batch(
+        &self,
+        state_root: &Hash256,
+        state: &BeaconState<E>,
+        ops: &mut Vec<KeyValueStoreOp>,
+    ) -> Result<(), Error> {
+        store_full_state(state_root, state, ops, &self.config)
+    }
+
+    /// Get a post-finalization state from the state cache or, failing that, the hot database.
+    pub fn get_hot_state(&self, state_root: &Hash256) -> Result<Option<BeaconState<E>>, Error> {
+        if let Some(state) = self.state_cache.lock().get_by_state_root(*state_root) {
+            return Ok(Some(state));
+        }
+        warn!(
+            self.log,
+            "State cache missed";
+            "state_root" => ?state_root,
+        );
+
+        let state_from_disk = self.load_hot_state(state_root)?;
+
+        if let Some((state, block_root)) = state_from_disk {
+            self.state_cache
+                .lock()
+                .put_state(*state_root, block_root, &state)?;
+            Ok(Some(state))
+        } else {
+            Ok(None)
+        }
+    }
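`load_hot_state` below rebuilds a state by alternating block replay with diff application. The diff step itself, expressed against the hdiff API introduced earlier in this diff (a sketch, with error handling collapsed into `hdiff::Error`):

use crate::hdiff::{self, HDiff, HDiffBuffer};
use types::{BeaconState, ChainSpec, EthSpec};

fn apply_one_diff<E: EthSpec>(
    base: BeaconState<E>,
    diff: &HDiff,
    spec: &ChainSpec,
) -> Result<BeaconState<E>, hdiff::Error> {
    // Flatten the base state into SSZ bytes plus a separate balances vector.
    let mut buffer = HDiffBuffer::from_state(base);
    // xdelta patch for the state bytes, compressed u64 deltas for the balances.
    diff.apply(&mut buffer)?;
    // Decode the patched buffer back into a full BeaconState.
    buffer.into_state(spec)
}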
     /// Load a post-finalization state from the hot database.
     ///
-    /// Will replay blocks from the nearest epoch boundary.
+    /// Use a combination of state diffs and replayed blocks as appropriate.
+    ///
+    /// Return the `(state, latest_block_root)` if found.
     pub fn load_hot_state(
         &self,
         state_root: &Hash256,
-        state_processing_strategy: StateProcessingStrategy,
-    ) -> Result<Option<BeaconState<E>>, Error> {
+    ) -> Result<Option<(BeaconState<E>, Hash256)>, Error> {
+        let _timer = metrics::start_timer(&metrics::BEACON_HOT_STATE_READ_TIMES);
         metrics::inc_counter(&metrics::BEACON_STATE_HOT_GET_COUNT);
 
-        // If the state is marked as temporary, do not return it. It will become visible
-        // only once its transaction commits and deletes its temporary flag.
-        if self.load_state_temporary_flag(state_root)?.is_some() {
-            return Ok(None);
+        // If the state is the finalized state, load it from disk. This should only be necessary
+        // once during start-up, after which point the finalized state will be cached.
+        if *state_root == self.get_split_info().state_root {
+            return self.load_hot_state_full(state_root).map(Some);
         }
 
-        if let Some(HotStateSummary {
-            slot,
-            latest_block_root,
-            epoch_boundary_state_root,
-        }) = self.load_hot_state_summary(state_root)?
+        let target_summary = if let Some(summary) = self.load_hot_state_summary(state_root)? {
+            summary
+        } else {
+            return Ok(None);
+        };
+
+        let target_slot = target_summary.slot;
+        let target_latest_block_root = target_summary.latest_block_root;
+
+        // Load the latest block, and use it to confirm the validity of this state.
+        if self
+            .get_blinded_block(&target_summary.latest_block_root, None)?
+            .is_none()
         {
-            let boundary_state =
-                get_full_state(&self.hot_db, &epoch_boundary_state_root, &self.spec)?.ok_or(
-                    HotColdDBError::MissingEpochBoundaryState(epoch_boundary_state_root),
-                )?;
+            // Dangling state, will be deleted fully once finalization advances past it.
+            debug!(
+                self.log,
+                "Ignoring state load for dangling state";
+                "state_root" => ?state_root,
+                "slot" => target_slot,
+                "latest_block_root" => ?target_summary.latest_block_root,
+            );
+            return Ok(None);
+        }
+
+        // Take a read lock on the split point while we load data from prior states. We need
+        // to prevent the finalization migration from deleting the state summaries and state diffs
+        // that we are iterating back through.
+        let split_read_lock = self.split.read_recursive();
+
+        // Backtrack until we reach a state that is in the cache, or in the worst case
+        // the finalized state (this should only be reachable on first start-up).
+        let state_summary_iter = HotStateRootIter::new(self, target_slot, *state_root);
+
+        // State and state root of the state upon which blocks and diffs will be replayed.
+        let mut base_state = None;
+
+        // State diffs to be replayed on top of `base_state`.
+        // Each element is `(summary, state_root, diff)` such that applying `diff` to the
+        // state with `summary.diff_base_state_root` yields the state with `state_root`.
+        let mut state_diffs = VecDeque::new();
+
+        // State roots for all slots between `base_state` and the `target_slot`. Depending on how
+        // the diffs fall, some of these roots may not be needed.
+        let mut state_roots = VecDeque::new();
+
+        for res in state_summary_iter {
+            let (prior_state_root, prior_summary) = res?;
+
+            state_roots.push_front(Ok((prior_state_root, prior_summary.slot)));
+
+            // Check if this state is in the cache.
+            if let Some(state) = self.state_cache.lock().get_by_state_root(prior_state_root) {
+                debug!(
+                    self.log,
+                    "Found cached base state for replay";
+                    "base_state_root" => ?prior_state_root,
+                    "base_slot" => prior_summary.slot,
+                    "target_state_root" => ?state_root,
+                    "target_slot" => target_slot,
+                );
+                base_state = Some((prior_state_root, state));
+                break;
+            }
+
+            // If the prior state is the split state and it isn't cached then load it in
+            // entirety from disk. This should only happen on first start up.
+            if prior_state_root == split_read_lock.state_root {
+                debug!(
+                    self.log,
+                    "Using split state as base state for replay";
+                    "base_state_root" => ?prior_state_root,
+                    "base_slot" => prior_summary.slot,
+                    "target_state_root" => ?state_root,
+                    "target_slot" => target_slot,
+                );
+                let (split_state, _) = self.load_hot_state_full(&prior_state_root)?;
+                base_state = Some((prior_state_root, split_state));
+                break;
+            }
+
+            // If there's a state diff stored at this slot, load it and store it for application.
+            if !prior_summary.diff_base_state_root.is_zero() {
+                let diff = self.load_hot_state_diff(prior_state_root)?;
+                state_diffs.push_front((prior_summary, prior_state_root, diff));
+            }
+        }
 
-            // Optimization to avoid even *thinking* about replaying blocks if we're already
-            // on an epoch boundary.
-            let state = if slot % E::slots_per_epoch() == 0 {
-                boundary_state
+        let (_, mut state) = base_state.ok_or(Error::NoBaseStateFound(*state_root))?;
+
+        // Finished reading information about prior states, allow the split point to update.
+        drop(split_read_lock);
+
+        // Construct a mutable iterator for the state roots, which will be iterated through
+        // consecutive calls to `replay_blocks`.
+        let mut state_roots_iter = state_roots.into_iter();
+
+        // This hook caches states from block replay so that they may be reused.
+        let state_cacher_hook = |opt_state_root: Option<Hash256>, state: &mut BeaconState<_>| {
+            // Ensure all caches are built before attempting to cache.
+            state.update_tree_hash_cache()?;
+            state.build_all_caches(&self.spec)?;
+
+            if let Some(state_root) = opt_state_root {
+                // Cache
+                if state.slot() + MAX_PARENT_STATES_TO_CACHE >= target_slot
+                    || state.slot() % E::slots_per_epoch() == 0
+                {
+                    let slot = state.slot();
+                    let latest_block_root = state.get_latest_block_root(state_root);
+                    if let PutStateOutcome::New =
+                        self.state_cache
+                            .lock()
+                            .put_state(state_root, latest_block_root, state)?
+                    {
+                        debug!(
+                            self.log,
+                            "Cached ancestor state";
+                            "state_root" => ?state_root,
+                            "slot" => slot,
+                        );
+                    }
+                }
+            } else {
+                debug!(
+                    self.log,
+                    "Block replay state root miss";
+                    "slot" => state.slot(),
+                );
+            }
+            Ok(())
+        };
+
+        // Apply the diffs, and replay blocks atop the base state to reach the target state.
+        while state.slot() < target_slot {
+            // Drop unnecessary diffs.
+            state_diffs.retain(|(summary, diff_root, _)| {
+                let keep = summary.diff_base_slot >= state.slot();
+                if !keep {
+                    debug!(
+                        self.log,
+                        "Ignoring irrelevant state diff";
+                        "diff_state_root" => ?diff_root,
+                        "diff_base_slot" => summary.diff_base_slot,
+                        "current_state_slot" => state.slot(),
+                    );
+                }
+                keep
+            });
+
+            // Get the next diff that will be applicable, taking the highest slot diff in case of
+            // multiple diffs which are applicable at the same base slot, which can happen if the
+            // diff frequency has changed.
+            let mut next_state_diff: Option<(HotStateSummary, Hash256, HDiff)> = None;
+            while let Some((summary, _, _)) = state_diffs.front() {
+                if next_state_diff.as_ref().map_or(true, |(current, _, _)| {
+                    summary.diff_base_slot == current.diff_base_slot
+                }) {
+                    next_state_diff = state_diffs.pop_front();
+                } else {
+                    break;
+                }
+            }
+
+            // Replay blocks to get to the next diff's base state, or to the target state if there
+            // is no next diff to apply.
+            if next_state_diff
+                .as_ref()
+                .map_or(true, |(next_summary, _, _)| {
+                    next_summary.diff_base_slot != state.slot()
+                })
+            {
+                let (next_slot, latest_block_root) = next_state_diff
+                    .as_ref()
+                    .map(|(summary, _, _)| (summary.diff_base_slot, summary.latest_block_root))
+                    .unwrap_or_else(|| (target_summary.slot, target_latest_block_root));
+                debug!(
+                    self.log,
+                    "Replaying blocks";
+                    "from_slot" => state.slot(),
+                    "to_slot" => next_slot,
+                    "latest_block_root" => ?latest_block_root,
+                );
                 let blocks =
-                    self.load_blocks_to_replay(boundary_state.slot(), slot, latest_block_root)?;
-                self.replay_blocks(
-                    boundary_state,
+                    self.load_blocks_to_replay(state.slot(), next_slot, latest_block_root)?;
+
+                state = self.replay_blocks(
+                    state,
                     blocks,
-                    slot,
-                    no_state_root_iter(),
-                    state_processing_strategy,
-                )?
-            };
+                    next_slot,
+                    &mut state_roots_iter,
+                    Some(Box::new(state_cacher_hook)),
+                )?;
 
-            Ok(Some(state))
+                state.update_tree_hash_cache()?;
+                state.build_all_caches(&self.spec)?;
+            }
+
+            // Apply state diff. Block replay should have ensured that the diff is now applicable.
+            if let Some((summary, to_root, diff)) = next_state_diff {
+                let block_root = summary.latest_block_root;
+                debug!(
+                    self.log,
+                    "Applying state diff";
+                    "from_root" => ?summary.diff_base_state_root,
+                    "from_slot" => summary.diff_base_slot,
+                    "to_root" => ?to_root,
+                    "to_slot" => summary.slot,
+                    "block_root" => ?block_root,
+                );
+                assert_eq!(summary.diff_base_slot, state.slot());
+
+                let mut base_buffer = HDiffBuffer::from_state(state);
+                diff.apply(&mut base_buffer)?;
+                state = base_buffer.into_state(&self.spec)?;
+
+                state.update_tree_hash_cache()?;
+                state.build_all_caches(&self.spec)?;
+
+                // Add state to the cache, it is by definition an epoch boundary state and likely
+                // to be useful.
+                self.state_cache
+                    .lock()
+                    .put_state(to_root, block_root, &state)?;
+            }
+        }
+
+        Ok(Some((state, target_latest_block_root)))
+    }
+
+    /// Determine if the `state_root` at `slot` should be stored as a full state.
+    ///
+    /// This is dependent on the database's current split point, so may change from `false` to
+    /// `true` after a finalization update. It cannot change from `true` to `false` for a state in
+    /// the hot database as the split state will be migrated to the freezer.
+    ///
+    /// All fork boundary states are also stored as full states.
+    pub fn is_stored_as_full_state(&self, state_root: Hash256, slot: Slot) -> Result<bool, Error> {
+        let split = self.get_split_info();
+
+        if slot >= split.slot {
+            Ok(state_root == split.state_root
+                || self.spec.fork_activated_at_slot::<E>(slot).is_some())
         } else {
-            Ok(None)
+            Err(Error::SlotIsBeforeSplit { slot })
         }
     }
 
-    /// Store a pre-finalization state in the freezer database.
+    /// Determine if a state diff should be stored at `slot`.
     ///
-    /// If the state doesn't lie on a restore point boundary then just its summary will be stored.
+    /// If `Some(base_slot)` is returned then a state diff should be constructed for the state
+    /// at `slot` based on the ancestor state at `base_slot`. The frequency of state diffs stored
+    /// on disk is determined by the `epochs_per_state_diff` parameter.
+    pub fn state_diff_slot(&self, slot: Slot) -> Option<Slot> {
+        let split = self.get_split_info();
+        let slots_per_epoch = E::slots_per_epoch();
+
+        if slot % slots_per_epoch != 0 {
+            return None;
+        }
+
+        let epochs_since_split = slot.saturating_sub(split.slot).epoch(slots_per_epoch);
+
+        (epochs_since_split > 0 && epochs_since_split % self.config.epochs_per_state_diff == 0)
+            .then(|| slot.saturating_sub(self.config.epochs_per_state_diff * slots_per_epoch))
+    }
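A quick check of the diff-base arithmetic above, with small illustrative constants (32-slot epochs, `epochs_per_state_diff = 4`, split at slot 1024); this mirrors `state_diff_slot` using plain integers:

fn state_diff_base_slot(slot: u64, split_slot: u64) -> Option<u64> {
    const SLOTS_PER_EPOCH: u64 = 32; // illustrative
    const EPOCHS_PER_STATE_DIFF: u64 = 4; // illustrative

    if slot % SLOTS_PER_EPOCH != 0 {
        return None;
    }
    let epochs_since_split = slot.saturating_sub(split_slot) / SLOTS_PER_EPOCH;
    (epochs_since_split > 0 && epochs_since_split % EPOCHS_PER_STATE_DIFF == 0)
        .then(|| slot - EPOCHS_PER_STATE_DIFF * SLOTS_PER_EPOCH)
}

fn main() {
    assert_eq!(state_diff_base_slot(1100, 1024), None); // not epoch-aligned
    assert_eq!(state_diff_base_slot(1056, 1024), None); // only 1 epoch past the split
    assert_eq!(state_diff_base_slot(1152, 1024), Some(1024)); // 4 epochs past the split
    assert_eq!(state_diff_base_slot(1280, 1024), Some(1152)); // 8 epochs past the split
}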
-        let db = &self.cold_db;
-        store_updated_vector(BlockRoots, db, state, &self.spec, ops)?;
-        store_updated_vector(StateRoots, db, state, &self.spec, ops)?;
-        store_updated_vector(HistoricalRoots, db, state, &self.spec, ops)?;
-        store_updated_vector(RandaoMixes, db, state, &self.spec, ops)?;
-        store_updated_vector(HistoricalSummaries, db, state, &self.spec, ops)?;
+    pub fn store_cold_state_as_snapshot(
+        &self,
+        state: &BeaconState<E>,
+        ops: &mut Vec<KeyValueStoreOp>,
+    ) -> Result<(), Error> {
+        let bytes = state.as_ssz_bytes();
+        let mut compressed_value =
+            Vec::with_capacity(self.config.estimate_compressed_size(bytes.len()));
+        let mut encoder = Encoder::new(&mut compressed_value, self.config.compression_level)
+            .map_err(Error::Compression)?;
+        encoder.write_all(&bytes).map_err(Error::Compression)?;
+        encoder.finish().map_err(Error::Compression)?;
+
+        let epoch = state.current_epoch();
+        let key = get_key_for_col(
+            DBColumn::BeaconStateSnapshot.into(),
+            &epoch.as_u64().to_be_bytes(),
+        );
+        ops.push(KeyValueStoreOp::PutKeyValue(key, compressed_value));
+        Ok(())
+    }
+
+    pub fn load_cold_state_bytes_as_snapshot(
+        &self,
+        epoch: Epoch,
+    ) -> Result<Option<Vec<u8>>, Error> {
+        match self.cold_db.get_bytes(
+            DBColumn::BeaconStateSnapshot.into(),
+            &epoch.as_u64().to_be_bytes(),
+        )? {
+            Some(bytes) => {
+                let mut ssz_bytes =
+                    Vec::with_capacity(self.config.estimate_decompressed_size(bytes.len()));
+                let mut decoder = Decoder::new(&*bytes).map_err(Error::Compression)?;
+                decoder
+                    .read_to_end(&mut ssz_bytes)
+                    .map_err(Error::Compression)?;
+                Ok(Some(ssz_bytes))
+            }
+            None => Ok(None),
+        }
+    }
 
-        // 3. Store restore point.
-        let restore_point_index = state.slot().as_u64() / self.config.slots_per_restore_point;
-        self.store_restore_point_hash(restore_point_index, *state_root, ops);
+    pub fn load_cold_state_as_snapshot(
+        &self,
+        epoch: Epoch,
+    ) -> Result<Option<BeaconState<E>>, Error> {
+        Ok(self
+            .load_cold_state_bytes_as_snapshot(epoch)?
+            .map(|bytes| BeaconState::from_ssz_bytes(&bytes, &self.spec))
+            .transpose()?)
+    }
 
+    pub fn store_cold_state_as_diff(
+        &self,
+        state: &BeaconState<E>,
+        from_epoch: Epoch,
+        ops: &mut Vec<KeyValueStoreOp>,
+    ) -> Result<(), Error> {
+        // Load diff base state bytes.
+        let base_buffer = self.load_hdiff_buffer_for_epoch(from_epoch)?;
+        let target_buffer = HDiffBuffer::from_state(state.clone());
+        let diff = HDiff::compute(&base_buffer, &target_buffer)?;
+        let diff_bytes = diff.as_ssz_bytes();
+
+        let key = get_key_for_col(
+            DBColumn::BeaconStateDiff.into(),
+            &state.current_epoch().as_u64().to_be_bytes(),
+        );
+        ops.push(KeyValueStoreOp::PutKeyValue(key, diff_bytes));
         Ok(())
     }
 
@@ -936,151 +1490,91 @@ impl<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> HotColdDB<E, Hot, Cold>
     ///
     /// Will reconstruct the state if it lies between restore points.
     pub fn load_cold_state_by_slot(&self, slot: Slot) -> Result<Option<BeaconState<E>>, Error> {
-        // Guard against fetching states that do not exist due to gaps in the historic state
-        // database, which can occur due to checkpoint sync or re-indexing.
-        // See the comments in `get_historic_state_limits` for more information.
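// A self-contained round-trip using the same `zstd` encoder/decoder pattern as the
// snapshot functions above. The compression level and capacity hint are placeholders
// here; the real values come from `StoreConfig`.
use std::io::{Read, Write};

fn compress_sketch(ssz_bytes: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut compressed = Vec::with_capacity(ssz_bytes.len() / 2);
    let mut encoder = zstd::Encoder::new(&mut compressed, 1)?;
    encoder.write_all(ssz_bytes)?;
    encoder.finish()?;
    Ok(compressed)
}

fn decompress_sketch(compressed: &[u8]) -> std::io::Result<Vec<u8>> {
    let mut ssz_bytes = Vec::new();
    zstd::Decoder::new(compressed)?.read_to_end(&mut ssz_bytes)?;
    Ok(ssz_bytes)
}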
- let (lower_limit, upper_limit) = self.get_historic_state_limits(); - - if slot <= lower_limit || slot >= upper_limit { - if slot % self.config.slots_per_restore_point == 0 { - let restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; - self.load_restore_point_by_index(restore_point_idx) - } else { - self.load_cold_intermediate_state(slot) - } - .map(Some) - } else { - Ok(None) + let epoch = slot.epoch(E::slots_per_epoch()); + + let hdiff_buffer = self.load_hdiff_buffer_for_epoch(epoch)?; + let base_state = hdiff_buffer.into_state(&self.spec)?; + + if base_state.slot() == slot { + return Ok(Some(base_state)); } - } - /// Load a restore point state by its `state_root`. - fn load_restore_point(&self, state_root: &Hash256) -> Result, Error> { - let partial_state_bytes = self - .cold_db - .get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? - .ok_or(HotColdDBError::MissingRestorePoint(*state_root))?; - let mut partial_state: PartialBeaconState = - PartialBeaconState::from_ssz_bytes(&partial_state_bytes, &self.spec)?; + let blocks = self.load_cold_blocks(base_state.slot() + 1, slot)?; - // Fill in the fields of the partial state. - partial_state.load_block_roots(&self.cold_db, &self.spec)?; - partial_state.load_state_roots(&self.cold_db, &self.spec)?; - partial_state.load_historical_roots(&self.cold_db, &self.spec)?; - partial_state.load_randao_mixes(&self.cold_db, &self.spec)?; - partial_state.load_historical_summaries(&self.cold_db, &self.spec)?; + // Include state root for base state as it is required by block processing. + let state_root_iter = + self.forwards_state_roots_iterator_until(base_state.slot(), slot, || { + panic!("FIXME(sproul): unreachable state root iter miss") + })?; - partial_state.try_into() + self.replay_blocks(base_state, blocks, slot, state_root_iter, None) + .map(Some) } - /// Load a restore point state by its `restore_point_index`. - fn load_restore_point_by_index( - &self, - restore_point_index: u64, - ) -> Result, Error> { - let state_root = self.load_restore_point_hash(restore_point_index)?; - self.load_restore_point(&state_root) + fn load_hdiff_for_epoch(&self, epoch: Epoch) -> Result { + self.cold_db + .get_bytes( + DBColumn::BeaconStateDiff.into(), + &epoch.as_u64().to_be_bytes(), + )? + .map(|bytes| HDiff::from_ssz_bytes(&bytes)) + .ok_or(HotColdDBError::MissingHDiff(epoch))? + .map_err(Into::into) } - /// Load a frozen state that lies between restore points. - fn load_cold_intermediate_state(&self, slot: Slot) -> Result, Error> { - if let Some(state) = self.state_cache.lock().get(&slot) { - return Ok(state.clone()); + fn load_hdiff_buffer_for_epoch(&self, epoch: Epoch) -> Result { + if let Some(buffer) = self.diff_buffer_cache.lock().get(&epoch) { + debug!( + self.log, + "Hit diff buffer cache"; + "epoch" => epoch + ); + return Ok(buffer.clone()); } - // 1. Load the restore points either side of the intermediate state. - let low_restore_point_idx = slot.as_u64() / self.config.slots_per_restore_point; - let high_restore_point_idx = low_restore_point_idx + 1; - - // Use low restore point as the base state. - let mut low_slot: Slot = - Slot::new(low_restore_point_idx * self.config.slots_per_restore_point); - let mut low_state: Option> = None; - - // Try to get a more recent state from the cache to avoid massive blocks replay. 
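// The `diff_buffer_cache` hit above is presumably an LRU map keyed by epoch; a
// minimal sketch of the get-or-rebuild pattern it implements, using the `lru`
// crate (the value type and rebuild step are stand-ins):
use lru::LruCache;
use parking_lot::Mutex;

fn buffer_for_epoch(cache: &Mutex<LruCache<u64, Vec<u8>>>, epoch: u64) -> Vec<u8> {
    if let Some(buffer) = cache.lock().get(&epoch) {
        return buffer.clone(); // cache hit: reuse the prebuilt buffer
    }
    let buffer = vec![0u8; 16]; // stand-in for the snapshot-plus-diffs rebuild
    cache.lock().put(epoch, buffer.clone());
    buffer
}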
- for (s, state) in self.state_cache.lock().iter() { - if s.as_u64() / self.config.slots_per_restore_point == low_restore_point_idx - && *s < slot - && low_slot < *s - { - low_slot = *s; - low_state = Some(state.clone()); + // Load buffer for the previous state. + // This amount of recursion (<10 levels) should be OK. + let t = std::time::Instant::now(); + let mut buffer = match self.hierarchy.storage_strategy(epoch)? { + // Base case. + StorageStrategy::Snapshot => { + let state = self + .load_cold_state_as_snapshot(epoch)? + .ok_or(Error::MissingSnapshot(epoch))?; + return Ok(HDiffBuffer::from_state(state)); } - } - - // If low_state is still None, use load_restore_point_by_index to load the state. - let low_state = match low_state { - Some(state) => state, - None => self.load_restore_point_by_index(low_restore_point_idx)?, + // Recursive case. + StorageStrategy::DiffFrom(from) => self.load_hdiff_buffer_for_epoch(from)?, + StorageStrategy::Nothing => unreachable!("FIXME(sproul)"), }; - // Acquire the read lock, so that the split can't change while this is happening. - let split = self.split.read_recursive(); - - let high_restore_point = self.get_restore_point(high_restore_point_idx, &split)?; - - // 2. Load the blocks from the high restore point back to the low point. - let blocks = self.load_blocks_to_replay( - low_slot, - slot, - self.get_high_restore_point_block_root(&high_restore_point, slot)?, - )?; - - // 3. Replay the blocks on top of the low point. - // Use a forwards state root iterator to avoid doing any tree hashing. - // The state root of the high restore point should never be used, so is safely set to 0. - let state_root_iter = self.forwards_state_roots_iterator_until( - low_slot, - slot, - || (high_restore_point, Hash256::zero()), - &self.spec, - )?; + // Load diff and apply it to buffer. + let diff = self.load_hdiff_for_epoch(epoch)?; + diff.apply(&mut buffer)?; - let state = self.replay_blocks( - low_state, - blocks, - slot, - Some(state_root_iter), - StateProcessingStrategy::Accurate, - )?; - - // If state is not error, put it in the cache. - self.state_cache.lock().put(slot, state.clone()); - - Ok(state) - } + self.diff_buffer_cache.lock().put(epoch, buffer.clone()); + debug!( + self.log, + "Added diff buffer to cache"; + "load_time_ms" => t.elapsed().as_millis(), + "epoch" => epoch + ); - /// Get the restore point with the given index, or if it is out of bounds, the split state. - pub(crate) fn get_restore_point( - &self, - restore_point_idx: u64, - split: &Split, - ) -> Result, Error> { - if restore_point_idx * self.config.slots_per_restore_point >= split.slot.as_u64() { - self.get_state(&split.state_root, Some(split.slot))? - .ok_or(HotColdDBError::MissingSplitState( - split.state_root, - split.slot, - )) - .map_err(Into::into) - } else { - self.load_restore_point_by_index(restore_point_idx) - } + Ok(buffer) } - /// Get a suitable block root for backtracking from `high_restore_point` to the state at `slot`. - /// - /// Defaults to the block root for `slot`, which *should* be in range. - fn get_high_restore_point_block_root( + /// Load cold blocks between `start_slot` and `end_slot` inclusive. 
+ pub fn load_cold_blocks( &self, - high_restore_point: &BeaconState, - slot: Slot, - ) -> Result { - high_restore_point - .get_block_root(slot) - .or_else(|_| high_restore_point.get_oldest_block_root()) - .map(|x| *x) - .map_err(HotColdDBError::RestorePointBlockHashError) + start_slot: Slot, + end_slot: Slot, + ) -> Result>, Error> { + process_results( + (start_slot.as_u64()..=end_slot.as_u64()) + .map(Slot::new) + .map(|slot| self.get_cold_blinded_block_by_slot(slot)), + |iter| iter.filter_map(|x| x).collect(), + ) } /// Load the blocks between `start_slot` and `end_slot` by backtracking from `end_block_hash`. @@ -1127,35 +1621,33 @@ impl, Cold: ItemStore> HotColdDB /// /// Will skip slots as necessary. The returned state is not guaranteed /// to have any caches built, beyond those immediately required by block processing. - fn replay_blocks( + pub fn replay_blocks( &self, state: BeaconState, blocks: Vec>>, target_slot: Slot, - state_root_iter: Option>>, - state_processing_strategy: StateProcessingStrategy, + state_root_iter: impl Iterator>, + pre_slot_hook: Option>, ) -> Result, Error> { let mut block_replayer = BlockReplayer::new(state, &self.spec) - .state_processing_strategy(state_processing_strategy) .no_signature_verification() - .minimal_block_root_verification(); + .minimal_block_root_verification() + .state_root_iter(state_root_iter); - let have_state_root_iterator = state_root_iter.is_some(); - if let Some(state_root_iter) = state_root_iter { - block_replayer = block_replayer.state_root_iter(state_root_iter); + if let Some(pre_slot_hook) = pre_slot_hook { + block_replayer = block_replayer.pre_slot_hook(pre_slot_hook); } block_replayer .apply_blocks(blocks, Some(target_slot)) .map(|block_replayer| { - if have_state_root_iterator && block_replayer.state_root_miss() { + if block_replayer.state_root_miss() { warn!( self.log, - "State root iterator miss"; + "State root cache miss during block replay"; "slot" => target_slot, ); } - block_replayer.into_state() }) } @@ -1184,12 +1676,6 @@ impl, Cold: ItemStore> HotColdDB *self.split.write() = Split { slot, state_root }; } - /// Fetch the slot of the most recently stored restore point. - pub fn get_latest_restore_point_slot(&self) -> Slot { - (self.get_split_slot() - 1) / self.config.slots_per_restore_point - * self.config.slots_per_restore_point - } - /// Load the database schema version from disk. fn load_schema_version(&self) -> Result, Error> { self.hot_db.get(&SCHEMA_VERSION_KEY) @@ -1209,7 +1695,7 @@ impl, Cold: ItemStore> HotColdDB let column = SchemaVersion::db_column().into(); let key = SCHEMA_VERSION_KEY.as_bytes(); let db_key = get_key_for_col(column, key); - let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()); + let op = KeyValueStoreOp::PutKeyValue(db_key, schema_version.as_store_bytes()?); ops.push(op); self.hot_db.do_atomically(ops) @@ -1218,20 +1704,19 @@ impl, Cold: ItemStore> HotColdDB /// Initialise the anchor info for checkpoint sync starting from `block`. pub fn init_anchor_info(&self, block: BeaconBlockRef<'_, E>) -> Result { let anchor_slot = block.slot(); - let slots_per_restore_point = self.config.slots_per_restore_point; + let anchor_epoch = anchor_slot.epoch(E::slots_per_epoch()); - // Set the `state_upper_limit` to the slot of the *next* restore point. + // Set the `state_upper_limit` to the slot of the *next* checkpoint. // See `get_state_upper_limit` for rationale. 
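// Design note on the `replay_blocks` signature change above: the state-root
// iterator went from an `Option` to a required `impl Iterator`, so the
// "no roots available" case becomes an empty iterator rather than a `None`
// branch. A toy illustration of that pattern (names hypothetical):
fn replay_sketch(state_roots: impl Iterator<Item = ([u8; 32], u64)>) -> usize {
    // No `if let Some(iter)` branch needed; an empty iterator simply yields nothing.
    state_roots.count()
}

fn replay_sketch_without_roots() -> usize {
    replay_sketch(std::iter::empty())
}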
- let next_restore_point_slot = if anchor_slot % slots_per_restore_point == 0 { - anchor_slot - } else { - (anchor_slot / slots_per_restore_point + 1) * slots_per_restore_point - }; + let next_snapshot_slot = self + .hierarchy + .next_snapshot_epoch(anchor_epoch)? + .start_slot(E::slots_per_epoch()); let anchor_info = AnchorInfo { anchor_slot, oldest_block_slot: anchor_slot, oldest_block_parent: block.parent_root(), - state_upper_limit: next_restore_point_slot, + state_upper_limit: next_snapshot_slot, state_lower_limit: self.spec.genesis_slot, }; self.compare_and_set_anchor_info(None, Some(anchor_info)) @@ -1258,7 +1743,7 @@ impl, Cold: ItemStore> HotColdDB ) -> Result { let mut anchor_info = self.anchor_info.write(); if *anchor_info == prev_value { - let kv_op = self.store_anchor_info_in_batch(&new_value); + let kv_op = self.store_anchor_info_in_batch(&new_value)?; *anchor_info = new_value; Ok(kv_op) } else { @@ -1285,14 +1770,17 @@ impl, Cold: ItemStore> HotColdDB /// /// The argument is intended to be `self.anchor_info`, but is passed manually to avoid issues /// with recursive locking. - fn store_anchor_info_in_batch(&self, anchor_info: &Option) -> KeyValueStoreOp { + fn store_anchor_info_in_batch( + &self, + anchor_info: &Option, + ) -> Result { if let Some(ref anchor_info) = anchor_info { anchor_info.as_kv_store_op(ANCHOR_INFO_KEY) } else { - KeyValueStoreOp::DeleteKey(get_key_for_col( + Ok(KeyValueStoreOp::DeleteKey(get_key_for_col( DBColumn::BeaconMeta.into(), ANCHOR_INFO_KEY.as_bytes(), - )) + ))) } } @@ -1367,11 +1855,12 @@ impl, Cold: ItemStore> HotColdDB } /// Stage the split for storage to disk. - pub fn store_split_in_batch(&self) -> KeyValueStoreOp { + pub fn store_split_in_batch(&self) -> Result { self.split.read_recursive().as_kv_store_op(SPLIT_KEY) } /// Load the state root of a restore point. + #[allow(unused)] fn load_restore_point_hash(&self, restore_point_index: u64) -> Result { let key = Self::restore_point_key(restore_point_index); self.cold_db @@ -1381,18 +1870,21 @@ impl, Cold: ItemStore> HotColdDB } /// Store the state root of a restore point. + #[allow(unused)] fn store_restore_point_hash( &self, restore_point_index: u64, state_root: Hash256, ops: &mut Vec, - ) { + ) -> Result<(), Error> { let value = &RestorePointHash { state_root }; - let op = value.as_kv_store_op(Self::restore_point_key(restore_point_index)); + let op = value.as_kv_store_op(Self::restore_point_key(restore_point_index))?; ops.push(op); + Ok(()) } /// Convert a `restore_point_index` into a database key. + #[allow(unused)] fn restore_point_key(restore_point_index: u64) -> Hash256 { Hash256::from_low_u64_be(restore_point_index) } @@ -1413,6 +1905,18 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } + /// Iterate all hot state summaries in the database. + pub fn iter_hot_state_summaries( + &self, + ) -> impl Iterator> + '_ { + self.hot_db + .iter_column(DBColumn::BeaconStateSummary) + .map(|res| { + let (key, value_bytes) = res?; + Ok((key, HotStateSummary::from_store_bytes(&value_bytes)?)) + }) + } + /// Load the temporary flag for a state root, if one exists. /// /// Returns `Some` if the state is temporary, or `None` if the state is permanent or does not @@ -1424,36 +1928,6 @@ impl, Cold: ItemStore> HotColdDB self.hot_db.get(state_root) } - /// Check that the restore point frequency is valid. 
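// Worked example for the `state_upper_limit` rounding above, assuming
// (hypothetically) snapshots every 256 epochs and 32 slots per epoch: an anchor
// at epoch 1000 rounds up to snapshot epoch 1024, i.e. slot 32768.
fn next_snapshot_epoch_sketch(anchor_epoch: u64, snapshot_period: u64) -> u64 {
    ((anchor_epoch + snapshot_period - 1) / snapshot_period) * snapshot_period
}
// next_snapshot_epoch_sketch(1000, 256) == 1024, and 1024 * 32 == 32768.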
- /// - /// Specifically, check that it is: - /// (1) A divisor of the number of slots per historical root, and - /// (2) Divisible by the number of slots per epoch - /// - /// - /// (1) ensures that we have at least one restore point within range of our state - /// root history when iterating backwards (and allows for more frequent restore points if - /// desired). - /// - /// (2) ensures that restore points align with hot state summaries, making it - /// quick to migrate hot to cold. - fn verify_slots_per_restore_point(slots_per_restore_point: u64) -> Result<(), HotColdDBError> { - let slots_per_historical_root = E::SlotsPerHistoricalRoot::to_u64(); - let slots_per_epoch = E::slots_per_epoch(); - if slots_per_restore_point > 0 - && slots_per_historical_root % slots_per_restore_point == 0 - && slots_per_restore_point % slots_per_epoch == 0 - { - Ok(()) - } else { - Err(HotColdDBError::InvalidSlotsPerRestorePoint { - slots_per_restore_point, - slots_per_historical_root, - slots_per_epoch, - }) - } - } - /// Run a compaction pass to free up space used by deleted states. pub fn compact(&self) -> Result<(), Error> { self.hot_db.compact()?; @@ -1476,11 +1950,14 @@ impl, Cold: ItemStore> HotColdDB /// Store the checkpoint to begin pruning from (the "old finalized checkpoint"). pub fn store_pruning_checkpoint(&self, checkpoint: Checkpoint) -> Result<(), Error> { self.hot_db - .do_atomically(vec![self.pruning_checkpoint_store_op(checkpoint)]) + .do_atomically(vec![self.pruning_checkpoint_store_op(checkpoint)?]) } /// Create a staged store for the pruning checkpoint. - pub fn pruning_checkpoint_store_op(&self, checkpoint: Checkpoint) -> KeyValueStoreOp { + pub fn pruning_checkpoint_store_op( + &self, + checkpoint: Checkpoint, + ) -> Result { PruningCheckpoint { checkpoint }.as_kv_store_op(PRUNING_CHECKPOINT_KEY) } @@ -1611,80 +2088,114 @@ impl, Cold: ItemStore> HotColdDB /// Advance the split point of the store, moving new finalized states to the freezer. pub fn migrate_database, Cold: ItemStore>( store: Arc>, - frozen_head_root: Hash256, - frozen_head: &BeaconState, + finalized_state_root: Hash256, + finalized_block_root: Hash256, + finalized_state: &BeaconState, ) -> Result<(), Error> { debug!( store.log, "Freezer migration started"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); // 0. Check that the migration is sensible. - // The new frozen head must increase the current split slot, and lie on an epoch + // The new finalized state must increase the current split slot, and lie on an epoch // boundary (in order for the hot state summary scheme to work). let current_split_slot = store.split.read_recursive().slot; - let anchor_slot = store - .anchor_info - .read_recursive() - .as_ref() - .map(|a| a.anchor_slot); + let anchor_info = store.anchor_info.read_recursive().clone(); + let anchor_slot = anchor_info.as_ref().map(|a| a.anchor_slot); - if frozen_head.slot() < current_split_slot { + if finalized_state.slot() < current_split_slot { return Err(HotColdDBError::FreezeSlotError { current_split_slot, - proposed_split_slot: frozen_head.slot(), + proposed_split_slot: finalized_state.slot(), } .into()); } - if frozen_head.slot() % E::slots_per_epoch() != 0 { - return Err(HotColdDBError::FreezeSlotUnaligned(frozen_head.slot()).into()); + if finalized_state.slot() % E::slots_per_epoch() != 0 { + return Err(HotColdDBError::FreezeSlotUnaligned(finalized_state.slot()).into()); } + // Store the new finalized state as a full state in the database. 
It would likely previously + // have been stored in memory, or maybe as a diff. + store.store_full_state(&finalized_state_root, finalized_state)?; + + // Copy all of the states between the new finalized state and the split slot, from the hot DB to + // the cold DB. let mut hot_db_ops: Vec> = Vec::new(); + let mut cold_db_block_ops: Vec = vec![]; - // 1. Copy all of the states between the head and the split slot, from the hot DB - // to the cold DB. Delete the execution payloads of these now-finalized blocks. - let state_root_iter = RootsIterator::new(&store, frozen_head); - for maybe_tuple in state_root_iter.take_while(|result| match result { - Ok((_, _, slot)) => { - slot >= ¤t_split_slot - && anchor_slot.map_or(true, |anchor_slot| slot >= &anchor_slot) + let state_roots = RootsIterator::new(&store, finalized_state) + .take_while(|result| match result { + Ok((_, _, slot)) => { + slot >= ¤t_split_slot + && anchor_slot.map_or(true, |anchor_slot| slot >= &anchor_slot) + } + Err(_) => true, + }) + .collect::, _>>()?; + + // Iterate states in slot ascending order, as they are stored wrt previous states. + for (block_root, state_root, slot) in state_roots.into_iter().rev() { + // Delete the execution payload if payload pruning is enabled. At a skipped slot we may + // delete the payload for the finalized block itself, but that's OK as we only guarantee + // that payloads are present for slots >= the split slot. The payload fetching code is also + // forgiving of missing payloads. + if store.config.prune_payloads { + hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); + } + + // Copy the blinded block from the hot database to the freezer. + // FIXME(sproul): make this load lazy + let blinded_block = store + .get_blinded_block(&block_root, None)? + .ok_or(Error::BlockNotFound(block_root))?; + if blinded_block.slot() == slot { + store.blinded_block_as_cold_kv_store_ops( + &block_root, + &blinded_block, + &mut cold_db_block_ops, + )?; + } + + // Store the slot to block root mapping. + cold_db_block_ops.push(KeyValueStoreOp::PutKeyValue( + get_key_for_col( + DBColumn::BeaconBlockRoots.into(), + &slot.as_u64().to_be_bytes(), + ), + block_root.as_bytes().to_vec(), + )); + + // Delete the old summary, and the full state if we lie on an epoch boundary. + hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); + + // Do not try to store states if the first snapshot is yet to be stored. + if anchor_info + .as_ref() + .map_or(false, |anchor| slot < anchor.state_upper_limit) + { + debug!(store.log, "Skipping cold state storage"; "slot" => slot); + continue; } - Err(_) => true, - }) { - let (block_root, state_root, slot) = maybe_tuple?; let mut cold_db_ops: Vec = Vec::new(); - if slot % store.config.slots_per_restore_point == 0 { - let state: BeaconState = get_full_state(&store.hot_db, &state_root, &store.spec)? + if slot % E::slots_per_epoch() == 0 { + let state: BeaconState = store + .get_hot_state(&state_root)? .ok_or(HotColdDBError::MissingStateToFreeze(state_root))?; store.store_cold_state(&state_root, &state, &mut cold_db_ops)?; + } else { + // Store slot -> state_root and state_root -> slot mappings. + store.store_cold_state_summary(&state_root, slot, &mut cold_db_ops)?; } - // Store a pointer from this state root to its slot, so we can later reconstruct states - // from their state root alone. 
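// Why the slot-to-block-root mapping above keys on `to_be_bytes`: big-endian
// encodings compare byte-wise in the same order as the numbers themselves, so
// the database's sorted iteration walks slots in ascending order.
fn be_key_order_demo() {
    assert!(255u64.to_be_bytes() < 256u64.to_be_bytes());
    // The little-endian encodings would sort the other way around:
    assert!(255u64.to_le_bytes() > 256u64.to_le_bytes());
}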
- let cold_state_summary = ColdStateSummary { slot }; - let op = cold_state_summary.as_kv_store_op(state_root); - cold_db_ops.push(op); - // There are data dependencies between calls to `store_cold_state()` that prevent us from // doing one big call to `store.cold_db.do_atomically()` at end of the loop. store.cold_db.do_atomically(cold_db_ops)?; - - // Delete the old summary, and the full state if we lie on an epoch boundary. - hot_db_ops.push(StoreOp::DeleteState(state_root, Some(slot))); - - // Delete the execution payload if payload pruning is enabled. At a skipped slot we may - // delete the payload for the finalized block itself, but that's OK as we only guarantee - // that payloads are present for slots >= the split slot. The payload fetching code is also - // forgiving of missing payloads. - if store.config.prune_payloads { - hot_db_ops.push(StoreOp::DeleteExecutionPayload(block_root)); - } } // Warning: Critical section. We have to take care not to put any of the two databases in an @@ -1698,6 +2209,7 @@ pub fn migrate_database, Cold: ItemStore>( // exceedingly rare event, this should be an acceptable tradeoff. // Flush to disk all the states that have just been migrated to the cold store. + store.cold_db.do_atomically(cold_db_block_ops)?; store.cold_db.sync()?; { @@ -1724,8 +2236,8 @@ pub fn migrate_database, Cold: ItemStore>( // Before updating the in-memory split value, we flush it to disk first, so that should the // OS process die at this point, we pick up from the right place after a restart. let split = Split { - slot: frozen_head.slot(), - state_root: frozen_head_root, + slot: finalized_state.slot(), + state_root: finalized_state_root, }; store.hot_db.put_sync(&SPLIT_KEY, &split)?; @@ -1738,10 +2250,17 @@ pub fn migrate_database, Cold: ItemStore>( // Delete the states from the hot database if we got this far. store.do_atomically(hot_db_ops)?; + // Update the cache's view of the finalized state. + store.update_finalized_state( + finalized_state_root, + finalized_block_root, + finalized_state.clone(), + )?; + debug!( store.log, "Freezer migration complete"; - "slot" => frozen_head.slot() + "slot" => finalized_state.slot() ); Ok(()) @@ -1750,8 +2269,8 @@ pub fn migrate_database, Cold: ItemStore>( /// Struct for storing the split slot and state root in the database. #[derive(Debug, Clone, Copy, PartialEq, Default, Encode, Decode, Deserialize, Serialize)] pub struct Split { - pub(crate) slot: Slot, - pub(crate) state_root: Hash256, + pub slot: Slot, + pub state_root: Hash256, } impl StoreItem for Split { @@ -1759,8 +2278,8 @@ impl StoreItem for Split { DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -1768,54 +2287,89 @@ impl StoreItem for Split { } } -/// Type hint. -fn no_state_root_iter() -> Option>> { - None -} - /// Struct for summarising a state in the hot database. /// /// Allows full reconstruction by replaying blocks. -#[derive(Debug, Clone, Copy, Default, Encode, Decode)] +// FIXME(sproul): change to V20 +#[superstruct( + variants(V1, V10), + variant_attributes(derive(Debug, Clone, Copy, Default, Encode, Decode)), + no_enum +)] pub struct HotStateSummary { pub slot: Slot, pub latest_block_root: Hash256, - epoch_boundary_state_root: Hash256, + /// The state root of a state prior to this state with respect to which this state's diff is + /// stored. + /// + /// Set to 0 if this state *is not* stored as a diff. 
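// The V10 summary here is fixed-size SSZ: slot (8) + latest_block_root (32) +
// diff_base_state_root (32) + diff_base_slot (8) + prev_state_root (32) = 112
// bytes, so a schema migration could (hypothetically) tell upgraded summaries
// apart by length alone:
fn is_v10_summary_sketch(bytes: &[u8]) -> bool {
    bytes.len() == 112
}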
+ /// + /// Formerly known as the `epoch_boundary_state_root`. + pub diff_base_state_root: Hash256, + /// The slot of the state with `diff_base_state_root`, or 0 if no diff is stored. + pub diff_base_slot: Slot, + /// The state root of the state at the prior slot. + #[superstruct(only(V10))] + pub prev_state_root: Hash256, } -impl StoreItem for HotStateSummary { - fn db_column() -> DBColumn { - DBColumn::BeaconStateSummary - } +pub type HotStateSummary = HotStateSummaryV10; - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } +macro_rules! impl_store_item_summary { + ($t:ty) => { + impl StoreItem for $t { + fn db_column() -> DBColumn { + DBColumn::BeaconStateSummary + } - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self::from_ssz_bytes(bytes)?) - } + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) + } + } + }; } +impl_store_item_summary!(HotStateSummaryV1); +impl_store_item_summary!(HotStateSummaryV10); impl HotStateSummary { /// Construct a new summary of the given state. - pub fn new(state_root: &Hash256, state: &BeaconState) -> Result { + pub fn new( + state_root: &Hash256, + state: &BeaconState, + diff_base_slot: Option, + ) -> Result { // Fill in the state root on the latest block header if necessary (this happens on all // slots where there isn't a skip). + let slot = state.slot(); let latest_block_root = state.get_latest_block_root(*state_root); - let epoch_boundary_slot = state.slot() / E::slots_per_epoch() * E::slots_per_epoch(); - let epoch_boundary_state_root = if epoch_boundary_slot == state.slot() { - *state_root + + // Set the diff state root as appropriate. + let diff_base_state_root = if let Some(base_slot) = diff_base_slot { + *state + .get_state_root(base_slot) + .map_err(HotColdDBError::HotStateSummaryError)? } else { + Hash256::zero() + }; + + let prev_state_root = if let Ok(prev_slot) = slot.safe_sub(1) { *state - .get_state_root(epoch_boundary_slot) + .get_state_root(prev_slot) .map_err(HotColdDBError::HotStateSummaryError)? 
+ } else { + Hash256::zero() }; Ok(HotStateSummary { - slot: state.slot(), + slot, latest_block_root, - epoch_boundary_state_root, + diff_base_state_root, + diff_base_slot: diff_base_slot.unwrap_or(Slot::new(0)), + prev_state_root, }) } } @@ -1831,8 +2385,8 @@ impl StoreItem for ColdStateSummary { DBColumn::BeaconStateSummary } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -1851,8 +2405,8 @@ impl StoreItem for RestorePointHash { DBColumn::BeaconRestorePoint } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -1868,8 +2422,8 @@ impl StoreItem for TemporaryFlag { DBColumn::BeaconStateTemporary } - fn as_store_bytes(&self) -> Vec { - vec![] + fn as_store_bytes(&self) -> Result, Error> { + Ok(vec![]) } fn from_store_bytes(_: &[u8]) -> Result { diff --git a/beacon_node/store/src/hot_state_iter.rs b/beacon_node/store/src/hot_state_iter.rs new file mode 100644 index 00000000000..22ecf1dadfd --- /dev/null +++ b/beacon_node/store/src/hot_state_iter.rs @@ -0,0 +1,50 @@ +use crate::{hot_cold_store::HotColdDBError, Error, HotColdDB, HotStateSummary, ItemStore}; +use types::{EthSpec, Hash256, Slot}; + +pub struct HotStateRootIter<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> { + store: &'a HotColdDB, + next_slot: Slot, + next_state_root: Hash256, +} + +impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> HotStateRootIter<'a, E, Hot, Cold> { + pub fn new( + store: &'a HotColdDB, + next_slot: Slot, + next_state_root: Hash256, + ) -> Self { + Self { + store, + next_slot, + next_state_root, + } + } + + fn do_next(&mut self) -> Result, Error> { + if self.next_state_root.is_zero() { + return Ok(None); + } + + let summary = self + .store + .load_hot_state_summary(&self.next_state_root)? 
+ .ok_or(HotColdDBError::MissingHotStateSummary(self.next_state_root))?; + + let state_root = self.next_state_root; + + self.next_state_root = summary.prev_state_root; + self.next_slot -= 1; + + Ok(Some((state_root, summary))) + } +} + +impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> Iterator + for HotStateRootIter<'a, E, Hot, Cold> +{ + type Item = Result<(Hash256, HotStateSummary), Error>; + + fn next(&mut self) -> Option { + self.do_next().transpose() + } +} diff --git a/beacon_node/store/src/impls.rs b/beacon_node/store/src/impls.rs index 736585a72aa..b2af9a408ef 100644 --- a/beacon_node/store/src/impls.rs +++ b/beacon_node/store/src/impls.rs @@ -1,2 +1,3 @@ pub mod beacon_state; pub mod execution_payload; +pub mod frozen_block_slot; diff --git a/beacon_node/store/src/impls/beacon_state.rs b/beacon_node/store/src/impls/beacon_state.rs index 88d1d2d7a16..c914d52cd3f 100644 --- a/beacon_node/store/src/impls/beacon_state.rs +++ b/beacon_node/store/src/impls/beacon_state.rs @@ -1,62 +1,83 @@ use crate::*; -use ssz::{DecodeError, Encode}; +use ssz::Encode; use ssz_derive::Encode; -use std::convert::TryInto; -use types::beacon_state::{CloneConfig, CommitteeCache, CACHED_EPOCHS}; +use std::io::{Read, Write}; +use std::sync::Arc; +use types::{CompactBeaconState, PublicKeyBytes}; +use zstd::{Decoder, Encoder}; pub fn store_full_state( state_root: &Hash256, state: &BeaconState, ops: &mut Vec, + config: &StoreConfig, ) -> Result<(), Error> { let bytes = { let _overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_WRITE_OVERHEAD_TIMES); StorageContainer::new(state).as_ssz_bytes() }; - metrics::inc_counter_by(&metrics::BEACON_STATE_WRITE_BYTES, bytes.len() as u64); + let mut compressed_value = Vec::with_capacity(config.estimate_compressed_size(bytes.len())); + let mut encoder = Encoder::new(&mut compressed_value, config.compression_level) + .map_err(Error::Compression)?; + encoder.write_all(&bytes).map_err(Error::Compression)?; + encoder.finish().map_err(Error::Compression)?; + + metrics::inc_counter_by( + &metrics::BEACON_STATE_WRITE_BYTES, + compressed_value.len() as u64, + ); metrics::inc_counter(&metrics::BEACON_STATE_WRITE_COUNT); + let key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); - ops.push(KeyValueStoreOp::PutKeyValue(key, bytes)); + ops.push(KeyValueStoreOp::PutKeyValue(key, compressed_value)); Ok(()) } -pub fn get_full_state, E: EthSpec>( +pub fn get_full_state, E: EthSpec, F>( db: &KV, state_root: &Hash256, + immutable_validators: F, + config: &StoreConfig, spec: &ChainSpec, -) -> Result>, Error> { +) -> Result>, Error> +where + F: Fn(usize) -> Option>, +{ let total_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_TIMES); match db.get_bytes(DBColumn::BeaconState.into(), state_root.as_bytes())? 
{ Some(bytes) => { + let mut ssz_bytes = Vec::with_capacity(config.estimate_decompressed_size(bytes.len())); + let mut decoder = Decoder::new(&*bytes).map_err(Error::Compression)?; + decoder + .read_to_end(&mut ssz_bytes) + .map_err(Error::Compression)?; + let overhead_timer = metrics::start_timer(&metrics::BEACON_STATE_READ_OVERHEAD_TIMES); - let container = StorageContainer::from_ssz_bytes(&bytes, spec)?; + let container = StorageContainer::from_ssz_bytes(&ssz_bytes, spec)?; metrics::stop_timer(overhead_timer); metrics::stop_timer(total_timer); metrics::inc_counter(&metrics::BEACON_STATE_READ_COUNT); metrics::inc_counter_by(&metrics::BEACON_STATE_READ_BYTES, bytes.len() as u64); - Ok(Some(container.try_into()?)) + Ok(Some(container.into_beacon_state(immutable_validators)?)) } None => Ok(None), } } /// A container for storing `BeaconState` components. -// TODO: would be more space efficient with the caches stored separately and referenced by hash #[derive(Encode)] pub struct StorageContainer { - state: BeaconState, - committee_caches: Vec, + state: CompactBeaconState, } impl StorageContainer { /// Create a new instance for storing a `BeaconState`. pub fn new(state: &BeaconState) -> Self { Self { - state: state.clone_with(CloneConfig::none()), - committee_caches: state.committee_caches().to_vec(), + state: state.clone().into_compact_state(), } } @@ -66,36 +87,20 @@ impl StorageContainer { let mut builder = ssz::SszDecoderBuilder::new(bytes); builder.register_anonymous_variable_length_item()?; - builder.register_type::>()?; let mut decoder = builder.build()?; - let state = decoder.decode_next_with(|bytes| BeaconState::from_ssz_bytes(bytes, spec))?; - let committee_caches = decoder.decode_next()?; + let state = + decoder.decode_next_with(|bytes| CompactBeaconState::from_ssz_bytes(bytes, spec))?; - Ok(Self { - state, - committee_caches, - }) + Ok(Self { state }) } -} - -impl TryInto> for StorageContainer { - type Error = Error; - - fn try_into(mut self) -> Result, Error> { - let mut state = self.state; - - for i in (0..CACHED_EPOCHS).rev() { - if i >= self.committee_caches.len() { - return Err(Error::SszDecodeError(DecodeError::BytesInvalid( - "Insufficient committees for BeaconState".to_string(), - ))); - }; - - state.committee_caches_mut()[i] = self.committee_caches.remove(i); - } + fn into_beacon_state(self, immutable_validators: F) -> Result, Error> + where + F: Fn(usize) -> Option>, + { + let state = self.state.try_into_full_state(immutable_validators)?; Ok(state) } } diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index b5753f3797e..a2855294c7f 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -9,8 +9,8 @@ macro_rules! 
impl_store_item { DBColumn::ExecPayload } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -31,8 +31,8 @@ impl StoreItem for ExecutionPayload { DBColumn::ExecPayload } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { diff --git a/beacon_node/store/src/impls/frozen_block_slot.rs b/beacon_node/store/src/impls/frozen_block_slot.rs new file mode 100644 index 00000000000..13dea827641 --- /dev/null +++ b/beacon_node/store/src/impls/frozen_block_slot.rs @@ -0,0 +1,19 @@ +use crate::{DBColumn, Error, StoreItem}; +use ssz::{Decode, Encode}; +use types::Slot; + +pub struct FrozenBlockSlot(pub Slot); + +impl StoreItem for FrozenBlockSlot { + fn db_column() -> DBColumn { + DBColumn::BeaconBlock + } + + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.0.as_ssz_bytes()) + } + + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(FrozenBlockSlot(Slot::from_ssz_bytes(bytes)?)) + } +} diff --git a/beacon_node/store/src/iter.rs b/beacon_node/store/src/iter.rs index 07c99e5a4ef..dc7aa02a5a0 100644 --- a/beacon_node/store/src/iter.rs +++ b/beacon_node/store/src/iter.rs @@ -189,7 +189,7 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> RootsIterator<'a, T, block_hash: Hash256, ) -> Result { let block = store - .get_blinded_block(&block_hash)? + .get_blinded_block(&block_hash, None)? .ok_or_else(|| BeaconStateError::MissingBeaconBlock(block_hash.into()))?; let state = store .get_state(&block.state_root(), Some(block.slot()))? @@ -286,7 +286,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> let block = if self.decode_any_variant { self.store.get_block_any_variant(&block_root) } else { - self.store.get_blinded_block(&block_root) + self.store.get_blinded_block(&block_root, None) }? .ok_or(Error::BlockNotFound(block_root))?; self.next_block_root = block.message().parent_root(); @@ -329,7 +329,8 @@ impl<'a, T: EthSpec, Hot: ItemStore, Cold: ItemStore> BlockIterator<'a, T, fn do_next(&mut self) -> Result>>, Error> { if let Some(result) = self.roots.next() { let (root, _slot) = result?; - self.roots.inner.store.get_blinded_block(&root) + // Don't use slot hint here as it could be a skipped slot. 
+ self.roots.inner.store.get_blinded_block(&root, None) } else { Ok(None) } @@ -413,15 +414,15 @@ mod test { let mut hashes = (0..).map(Hash256::from_low_u64_be); let roots_a = state_a.block_roots_mut(); for i in 0..roots_a.len() { - roots_a[i] = hashes.next().unwrap() + *roots_a.get_mut(i).unwrap() = hashes.next().unwrap() } let roots_b = state_b.block_roots_mut(); for i in 0..roots_b.len() { - roots_b[i] = hashes.next().unwrap() + *roots_b.get_mut(i).unwrap() = hashes.next().unwrap() } let state_a_root = hashes.next().unwrap(); - state_b.state_roots_mut()[0] = state_a_root; + *state_b.state_roots_mut().get_mut(0).unwrap() = state_a_root; store.put_state(&state_a_root, &state_a).unwrap(); let iter = BlockRootsIterator::new(&store, &state_b); diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 86bd4ffaccd..47e26aca317 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -1,7 +1,6 @@ use super::*; use crate::hot_cold_store::HotColdDBError; use crate::metrics; -use db_key::Key; use leveldb::compaction::Compaction; use leveldb::database::batch::{Batch, Writebatch}; use leveldb::database::kv::KV; @@ -27,6 +26,7 @@ impl LevelDB { let mut options = Options::new(); options.create_if_missing = true; + options.write_buffer_size = Some(512 * 1024 * 1024); let db = Database::open(path, options)?; let transaction_mutex = Mutex::new(()); @@ -168,8 +168,9 @@ impl KeyValueStore for LevelDB { }; for (start_key, end_key) in vec![ - endpoints(DBColumn::BeaconStateTemporary), endpoints(DBColumn::BeaconState), + endpoints(DBColumn::BeaconStateDiff), + endpoints(DBColumn::BeaconStateSummary), ] { self.db.compact(&start_key, &end_key); } @@ -177,9 +178,12 @@ impl KeyValueStore for LevelDB { } /// Iterate through all keys and values in a particular column. - fn iter_column(&self, column: DBColumn) -> ColumnIter { - let start_key = - BytesKey::from_vec(get_key_for_col(column.into(), Hash256::zero().as_bytes())); + fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { + let start_key = BytesKey::from_vec(get_key_for_col(column.into(), &from)); let iter = self.db.iter(self.read_options()); iter.seek(&start_key); @@ -187,13 +191,12 @@ impl KeyValueStore for LevelDB { Box::new( iter.take_while(move |(key, _)| key.matches_column(column)) .map(move |(bytes_key, value)| { - let key = - bytes_key - .remove_column(column) - .ok_or(HotColdDBError::IterationError { - unexpected_key: bytes_key, - })?; - Ok((key, value)) + let key = bytes_key.remove_column_variable(column).ok_or_else(|| { + HotColdDBError::IterationError { + unexpected_key: bytes_key.clone(), + } + })?; + Ok((K::from_bytes(key)?, value)) }), ) } @@ -224,12 +227,12 @@ impl KeyValueStore for LevelDB { impl ItemStore for LevelDB {} /// Used for keying leveldb. -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct BytesKey { key: Vec, } -impl Key for BytesKey { +impl db_key::Key for BytesKey { fn from_u8(key: &[u8]) -> Self { Self { key: key.to_vec() } } @@ -245,12 +248,20 @@ impl BytesKey { self.key.starts_with(column.as_bytes()) } - /// Remove the column from a key, returning its `Hash256` portion. + /// Remove the column from a 32 byte key, yielding the `Hash256` key. 
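// Sketch of the flat key layout that the seek-based `iter_column_from` above
// relies on: every key is the column's short tag followed by either a 32-byte
// root or an 8-byte big-endian integer, per `DBColumn::key_size()`.
fn demo_key_layout() {
    // Hypothetical snapshot key for epoch 5 in the "bsn" column:
    let key = get_key_for_col("bsn", &5u64.to_be_bytes());
    assert_eq!(key.len(), 3 + 8);
    assert_eq!(&key[..3], &b"bsn"[..]);
}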
pub fn remove_column(&self, column: DBColumn) -> Option { + let key = self.remove_column_variable(column)?; + (column.key_size() == 32).then(|| Hash256::from_slice(key)) + } + + /// Remove the column from a key. + /// + /// Will return `None` if the value doesn't match the column or has the wrong length. + pub fn remove_column_variable(&self, column: DBColumn) -> Option<&[u8]> { if self.matches_column(column) { let subkey = &self.key[column.as_bytes().len()..]; - if subkey.len() == 32 { - return Some(Hash256::from_slice(subkey)); + if subkey.len() == column.key_size() { + return Some(subkey); } } None diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index ee01fa1ae15..3273d0b4a6f 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -10,30 +10,28 @@ #[macro_use] extern crate lazy_static; -mod chunk_writer; -pub mod chunked_iter; -pub mod chunked_vector; pub mod config; pub mod errors; mod forwards_iter; mod garbage_collection; +pub mod hdiff; pub mod hot_cold_store; +mod hot_state_iter; mod impls; mod leveldb_store; mod memory_store; pub mod metadata; pub mod metrics; -mod partial_beacon_state; pub mod reconstruct; +mod state_cache; +pub mod validator_pubkey_cache; pub mod iter; -pub use self::chunk_writer::ChunkWriter; pub use self::config::StoreConfig; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; pub use self::leveldb_store::LevelDB; pub use self::memory_store::MemoryStore; -pub use self::partial_beacon_state::PartialBeaconState; pub use errors::Error; pub use impls::beacon_state::StorageContainer as BeaconStateStorageContainer; pub use metadata::AnchorInfo; @@ -42,8 +40,9 @@ use parking_lot::MutexGuard; use std::sync::Arc; use strum::{EnumString, IntoStaticStr}; pub use types::*; +pub use validator_pubkey_cache::ValidatorPubkeyCache; -pub type ColumnIter<'a> = Box), Error>> + 'a>; +pub type ColumnIter<'a, K> = Box), Error>> + 'a>; pub type ColumnKeyIter<'a> = Box> + 'a>; pub trait KeyValueStore: Sync + Send + Sized + 'static { @@ -80,7 +79,11 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { fn compact(&self) -> Result<(), Error>; /// Iterate through all keys and values in a particular column. - fn iter_column(&self, _column: DBColumn) -> ColumnIter { + fn iter_column(&self, column: DBColumn) -> ColumnIter { + self.iter_column_from(column, &vec![0; column.key_size()]) + } + + fn iter_column_from(&self, _column: DBColumn, _from: &[u8]) -> ColumnIter { // Default impl for non LevelDB databases Box::new(std::iter::empty()) } @@ -92,6 +95,26 @@ pub trait KeyValueStore: Sync + Send + Sized + 'static { } } +pub trait Key: Sized + 'static { + fn from_bytes(key: &[u8]) -> Result; +} + +impl Key for Hash256 { + fn from_bytes(key: &[u8]) -> Result { + if key.len() == 32 { + Ok(Hash256::from_slice(key)) + } else { + Err(Error::InvalidKey) + } + } +} + +impl Key for Vec { + fn from_bytes(key: &[u8]) -> Result { + Ok(key.to_vec()) + } +} + pub fn get_key_for_col(column: &str, key: &[u8]) -> Vec { let mut result = column.as_bytes().to_vec(); result.extend_from_slice(key); @@ -110,7 +133,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati let column = I::db_column().into(); let key = key.as_bytes(); - self.put_bytes(column, key, &item.as_store_bytes()) + self.put_bytes(column, key, &item.as_store_bytes()?) 
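// The new `K: Key` bound on column iterators (trait defined above) lets 8-byte
// columns decode straight to a typed key rather than a `Hash256`; a hypothetical
// example for big-endian slot keys:
use std::convert::TryInto;

struct SlotKeySketch(u64);

impl Key for SlotKeySketch {
    fn from_bytes(key: &[u8]) -> Result<Self, Error> {
        let bytes: [u8; 8] = key.try_into().map_err(|_| Error::InvalidKey)?;
        Ok(SlotKeySketch(u64::from_be_bytes(bytes)))
    }
}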
.map_err(Into::into) } @@ -118,7 +141,7 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati let column = I::db_column().into(); let key = key.as_bytes(); - self.put_bytes_sync(column, key, &item.as_store_bytes()) + self.put_bytes_sync(column, key, &item.as_store_bytes()?) .map_err(Into::into) } @@ -155,7 +178,6 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati pub enum StoreOp<'a, E: EthSpec> { PutBlock(Hash256, Arc>), PutState(Hash256, &'a BeaconState), - PutStateSummary(Hash256, HotStateSummary), PutStateTemporaryFlag(Hash256), DeleteStateTemporaryFlag(Hash256), DeleteBlock(Hash256), @@ -170,11 +192,28 @@ pub enum DBColumn { /// For data related to the database itself. #[strum(serialize = "bma")] BeaconMeta, + /// Data related to blocks. + /// + /// - Key: `Hash256` block root. + /// - Value in hot DB: SSZ-encoded blinded block. + /// - Value in cold DB: 8-byte slot of block. #[strum(serialize = "blk")] BeaconBlock, + /// Frozen beacon blocks. + /// + /// - Key: 8-byte slot. + /// - Value: ZSTD-compressed SSZ-encoded blinded block. + #[strum(serialize = "bbf")] + BeaconBlockFrozen, /// For full `BeaconState`s in the hot database (finalized or fork-boundary states). #[strum(serialize = "ste")] BeaconState, + /// For beacon state snapshots in the freezer DB. + #[strum(serialize = "bsn")] + BeaconStateSnapshot, + /// For compact `BeaconStateDiff`s in the freezer DB. + #[strum(serialize = "bsd")] + BeaconStateDiff, /// For the mapping from state roots to their slots or summaries. #[strum(serialize = "bss")] BeaconStateSummary, @@ -196,7 +235,7 @@ pub enum DBColumn { ForkChoice, #[strum(serialize = "pkc")] PubkeyCache, - /// For the table mapping restore point numbers to state roots. + /// For the legacy table mapping restore point numbers to state roots. #[strum(serialize = "brp")] BeaconRestorePoint, #[strum(serialize = "bbr")] @@ -230,6 +269,36 @@ impl DBColumn { pub fn as_bytes(self) -> &'static [u8] { self.as_str().as_bytes() } + + /// Most database keys are 32 bytes, but some freezer DB keys are 8 bytes. + /// + /// This function returns the number of bytes used by keys in a given column. + pub fn key_size(self) -> usize { + match self { + Self::BeaconMeta + | Self::BeaconBlock + | Self::BeaconState + | Self::BeaconStateSummary + | Self::BeaconStateTemporary + | Self::ExecPayload + | Self::BeaconChain + | Self::OpPool + | Self::Eth1Cache + | Self::ForkChoice + | Self::PubkeyCache + | Self::BeaconRestorePoint + | Self::DhtEnrs + | Self::OptimisticTransitionBlock => 32, + Self::BeaconBlockRoots + | Self::BeaconStateRoots + | Self::BeaconHistoricalRoots + | Self::BeaconHistoricalSummaries + | Self::BeaconRandaoMixes + | Self::BeaconBlockFrozen + | Self::BeaconStateSnapshot + | Self::BeaconStateDiff => 8, + } + } } /// An item that may stored in a `Store` by serializing and deserializing from bytes. @@ -238,16 +307,16 @@ pub trait StoreItem: Sized { fn db_column() -> DBColumn; /// Serialize `self` as bytes. - fn as_store_bytes(&self) -> Vec; + fn as_store_bytes(&self) -> Result, Error>; /// De-serialize `self` from bytes. /// /// Return an instance of the type and the number of bytes that were read. 
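// `as_store_bytes` returning `Result` (as consumed by `put_item` above) permits
// `StoreItem` impls whose serialization can fail, e.g. because it compresses.
// A minimal sketch with a hypothetical wrapper type:
use std::io::{Read, Write};

struct CompressedBytesSketch(Vec<u8>);

impl StoreItem for CompressedBytesSketch {
    fn db_column() -> DBColumn {
        DBColumn::BeaconState
    }

    fn as_store_bytes(&self) -> Result<Vec<u8>, Error> {
        let mut out = Vec::new();
        let mut encoder = zstd::Encoder::new(&mut out, 1).map_err(Error::Compression)?;
        encoder.write_all(&self.0).map_err(Error::Compression)?;
        encoder.finish().map_err(Error::Compression)?;
        Ok(out)
    }

    fn from_store_bytes(bytes: &[u8]) -> Result<Self, Error> {
        let mut data = Vec::new();
        let mut decoder = zstd::Decoder::new(bytes).map_err(Error::Compression)?;
        decoder.read_to_end(&mut data).map_err(Error::Compression)?;
        Ok(Self(data))
    }
}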
fn from_store_bytes(bytes: &[u8]) -> Result; - fn as_kv_store_op(&self, key: Hash256) -> KeyValueStoreOp { + fn as_kv_store_op(&self, key: Hash256) -> Result { let db_key = get_key_for_col(Self::db_column().into(), key.as_bytes()); - KeyValueStoreOp::PutKeyValue(db_key, self.as_store_bytes()) + Ok(KeyValueStoreOp::PutKeyValue(db_key, self.as_store_bytes()?)) } } @@ -269,8 +338,8 @@ mod tests { DBColumn::BeaconBlock } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { diff --git a/beacon_node/store/src/memory_store.rs b/beacon_node/store/src/memory_store.rs index 1473f59a4e9..f7c50d6518a 100644 --- a/beacon_node/store/src/memory_store.rs +++ b/beacon_node/store/src/memory_store.rs @@ -1,5 +1,4 @@ -use super::{Error, ItemStore, KeyValueStore, KeyValueStoreOp}; -use crate::{ColumnIter, DBColumn}; +use crate::{ColumnIter, DBColumn, Error, ItemStore, Key, KeyValueStore, KeyValueStoreOp}; use parking_lot::{Mutex, MutexGuard, RwLock}; use std::collections::{HashMap, HashSet}; use std::marker::PhantomData; @@ -94,8 +93,7 @@ impl KeyValueStore for MemoryStore { Ok(()) } - // pub type ColumnIter<'a> = Box), Error>> + 'a>; - fn iter_column(&self, column: DBColumn) -> ColumnIter { + fn iter_column(&self, column: DBColumn) -> ColumnIter { let col = column.as_str(); if let Some(keys) = self .col_keys @@ -104,10 +102,11 @@ impl KeyValueStore for MemoryStore { .map(|set| set.iter().cloned().collect::>()) { Box::new(keys.into_iter().filter_map(move |key| { - let hash = Hash256::from_slice(&key); - self.get_bytes(col, &key) - .transpose() - .map(|res| res.map(|bytes| (hash, bytes))) + self.get_bytes(col, &key).transpose().map(|res| { + let k = K::from_bytes(&key)?; + let v = res?; + Ok((k, v)) + }) })) } else { Box::new(std::iter::empty()) diff --git a/beacon_node/store/src/metadata.rs b/beacon_node/store/src/metadata.rs index 6f50d7038f8..ede9eed90c3 100644 --- a/beacon_node/store/src/metadata.rs +++ b/beacon_node/store/src/metadata.rs @@ -4,7 +4,7 @@ use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; use types::{Checkpoint, Hash256, Slot}; -pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(17); +pub const CURRENT_SCHEMA_VERSION: SchemaVersion = SchemaVersion(23); // All the keys that get stored under the `BeaconMeta` column. 
// @@ -30,8 +30,8 @@ impl StoreItem for SchemaVersion { DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.0.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -52,8 +52,8 @@ impl StoreItem for PruningCheckpoint { DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec { - self.checkpoint.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.checkpoint.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -71,8 +71,8 @@ impl StoreItem for CompactionTimestamp { DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.0.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { @@ -111,8 +111,8 @@ impl StoreItem for AnchorInfo { DBColumn::BeaconMeta } - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } fn from_store_bytes(bytes: &[u8]) -> Result { diff --git a/beacon_node/store/src/metrics.rs b/beacon_node/store/src/metrics.rs index 72c5e61969e..326a111920f 100644 --- a/beacon_node/store/src/metrics.rs +++ b/beacon_node/store/src/metrics.rs @@ -54,17 +54,13 @@ lazy_static! { "store_beacon_state_hot_get_total", "Total number of hot beacon states requested from the store (cache or DB)" ); - pub static ref BEACON_STATE_CACHE_HIT_COUNT: Result = try_create_int_counter( - "store_beacon_state_cache_hit_total", - "Number of hits to the store's state cache" - ); - pub static ref BEACON_STATE_CACHE_CLONE_TIME: Result = try_create_histogram( - "store_beacon_state_cache_clone_time", - "Time to load a beacon block from the block cache" - ); pub static ref BEACON_STATE_READ_TIMES: Result = try_create_histogram( "store_beacon_state_read_seconds", - "Total time required to read a BeaconState from the database" + "Total time required to read a full BeaconState from the database" + ); + pub static ref BEACON_HOT_STATE_READ_TIMES: Result = try_create_histogram( + "store_beacon_hot_state_read_seconds", + "Total time required to read a hot BeaconState from the database" ); pub static ref BEACON_STATE_READ_OVERHEAD_TIMES: Result = try_create_histogram( "store_beacon_state_read_overhead_seconds", @@ -90,6 +86,33 @@ lazy_static! 
{ "store_beacon_state_write_bytes_total", "Total number of beacon state bytes written to the DB" ); + /* + * Beacon state diffs + */ + pub static ref BEACON_STATE_DIFF_WRITE_BYTES: Result = try_create_int_counter( + "store_beacon_state_diff_write_bytes_total", + "Total number of bytes written for beacon state diffs" + ); + pub static ref BEACON_STATE_DIFF_WRITE_COUNT: Result = try_create_int_counter( + "store_beacon_state_diff_write_count_total", + "Total number of beacon state diffs written" + ); + pub static ref BEACON_STATE_DIFF_COMPRESSION_RATIO: Result = try_create_float_gauge( + "store_beacon_state_diff_compression_ratio", + "Compression ratio for beacon state diffs (higher is better)" + ); + pub static ref BEACON_STATE_DIFF_COMPUTE_TIME: Result = try_create_histogram( + "store_beacon_state_diff_compute_time", + "Time to calculate a beacon state diff" + ); + pub static ref BEACON_STATE_DIFF_ENCODE_TIME: Result = try_create_histogram( + "store_beacon_state_diff_encode_time", + "Time to encode a beacon state diff as SSZ" + ); + pub static ref BEACON_STATE_DIFF_COMPRESSION_TIME: Result = try_create_histogram( + "store_beacon_state_diff_compression_time", + "Time to compress beacon state SSZ using Flate2" + ); /* * Beacon Block */ diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs deleted file mode 100644 index cd923da40dc..00000000000 --- a/beacon_node/store/src/partial_beacon_state.rs +++ /dev/null @@ -1,456 +0,0 @@ -use crate::chunked_vector::{ - load_variable_list_from_db, load_vector_from_db, BlockRoots, HistoricalRoots, - HistoricalSummaries, RandaoMixes, StateRoots, -}; -use crate::{get_key_for_col, DBColumn, Error, KeyValueStore, KeyValueStoreOp}; -use ssz::{Decode, DecodeError, Encode}; -use ssz_derive::{Decode, Encode}; -use std::convert::TryInto; -use std::sync::Arc; -use types::historical_summary::HistoricalSummary; -use types::superstruct; -use types::*; - -/// Lightweight variant of the `BeaconState` that is stored in the database. -/// -/// Utilises lazy-loading from separate storage for its vector fields. -#[superstruct( - variants(Base, Altair, Merge, Capella), - variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) -)] -#[derive(Debug, PartialEq, Clone, Encode)] -#[ssz(enum_behaviour = "transparent")] -pub struct PartialBeaconState -where - T: EthSpec, -{ - // Versioning - pub genesis_time: u64, - pub genesis_validators_root: Hash256, - #[superstruct(getter(copy))] - pub slot: Slot, - pub fork: Fork, - - // History - pub latest_block_header: BeaconBlockHeader, - - #[ssz(skip_serializing, skip_deserializing)] - pub block_roots: Option>, - #[ssz(skip_serializing, skip_deserializing)] - pub state_roots: Option>, - - #[ssz(skip_serializing, skip_deserializing)] - pub historical_roots: Option>, - - // Ethereum 1.0 chain data - pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, - pub eth1_deposit_index: u64, - - // Registry - pub validators: VariableList, - pub balances: VariableList, - - // Shuffling - /// Randao value from the current slot, for patching into the per-epoch randao vector. 
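// Hypothetical call site for the compression-ratio gauge defined above; the ratio
// is uncompressed bytes over compressed bytes, so higher means better compression
// (assumes a `set_float_gauge` helper in the metrics crate):
fn observe_diff_compression_sketch(uncompressed_len: usize, compressed_len: usize) {
    metrics::set_float_gauge(
        &metrics::BEACON_STATE_DIFF_COMPRESSION_RATIO,
        uncompressed_len as f64 / compressed_len.max(1) as f64,
    );
}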
- pub latest_randao_value: Hash256, - #[ssz(skip_serializing, skip_deserializing)] - pub randao_mixes: Option>, - - // Slashings - slashings: FixedVector, - - // Attestations (genesis fork only) - #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, - #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, - - // Participation (Altair and later) - #[superstruct(only(Altair, Merge, Capella))] - pub previous_epoch_participation: VariableList, - #[superstruct(only(Altair, Merge, Capella))] - pub current_epoch_participation: VariableList, - - // Finality - pub justification_bits: BitVector, - pub previous_justified_checkpoint: Checkpoint, - pub current_justified_checkpoint: Checkpoint, - pub finalized_checkpoint: Checkpoint, - - // Inactivity - #[superstruct(only(Altair, Merge, Capella))] - pub inactivity_scores: VariableList, - - // Light-client sync committees - #[superstruct(only(Altair, Merge, Capella))] - pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Merge, Capella))] - pub next_sync_committee: Arc>, - - // Execution - #[superstruct( - only(Merge), - partial_getter(rename = "latest_execution_payload_header_merge") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, - #[superstruct( - only(Capella), - partial_getter(rename = "latest_execution_payload_header_capella") - )] - pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, - - // Capella - #[superstruct(only(Capella))] - pub next_withdrawal_index: u64, - #[superstruct(only(Capella))] - pub next_withdrawal_validator_index: u64, - - #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella))] - pub historical_summaries: Option>, -} - -/// Implement the conversion function from BeaconState -> PartialBeaconState. -macro_rules! impl_from_state_forgetful { - ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_fields_opt:ident),*]) => { - PartialBeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $s.genesis_time, - genesis_validators_root: $s.genesis_validators_root, - slot: $s.slot, - fork: $s.fork, - - // History - latest_block_header: $s.latest_block_header.clone(), - block_roots: None, - state_roots: None, - historical_roots: None, - - // Eth1 - eth1_data: $s.eth1_data.clone(), - eth1_data_votes: $s.eth1_data_votes.clone(), - eth1_deposit_index: $s.eth1_deposit_index, - - // Validator registry - validators: $s.validators.clone(), - balances: $s.balances.clone(), - - // Shuffling - latest_randao_value: *$outer - .get_randao_mix($outer.current_epoch()) - .expect("randao at current epoch is OK"), - randao_mixes: None, - - // Slashings - slashings: $s.slashings.clone(), - - // Finality - justification_bits: $s.justification_bits.clone(), - previous_justified_checkpoint: $s.previous_justified_checkpoint, - current_justified_checkpoint: $s.current_justified_checkpoint, - finalized_checkpoint: $s.finalized_checkpoint, - - // Variant-specific fields - $( - $extra_fields: $s.$extra_fields.clone() - ),*, - - // Variant-specific optional - $( - $extra_fields_opt: None - ),* - }) - } -} - -impl PartialBeaconState { - /// Convert a `BeaconState` to a `PartialBeaconState`, while dropping the optional fields. 
- pub fn from_state_forgetful(outer: &BeaconState) -> Self { - match outer { - BeaconState::Base(s) => impl_from_state_forgetful!( - s, - outer, - Base, - PartialBeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations], - [] - ), - BeaconState::Altair(s) => impl_from_state_forgetful!( - s, - outer, - Altair, - PartialBeaconStateAltair, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ], - [] - ), - BeaconState::Merge(s) => impl_from_state_forgetful!( - s, - outer, - Merge, - PartialBeaconStateMerge, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ], - [] - ), - BeaconState::Capella(s) => impl_from_state_forgetful!( - s, - outer, - Capella, - PartialBeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - } - } - - /// SSZ decode. - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). - let slot_offset = ::ssz_fixed_len() + ::ssz_fixed_len(); - let slot_len = ::ssz_fixed_len(); - let slot_bytes = bytes.get(slot_offset..slot_offset + slot_len).ok_or( - DecodeError::InvalidByteLength { - len: bytes.len(), - expected: slot_offset + slot_len, - }, - )?; - - let slot = Slot::from_ssz_bytes(slot_bytes)?; - let fork_at_slot = spec.fork_name_at_slot::(slot); - - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) - } - - /// Prepare the partial state for storage in the KV database. 
- pub fn as_kv_store_op(&self, state_root: Hash256) -> KeyValueStoreOp { - let db_key = get_key_for_col(DBColumn::BeaconState.into(), state_root.as_bytes()); - KeyValueStoreOp::PutKeyValue(db_key, self.as_ssz_bytes()) - } - - pub fn load_block_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.block_roots().is_none() { - *self.block_roots_mut() = Some(load_vector_from_db::( - store, - self.slot(), - spec, - )?); - } - Ok(()) - } - - pub fn load_state_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.state_roots().is_none() { - *self.state_roots_mut() = Some(load_vector_from_db::( - store, - self.slot(), - spec, - )?); - } - Ok(()) - } - - pub fn load_historical_roots>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.historical_roots().is_none() { - *self.historical_roots_mut() = Some( - load_variable_list_from_db::(store, self.slot(), spec)?, - ); - } - Ok(()) - } - - pub fn load_historical_summaries>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - let slot = self.slot(); - if let Ok(historical_summaries) = self.historical_summaries_mut() { - if historical_summaries.is_none() { - *historical_summaries = - Some(load_variable_list_from_db::( - store, slot, spec, - )?); - } - } - Ok(()) - } - - pub fn load_randao_mixes>( - &mut self, - store: &S, - spec: &ChainSpec, - ) -> Result<(), Error> { - if self.randao_mixes().is_none() { - // Load the per-epoch values from the database - let mut randao_mixes = - load_vector_from_db::(store, self.slot(), spec)?; - - // Patch the value for the current slot into the index for the current epoch - let current_epoch = self.slot().epoch(T::slots_per_epoch()); - let len = randao_mixes.len(); - randao_mixes[current_epoch.as_usize() % len] = *self.latest_randao_value(); - - *self.randao_mixes_mut() = Some(randao_mixes) - } - Ok(()) - } -} - -/// Implement the conversion from PartialBeaconState -> BeaconState. -macro_rules! 
impl_try_into_beacon_state { - ($inner:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*], [$($extra_opt_fields:ident),*]) => { - BeaconState::$variant_name($struct_name { - // Versioning - genesis_time: $inner.genesis_time, - genesis_validators_root: $inner.genesis_validators_root, - slot: $inner.slot, - fork: $inner.fork, - - // History - latest_block_header: $inner.latest_block_header, - block_roots: unpack_field($inner.block_roots)?, - state_roots: unpack_field($inner.state_roots)?, - historical_roots: unpack_field($inner.historical_roots)?, - - // Eth1 - eth1_data: $inner.eth1_data, - eth1_data_votes: $inner.eth1_data_votes, - eth1_deposit_index: $inner.eth1_deposit_index, - - // Validator registry - validators: $inner.validators, - balances: $inner.balances, - - // Shuffling - randao_mixes: unpack_field($inner.randao_mixes)?, - - // Slashings - slashings: $inner.slashings, - - // Finality - justification_bits: $inner.justification_bits, - previous_justified_checkpoint: $inner.previous_justified_checkpoint, - current_justified_checkpoint: $inner.current_justified_checkpoint, - finalized_checkpoint: $inner.finalized_checkpoint, - - // Caching - total_active_balance: <_>::default(), - committee_caches: <_>::default(), - pubkey_cache: <_>::default(), - exit_cache: <_>::default(), - tree_hash_cache: <_>::default(), - - // Variant-specific fields - $( - $extra_fields: $inner.$extra_fields - ),*, - - // Variant-specific optional fields - $( - $extra_opt_fields: unpack_field($inner.$extra_opt_fields)? - ),* - }) - } -} - -fn unpack_field(x: Option) -> Result { - x.ok_or(Error::PartialBeaconStateError) -} - -impl TryInto> for PartialBeaconState { - type Error = Error; - - fn try_into(self) -> Result, Error> { - let state = match self { - PartialBeaconState::Base(inner) => impl_try_into_beacon_state!( - inner, - Base, - BeaconStateBase, - [previous_epoch_attestations, current_epoch_attestations], - [] - ), - PartialBeaconState::Altair(inner) => impl_try_into_beacon_state!( - inner, - Altair, - BeaconStateAltair, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores - ], - [] - ), - PartialBeaconState::Merge(inner) => impl_try_into_beacon_state!( - inner, - Merge, - BeaconStateMerge, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header - ], - [] - ), - PartialBeaconState::Capella(inner) => impl_try_into_beacon_state!( - inner, - Capella, - BeaconStateCapella, - [ - previous_epoch_participation, - current_epoch_participation, - current_sync_committee, - next_sync_committee, - inactivity_scores, - latest_execution_payload_header, - next_withdrawal_index, - next_withdrawal_validator_index - ], - [historical_summaries] - ), - }; - Ok(state) - } -} diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index cd50babdb0c..98d49f697fe 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -8,7 +8,7 @@ use state_processing::{ StateProcessingStrategy, VerifyBlockRoot, }; use std::sync::Arc; -use types::{EthSpec, Hash256}; +use types::EthSpec; impl HotColdDB where @@ -16,7 +16,10 @@ where Hot: ItemStore, Cold: ItemStore, { - pub fn reconstruct_historic_states(self: &Arc) -> Result<(), Error> { + pub fn reconstruct_historic_states( + self: &Arc, + num_blocks: Option, + ) -> Result<(), Error> { let mut anchor = 
if let Some(anchor) = self.get_anchor_info() {
            anchor
        } else {
@@ -37,26 +40,17 @@
            "start_slot" => anchor.state_lower_limit,
        );
 
-        let slots_per_restore_point = self.config.slots_per_restore_point;
-
         // Iterate blocks from the state lower limit to the upper limit.
-        let lower_limit_slot = anchor.state_lower_limit;
         let split = self.get_split_info();
-        let upper_limit_state = self.get_restore_point(
-            anchor.state_upper_limit.as_u64() / slots_per_restore_point,
-            &split,
-        )?;
-        let upper_limit_slot = upper_limit_state.slot();
-
-        // Use a dummy root, as we never read the block for the upper limit state.
-        let upper_limit_block_root = Hash256::repeat_byte(0xff);
-
-        let block_root_iter = self.forwards_block_roots_iterator(
-            lower_limit_slot,
-            upper_limit_state,
-            upper_limit_block_root,
-            &self.spec,
-        )?;
+        let lower_limit_slot = anchor.state_lower_limit;
+        let upper_limit_slot = std::cmp::min(split.slot, anchor.state_upper_limit);
+
+        // If `num_blocks` is not specified, iterate all blocks.
+        let block_root_iter = self
+            .forwards_block_roots_iterator_until(lower_limit_slot, upper_limit_slot - 1, || {
+                panic!("FIXME(sproul): reconstruction doesn't need this state")
+            })?
+            .take(num_blocks.unwrap_or(usize::MAX));
 
        // The state to be advanced.
        let mut state = self
@@ -77,7 +71,7 @@
                None
            } else {
                Some(
-                    self.get_blinded_block(&block_root)?
+                    self.get_blinded_block(&block_root, Some(slot))?
                        .ok_or(Error::BlockNotFound(block_root))?,
                )
            };
@@ -114,7 +108,7 @@
            self.store_cold_state(&state_root, &state, &mut io_batch)?;
 
            // If the slot lies on an epoch boundary, commit the batch and update the anchor.
-            if slot % slots_per_restore_point == 0 || slot + 1 == upper_limit_slot {
+            if slot % E::slots_per_epoch() == 0 || slot + 1 == upper_limit_slot {
                info!(
                    self.log,
                    "State reconstruction in progress";
diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs
new file mode 100644
index 00000000000..95ad09e261f
--- /dev/null
+++ b/beacon_node/store/src/state_cache.rs
@@ -0,0 +1,210 @@
+use crate::Error;
+use lru::LruCache;
+use std::collections::{BTreeMap, HashMap, HashSet};
+use std::num::NonZeroUsize;
+use types::{BeaconState, EthSpec, Hash256, Slot};
+
+#[derive(Debug)]
+pub struct FinalizedState<E: EthSpec> {
+    state_root: Hash256,
+    state: BeaconState<E>,
+}
+
+/// Map from block_root -> slot -> state_root.
+#[derive(Debug, Default)]
+pub struct BlockMap {
+    blocks: HashMap<Hash256, SlotMap>,
+}
+
+/// Map from slot -> state_root.
+#[derive(Debug, Default)]
+pub struct SlotMap {
+    slots: BTreeMap<Slot, Hash256>,
+}
+
+#[derive(Debug)]
+pub struct StateCache<E: EthSpec> {
+    finalized_state: Option<FinalizedState<E>>,
+    states: LruCache<Hash256, BeaconState<E>>,
+    block_map: BlockMap,
+}
+
+#[derive(Debug)]
+pub enum PutStateOutcome {
+    Finalized,
+    Duplicate,
+    New,
+}
+
+impl<E: EthSpec> StateCache<E> {
+    pub fn new(capacity: NonZeroUsize) -> Self {
+        StateCache {
+            finalized_state: None,
+            states: LruCache::new(capacity),
+            block_map: BlockMap::default(),
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.states.len()
+    }
+
+    pub fn update_finalized_state(
+        &mut self,
+        state_root: Hash256,
+        block_root: Hash256,
+        state: BeaconState<E>,
+    ) -> Result<(), Error> {
+        if state.slot() % E::slots_per_epoch() != 0 {
+            return Err(Error::FinalizedStateUnaligned);
+        }
+
+        if self
+            .finalized_state
+            .as_ref()
+            .map_or(false, |finalized_state| {
+                state.slot() < finalized_state.state.slot()
+            })
+        {
+            return Err(Error::FinalizedStateDecreasingSlot);
+        }
+
+        // Add to block map.
+        self.block_map.insert(block_root, state.slot(), state_root);
+
+        // Prune block map.
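+        // (Entries keyed below the new finalized slot can never be read again, so
+        // `prune` drops them from the block map and returns their state roots for
+        // eviction from the LRU cache below.)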
+        let state_roots_to_prune = self.block_map.prune(state.slot());
+
+        // Delete states.
+        for state_root in state_roots_to_prune {
+            self.states.pop(&state_root);
+        }
+
+        // Update finalized state.
+        self.finalized_state = Some(FinalizedState { state_root, state });
+        Ok(())
+    }
+
+    /// Return a status indicating whether the state already existed in the cache.
+    pub fn put_state(
+        &mut self,
+        state_root: Hash256,
+        block_root: Hash256,
+        state: &BeaconState<E>,
+    ) -> Result<PutStateOutcome, Error> {
+        if self
+            .finalized_state
+            .as_ref()
+            .map_or(false, |finalized_state| {
+                finalized_state.state_root == state_root
+            })
+        {
+            return Ok(PutStateOutcome::Finalized);
+        }
+
+        if self.states.peek(&state_root).is_some() {
+            return Ok(PutStateOutcome::Duplicate);
+        }
+
+        // Refuse states with pending mutations: we want cached states to be as small as possible
+        // i.e. stored entirely as a binary merkle tree with no updates overlaid.
+        if state.has_pending_mutations() {
+            return Err(Error::StateForCacheHasPendingUpdates {
+                state_root,
+                slot: state.slot(),
+            });
+        }
+
+        // Insert the full state into the cache.
+        self.states.put(state_root, state.clone());
+
+        // Record the connection from block root and slot to this state.
+        let slot = state.slot();
+        self.block_map.insert(block_root, slot, state_root);
+
+        Ok(PutStateOutcome::New)
+    }
+
+    pub fn get_by_state_root(&mut self, state_root: Hash256) -> Option<BeaconState<E>> {
+        if let Some(ref finalized_state) = self.finalized_state {
+            if state_root == finalized_state.state_root {
+                return Some(finalized_state.state.clone());
+            }
+        }
+        self.states.get(&state_root).cloned()
+    }
+
+    pub fn get_by_block_root(
+        &mut self,
+        block_root: Hash256,
+        slot: Slot,
+    ) -> Option<(Hash256, BeaconState<E>)> {
+        let slot_map = self.block_map.blocks.get(&block_root)?;
+
+        // Find the state at `slot`, or failing that the most recent ancestor.
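+        // (`slots` is a `BTreeMap` ordered by slot, so iterating in reverse visits the
+        // newest entries first; the first entry with `ancestor_slot <= slot` is the
+        // closest ancestor stored for this block root.)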
+        let state_root = slot_map
+            .slots
+            .iter()
+            .rev()
+            .find_map(|(ancestor_slot, state_root)| {
+                (*ancestor_slot <= slot).then_some(*state_root)
+            })?;
+
+        let state = self.get_by_state_root(state_root)?;
+        Some((state_root, state))
+    }
+
+    pub fn delete_state(&mut self, state_root: &Hash256) {
+        self.states.pop(state_root);
+        self.block_map.delete(state_root);
+    }
+
+    pub fn delete_block_states(&mut self, block_root: &Hash256) {
+        if let Some(slot_map) = self.block_map.delete_block_states(block_root) {
+            for state_root in slot_map.slots.values() {
+                self.states.pop(state_root);
+            }
+        }
+    }
+}
+
+impl BlockMap {
+    fn insert(&mut self, block_root: Hash256, slot: Slot, state_root: Hash256) {
+        let slot_map = self
+            .blocks
+            .entry(block_root)
+            .or_insert_with(SlotMap::default);
+        slot_map.slots.insert(slot, state_root);
+    }
+
+    fn prune(&mut self, finalized_slot: Slot) -> HashSet<Hash256> {
+        let mut pruned_states = HashSet::new();
+
+        self.blocks.retain(|_, slot_map| {
+            slot_map.slots.retain(|slot, state_root| {
+                let keep = *slot >= finalized_slot;
+                if !keep {
+                    pruned_states.insert(*state_root);
+                }
+                keep
+            });
+
+            !slot_map.slots.is_empty()
+        });
+
+        pruned_states
+    }
+
+    fn delete(&mut self, state_root_to_delete: &Hash256) {
+        self.blocks.retain(|_, slot_map| {
+            slot_map
+                .slots
+                .retain(|_, state_root| state_root != state_root_to_delete);
+            !slot_map.slots.is_empty()
+        });
+    }
+
+    fn delete_block_states(&mut self, block_root: &Hash256) -> Option<SlotMap> {
+        self.blocks.remove(block_root)
+    }
+}
diff --git a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs b/beacon_node/store/src/validator_pubkey_cache.rs
similarity index 60%
rename from beacon_node/beacon_chain/src/validator_pubkey_cache.rs
rename to beacon_node/store/src/validator_pubkey_cache.rs
index 79910df2923..529f73e5545 100644
--- a/beacon_node/beacon_chain/src/validator_pubkey_cache.rs
+++ b/beacon_node/store/src/validator_pubkey_cache.rs
@@ -1,11 +1,12 @@
-use crate::errors::BeaconChainError;
-use crate::{BeaconChainTypes, BeaconStore};
+use crate::{DBColumn, Error, HotColdDB, ItemStore, StoreItem, StoreOp};
+use bls::PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN;
+use smallvec::SmallVec;
 use ssz::{Decode, Encode};
+use ssz_derive::{Decode, Encode};
 use std::collections::HashMap;
-use std::convert::TryInto;
 use std::marker::PhantomData;
-use store::{DBColumn, Error as StoreError, StoreItem, StoreOp};
-use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};
+use std::sync::Arc;
+use types::{BeaconState, EthSpec, Hash256, PublicKey, PublicKeyBytes};
 
 /// Provides a mapping of `validator_index -> validator_publickey`.
 ///
@@ -15,25 +16,40 @@ use types::{BeaconState, Hash256, PublicKey, PublicKeyBytes};
 /// 2. To reduce the amount of public key _decompression_ required. A `BeaconState` stores public
 /// keys in compressed form and they are needed in decompressed form for signature verification.
 /// Decompression is expensive when many keys are involved.
-pub struct ValidatorPubkeyCache<T: BeaconChainTypes> {
+///
+/// The cache has a `backing` that it uses to maintain a persistent, on-disk
+/// copy of itself. This allows it to be restored between process invocations.
+#[derive(Debug)]
+pub struct ValidatorPubkeyCache<E: EthSpec, Hot: ItemStore<E>, Cold: ItemStore<E>> {
     pubkeys: Vec<PublicKey>,
     indices: HashMap<PublicKeyBytes, usize>,
-    pubkey_bytes: Vec<PublicKeyBytes>,
-    _phantom: PhantomData<T>,
+    validators: Vec<Arc<PublicKeyBytes>>,
+    _phantom: PhantomData<(E, Hot, Cold)>,
+}
+
+// Temp value.
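+// (As written, `Default` yields an entirely empty cache, to be filled either from a
+// `BeaconState` via `new` or from disk via `load_from_store`.)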
+impl, Cold: ItemStore> Default + for ValidatorPubkeyCache +{ + fn default() -> Self { + ValidatorPubkeyCache { + pubkeys: vec![], + indices: HashMap::new(), + validators: vec![], + _phantom: PhantomData, + } + } } -impl ValidatorPubkeyCache { +impl, Cold: ItemStore> ValidatorPubkeyCache { /// Create a new public key cache using the keys in `state.validators`. /// /// The new cache will be updated with the keys from `state` and immediately written to disk. - pub fn new( - state: &BeaconState, - store: BeaconStore, - ) -> Result { + pub fn new(state: &BeaconState, store: &HotColdDB) -> Result { let mut cache = Self { pubkeys: vec![], indices: HashMap::new(), - pubkey_bytes: vec![], + validators: vec![], _phantom: PhantomData, }; @@ -44,20 +60,20 @@ impl ValidatorPubkeyCache { } /// Load the pubkey cache from the given on-disk database. - pub fn load_from_store(store: BeaconStore) -> Result { + pub fn load_from_store(store: &HotColdDB) -> Result { let mut pubkeys = vec![]; let mut indices = HashMap::new(); - let mut pubkey_bytes = vec![]; + let mut validators = vec![]; for validator_index in 0.. { - if let Some(DatabasePubkey(pubkey)) = - store.get_item(&DatabasePubkey::key_for_index(validator_index))? + if let Some(db_validator) = + store.get_item(&DatabaseValidator::key_for_index(validator_index))? { - pubkeys.push((&pubkey).try_into().map_err(|e| { - BeaconChainError::ValidatorPubkeyCacheError(format!("{:?}", e)) - })?); - pubkey_bytes.push(pubkey); - indices.insert(pubkey, validator_index); + let (pubkey, pubkey_bytes) = + DatabaseValidator::into_immutable_validator(&db_validator)?; + pubkeys.push(pubkey); + indices.insert(pubkey_bytes, validator_index); + validators.push(Arc::new(pubkey_bytes)); } else { break; } @@ -66,7 +82,7 @@ impl ValidatorPubkeyCache { Ok(ValidatorPubkeyCache { pubkeys, indices, - pubkey_bytes, + validators, _phantom: PhantomData, }) } @@ -78,13 +94,14 @@ impl ValidatorPubkeyCache { /// NOTE: The caller *must* commit the returned I/O batch as part of the block import process. pub fn import_new_pubkeys( &mut self, - state: &BeaconState, - ) -> Result>, BeaconChainError> { - if state.validators().len() > self.pubkeys.len() { + state: &BeaconState, + ) -> Result>, Error> { + if state.validators().len() > self.validators.len() { self.import( - state.validators()[self.pubkeys.len()..] - .iter() - .map(|v| v.pubkey), + state + .validators() + .iter_from(self.pubkeys.len())? + .map(|v| v.pubkey.clone()), ) } else { Ok(vec![]) @@ -92,41 +109,38 @@ impl ValidatorPubkeyCache { } /// Adds zero or more validators to `self`. - fn import( - &mut self, - validator_keys: I, - ) -> Result>, BeaconChainError> + fn import(&mut self, validator_keys: I) -> Result>, Error> where - I: Iterator + ExactSizeIterator, + I: Iterator> + ExactSizeIterator, { - self.pubkey_bytes.reserve(validator_keys.len()); + self.validators.reserve(validator_keys.len()); self.pubkeys.reserve(validator_keys.len()); self.indices.reserve(validator_keys.len()); let mut store_ops = Vec::with_capacity(validator_keys.len()); - for pubkey in validator_keys { + for pubkey_bytes in validator_keys { let i = self.pubkeys.len(); - if self.indices.contains_key(&pubkey) { - return Err(BeaconChainError::DuplicateValidatorPublicKey); + if self.indices.contains_key(&pubkey_bytes) { + return Err(Error::DuplicateValidatorPublicKey); } + let pubkey = (&*pubkey_bytes) + .try_into() + .map_err(Error::InvalidValidatorPubkeyBytes)?; + // Stage the new validator key for writing to disk. 
// It will be committed atomically when the block that introduced it is written to disk. // Notably it is NOT written while the write lock on the cache is held. // See: https://github.com/sigp/lighthouse/issues/2327 store_ops.push(StoreOp::KeyValueOp( - DatabasePubkey(pubkey).as_kv_store_op(DatabasePubkey::key_for_index(i)), + DatabaseValidator::from_immutable_validator(&pubkey, &pubkey_bytes) + .as_kv_store_op(DatabaseValidator::key_for_index(i))?, )); - self.pubkeys.push( - (&pubkey) - .try_into() - .map_err(BeaconChainError::InvalidValidatorPubkeyBytes)?, - ); - self.pubkey_bytes.push(pubkey); - - self.indices.insert(pubkey, i); + self.pubkeys.push(pubkey); + self.indices.insert(*pubkey_bytes, i); + self.validators.push(pubkey_bytes); } Ok(store_ops) @@ -137,6 +151,11 @@ impl ValidatorPubkeyCache { self.pubkeys.get(i) } + /// Get the immutable validator with index `i`. + pub fn get_validator_pubkey(&self, i: usize) -> Option> { + self.validators.get(i).cloned() + } + /// Get the `PublicKey` for a validator with `PublicKeyBytes`. pub fn get_pubkey_from_pubkey_bytes(&self, pubkey: &PublicKeyBytes) -> Option<&PublicKey> { self.get_index(pubkey).and_then(|index| self.get(index)) @@ -144,7 +163,7 @@ impl ValidatorPubkeyCache { /// Get the public key (in bytes form) for a validator with index `i`. pub fn get_pubkey_bytes(&self, i: usize) -> Option<&PublicKeyBytes> { - self.pubkey_bytes.get(i) + self.validators.get(i).map(|pubkey_bytes| &**pubkey_bytes) } /// Get the index of a validator with `pubkey`. @@ -166,39 +185,57 @@ impl ValidatorPubkeyCache { /// Wrapper for a public key stored in the database. /// /// Keyed by the validator index as `Hash256::from_low_u64_be(index)`. -struct DatabasePubkey(PublicKeyBytes); +#[derive(Encode, Decode)] +struct DatabaseValidator { + pubkey: SmallVec<[u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]>, +} -impl StoreItem for DatabasePubkey { +impl StoreItem for DatabaseValidator { fn db_column() -> DBColumn { DBColumn::PubkeyCache } - fn as_store_bytes(&self) -> Vec { - self.0.as_ssz_bytes() + fn as_store_bytes(&self) -> Result, Error> { + Ok(self.as_ssz_bytes()) } - fn from_store_bytes(bytes: &[u8]) -> Result { - Ok(Self(PublicKeyBytes::from_ssz_bytes(bytes)?)) + fn from_store_bytes(bytes: &[u8]) -> Result { + Ok(Self::from_ssz_bytes(bytes)?) 
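+        // (Storing the *uncompressed* point lets `load_from_store` skip the costly
+        // point decompression that compressed `PublicKeyBytes` would require; see
+        // `into_immutable_validator` below, which recompresses cheaply.)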
} } -impl DatabasePubkey { +impl DatabaseValidator { fn key_for_index(index: usize) -> Hash256 { Hash256::from_low_u64_be(index as u64) } + + // FIXME(sproul): remove param + fn from_immutable_validator(pubkey: &PublicKey, _validator: &PublicKeyBytes) -> Self { + DatabaseValidator { + pubkey: pubkey.serialize_uncompressed().into(), + } + } + + #[allow(clippy::wrong_self_convention)] + fn into_immutable_validator(&self) -> Result<(PublicKey, PublicKeyBytes), Error> { + let pubkey = PublicKey::deserialize_uncompressed(&self.pubkey) + .map_err(Error::InvalidValidatorPubkeyBytes)?; + let pubkey_bytes = pubkey.compress(); + Ok((pubkey, pubkey_bytes)) + } } #[cfg(test)] mod test { use super::*; - use crate::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + use crate::{HotColdDB, MemoryStore}; + use beacon_chain::test_utils::BeaconChainHarness; use logging::test_logger; use std::sync::Arc; - use store::HotColdDB; use types::{BeaconState, EthSpec, Keypair, MainnetEthSpec}; type E = MainnetEthSpec; - type T = EphemeralHarnessType; + type Store = MemoryStore; fn get_state(validator_count: usize) -> (BeaconState, Vec) { let harness = BeaconChainHarness::builder(MainnetEthSpec) @@ -212,14 +249,14 @@ mod test { (harness.get_current_state(), harness.validator_keypairs) } - fn get_store() -> BeaconStore { + fn get_store() -> Arc> { Arc::new( HotColdDB::open_ephemeral(<_>::default(), E::default_spec(), test_logger()).unwrap(), ) } #[allow(clippy::needless_range_loop)] - fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) { + fn check_cache_get(cache: &ValidatorPubkeyCache, keypairs: &[Keypair]) { let validator_count = keypairs.len(); for i in 0..validator_count + 1 { @@ -252,7 +289,7 @@ mod test { let store = get_store(); - let mut cache = ValidatorPubkeyCache::new(&state, store).expect("should create cache"); + let mut cache = ValidatorPubkeyCache::new(&state, &store).expect("should create cache"); check_cache_get(&cache, &keypairs[..]); @@ -285,13 +322,12 @@ mod test { let store = get_store(); // Create a new cache. - let cache = ValidatorPubkeyCache::new(&state, store.clone()).expect("should create cache"); + let cache = ValidatorPubkeyCache::new(&state, &store).expect("should create cache"); check_cache_get(&cache, &keypairs[..]); drop(cache); // Re-init the cache from the store. - let mut cache = - ValidatorPubkeyCache::load_from_store(store.clone()).expect("should open cache"); + let mut cache = ValidatorPubkeyCache::load_from_store(&store).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); // Add some more keypairs. @@ -304,7 +340,7 @@ mod test { drop(cache); // Re-init the cache from the store. 
- let cache = ValidatorPubkeyCache::load_from_store(store).expect("should open cache"); + let cache = ValidatorPubkeyCache::load_from_store(&store).expect("should open cache"); check_cache_get(&cache, &keypairs[..]); } } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index 55759a2e158..ef15f029faa 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -360,20 +360,20 @@ pub enum ValidatorStatus { impl ValidatorStatus { pub fn from_validator(validator: &Validator, epoch: Epoch, far_future_epoch: Epoch) -> Self { if validator.is_withdrawable_at(epoch) { - if validator.effective_balance == 0 { + if validator.effective_balance() == 0 { ValidatorStatus::WithdrawalDone } else { ValidatorStatus::WithdrawalPossible } - } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch { - if validator.slashed { + } else if validator.is_exited_at(epoch) && epoch < validator.withdrawable_epoch() { + if validator.slashed() { ValidatorStatus::ExitedSlashed } else { ValidatorStatus::ExitedUnslashed } } else if validator.is_active_at(epoch) { - if validator.exit_epoch < far_future_epoch { - if validator.slashed { + if validator.exit_epoch() < far_future_epoch { + if validator.slashed() { ValidatorStatus::ActiveSlashed } else { ValidatorStatus::ActiveExiting @@ -384,7 +384,7 @@ impl ValidatorStatus { // `pending` statuses are specified as validators where `validator.activation_epoch > current_epoch`. // If this code is reached, this criteria must have been met because `validator.is_active_at(epoch)`, // `validator.is_exited_at(epoch)`, and `validator.is_withdrawable_at(epoch)` all returned false. - } else if validator.activation_eligibility_epoch == far_future_epoch { + } else if validator.activation_eligibility_epoch() == far_future_epoch { ValidatorStatus::PendingInitialized } else { ValidatorStatus::PendingQueued @@ -912,6 +912,7 @@ pub struct SseLateHead { pub proposer_graffiti: String, pub block_delay: Duration, pub observed_delay: Option, + pub attestable_delay: Option, pub imported_delay: Option, pub set_as_head_delay: Option, pub execution_optimistic: bool, diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index 3f2745bf90c..6eba8e0e51e 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -38,7 +38,7 @@ mod test { #[test] fn version_formatting() { let re = - Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-rc.[0-9])?(-[[:xdigit:]]{7})?\+?$") + Regex::new(r"^Lighthouse/v[0-9]+\.[0-9]+\.[0-9]+(-tree.[0-9])?(-[[:xdigit:]]{7})?\+?$") .unwrap(); assert!( re.is_match(VERSION), diff --git a/common/task_executor/Cargo.toml b/common/task_executor/Cargo.toml index 08bb565870d..c5d35c6368c 100644 --- a/common/task_executor/Cargo.toml +++ b/common/task_executor/Cargo.toml @@ -12,3 +12,4 @@ exit-future = "0.2.0" lazy_static = "1.4.0" lighthouse_metrics = { path = "../lighthouse_metrics" } sloggers = { version = "2.1.1", features = ["json"] } +logging = { path = "../logging" } diff --git a/common/task_executor/src/test_utils.rs b/common/task_executor/src/test_utils.rs index c6e5ad01e68..71a451ca848 100644 --- a/common/task_executor/src/test_utils.rs +++ b/common/task_executor/src/test_utils.rs @@ -1,4 +1,5 @@ use crate::TaskExecutor; +use logging::test_logger; use slog::Logger; use sloggers::{null::NullLoggerBuilder, Build}; use std::sync::Arc; @@ -26,7 +27,7 @@ impl Default for TestRuntime { fn default() -> Self { let (runtime_shutdown, exit) = exit_future::signal(); let 
(shutdown_tx, _) = futures::channel::mpsc::channel(1); - let log = null_logger().unwrap(); + let log = test_logger(); let (runtime, handle) = if let Ok(handle) = runtime::Handle::try_current() { (None, handle) diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 5d86f99f1ab..352b005316f 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -7,6 +7,7 @@ use slog::{crit, debug, warn, Logger}; use ssz_derive::{Decode, Encode}; use state_processing::{ per_block_processing::errors::AttesterSlashingValidationError, per_epoch_processing, + per_epoch_processing::altair::participation_cache, }; use std::cmp::Ordering; use std::collections::BTreeSet; @@ -71,7 +72,7 @@ pub enum Error { proposer_boost_root: Hash256, }, UnrealizedVoteProcessing(state_processing::EpochProcessingError), - ParticipationCacheBuild(BeaconStateError), + ParticipationCacheBuild(participation_cache::Error), ValidatorStatuses(BeaconStateError), } diff --git a/consensus/proto_array/src/justified_balances.rs b/consensus/proto_array/src/justified_balances.rs index c8787817f1a..141d585c80a 100644 --- a/consensus/proto_array/src/justified_balances.rs +++ b/consensus/proto_array/src/justified_balances.rs @@ -24,11 +24,11 @@ impl JustifiedBalances { .validators() .iter() .map(|validator| { - if !validator.slashed && validator.is_active_at(current_epoch) { - total_effective_balance.safe_add_assign(validator.effective_balance)?; + if !validator.slashed() && validator.is_active_at(current_epoch) { + total_effective_balance.safe_add_assign(validator.effective_balance())?; num_active_validators.safe_add_assign(1)?; - Ok(validator.effective_balance) + Ok(validator.effective_balance()) } else { Ok(0) } diff --git a/consensus/state_processing/Cargo.toml b/consensus/state_processing/Cargo.toml index c16742782c6..5d71e549a90 100644 --- a/consensus/state_processing/Cargo.toml +++ b/consensus/state_processing/Cargo.toml @@ -27,6 +27,8 @@ smallvec = "1.6.1" arbitrary = { version = "1.0", features = ["derive"], optional = true } lighthouse_metrics = { path = "../../common/lighthouse_metrics", optional = true } lazy_static = { version = "1.4.0", optional = true } +rustc-hash = "1.1.0" +vec_map = "0.8.2" derivative = "2.1.1" [features] diff --git a/consensus/state_processing/src/block_replayer.rs b/consensus/state_processing/src/block_replayer.rs index ed5e6429412..965e12c6d7b 100644 --- a/consensus/state_processing/src/block_replayer.rs +++ b/consensus/state_processing/src/block_replayer.rs @@ -6,17 +6,18 @@ use crate::{ use std::marker::PhantomData; use types::{BeaconState, BlindedPayload, ChainSpec, EthSpec, Hash256, SignedBeaconBlock, Slot}; -type PreBlockHook<'a, E, Error> = Box< +pub type PreBlockHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, &SignedBeaconBlock>) -> Result<(), Error> + 'a, >; -type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; -type PreSlotHook<'a, E, Error> = Box) -> Result<(), Error> + 'a>; -type PostSlotHook<'a, E, Error> = Box< +pub type PostBlockHook<'a, E, Error> = PreBlockHook<'a, E, Error>; +pub type PreSlotHook<'a, E, Error> = + Box, &mut BeaconState) -> Result<(), Error> + 'a>; +pub type PostSlotHook<'a, E, Error> = Box< dyn FnMut(&mut BeaconState, Option>, bool) -> Result<(), Error> + 'a, >; -type StateRootIterDefault = std::iter::Empty>; +pub type StateRootIterDefault = std::iter::Empty>; /// Efficiently apply blocks to a state while configuring various parameters. 
/// @@ -29,7 +30,6 @@ pub struct BlockReplayer< > { state: BeaconState, spec: &'a ChainSpec, - state_processing_strategy: StateProcessingStrategy, block_sig_strategy: BlockSignatureStrategy, verify_block_root: Option, pre_block_hook: Option>, @@ -87,7 +87,6 @@ where Self { state, spec, - state_processing_strategy: StateProcessingStrategy::Accurate, block_sig_strategy: BlockSignatureStrategy::VerifyBulk, verify_block_root: Some(VerifyBlockRoot::True), pre_block_hook: None, @@ -105,10 +104,10 @@ where mut self, state_processing_strategy: StateProcessingStrategy, ) -> Self { + // FIXME(sproul): no-op if state_processing_strategy == StateProcessingStrategy::Inconsistent { self.verify_block_root = None; } - self.state_processing_strategy = state_processing_strategy; self } @@ -184,11 +183,6 @@ where blocks: &[SignedBeaconBlock>], i: usize, ) -> Result, Error> { - // If we don't care about state roots then return immediately. - if self.state_processing_strategy == StateProcessingStrategy::Inconsistent { - return Ok(Some(Hash256::zero())); - } - // If a state root iterator is configured, use it to find the root. if let Some(ref mut state_root_iter) = self.state_root_iter { let opt_root = state_root_iter @@ -230,11 +224,12 @@ where } while self.state.slot() < block.slot() { + let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, i)?; let summary = per_slot_processing(&mut self.state, state_root, self.spec) .map_err(BlockReplayError::from)?; @@ -248,15 +243,11 @@ where pre_block_hook(&mut self.state, block)?; } - let verify_block_root = self.verify_block_root.unwrap_or_else(|| { - // If no explicit policy is set, verify only the first 1 or 2 block roots if using - // accurate state roots. Inaccurate state roots require block root verification to - // be off. - if i <= 1 && self.state_processing_strategy == StateProcessingStrategy::Accurate { - VerifyBlockRoot::True - } else { - VerifyBlockRoot::False - } + // If no explicit policy is set, verify only the first 1 or 2 block roots. + let verify_block_root = self.verify_block_root.unwrap_or(if i <= 1 { + VerifyBlockRoot::True + } else { + VerifyBlockRoot::False }); // Proposer index was already checked when this block was originally processed, we // can omit recomputing it during replay. 
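// A minimal usage sketch (not part of this patch; `pre_slot_hook` and `apply_blocks`
// are assumed from the `BlockReplayer` builder API) showing the new `PreSlotHook`
// signature, which now receives the optional known state root for the slot being
// advanced through:
//
//     let replayer = BlockReplayer::new(state, &spec)
//         .pre_slot_hook(Box::new(|state_root, state| {
//             // `state_root` is `Some` when the state root iterator (or block list)
//             // could supply it, letting the hook key its work by root.
//             println!("advancing past slot {} (root: {:?})", state.slot(), state_root);
//             Ok(())
//         }))
//         .apply_blocks(blocks, None)?;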
@@ -266,7 +257,7 @@ where &mut self.state, block, self.block_sig_strategy, - self.state_processing_strategy, + StateProcessingStrategy::Accurate, verify_block_root, &mut ctxt, self.spec, @@ -280,11 +271,12 @@ where if let Some(target_slot) = target_slot { while self.state.slot() < target_slot { + let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; + if let Some(ref mut pre_slot_hook) = self.pre_slot_hook { - pre_slot_hook(&mut self.state)?; + pre_slot_hook(state_root, &mut self.state)?; } - let state_root = self.get_state_root(self.state.slot(), &blocks, blocks.len())?; let summary = per_slot_processing(&mut self.state, state_root, self.spec) .map_err(BlockReplayError::from)?; diff --git a/consensus/state_processing/src/common/altair.rs b/consensus/state_processing/src/common/altair.rs index 8943ef2f40b..43801541336 100644 --- a/consensus/state_processing/src/common/altair.rs +++ b/consensus/state_processing/src/common/altair.rs @@ -24,14 +24,12 @@ impl BaseRewardPerIncrement { /// shown to be a significant optimisation. /// /// Spec v1.1.0 -pub fn get_base_reward( - state: &BeaconState, - index: usize, +pub fn get_base_reward( + validator_effective_balance: u64, base_reward_per_increment: BaseRewardPerIncrement, spec: &ChainSpec, ) -> Result { - state - .get_effective_balance(index)? + validator_effective_balance .safe_div(spec.effective_balance_increment)? .safe_mul(base_reward_per_increment.as_u64()) .map_err(Into::into) diff --git a/consensus/state_processing/src/common/base.rs b/consensus/state_processing/src/common/base.rs index b5cb382721f..47b0de9ef1e 100644 --- a/consensus/state_processing/src/common/base.rs +++ b/consensus/state_processing/src/common/base.rs @@ -1,19 +1,30 @@ use integer_sqrt::IntegerSquareRoot; -use safe_arith::SafeArith; +use safe_arith::{ArithError, SafeArith}; use types::*; +/// This type exists to avoid confusing `total_active_balance` with `sqrt_total_active_balance`, +/// since they are used in close proximity and the same type (`u64`). +#[derive(Copy, Clone)] +pub struct SqrtTotalActiveBalance(u64); + +impl SqrtTotalActiveBalance { + pub fn new(total_active_balance: u64) -> Self { + Self(total_active_balance.integer_sqrt()) + } + + pub fn as_u64(&self) -> u64 { + self.0 + } +} + /// Returns the base reward for some validator. -pub fn get_base_reward( - state: &BeaconState, - index: usize, - // Should be == get_total_active_balance(state, spec) - total_active_balance: u64, +pub fn get_base_reward( + validator_effective_balance: u64, + sqrt_total_active_balance: SqrtTotalActiveBalance, spec: &ChainSpec, -) -> Result { - state - .get_effective_balance(index)? +) -> Result { + validator_effective_balance .safe_mul(spec.base_reward_factor)? - .safe_div(total_active_balance.integer_sqrt())? + .safe_div(sqrt_total_active_balance.as_u64())? .safe_div(spec.base_rewards_per_epoch) - .map_err(Into::into) } diff --git a/consensus/state_processing/src/common/initiate_validator_exit.rs b/consensus/state_processing/src/common/initiate_validator_exit.rs index 85e5e1df1db..41cfe8ee796 100644 --- a/consensus/state_processing/src/common/initiate_validator_exit.rs +++ b/consensus/state_processing/src/common/initiate_validator_exit.rs @@ -8,10 +8,10 @@ pub fn initiate_validator_exit( index: usize, spec: &ChainSpec, ) -> Result<(), Error> { - // Return if the validator already initiated exit - if state.get_validator(index)?.exit_epoch != spec.far_future_epoch { - return Ok(()); - } + // We do things in a slightly different order to the spec here. 
Instead of immediately checking + // whether the validator has already exited, we instead prepare the exit cache and compute the + // cheap-to-calculate values from that. *Then* we look up the validator a single time in the + // validator tree (expensive), make the check and mutate as appropriate. // Ensure the exit cache is built. state.build_exit_cache(spec)?; @@ -28,12 +28,21 @@ pub fn initiate_validator_exit( exit_queue_epoch.safe_add_assign(1)?; } + let validator = state.get_validator_cow(index)?; + + // Return if the validator already initiated exit + if validator.exit_epoch() != spec.far_future_epoch { + return Ok(()); + } + + let validator = validator.to_mut(); + validator.mutable.exit_epoch = exit_queue_epoch; + validator.mutable.withdrawable_epoch = + exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; + state .exit_cache_mut() .record_validator_exit(exit_queue_epoch)?; - state.get_validator_mut(index)?.exit_epoch = exit_queue_epoch; - state.get_validator_mut(index)?.withdrawable_epoch = - exit_queue_epoch.safe_add(spec.min_validator_withdrawability_delay)?; Ok(()) } diff --git a/consensus/state_processing/src/common/mod.rs b/consensus/state_processing/src/common/mod.rs index 8a2e2439bb6..17b193e5f45 100644 --- a/consensus/state_processing/src/common/mod.rs +++ b/consensus/state_processing/src/common/mod.rs @@ -24,8 +24,7 @@ pub fn increase_balance( index: usize, delta: u64, ) -> Result<(), BeaconStateError> { - state.get_balance_mut(index)?.safe_add_assign(delta)?; - Ok(()) + increase_balance_directly(state.get_balance_mut(index)?, delta) } /// Decrease the balance of a validator, saturating upon overflow, as per the spec. @@ -34,7 +33,17 @@ pub fn decrease_balance( index: usize, delta: u64, ) -> Result<(), BeaconStateError> { - let balance = state.get_balance_mut(index)?; + decrease_balance_directly(state.get_balance_mut(index)?, delta) +} + +/// Increase the balance of a validator, erroring upon overflow, as per the spec. +pub fn increase_balance_directly(balance: &mut u64, delta: u64) -> Result<(), BeaconStateError> { + balance.safe_add_assign(delta)?; + Ok(()) +} + +/// Decrease the balance of a validator, saturating upon overflow, as per the spec. 
+pub fn decrease_balance_directly(balance: &mut u64, delta: u64) -> Result<(), BeaconStateError> { *balance = balance.saturating_sub(delta); Ok(()) } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index d4675f5ef5d..ea878ff181e 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ b/consensus/state_processing/src/common/slash_validator.rs @@ -23,12 +23,12 @@ pub fn slash_validator( initiate_validator_exit(state, slashed_index, spec)?; let validator = state.get_validator_mut(slashed_index)?; - validator.slashed = true; - validator.withdrawable_epoch = cmp::max( - validator.withdrawable_epoch, + validator.mutable.slashed = true; + validator.mutable.withdrawable_epoch = cmp::max( + validator.withdrawable_epoch(), epoch.safe_add(T::EpochsPerSlashingsVector::to_u64())?, ); - let validator_effective_balance = validator.effective_balance; + let validator_effective_balance = validator.effective_balance(); state.set_slashings( epoch, state diff --git a/consensus/state_processing/src/consensus_context.rs b/consensus/state_processing/src/consensus_context.rs index ccf8cefb69f..fe34d5e93e3 100644 --- a/consensus/state_processing/src/consensus_context.rs +++ b/consensus/state_processing/src/consensus_context.rs @@ -1,5 +1,7 @@ use crate::common::get_indexed_attestation; use crate::per_block_processing::errors::{AttestationInvalid, BlockOperationError}; +use crate::{EpochCache, EpochCacheError}; +use std::borrow::Cow; use std::collections::{hash_map::Entry, HashMap}; use std::marker::PhantomData; use tree_hash::TreeHash; @@ -8,7 +10,7 @@ use types::{ ChainSpec, Epoch, EthSpec, Hash256, IndexedAttestation, SignedBeaconBlock, Slot, }; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ConsensusContext { /// Slot to act as an identifier/safeguard slot: Slot, @@ -16,6 +18,8 @@ pub struct ConsensusContext { proposer_index: Option, /// Block root of the block at `slot`. current_block_root: Option, + /// Epoch cache of values that are useful for block processing that are static over an epoch. + epoch_cache: Option, /// Cache of indexed attestations constructed during block processing. indexed_attestations: HashMap<(AttestationData, BitList), IndexedAttestation>, @@ -25,6 +29,7 @@ pub struct ConsensusContext { #[derive(Debug, PartialEq, Clone)] pub enum ContextError { BeaconState(BeaconStateError), + EpochCache(EpochCacheError), SlotMismatch { slot: Slot, expected: Slot }, EpochMismatch { epoch: Epoch, expected: Epoch }, } @@ -35,12 +40,19 @@ impl From for ContextError { } } +impl From for ContextError { + fn from(e: EpochCacheError) -> Self { + Self::EpochCache(e) + } +} + impl ConsensusContext { pub fn new(slot: Slot) -> Self { Self { slot, proposer_index: None, current_block_root: None, + epoch_cache: None, indexed_attestations: HashMap::new(), _phantom: PhantomData, } @@ -133,6 +145,31 @@ impl ConsensusContext { } } + pub fn set_epoch_cache(mut self, epoch_cache: EpochCache) -> Self { + self.epoch_cache = Some(epoch_cache); + self + } + + pub fn get_base_reward( + &mut self, + state: &BeaconState, + validator_index: usize, + spec: &ChainSpec, + ) -> Result { + self.check_slot(state.slot())?; + + // Build epoch cache if not already built. + let epoch_cache = if let Some(ref cache) = self.epoch_cache { + Cow::Borrowed(cache) + } else { + let cache = EpochCache::new(state, spec)?; + self.epoch_cache = Some(cache.clone()); + Cow::Owned(cache) + }; + + Ok(epoch_cache.get_base_reward(validator_index)?) 
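+        // (The `Cow` lets the common already-cached path borrow the existing cache,
+        // while the first call builds it and stores a clone for subsequent calls;
+        // `EpochCache` wraps its data in an `Arc`, so the clone is cheap.)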
+ } + pub fn get_indexed_attestation( &mut self, state: &BeaconState, diff --git a/consensus/state_processing/src/epoch_cache.rs b/consensus/state_processing/src/epoch_cache.rs new file mode 100644 index 00000000000..6e9114b3576 --- /dev/null +++ b/consensus/state_processing/src/epoch_cache.rs @@ -0,0 +1,137 @@ +use crate::common::{ + altair::{self, BaseRewardPerIncrement}, + base::{self, SqrtTotalActiveBalance}, +}; +use safe_arith::ArithError; +use std::sync::Arc; +use types::{BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, Hash256, Slot}; + +/// Cache of values which are uniquely determined at the start of an epoch. +/// +/// The values are fixed with respect to the last block of the _prior_ epoch, which we refer +/// to as the "decision block". This cache is very similar to the `BeaconProposerCache` in that +/// beacon proposers are determined at exactly the same time as the values in this cache, so +/// the keys for the two caches are identical. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct EpochCache { + inner: Arc, +} + +#[derive(Debug, PartialEq, Eq, Clone)] +struct Inner { + /// Unique identifier for this cache, which can be used to check its validity before use + /// with any `BeaconState`. + key: EpochCacheKey, + /// Base reward for every validator in this epoch. + base_rewards: Vec, +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct EpochCacheKey { + pub epoch: Epoch, + pub decision_block_root: Hash256, +} + +#[derive(Debug, PartialEq, Clone)] +pub enum EpochCacheError { + IncorrectEpoch { cache: Epoch, state: Epoch }, + IncorrectDecisionBlock { cache: Hash256, state: Hash256 }, + ValidatorIndexOutOfBounds { validator_index: usize }, + InvalidSlot { slot: Slot }, + Arith(ArithError), + BeaconState(BeaconStateError), +} + +impl From for EpochCacheError { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for EpochCacheError { + fn from(e: ArithError) -> Self { + Self::Arith(e) + } +} + +impl EpochCache { + pub fn new( + state: &BeaconState, + spec: &ChainSpec, + ) -> Result { + let epoch = state.current_epoch(); + let decision_block_root = state + .proposer_shuffling_decision_root(Hash256::zero()) + .map_err(EpochCacheError::BeaconState)?; + + // The cache should never be constructed at slot 0 because it should only be used for + // block processing (which implies slot > 0) or epoch processing (which implies slot >= 32). + /* FIXME(sproul): EF tests like this + if decision_block_root.is_zero() { + return Err(EpochCacheError::InvalidSlot { slot: state.slot() }); + } + */ + + // Compute base rewards. + let total_active_balance = state.get_total_active_balance()?; + let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_active_balance); + let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; + + let mut base_rewards = Vec::with_capacity(state.validators().len()); + + for validator in state.validators().iter() { + let effective_balance = validator.effective_balance(); + + let base_reward = if spec + .altair_fork_epoch + .map_or(false, |altair_epoch| epoch < altair_epoch) + { + base::get_base_reward(effective_balance, sqrt_total_active_balance, spec)? + } else { + altair::get_base_reward(effective_balance, base_reward_per_increment, spec)? 
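+                // (Pre-Altair the base reward is
+                //  effective_balance * base_reward_factor / sqrt(total_active_balance)
+                //      / base_rewards_per_epoch,
+                //  paid once per duty; from Altair onwards it is
+                //  (effective_balance / effective_balance_increment) * base_reward_per_increment,
+                //  a single consolidated reward, matching the helpers in `common::{base, altair}`.)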
+ }; + base_rewards.push(base_reward); + } + + Ok(Self { + inner: Arc::new(Inner { + key: EpochCacheKey { + epoch, + decision_block_root, + }, + base_rewards, + }), + }) + } + + pub fn check_validity( + &self, + state: &BeaconState, + ) -> Result<(), EpochCacheError> { + if self.inner.key.epoch != state.current_epoch() { + return Err(EpochCacheError::IncorrectEpoch { + cache: self.inner.key.epoch, + state: state.current_epoch(), + }); + } + let state_decision_root = state + .proposer_shuffling_decision_root(Hash256::zero()) + .map_err(EpochCacheError::BeaconState)?; + if self.inner.key.decision_block_root != state_decision_root { + return Err(EpochCacheError::IncorrectDecisionBlock { + cache: self.inner.key.decision_block_root, + state: state_decision_root, + }); + } + Ok(()) + } + + #[inline] + pub fn get_base_reward(&self, validator_index: usize) -> Result { + self.inner + .base_rewards + .get(validator_index) + .copied() + .ok_or(EpochCacheError::ValidatorIndexOutOfBounds { validator_index }) + } +} diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 68f04b554e3..c1088a3c84a 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -26,7 +26,7 @@ pub fn initialize_beacon_state_from_eth1( let mut state = BeaconState::new(genesis_time, eth1_data, spec); // Seed RANDAO with Eth1 entropy - state.fill_randao_mixes_with(eth1_block_hash); + state.fill_randao_mixes_with(eth1_block_hash)?; let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); @@ -116,18 +116,20 @@ pub fn process_activations( spec: &ChainSpec, ) -> Result<(), Error> { let (validators, balances) = state.validators_and_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + while let Some((index, validator)) = validators_iter.next_cow() { + let validator = validator.to_mut(); let balance = balances .get(index) .copied() .ok_or(Error::BalancesOutOfBounds(index))?; - validator.effective_balance = std::cmp::min( + validator.mutable.effective_balance = std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, ); - if validator.effective_balance == spec.max_effective_balance { - validator.activation_eligibility_epoch = T::genesis_epoch(); - validator.activation_epoch = T::genesis_epoch(); + if validator.effective_balance() == spec.max_effective_balance { + validator.mutable.activation_eligibility_epoch = T::genesis_epoch(); + validator.mutable.activation_epoch = T::genesis_epoch(); } } Ok(()) diff --git a/consensus/state_processing/src/lib.rs b/consensus/state_processing/src/lib.rs index 7340206a345..6140570d8a4 100644 --- a/consensus/state_processing/src/lib.rs +++ b/consensus/state_processing/src/lib.rs @@ -19,6 +19,7 @@ mod metrics; pub mod block_replayer; pub mod common; pub mod consensus_context; +pub mod epoch_cache; pub mod genesis; pub mod per_block_processing; pub mod per_epoch_processing; @@ -29,6 +30,7 @@ pub mod verify_operation; pub use block_replayer::{BlockReplayError, BlockReplayer, StateProcessingStrategy}; pub use consensus_context::{ConsensusContext, ContextError}; +pub use epoch_cache::{EpochCache, EpochCacheError, EpochCacheKey}; pub use genesis::{ eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, process_activations, diff --git a/consensus/state_processing/src/metrics.rs b/consensus/state_processing/src/metrics.rs index 
ddfaae56403..2b82ad93c30 100644 --- a/consensus/state_processing/src/metrics.rs +++ b/consensus/state_processing/src/metrics.rs @@ -23,4 +23,11 @@ lazy_static! { "beacon_participation_prev_epoch_active_gwei_total", "Total effective balance (gwei) of validators active in the previous epoch" ); + /* + * Processing metrics + */ + pub static ref PROCESS_EPOCH_TIME: Result = try_create_histogram( + "beacon_state_processing_process_epoch", + "Time required for process_epoch", + ); } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 124fdf6500b..eaee295683c 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -234,7 +234,7 @@ pub fn process_block_header( // Verify proposer is not slashed verify!( - !state.get_validator(proposer_index as usize)?.slashed, + !state.get_validator(proposer_index as usize)?.slashed(), HeaderInvalid::ProposerSlashed(proposer_index) ); diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index a5dcd6e0b61..7773bc8ac35 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -47,17 +47,20 @@ pub fn process_sync_aggregate( // Apply participant and proposer rewards let committee_indices = state.get_sync_committee_indices(¤t_sync_committee)?; + let mut total_proposer_reward = 0; for (participant_index, participation_bit) in committee_indices .into_iter() .zip(aggregate.sync_committee_bits.iter()) { + // FIXME(sproul): double-check this for Capella, proposer shouldn't have 0 effective balance if participation_bit { increase_balance(state, participant_index, participant_reward)?; - increase_balance(state, proposer_index as usize, proposer_reward)?; + total_proposer_reward.safe_add_assign(proposer_reward)?; } else { decrease_balance(state, participant_index, participant_reward)?; } } + increase_balance(state, proposer_index as usize, total_proposer_reward)?; Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/errors.rs b/consensus/state_processing/src/per_block_processing/errors.rs index 1aaf298d690..5907faa68e1 100644 --- a/consensus/state_processing/src/per_block_processing/errors.rs +++ b/consensus/state_processing/src/per_block_processing/errors.rs @@ -1,5 +1,5 @@ use super::signature_sets::Error as SignatureSetError; -use crate::ContextError; +use crate::{ContextError, EpochCacheError}; use merkle_proof::MerkleTreeError; use safe_arith::ArithError; use ssz::DecodeError; @@ -78,6 +78,8 @@ pub enum BlockProcessingError { }, ExecutionInvalid, ConsensusContext(ContextError), + MilhouseError(milhouse::Error), + EpochCacheError(EpochCacheError), WithdrawalsRootMismatch { expected: Hash256, found: Hash256, @@ -127,6 +129,18 @@ impl From for BlockProcessingError { } } +impl From for BlockProcessingError { + fn from(e: EpochCacheError) -> Self { + BlockProcessingError::EpochCacheError(e) + } +} + +impl From for BlockProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + impl From> for BlockProcessingError { fn from(e: BlockOperationError) -> BlockProcessingError { match e { diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs 
index 4bee596615a..2d98d221ae0 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -1,12 +1,12 @@ use super::*; use crate::common::{ - altair::{get_base_reward, BaseRewardPerIncrement}, get_attestation_participation_flag_indices, increase_balance, initiate_validator_exit, slash_validator, }; use crate::per_block_processing::errors::{BlockProcessingError, IntoWithIndex}; use crate::VerifySignatures; use safe_arith::SafeArith; +use std::sync::Arc; use types::consts::altair::{PARTICIPATION_FLAG_WEIGHTS, PROPOSER_WEIGHT, WEIGHT_DENOMINATOR}; pub fn process_operations>( @@ -126,7 +126,7 @@ pub mod altair { let proposer_index = ctxt.get_proposer_index(state, spec)?; - let attesting_indices = &verify_attestation_for_block_inclusion( + let attesting_indices = verify_attestation_for_block_inclusion( state, attestation, ctxt, @@ -134,7 +134,8 @@ pub mod altair { spec, ) .map_err(|e| e.into_with_index(att_index))? - .attesting_indices; + .attesting_indices + .clone(); // Matching roots, participation flag indices let data = &attestation.data; @@ -143,10 +144,8 @@ pub mod altair { get_attestation_participation_flag_indices(state, data, inclusion_delay, spec)?; // Update epoch participation flags. - let total_active_balance = state.get_total_active_balance()?; - let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; let mut proposer_reward_numerator = 0; - for index in attesting_indices { + for index in &attesting_indices { let index = *index as usize; for (flag_index, &weight) in PARTICIPATION_FLAG_WEIGHTS.iter().enumerate() { @@ -160,8 +159,7 @@ pub mod altair { { validator_participation.add_flag(flag_index)?; proposer_reward_numerator.safe_add_assign( - get_base_reward(state, index, base_reward_per_increment, spec)? - .safe_mul(weight)?, + ctxt.get_base_reward(state, index, spec)?.safe_mul(weight)?, )?; } } @@ -392,17 +390,19 @@ pub fn process_deposit( // Create a new validator. 
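    // In this branch `Validator` is split into an immutable, `Arc`-shared pubkey plus a
    // `ValidatorMutable` holding the fields that can change after deposit processing, so
    // cloned states share the expensive key material. Layout inferred from the
    // construction below (an assumption, not quoted from the source):
    //
    //     pub struct Validator {
    //         pub pubkey: Arc<PublicKeyBytes>,
    //         pub mutable: ValidatorMutable,
    //     }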
let validator = Validator { - pubkey: deposit.data.pubkey, - withdrawal_credentials: deposit.data.withdrawal_credentials, - activation_eligibility_epoch: spec.far_future_epoch, - activation_epoch: spec.far_future_epoch, - exit_epoch: spec.far_future_epoch, - withdrawable_epoch: spec.far_future_epoch, - effective_balance: std::cmp::min( - amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, - spec.max_effective_balance, - ), - slashed: false, + pubkey: Arc::new(deposit.data.pubkey), + mutable: ValidatorMutable { + withdrawal_credentials: deposit.data.withdrawal_credentials, + activation_eligibility_epoch: spec.far_future_epoch, + activation_epoch: spec.far_future_epoch, + exit_epoch: spec.far_future_epoch, + withdrawable_epoch: spec.far_future_epoch, + effective_balance: std::cmp::min( + amount.safe_sub(amount.safe_rem(spec.effective_balance_increment)?)?, + spec.max_effective_balance, + ), + slashed: false, + }, }; state.validators_mut().push(validator)?; state.balances_mut().push(deposit.data.amount)?; diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index c05d3f057d7..f1a344ed10a 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -64,7 +64,7 @@ where .validators() .get(validator_index) .and_then(|v| { - let pk: Option = v.pubkey.decompress().ok(); + let pk: Option = v.pubkey().decompress().ok(); pk }) .map(Cow::Owned) diff --git a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs index 731a82aa951..fe016f085dc 100644 --- a/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs +++ b/consensus/state_processing/src/per_block_processing/verify_bls_to_execution_change.rs @@ -29,7 +29,7 @@ pub fn verify_bls_to_execution_change( verify!( validator - .withdrawal_credentials + .withdrawal_credentials() .as_bytes() .first() .map(|byte| *byte == spec.bls_withdrawal_prefix_byte) @@ -41,7 +41,7 @@ pub fn verify_bls_to_execution_change( // future. let pubkey_hash = hash(address_change.from_bls_pubkey.as_serialized()); verify!( - validator.withdrawal_credentials.as_bytes().get(1..) == pubkey_hash.get(1..), + validator.withdrawal_credentials().as_bytes().get(1..) == pubkey_hash.get(1..), Invalid::WithdrawalCredentialsMismatch ); diff --git a/consensus/state_processing/src/per_block_processing/verify_exit.rs b/consensus/state_processing/src/per_block_processing/verify_exit.rs index 9e9282912de..d140b4c06d7 100644 --- a/consensus/state_processing/src/per_block_processing/verify_exit.rs +++ b/consensus/state_processing/src/per_block_processing/verify_exit.rs @@ -41,7 +41,7 @@ pub fn verify_exit( // Verify that the validator has not yet exited. verify!( - validator.exit_epoch == spec.far_future_epoch, + validator.exit_epoch() == spec.far_future_epoch, ExitInvalid::AlreadyExited(exit.validator_index) ); @@ -56,7 +56,7 @@ pub fn verify_exit( // Verify the validator has been active long enough. 
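    // (Per the spec's voluntary-exit conditions: the exit is only valid once
    // `current_epoch >= activation_epoch + SHARD_COMMITTEE_PERIOD`.)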
let earliest_exit_epoch = validator - .activation_epoch + .activation_epoch() .safe_add(spec.shard_committee_period)?; verify!( current_epoch >= earliest_exit_epoch, diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 6350685f822..26324b454c7 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -1,5 +1,6 @@ #![deny(clippy::wildcard_imports)] +use crate::metrics; pub use epoch_processing_summary::EpochProcessingSummary; use errors::EpochProcessingError as Error; pub use justification_and_finalization_state::JustificationAndFinalizationState; @@ -32,6 +33,8 @@ pub fn process_epoch( state: &mut BeaconState, spec: &ChainSpec, ) -> Result, Error> { + let _timer = metrics::start_timer(&metrics::PROCESS_EPOCH_TIME); + // Verify that the `BeaconState` instantiation matches the fork at `state.slot()`. state .fork_name(spec) diff --git a/consensus/state_processing/src/per_epoch_processing/altair.rs b/consensus/state_processing/src/per_epoch_processing/altair.rs index d5df2fc975d..5e8bdef3e4d 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair.rs @@ -29,7 +29,7 @@ pub fn process_epoch( state.build_committee_cache(RelativeEpoch::Next, spec)?; // Pre-compute participating indices and total balances. - let participation_cache = ParticipationCache::new(state, spec)?; + let mut participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); // Justification and finalization. @@ -37,7 +37,7 @@ pub fn process_epoch( process_justification_and_finalization(state, &participation_cache)?; justification_and_finalization_state.apply_changes_to_state(state); - process_inactivity_updates(state, &participation_cache, spec)?; + process_inactivity_updates(state, &mut participation_cache, spec)?; // Rewards and Penalties. process_rewards_and_penalties(state, &participation_cache, spec)?; @@ -48,6 +48,7 @@ pub fn process_epoch( // Slashings. process_slashings( state, + Some(participation_cache.process_slashings_indices()), participation_cache.current_epoch_total_active_balance(), spec, )?; diff --git a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs index 967f642e85d..f568f5fc988 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/inactivity_updates.rs @@ -1,7 +1,5 @@ use super::ParticipationCache; use crate::EpochProcessingError; -use core::result::Result; -use core::result::Result::Ok; use safe_arith::SafeArith; use std::cmp::min; use types::beacon_state::BeaconState; @@ -11,7 +9,7 @@ use types::eth_spec::EthSpec; pub fn process_inactivity_updates( state: &mut BeaconState, - participation_cache: &ParticipationCache, + participation_cache: &mut ParticipationCache, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { let previous_epoch = state.previous_epoch(); @@ -20,24 +18,50 @@ pub fn process_inactivity_updates( return Ok(()); } - let unslashed_indices = participation_cache - .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, state.previous_epoch())?; + // Fast path: inactivity scores have already been pre-computed. 
+ if let Some(inactivity_score_updates) = participation_cache.inactivity_score_updates.take() { + // We need to flush the existing inactivity scores in case tree hashing hasn't happened in + // a long time (e.g. during state reconstruction). + // FIXME(sproul): re-think this + state.inactivity_scores_mut()?.apply_updates()?; + state + .inactivity_scores_mut()? + .bulk_update(inactivity_score_updates)?; + return Ok(()); + } + + let is_in_inactivity_leak = state.is_in_inactivity_leak(previous_epoch, spec); + + let mut inactivity_scores = state.inactivity_scores_mut()?.iter_cow(); + + while let Some((index, inactivity_score)) = inactivity_scores.next_cow() { + let validator = match participation_cache.get_validator(index) { + Ok(val) if val.is_eligible => val, + _ => continue, + }; + + let inactivity_score_mut; - for &index in participation_cache.eligible_validator_indices() { // Increase inactivity score of inactive validators - if unslashed_indices.contains(index)? { - let inactivity_score = state.get_inactivity_score_mut(index)?; - inactivity_score.safe_sub_assign(min(1, *inactivity_score))?; + if validator.is_unslashed_participating_index(TIMELY_TARGET_FLAG_INDEX)? { + // Avoid mutating when the inactivity score is 0 and can't go any lower -- the common + // case. + if *inactivity_score == 0 { + continue; + } + inactivity_score_mut = inactivity_score.to_mut(); + inactivity_score_mut.safe_sub_assign(1)?; } else { - state - .get_inactivity_score_mut(index)? - .safe_add_assign(spec.inactivity_score_bias)?; + inactivity_score_mut = inactivity_score.to_mut(); + inactivity_score_mut.safe_add_assign(spec.inactivity_score_bias)?; } + // Decrease the score of all validators for forgiveness when not during a leak - if !state.is_in_inactivity_leak(previous_epoch, spec) { - let inactivity_score = state.get_inactivity_score_mut(index)?; - inactivity_score - .safe_sub_assign(min(spec.inactivity_score_recovery_rate, *inactivity_score))?; + if !is_in_inactivity_leak { + inactivity_score_mut.safe_sub_assign(min( + spec.inactivity_score_recovery_rate, + *inactivity_score_mut, + ))?; } } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs index 1f17cf56e05..9c619e57702 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/justification_and_finalization.rs @@ -4,7 +4,6 @@ use crate::per_epoch_processing::{ weigh_justification_and_finalization, JustificationAndFinalizationState, }; use safe_arith::SafeArith; -use types::consts::altair::TIMELY_TARGET_FLAG_INDEX; use types::{BeaconState, EthSpec}; /// Update the justified and finalized checkpoints for matching target attestations. 
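Both paths in the inactivity-updates hunk above avoid needless writes: the slow path iterates with `iter_cow`/`next_cow` and only calls `to_mut` when a score actually changes (the all-zero common case is skipped entirely). The sketch below emulates that copy-on-write discipline, with `Rc::make_mut` standing in for milhouse's tree-node copy:

use std::rc::Rc;

fn main() {
    // Two "states" sharing the same score chunk, as after a cheap clone.
    let chunk: Rc<Vec<u64>> = Rc::new(vec![0, 0, 5, 0]);
    let mut next_state_chunk = Rc::clone(&chunk);

    // Fast path: if nothing in the chunk would change (all scores already 0
    // and only decrements pending), never take a mutable reference, and the
    // chunk stays shared between the two snapshots.
    if next_state_chunk.iter().any(|score| *score != 0) {
        // `make_mut` copies the chunk on first write -- the CoW step.
        for score in Rc::make_mut(&mut next_state_chunk).iter_mut() {
            *score = score.saturating_sub(1);
        }
    }

    assert_eq!(*chunk, vec![0, 0, 5, 0]); // old snapshot intact
    assert_eq!(*next_state_chunk, vec![0, 0, 4, 0]); // new snapshot updated
}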
@@ -18,15 +17,9 @@ pub fn process_justification_and_finalization( return Ok(justification_and_finalization_state); } - let previous_epoch = state.previous_epoch(); - let current_epoch = state.current_epoch(); - let previous_indices = participation_cache - .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, previous_epoch)?; - let current_indices = participation_cache - .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, current_epoch)?; let total_active_balance = participation_cache.current_epoch_total_active_balance(); - let previous_target_balance = previous_indices.total_balance()?; - let current_target_balance = current_indices.total_balance()?; + let previous_target_balance = participation_cache.previous_epoch_target_attesting_balance()?; + let current_target_balance = participation_cache.current_epoch_target_attesting_balance()?; weigh_justification_and_finalization( justification_and_finalization_state, total_active_balance, diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs index 004726923e9..31dfd095257 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_cache.rs @@ -11,19 +11,40 @@ //! Additionally, this cache is returned from the `altair::process_epoch` function and can be used //! to get useful summaries about the validator participation in an epoch. +use crate::common::altair::{get_base_reward, BaseRewardPerIncrement}; use safe_arith::{ArithError, SafeArith}; +use types::milhouse::update_map::{MaxMap, UpdateMap}; use types::{ consts::altair::{ NUM_FLAG_INDICES, TIMELY_HEAD_FLAG_INDEX, TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, }, BeaconState, BeaconStateError, ChainSpec, Epoch, EthSpec, ParticipationFlags, RelativeEpoch, + Unsigned, Validator, }; +use vec_map::VecMap; #[derive(Debug, PartialEq)] pub enum Error { InvalidFlagIndex(usize), + NoUnslashedParticipatingIndices, + MissingValidator(usize), + BeaconState(BeaconStateError), + Arith(ArithError), InvalidValidatorIndex(usize), + InconsistentTotalActiveBalance { cached: u64, computed: u64 }, +} + +impl From for Error { + fn from(e: BeaconStateError) -> Self { + Self::BeaconState(e) + } +} + +impl From for Error { + fn from(e: ArithError) -> Self { + Self::Arith(e) + } } /// A balance which will never be below the specified `minimum`. @@ -55,16 +76,6 @@ impl Balance { /// Caches the participation values for one epoch (either the previous or current). #[derive(PartialEq, Debug)] struct SingleEpochParticipationCache { - /// Maps an active validator index to their participation flags. - /// - /// To reiterate, only active and unslashed validator indices are stored in this map. - /// - /// ## Note - /// - /// It would be ideal to maintain a reference to the `BeaconState` here rather than copying the - /// `ParticipationFlags`, however that would cause us to run into mutable reference limitations - /// upstream. - unslashed_participating_indices: Vec>, /// Stores the sum of the balances for all validators in `self.unslashed_participating_indices` /// for all flags in `NUM_FLAG_INDICES`. 
/// @@ -76,12 +87,10 @@ struct SingleEpochParticipationCache { } impl SingleEpochParticipationCache { - fn new(state: &BeaconState, spec: &ChainSpec) -> Self { - let num_validators = state.validators().len(); + fn new(spec: &ChainSpec) -> Self { let zero_balance = Balance::zero(spec.effective_balance_increment); Self { - unslashed_participating_indices: vec![None; num_validators], total_flag_balances: [zero_balance; NUM_FLAG_INDICES], total_active_balance: zero_balance, } @@ -95,76 +104,41 @@ impl SingleEpochParticipationCache { .ok_or(Error::InvalidFlagIndex(flag_index)) } - /// Returns `true` if `val_index` is active, unslashed and has `flag_index` set. - /// - /// ## Errors - /// - /// May return an error if `flag_index` is out-of-bounds. - fn has_flag(&self, val_index: usize, flag_index: usize) -> Result { - let participation_flags = self - .unslashed_participating_indices - .get(val_index) - .ok_or(Error::InvalidValidatorIndex(val_index))?; - if let Some(participation_flags) = participation_flags { - participation_flags - .has_flag(flag_index) - .map_err(|_| Error::InvalidFlagIndex(flag_index)) - } else { - Ok(false) - } - } - - /// Process an **active** validator, reading from the `state` with respect to the + /// Process an **active** validator, reading from the `epoch_participation` with respect to the /// `relative_epoch`. /// /// ## Errors /// - /// - The provided `state` **must** be Altair. An error will be returned otherwise. /// - An error will be returned if the `val_index` validator is inactive at the given /// `relative_epoch`. - fn process_active_validator( + fn process_active_validator( &mut self, val_index: usize, - state: &BeaconState, + validator: &Validator, + epoch_participation: &ParticipationFlags, current_epoch: Epoch, relative_epoch: RelativeEpoch, ) -> Result<(), BeaconStateError> { - let val_balance = state.get_effective_balance(val_index)?; - let validator = state.get_validator(val_index)?; - // Sanity check to ensure the validator is active. let epoch = relative_epoch.into_epoch(current_epoch); if !validator.is_active_at(epoch) { return Err(BeaconStateError::ValidatorIsInactive { val_index }); } - let epoch_participation = match relative_epoch { - RelativeEpoch::Current => state.current_epoch_participation(), - RelativeEpoch::Previous => state.previous_epoch_participation(), - _ => Err(BeaconStateError::EpochOutOfBounds), - }? - .get(val_index) - .ok_or(BeaconStateError::ParticipationOutOfBounds(val_index))?; - // All active validators increase the total active balance. - self.total_active_balance.safe_add_assign(val_balance)?; + self.total_active_balance + .safe_add_assign(validator.effective_balance())?; // Only unslashed validators may proceed. - if validator.slashed { + if validator.slashed() { return Ok(()); } - // Add their `ParticipationFlags` to the map. - *self - .unslashed_participating_indices - .get_mut(val_index) - .ok_or(BeaconStateError::UnknownValidator(val_index))? = Some(*epoch_participation); - // Iterate through all the flags and increment the total flag balances for whichever flags // are set for `val_index`. for (flag, balance) in self.total_flag_balances.iter_mut().enumerate() { if epoch_participation.has_flag(flag)? 
{ - balance.safe_add_assign(val_balance)?; + balance.safe_add_assign(validator.effective_balance())?; } } @@ -172,6 +146,43 @@ impl SingleEpochParticipationCache { } } +#[derive(Debug, PartialEq, Clone)] +pub struct ValidatorInfo { + pub effective_balance: u64, + pub base_reward: u64, + pub is_eligible: bool, + pub is_slashed: bool, + pub is_active_current_epoch: bool, + pub is_active_previous_epoch: bool, + pub previous_epoch_participation: ParticipationFlags, +} + +impl ValidatorInfo { + #[inline] + pub fn is_unslashed_participating_index(&self, flag_index: usize) -> Result<bool, Error> { + Ok(self.is_active_previous_epoch + && !self.is_slashed + && self + .previous_epoch_participation + .has_flag(flag_index) + .map_err(|_| Error::InvalidFlagIndex(flag_index))?) + } +} + +/// Single map for validator info relevant to `process_epoch`. +#[derive(Debug, PartialEq)] +struct ValidatorInfoCache { + info: Vec<Option<ValidatorInfo>>, +} + +impl ValidatorInfoCache { + pub fn new(capacity: usize) -> Self { + Self { + info: vec![None; capacity], + } + } +} + /// Maintains a cache to be used during `altair::process_epoch`. #[derive(PartialEq, Debug)] pub struct ParticipationCache { @@ -181,8 +192,15 @@ pub struct ParticipationCache { previous_epoch: Epoch, /// Caches information about active validators pertaining to `self.previous_epoch`. previous_epoch_participation: SingleEpochParticipationCache, + /// Caches validator information relevant to `process_epoch`. + validators: ValidatorInfoCache, /// Caches the result of the `get_eligible_validator_indices` function. eligible_indices: Vec<usize>, + /// Caches the indices and effective balances of validators that need to be processed by + /// `process_slashings`. + process_slashings_indices: Vec<(usize, u64)>, + /// Updates to the inactivity scores if we are definitely not in an inactivity leak. + pub inactivity_score_updates: Option<MaxMap<VecMap<u64>>>, } impl ParticipationCache { @@ -191,17 +209,21 @@ impl ParticipationCache { /// ## Errors /// /// - The provided `state` **must** be an Altair state. An error will be returned otherwise. - pub fn new<T: EthSpec>( - state: &BeaconState<T>, - spec: &ChainSpec, - ) -> Result<Self, BeaconStateError> { + pub fn new<T: EthSpec>(state: &BeaconState<T>, spec: &ChainSpec) -> Result<Self, Error> { let current_epoch = state.current_epoch(); let previous_epoch = state.previous_epoch(); // Both the current/previous epoch participations are set to a capacity that is slightly // larger than required. The difference will be due to slashed-but-active validators. - let mut current_epoch_participation = SingleEpochParticipationCache::new(state, spec); - let mut previous_epoch_participation = SingleEpochParticipationCache::new(state, spec); + let mut current_epoch_participation = SingleEpochParticipationCache::new(spec); + let mut previous_epoch_participation = SingleEpochParticipationCache::new(spec); + + let mut validators = ValidatorInfoCache::new(state.validators().len()); + + let current_epoch_total_active_balance = state.get_total_active_balance()?; + let base_reward_per_increment = + BaseRewardPerIncrement::new(current_epoch_total_active_balance, spec)?; + // Contains the set of validators which are either: // // - Active in the previous epoch. @@ -211,6 +233,16 @@ impl ParticipationCache { // reallocations. let mut eligible_indices = Vec::with_capacity(state.validators().len()); + let mut process_slashings_indices = vec![]; + + // Fast path for inactivity scores update when we are definitely not in an inactivity leak. + // This breaks the dependence of `process_inactivity_updates` on the finalization + // re-calculation.
+ let definitely_not_in_inactivity_leak = + state.finalized_checkpoint().epoch + spec.min_epochs_to_inactivity_penalty + 1 + >= state.current_epoch(); + let mut inactivity_score_updates = MaxMap::default(); + // Iterate through all validators, updating: // // 1. Validator participation for current and previous epochs. @@ -218,30 +250,100 @@ impl ParticipationCache { // // Care is taken to ensure that the ordering of `eligible_indices` is the same as the // `get_eligible_validator_indices` function in the spec. - for (val_index, val) in state.validators().iter().enumerate() { - if val.is_active_at(current_epoch) { + let iter = state + .validators() + .iter() + .zip(state.current_epoch_participation()?) + .zip(state.previous_epoch_participation()?) + .zip(state.inactivity_scores()?) + .enumerate(); + for (val_index, (((val, curr_epoch_flags), prev_epoch_flags), inactivity_score)) in iter { + let is_active_current_epoch = val.is_active_at(current_epoch); + let is_active_previous_epoch = val.is_active_at(previous_epoch); + let is_eligible = state.is_eligible_validator(previous_epoch, val); + + if is_active_current_epoch { current_epoch_participation.process_active_validator( val_index, - state, + val, + curr_epoch_flags, current_epoch, RelativeEpoch::Current, )?; } - if val.is_active_at(previous_epoch) { + if is_active_previous_epoch { + assert!(is_eligible); + previous_epoch_participation.process_active_validator( val_index, - state, + val, + prev_epoch_flags, current_epoch, RelativeEpoch::Previous, )?; } + if val.slashed() + && current_epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? + == val.withdrawable_epoch() + { + process_slashings_indices.push((val_index, val.effective_balance())); + } + // Note: a validator might still be "eligible" whilst returning `false` to - // `Validator::is_active_at`. - if state.is_eligible_validator(previous_epoch, val_index)? { - eligible_indices.push(val_index) + // `Validator::is_active_at`. It's also possible for a validator to be active + // in the current epoch without being eligible (if it was just activated). + if is_eligible { + eligible_indices.push(val_index); } + + let mut validator_info = ValidatorInfo { + effective_balance: val.effective_balance(), + base_reward: 0, // not read + is_eligible, + is_slashed: val.slashed(), + is_active_current_epoch, + is_active_previous_epoch, + previous_epoch_participation: *prev_epoch_flags, + }; + + // Calculate inactivity updates. + if is_eligible && definitely_not_in_inactivity_leak { + let mut new_inactivity_score = + if validator_info.is_unslashed_participating_index(TIMELY_TARGET_FLAG_INDEX)? { + inactivity_score.saturating_sub(1) + } else { + inactivity_score.safe_add(spec.inactivity_score_bias)? + }; + + // Decrease the score of all validators for forgiveness when not during a leak + new_inactivity_score = + new_inactivity_score.saturating_sub(spec.inactivity_score_recovery_rate); + + if new_inactivity_score != *inactivity_score { + inactivity_score_updates.insert(val_index, new_inactivity_score); + } + } + + #[allow(clippy::indexing_slicing)] + if is_eligible || is_active_current_epoch { + let effective_balance = val.effective_balance(); + let base_reward = + get_base_reward(effective_balance, base_reward_per_increment, spec)?; + validator_info.base_reward = base_reward; + validators.info[val_index] = Some(validator_info); + } + } + + // Sanity check total active balance. 
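The single pass above zips validators with both participation lists and the inactivity scores, and precomputes score updates whenever finality is recent enough that this epoch's justification processing cannot tip the chain into a leak. A self-contained sketch of that predicate and update rule (illustrative numbers only):

fn definitely_not_in_inactivity_leak(
    finalized_epoch: u64,
    current_epoch: u64,
    min_epochs_to_inactivity_penalty: u64,
) -> bool {
    // Even if finalization does not advance this epoch, the chain cannot
    // enter a leak, so the inactivity updates can be computed early.
    finalized_epoch + min_epochs_to_inactivity_penalty + 1 >= current_epoch
}

fn precomputed_score(score: u64, participated: bool, bias: u64, recovery_rate: u64) -> u64 {
    // Mirrors the hunk: decrement for timely-target participation, penalise
    // otherwise, then always apply the recovery decrement (which is valid
    // precisely because we are not in a leak).
    let updated = if participated { score.saturating_sub(1) } else { score + bias };
    updated.saturating_sub(recovery_rate)
}

fn main() {
    assert!(definitely_not_in_inactivity_leak(10, 12, 4));
    assert_eq!(precomputed_score(3, true, 4, 16), 0);
    assert_eq!(precomputed_score(0, false, 4, 16), 0); // no update needed
}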
+ if current_epoch_participation.total_active_balance.get() + != current_epoch_total_active_balance + { + return Err(Error::InconsistentTotalActiveBalance { + cached: current_epoch_total_active_balance, + computed: current_epoch_participation.total_active_balance.get(), + }); } Ok(Self { @@ -249,7 +351,11 @@ impl ParticipationCache { current_epoch_participation, previous_epoch, previous_epoch_participation, + validators, eligible_indices, + process_slashings_indices, + inactivity_score_updates: definitely_not_in_inactivity_leak + .then_some(inactivity_score_updates), }) } @@ -258,24 +364,8 @@ impl ParticipationCache { &self.eligible_indices } - /// Equivalent to the `get_unslashed_participating_indices` function in the specification. - pub fn get_unslashed_participating_indices( - &self, - flag_index: usize, - epoch: Epoch, - ) -> Result { - let participation = if epoch == self.current_epoch { - &self.current_epoch_participation - } else if epoch == self.previous_epoch { - &self.previous_epoch_participation - } else { - return Err(BeaconStateError::EpochOutOfBounds); - }; - - Ok(UnslashedParticipatingIndices { - participation, - flag_index, - }) + pub fn process_slashings_indices(&mut self) -> Vec<(usize, u64)> { + std::mem::take(&mut self.process_slashings_indices) } /* @@ -296,51 +386,63 @@ impl ParticipationCache { } pub fn previous_epoch_target_attesting_balance(&self) -> Result { - self.previous_epoch_participation - .total_flag_balance(TIMELY_TARGET_FLAG_INDEX) + self.previous_epoch_flag_attesting_balance(TIMELY_TARGET_FLAG_INDEX) } pub fn previous_epoch_source_attesting_balance(&self) -> Result { - self.previous_epoch_participation - .total_flag_balance(TIMELY_SOURCE_FLAG_INDEX) + self.previous_epoch_flag_attesting_balance(TIMELY_SOURCE_FLAG_INDEX) } pub fn previous_epoch_head_attesting_balance(&self) -> Result { + self.previous_epoch_flag_attesting_balance(TIMELY_HEAD_FLAG_INDEX) + } + + pub fn previous_epoch_flag_attesting_balance(&self, flag_index: usize) -> Result { self.previous_epoch_participation - .total_flag_balance(TIMELY_HEAD_FLAG_INDEX) + .total_flag_balance(flag_index) } /* * Active/Unslashed */ - /// Returns `None` for an unknown `val_index`. - pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> Option { - self.previous_epoch_participation - .unslashed_participating_indices - .get(val_index) - .map(|flags| flags.is_some()) + pub fn is_active_unslashed_in_previous_epoch(&self, val_index: usize) -> bool { + self.get_validator(val_index).map_or(false, |validator| { + validator.is_active_previous_epoch && !validator.is_slashed + }) } - /// Returns `None` for an unknown `val_index`. - pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> Option { - self.current_epoch_participation - .unslashed_participating_indices + pub fn is_active_unslashed_in_current_epoch(&self, val_index: usize) -> bool { + self.get_validator(val_index).map_or(false, |validator| { + validator.is_active_current_epoch && !validator.is_slashed + }) + } + + pub fn get_validator(&self, val_index: usize) -> Result<&ValidatorInfo, Error> { + self.validators + .info .get(val_index) - .map(|flags| flags.is_some()) + .ok_or(Error::MissingValidator(val_index))? + .as_ref() + .ok_or(Error::MissingValidator(val_index)) } /* * Flags */ - /// Always returns false for a slashed validator. 
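The accessors above now funnel through `get_validator` and the `ValidatorInfo` record rather than separate per-epoch maps. A reduced sketch of the core predicate, with `ParticipationFlags` approximated by a plain bitfield:

struct ValidatorInfo {
    is_slashed: bool,
    is_active_previous_epoch: bool,
    previous_epoch_participation: u8, // stand-in for `ParticipationFlags`
}

impl ValidatorInfo {
    fn is_unslashed_participating_index(&self, flag_index: usize) -> bool {
        self.is_active_previous_epoch
            && !self.is_slashed
            && (self.previous_epoch_participation & (1u8 << flag_index)) != 0
    }
}

fn main() {
    let v = ValidatorInfo {
        is_slashed: false,
        is_active_previous_epoch: true,
        previous_epoch_participation: 0b010, // target flag (index 1) set
    };
    assert!(v.is_unslashed_participating_index(1));
    assert!(!v.is_unslashed_participating_index(0));
}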
pub fn is_previous_epoch_timely_source_attester( &self, val_index: usize, ) -> Result<bool, Error> { - self.previous_epoch_participation - .has_flag(val_index, TIMELY_SOURCE_FLAG_INDEX) + self.get_validator(val_index) + .map_or(Ok(false), |validator| { + Ok(!validator.is_slashed + && validator + .previous_epoch_participation + .has_flag(TIMELY_SOURCE_FLAG_INDEX) + .map_err(|_| Error::InvalidFlagIndex(TIMELY_SOURCE_FLAG_INDEX))?) + }) } /// Always returns false for a slashed validator. @@ -348,63 +450,35 @@ impl ParticipationCache { &self, val_index: usize, ) -> Result<bool, Error> { - self.previous_epoch_participation - .has_flag(val_index, TIMELY_TARGET_FLAG_INDEX) + self.get_validator(val_index) + .map_or(Ok(false), |validator| { + Ok(!validator.is_slashed + && validator + .previous_epoch_participation + .has_flag(TIMELY_TARGET_FLAG_INDEX) + .map_err(|_| Error::InvalidFlagIndex(TIMELY_TARGET_FLAG_INDEX))?) + }) } /// Always returns false for a slashed validator. pub fn is_previous_epoch_timely_head_attester(&self, val_index: usize) -> Result<bool, Error> { - self.previous_epoch_participation - .has_flag(val_index, TIMELY_HEAD_FLAG_INDEX) - } - - /// Always returns false for a slashed validator. - pub fn is_current_epoch_timely_source_attester(&self, val_index: usize) -> Result<bool, Error> { - self.current_epoch_participation - .has_flag(val_index, TIMELY_SOURCE_FLAG_INDEX) + self.get_validator(val_index) + .map_or(Ok(false), |validator| { + Ok(!validator.is_slashed + && validator + .previous_epoch_participation + .has_flag(TIMELY_HEAD_FLAG_INDEX) + .map_err(|_| Error::InvalidFlagIndex(TIMELY_HEAD_FLAG_INDEX))?) + }) } /// Always returns false for a slashed validator. - pub fn is_current_epoch_timely_target_attester(&self, val_index: usize) -> Result<bool, Error> { - self.current_epoch_participation - .has_flag(val_index, TIMELY_TARGET_FLAG_INDEX) - } - - /// Always returns false for a slashed validator. - pub fn is_current_epoch_timely_head_attester(&self, val_index: usize) -> Result<bool, Error> { - self.current_epoch_participation - .has_flag(val_index, TIMELY_HEAD_FLAG_INDEX) - } -} - -/// Imitates the return value of the `get_unslashed_participating_indices` in the -/// specification. -/// -/// This struct exists to help make the Lighthouse code read more like the specification. -pub struct UnslashedParticipatingIndices<'a> { - participation: &'a SingleEpochParticipationCache, - flag_index: usize, -} - -impl<'a> UnslashedParticipatingIndices<'a> { - /// Returns `Ok(true)` if the given `val_index` is both: - /// - /// - An active validator. - /// - Has `self.flag_index` set. - pub fn contains(&self, val_index: usize) -> Result<bool, Error> { - self.participation.has_flag(val_index, self.flag_index) - } - - /// Returns the sum of all balances of validators which have `self.flag_index` set. - /// - /// ## Notes - /// - /// Respects the `EFFECTIVE_BALANCE_INCREMENT` minimum. - pub fn total_balance(&self) -> Result<u64, Error> { - self.participation - .total_flag_balances - .get(self.flag_index) - .ok_or(Error::InvalidFlagIndex(self.flag_index)) - .map(Balance::get) + pub fn is_current_epoch_timely_target_attester( + &self, + _val_index: usize, + ) -> Result<bool, Error> { + // FIXME(sproul): decide whether it's worth storing the current epoch participation flags + // *just* for this call. Perhaps the validator API could source it from the state directly.
+ Ok(false) } } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs index 7162fa7f4af..d06baa72f93 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/participation_flag_updates.rs @@ -1,20 +1,15 @@ use crate::EpochProcessingError; -use core::result::Result; -use core::result::Result::Ok; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; use types::participation_flags::ParticipationFlags; -use types::VariableList; +use types::VList; pub fn process_participation_flag_updates( state: &mut BeaconState, ) -> Result<(), EpochProcessingError> { *state.previous_epoch_participation_mut()? = std::mem::take(state.current_epoch_participation_mut()?); - *state.current_epoch_participation_mut()? = VariableList::new(vec![ - ParticipationFlags::default( - ); - state.validators().len() - ])?; + *state.current_epoch_participation_mut()? = + VList::repeat(ParticipationFlags::default(), state.validators().len())?; Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs index e2aa67a6193..19987f01538 100644 --- a/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/altair/rewards_and_penalties.rs @@ -4,12 +4,9 @@ use types::consts::altair::{ PARTICIPATION_FLAG_WEIGHTS, TIMELY_HEAD_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, WEIGHT_DENOMINATOR, }; -use types::{BeaconState, ChainSpec, EthSpec}; +use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec}; -use crate::common::{ - altair::{get_base_reward, BaseRewardPerIncrement}, - decrease_balance, increase_balance, -}; +use crate::common::{decrease_balance_directly, increase_balance_directly}; use crate::per_epoch_processing::{Delta, Error}; /// Apply attester and proposer rewards. @@ -43,9 +40,20 @@ pub fn process_rewards_and_penalties( // Apply the deltas, erroring on overflow above but not on overflow below (saturating at 0 // instead). 
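Returning to the `process_participation_flag_updates` hunk above: `VList::repeat` can exploit structural sharing, so resetting a validator-count's worth of participation flags need not allocate a full `Vec` first. A toy model of that sharing, with `Rc` chunks standing in for tree nodes:

use std::rc::Rc;

fn repeat_shared(value: u8, len: usize, chunk: usize) -> Vec<Rc<Vec<u8>>> {
    // One full chunk, reused for every complete chunk of the list.
    let full = Rc::new(vec![value; chunk]);
    let mut chunks: Vec<Rc<Vec<u8>>> = (0..len / chunk).map(|_| Rc::clone(&full)).collect();
    if len % chunk != 0 {
        chunks.push(Rc::new(vec![value; len % chunk]));
    }
    chunks
}

fn main() {
    let list = repeat_shared(0, 1_000_000, 1024);
    // 977 chunk pointers, but only two distinct allocations backing them.
    assert_eq!(list.len(), 977);
    assert!(Rc::ptr_eq(&list[0], &list[1]));
}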
- for (i, delta) in deltas.into_iter().enumerate() { - increase_balance(state, i, delta.rewards)?; - decrease_balance(state, i, delta.penalties)?; + let mut balances = state.balances_mut().iter_cow(); + + while let Some((i, balance)) = balances.next_cow() { + let delta = deltas + .get(i) + .ok_or(BeaconStateError::BalancesOutOfBounds(i))?; + + if delta.rewards == 0 && delta.penalties == 0 { + continue; + } + + let balance = balance.to_mut(); + increase_balance_directly(balance, delta.rewards)?; + decrease_balance_directly(balance, delta.penalties)?; } Ok(()) @@ -62,21 +70,21 @@ pub fn get_flag_index_deltas( participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), Error> { - let previous_epoch = state.previous_epoch(); - let unslashed_participating_indices = - participation_cache.get_unslashed_participating_indices(flag_index, previous_epoch)?; let weight = get_flag_weight(flag_index)?; - let unslashed_participating_balance = unslashed_participating_indices.total_balance()?; + let unslashed_participating_balance = + participation_cache.previous_epoch_flag_attesting_balance(flag_index)?; let unslashed_participating_increments = unslashed_participating_balance.safe_div(spec.effective_balance_increment)?; let active_increments = total_active_balance.safe_div(spec.effective_balance_increment)?; - let base_reward_per_increment = BaseRewardPerIncrement::new(total_active_balance, spec)?; + let previous_epoch = state.previous_epoch(); for &index in participation_cache.eligible_validator_indices() { - let base_reward = get_base_reward(state, index, base_reward_per_increment, spec)?; + let validator = participation_cache.get_validator(index)?; + let base_reward = validator.base_reward; + let mut delta = Delta::default(); - if unslashed_participating_indices.contains(index)? { + if validator.is_unslashed_participating_index(flag_index)? { if !state.is_in_inactivity_leak(previous_epoch, spec) { let reward_numerator = base_reward .safe_mul(weight)? @@ -110,15 +118,12 @@ pub fn get_inactivity_penalty_deltas( participation_cache: &ParticipationCache, spec: &ChainSpec, ) -> Result<(), Error> { - let previous_epoch = state.previous_epoch(); - let matching_target_indices = participation_cache - .get_unslashed_participating_indices(TIMELY_TARGET_FLAG_INDEX, previous_epoch)?; for &index in participation_cache.eligible_validator_indices() { + let validator = participation_cache.get_validator(index)?; let mut delta = Delta::default(); - if !matching_target_indices.contains(index)? { - let penalty_numerator = state - .get_validator(index)? + if !validator.is_unslashed_participating_index(TIMELY_TARGET_FLAG_INDEX)? { + let penalty_numerator = validator .effective_balance .safe_mul(state.get_inactivity_score(index)?)?; let penalty_denominator = spec diff --git a/consensus/state_processing/src/per_epoch_processing/base.rs b/consensus/state_processing/src/per_epoch_processing/base.rs index cb7e7d4b300..5e5188dd252 100644 --- a/consensus/state_processing/src/per_epoch_processing/base.rs +++ b/consensus/state_processing/src/per_epoch_processing/base.rs @@ -44,6 +44,7 @@ pub fn process_epoch( // Slashings. 
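The balance-update loop above skips validators whose reward and penalty are both zero before ever taking a mutable (copy-on-write) reference. A flat-`Vec` sketch of the same control flow, with rewards erroring on overflow and penalties saturating at zero, as in the hunk:

#[derive(Default, Clone, Copy)]
struct Delta {
    rewards: u64,
    penalties: u64,
}

fn apply_deltas(balances: &mut [u64], deltas: &[Delta]) {
    for (balance, delta) in balances.iter_mut().zip(deltas) {
        if delta.rewards == 0 && delta.penalties == 0 {
            continue; // no write, hence no copy-on-write node copy
        }
        *balance = balance
            .checked_add(delta.rewards)
            .expect("balance overflow")
            .saturating_sub(delta.penalties);
    }
}

fn main() {
    let mut balances = vec![32, 31, 1];
    let deltas = vec![
        Delta { rewards: 1, penalties: 0 },
        Delta::default(),
        Delta { rewards: 0, penalties: 5 },
    ];
    apply_deltas(&mut balances, &deltas);
    assert_eq!(balances, vec![33, 31, 0]);
}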
process_slashings( state, + None, validator_statuses.total_balances.current_epoch(), spec, )?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs index e7a4d9c4dcd..411e0ee9c21 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/rewards_and_penalties.rs @@ -1,4 +1,7 @@ -use crate::common::{base::get_base_reward, decrease_balance, increase_balance}; +use crate::common::{ + base::{get_base_reward, SqrtTotalActiveBalance}, + decrease_balance, increase_balance, +}; use crate::per_epoch_processing::{ base::{TotalBalances, ValidatorStatus, ValidatorStatuses}, Delta, Error, @@ -78,7 +81,6 @@ pub fn get_attestation_deltas( validator_statuses: &ValidatorStatuses, spec: &ChainSpec, ) -> Result, Error> { - let previous_epoch = state.previous_epoch(); let finality_delay = state .previous_epoch() .safe_sub(state.finalized_checkpoint().epoch)? @@ -87,17 +89,22 @@ pub fn get_attestation_deltas( let mut deltas = vec![AttestationDelta::default(); state.validators().len()]; let total_balances = &validator_statuses.total_balances; + let sqrt_total_active_balance = SqrtTotalActiveBalance::new(total_balances.current_epoch()); for (index, validator) in validator_statuses.statuses.iter().enumerate() { // Ignore ineligible validators. All sub-functions of the spec do this except for // `get_inclusion_delay_deltas`. It's safe to do so here because any validator that is in // the unslashed indices of the matching source attestations is active, and therefore // eligible. - if !state.is_eligible_validator(previous_epoch, index)? { + if !validator.is_eligible { continue; } - let base_reward = get_base_reward(state, index, total_balances.current_epoch(), spec)?; + let base_reward = get_base_reward( + validator.current_epoch_effective_balance, + sqrt_total_active_balance, + spec, + )?; let source_delta = get_source_delta(validator, base_reward, total_balances, finality_delay, spec)?; diff --git a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs index 26d2536e5fa..38ece87cf63 100644 --- a/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs +++ b/consensus/state_processing/src/per_epoch_processing/base/validator_statuses.rs @@ -53,6 +53,8 @@ impl InclusionInfo { pub struct ValidatorStatus { /// True if the validator has been slashed, ever. pub is_slashed: bool, + /// True if the validator is eligible. + pub is_eligible: bool, /// True if the validator can withdraw in the current epoch. pub is_withdrawable_in_current_epoch: bool, /// True if the validator was active in the state's _current_ epoch. @@ -92,6 +94,7 @@ impl ValidatorStatus { // Update all the bool fields, only updating `self` if `other` is true (never setting // `self` to false). 
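`SqrtTotalActiveBalance` in the hunk above hoists the integer square root out of the per-validator loop, since every phase0 base reward in an epoch divides by the same `sqrt(total_active_balance)`. A sketch with a simple Newton-iteration `isqrt` and a simplified base-reward formula (constants illustrative):

fn isqrt(n: u64) -> u64 {
    // Integer Newton iteration converging to floor(sqrt(n)).
    if n < 2 {
        return n;
    }
    let mut x = n;
    let mut y = (x + 1) / 2;
    while y < x {
        x = y;
        y = (x + n / x) / 2;
    }
    x
}

struct SqrtTotalActiveBalance(u64);

impl SqrtTotalActiveBalance {
    fn new(total_active_balance: u64) -> Self {
        Self(isqrt(total_active_balance)) // computed once per epoch
    }
}

fn base_reward(effective_balance: u64, sqrt_tab: &SqrtTotalActiveBalance, factor: u64) -> u64 {
    effective_balance * factor / sqrt_tab.0
}

fn main() {
    let sqrt_tab = SqrtTotalActiveBalance::new(1_000_000 * 32_000_000_000);
    // Reused for every validator instead of recomputed per index.
    assert!(base_reward(32_000_000_000, &sqrt_tab, 64) > 0);
}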
set_self_if_other_is_true!(self, other, is_slashed); + set_self_if_other_is_true!(self, other, is_eligible); set_self_if_other_is_true!(self, other, is_withdrawable_in_current_epoch); set_self_if_other_is_true!(self, other, is_active_in_current_epoch); set_self_if_other_is_true!(self, other, is_active_in_previous_epoch); @@ -195,24 +198,27 @@ impl ValidatorStatuses { let mut statuses = Vec::with_capacity(state.validators().len()); let mut total_balances = TotalBalances::new(spec); - for (i, validator) in state.validators().iter().enumerate() { - let effective_balance = state.get_effective_balance(i)?; + let current_epoch = state.current_epoch(); + let previous_epoch = state.previous_epoch(); + + for validator in state.validators().iter() { + let effective_balance = validator.effective_balance(); let mut status = ValidatorStatus { - is_slashed: validator.slashed, - is_withdrawable_in_current_epoch: validator - .is_withdrawable_at(state.current_epoch()), + is_slashed: validator.slashed(), + is_eligible: state.is_eligible_validator(previous_epoch, validator), + is_withdrawable_in_current_epoch: validator.is_withdrawable_at(current_epoch), current_epoch_effective_balance: effective_balance, ..ValidatorStatus::default() }; - if validator.is_active_at(state.current_epoch()) { + if validator.is_active_at(current_epoch) { status.is_active_in_current_epoch = true; total_balances .current_epoch .safe_add_assign(effective_balance)?; } - if validator.is_active_at(state.previous_epoch()) { + if validator.is_active_at(previous_epoch) { status.is_active_in_previous_epoch = true; total_balances .previous_epoch @@ -285,10 +291,10 @@ impl ValidatorStatuses { } // Compute the total balances - for (index, v) in self.statuses.iter().enumerate() { + for v in self.statuses.iter() { // According to the spec, we only count unslashed validators towards the totals. if !v.is_slashed { - let validator_balance = state.get_effective_balance(index)?; + let validator_balance = v.current_epoch_effective_balance; if v.is_current_epoch_attester { self.total_balances diff --git a/consensus/state_processing/src/per_epoch_processing/capella.rs b/consensus/state_processing/src/per_epoch_processing/capella.rs index aaf301f29ec..87d634e605e 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella.rs @@ -25,7 +25,7 @@ pub fn process_epoch( state.build_committee_cache(RelativeEpoch::Next, spec)?; // Pre-compute participating indices and total balances. - let participation_cache = ParticipationCache::new(state, spec)?; + let mut participation_cache = ParticipationCache::new(state, spec)?; let sync_committee = state.current_sync_committee()?.clone(); // Justification and finalization. @@ -33,7 +33,7 @@ pub fn process_epoch( process_justification_and_finalization(state, &participation_cache)?; justification_and_finalization_state.apply_changes_to_state(state); - process_inactivity_updates(state, &participation_cache, spec)?; + process_inactivity_updates(state, &mut participation_cache, spec)?; // Rewards and Penalties. process_rewards_and_penalties(state, &participation_cache, spec)?; @@ -44,6 +44,7 @@ pub fn process_epoch( // Slashings. 
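The `set_self_if_other_is_true!` invocations above merge two `ValidatorStatus` records by promoting booleans from `false` to `true`, never clearing them. A guessed-at version of such a macro (the real body may differ):

macro_rules! set_self_if_other_is_true {
    ($self_:ident, $other:ident, $field:ident) => {
        if $other.$field {
            $self_.$field = true;
        }
    };
}

#[derive(Default)]
struct Status {
    is_slashed: bool,
    is_eligible: bool,
}

fn main() {
    let mut a = Status::default();
    let b = Status { is_slashed: true, is_eligible: false };
    set_self_if_other_is_true!(a, b, is_slashed);
    set_self_if_other_is_true!(a, b, is_eligible);
    assert!(a.is_slashed && !a.is_eligible); // only promoted, never cleared
}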
process_slashings( state, + Some(participation_cache.process_slashings_indices()), participation_cache.current_epoch_total_active_balance(), spec, )?; diff --git a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs index 9a87ceb6050..36d3c2af331 100644 --- a/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/capella/historical_summaries_update.rs @@ -13,6 +13,9 @@ pub fn process_historical_summaries_update( .safe_rem((T::slots_per_historical_root() as u64).safe_div(T::slots_per_epoch())?)? == 0 { + // We need to flush any pending mutations before hashing. + state.block_roots_mut().apply_updates()?; + state.state_roots_mut().apply_updates()?; let summary = HistoricalSummary::new(state); return state .historical_summaries_mut()? diff --git a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs index c166667b5a9..82f62915e3f 100644 --- a/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/effective_balance_updates.rs @@ -8,26 +8,47 @@ pub fn process_effective_balance_updates( state: &mut BeaconState, spec: &ChainSpec, ) -> Result<(), EpochProcessingError> { + // Compute new total active balance for the next epoch as a side-effect of iterating the + // effective balances. + let next_epoch = state.next_epoch()?; + let mut new_total_active_balance = 0; + let hysteresis_increment = spec .effective_balance_increment .safe_div(spec.hysteresis_quotient)?; let downward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_downward_multiplier)?; let upward_threshold = hysteresis_increment.safe_mul(spec.hysteresis_upward_multiplier)?; let (validators, balances) = state.validators_and_balances_mut(); - for (index, validator) in validators.iter_mut().enumerate() { + let mut validators_iter = validators.iter_cow(); + + while let Some((index, validator)) = validators_iter.next_cow() { let balance = balances .get(index) .copied() .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; - if balance.safe_add(downward_threshold)? < validator.effective_balance - || validator.effective_balance.safe_add(upward_threshold)? < balance + let new_effective_balance = if balance.safe_add(downward_threshold)? + < validator.effective_balance() + || validator.effective_balance().safe_add(upward_threshold)? 
< balance { - validator.effective_balance = std::cmp::min( + std::cmp::min( balance.safe_sub(balance.safe_rem(spec.effective_balance_increment)?)?, spec.max_effective_balance, - ); + ) + } else { + validator.effective_balance() + }; + + if validator.is_active_at(next_epoch) { + new_total_active_balance.safe_add_assign(new_effective_balance)?; + } + + if new_effective_balance != validator.effective_balance() { + validator.to_mut().mutable.effective_balance = new_effective_balance; } } + + state.set_total_active_balance(next_epoch, new_total_active_balance); + Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 6eb2f97766b..984f93d550b 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -101,9 +101,7 @@ impl EpochProcessingSummary { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache - .is_active_unslashed_in_current_epoch(val_index) - .unwrap_or(false), + } => participation_cache.is_active_unslashed_in_current_epoch(val_index), } } @@ -204,9 +202,7 @@ impl EpochProcessingSummary { EpochProcessingSummary::Altair { participation_cache, .. - } => participation_cache - .is_active_unslashed_in_previous_epoch(val_index) - .unwrap_or(false), + } => participation_cache.is_active_unslashed_in_previous_epoch(val_index), } } diff --git a/consensus/state_processing/src/per_epoch_processing/errors.rs b/consensus/state_processing/src/per_epoch_processing/errors.rs index 04797c56342..404bec2c052 100644 --- a/consensus/state_processing/src/per_epoch_processing/errors.rs +++ b/consensus/state_processing/src/per_epoch_processing/errors.rs @@ -1,5 +1,5 @@ use crate::per_epoch_processing::altair::participation_cache::Error as ParticipationCacheError; -use types::{BeaconStateError, InconsistentFork}; +use types::{milhouse, BeaconStateError, InconsistentFork}; #[derive(Debug, PartialEq)] pub enum EpochProcessingError { @@ -25,6 +25,7 @@ pub enum EpochProcessingError { InvalidJustificationBit(ssz_types::Error), InvalidFlagIndex(usize), ParticipationCache(ParticipationCacheError), + MilhouseError(milhouse::Error), } impl From for EpochProcessingError { @@ -57,6 +58,12 @@ impl From for EpochProcessingError { } } +impl From for EpochProcessingError { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } +} + #[derive(Debug, PartialEq)] pub enum InclusionError { /// The validator did not participate in an attestation in this period. diff --git a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs index 8466104aa53..9734e7b8c99 100644 --- a/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs +++ b/consensus/state_processing/src/per_epoch_processing/historical_roots_update.rs @@ -16,7 +16,7 @@ pub fn process_historical_roots_update( .safe_rem(T::SlotsPerHistoricalRoot::to_u64().safe_div(T::slots_per_epoch())?)? 
== 0 { - let historical_batch = state.historical_batch(); + let historical_batch = state.historical_batch()?; state .historical_roots_mut() .push(historical_batch.tree_hash_root())?; diff --git a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs index 4fd2d685867..7f3bfd9dce4 100644 --- a/consensus/state_processing/src/per_epoch_processing/registry_updates.rs +++ b/consensus/state_processing/src/per_epoch_processing/registry_updates.rs @@ -17,7 +17,7 @@ pub fn process_registry_updates( let current_epoch = state.current_epoch(); let is_ejectable = |validator: &Validator| { validator.is_active_at(current_epoch) - && validator.effective_balance <= spec.ejection_balance + && validator.effective_balance() <= spec.ejection_balance }; let indices_to_update: Vec<_> = state .validators() @@ -32,7 +32,7 @@ pub fn process_registry_updates( for index in indices_to_update { let validator = state.get_validator_mut(index)?; if validator.is_eligible_for_activation_queue(spec) { - validator.activation_eligibility_epoch = current_epoch.safe_add(1)?; + validator.mutable.activation_eligibility_epoch = current_epoch.safe_add(1)?; } if is_ejectable(validator) { initiate_validator_exit(state, index, spec)?; @@ -45,7 +45,7 @@ pub fn process_registry_updates( .iter() .enumerate() .filter(|(_, validator)| validator.is_eligible_for_activation(state, spec)) - .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch, *index)) + .sorted_by_key(|(index, validator)| (validator.activation_eligibility_epoch(), *index)) .map(|(index, _)| index) .collect_vec(); @@ -53,7 +53,7 @@ pub fn process_registry_updates( let churn_limit = state.get_churn_limit(spec)? as usize; let delayed_activation_epoch = state.compute_activation_exit_epoch(current_epoch, spec)?; for index in activation_queue.into_iter().take(churn_limit) { - state.get_validator_mut(index)?.activation_epoch = delayed_activation_epoch; + state.get_validator_mut(index)?.mutable.activation_epoch = delayed_activation_epoch; } Ok(()) diff --git a/consensus/state_processing/src/per_epoch_processing/resets.rs b/consensus/state_processing/src/per_epoch_processing/resets.rs index dc3c9f07c06..8664bd98aae 100644 --- a/consensus/state_processing/src/per_epoch_processing/resets.rs +++ b/consensus/state_processing/src/per_epoch_processing/resets.rs @@ -1,10 +1,8 @@ use super::errors::EpochProcessingError; -use core::result::Result; -use core::result::Result::Ok; use safe_arith::SafeArith; use types::beacon_state::BeaconState; use types::eth_spec::EthSpec; -use types::{Unsigned, VariableList}; +use types::{Unsigned, VList}; pub fn process_eth1_data_reset( state: &mut BeaconState, @@ -15,7 +13,7 @@ pub fn process_eth1_data_reset( .safe_rem(T::SlotsPerEth1VotingPeriod::to_u64())? 
== 0 { - *state.eth1_data_votes_mut() = VariableList::empty(); + *state.eth1_data_votes_mut() = VList::empty(); } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/slashings.rs b/consensus/state_processing/src/per_epoch_processing/slashings.rs index 6d5342cd363..1a14f8f9934 100644 --- a/consensus/state_processing/src/per_epoch_processing/slashings.rs +++ b/consensus/state_processing/src/per_epoch_processing/slashings.rs @@ -1,10 +1,12 @@ +use crate::common::decrease_balance; use crate::per_epoch_processing::Error; use safe_arith::{SafeArith, SafeArithIter}; -use types::{BeaconState, BeaconStateError, ChainSpec, EthSpec, Unsigned}; +use types::{BeaconState, ChainSpec, EthSpec, Unsigned}; /// Process slashings. pub fn process_slashings( state: &mut BeaconState, + indices: Option>, total_balance: u64, spec: &ChainSpec, ) -> Result<(), Error> { @@ -16,27 +18,30 @@ pub fn process_slashings( total_balance, ); - let (validators, balances) = state.validators_and_balances_mut(); - for (index, validator) in validators.iter().enumerate() { - if validator.slashed - && epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)? - == validator.withdrawable_epoch - { - let increment = spec.effective_balance_increment; - let penalty_numerator = validator - .effective_balance - .safe_div(increment)? - .safe_mul(adjusted_total_slashing_balance)?; - let penalty = penalty_numerator - .safe_div(total_balance)? - .safe_mul(increment)?; + let target_withdrawable_epoch = + epoch.safe_add(T::EpochsPerSlashingsVector::to_u64().safe_div(2)?)?; + let indices = indices.unwrap_or_else(|| { + state + .validators() + .iter() + .enumerate() + .filter(|(_, validator)| { + validator.slashed() && target_withdrawable_epoch == validator.withdrawable_epoch() + }) + .map(|(index, validator)| (index, validator.effective_balance())) + .collect() + }); - // Equivalent to `decrease_balance(state, index, penalty)`, but avoids borrowing `state`. - let balance = balances - .get_mut(index) - .ok_or(BeaconStateError::BalancesOutOfBounds(index))?; - *balance = balance.saturating_sub(penalty); - } + for (index, validator_effective_balance) in indices { + let increment = spec.effective_balance_increment; + let penalty_numerator = validator_effective_balance + .safe_div(increment)? + .safe_mul(adjusted_total_slashing_balance)?; + let penalty = penalty_numerator + .safe_div(total_balance)? + .safe_mul(increment)?; + + decrease_balance(state, index, penalty)?; } Ok(()) diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index ead06edbf56..d7dc589e3a2 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -59,6 +59,11 @@ pub fn per_slot_processing( if spec.capella_fork_epoch == Some(state.current_epoch()) { upgrade_to_capella(state, spec)?; } + + // Additionally build all caches so that all valid states that are advanced always have + // committee caches built, and we don't have to worry about initialising them at higher + // layers. 
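The rewritten `process_slashings` above accepts precomputed `(index, effective_balance)` pairs but keeps the penalty arithmetic unchanged; note the deliberate truncation to whole effective-balance increments, which is why the division by `increment` precedes the final multiplication. A standalone sketch of that arithmetic:

fn slashing_penalty(
    effective_balance: u64,
    adjusted_total_slashing_balance: u64,
    total_balance: u64,
    increment: u64,
) -> u64 {
    // Quantise to whole increments, matching the hunk's order of operations.
    let penalty_numerator = effective_balance / increment * adjusted_total_slashing_balance;
    penalty_numerator / total_balance * increment
}

fn main() {
    let increment = 1_000_000_000; // 1 ETH in Gwei
    // Small slashing balance: penalty rounds down to zero increments.
    assert_eq!(slashing_penalty(32 * increment, 3 * increment, 1_000 * increment, increment), 0);
    // Larger slashing balance: 32 * 300 / 1000 = 9.6, truncated to 9 increments.
    assert_eq!(
        slashing_penalty(32 * increment, 300 * increment, 1_000 * increment, increment),
        9 * increment
    );
}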
+ state.build_all_caches(spec)?; } Ok(summary) } diff --git a/consensus/state_processing/src/upgrade/altair.rs b/consensus/state_processing/src/upgrade/altair.rs index 176f1af15c6..d5b28330e39 100644 --- a/consensus/state_processing/src/upgrade/altair.rs +++ b/consensus/state_processing/src/upgrade/altair.rs @@ -3,13 +3,13 @@ use std::mem; use std::sync::Arc; use types::{ BeaconState, BeaconStateAltair, BeaconStateError as Error, ChainSpec, EthSpec, Fork, - ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VariableList, + ParticipationFlags, PendingAttestation, RelativeEpoch, SyncCommittee, VList, }; /// Translate the participation information from the epoch prior to the fork into Altair's format. pub fn translate_participation<E: EthSpec>( state: &mut BeaconState<E>, - pending_attestations: &VariableList<PendingAttestation<E>, E::MaxPendingAttestations>, + pending_attestations: &VList<PendingAttestation<E>, E::MaxPendingAttestations>, spec: &ChainSpec, ) -> Result<(), Error> { // Previous epoch committee cache is required for `get_attesting_indices`. @@ -50,8 +50,8 @@ pub fn upgrade_to_altair<E: EthSpec>( let pre = pre_state.as_base_mut()?; let default_epoch_participation = - VariableList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; - let inactivity_scores = VariableList::new(vec![0; pre.validators.len()])?; + VList::new(vec![ParticipationFlags::default(); pre.validators.len()])?; + let inactivity_scores = VList::new(vec![0; pre.validators.len()])?; let temp_sync_committee = Arc::new(SyncCommittee::temporary()?); @@ -104,7 +104,6 @@ pub fn upgrade_to_altair<E: EthSpec>( committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); // Fill in previous epoch participation from the pre state's pending attestations. diff --git a/consensus/state_processing/src/upgrade/capella.rs b/consensus/state_processing/src/upgrade/capella.rs index 3b933fac37a..07839f198e2 100644 --- a/consensus/state_processing/src/upgrade/capella.rs +++ b/consensus/state_processing/src/upgrade/capella.rs @@ -1,6 +1,7 @@ -use ssz_types::VariableList; use std::mem; -use types::{BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork}; +use types::{ + BeaconState, BeaconStateCapella, BeaconStateError as Error, ChainSpec, EthSpec, Fork, VList, +}; /// Transform a `Merge` state into a `Capella` state.
pub fn upgrade_to_capella( @@ -59,13 +60,12 @@ pub fn upgrade_to_capella( // Capella next_withdrawal_index: 0, next_withdrawal_validator_index: 0, - historical_summaries: VariableList::default(), + historical_summaries: VList::default(), // Caches total_active_balance: pre.total_active_balance, committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/state_processing/src/upgrade/merge.rs b/consensus/state_processing/src/upgrade/merge.rs index c172466248a..26ae5a334c2 100644 --- a/consensus/state_processing/src/upgrade/merge.rs +++ b/consensus/state_processing/src/upgrade/merge.rs @@ -63,7 +63,6 @@ pub fn upgrade_to_bellatrix( committee_caches: mem::take(&mut pre.committee_caches), pubkey_cache: mem::take(&mut pre.pubkey_cache), exit_cache: mem::take(&mut pre.exit_cache), - tree_hash_cache: mem::take(&mut pre.tree_hash_cache), }); *pre_state = post; diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 91ad3089f1c..daeb36c91b4 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -46,10 +46,12 @@ regex = "1.5.5" lazy_static = "1.4.0" parking_lot = "0.12.0" itertools = "0.10.0" -superstruct = "0.6.0" +superstruct = "0.7.0" metastruct = "0.1.0" serde_json = "1.0.74" smallvec = "1.8.0" +milhouse = { git = "https://github.com/sigp/milhouse", branch = "main" } +rpds = "0.11.0" serde_with = "1.13.0" maplit = "1.0.2" diff --git a/consensus/types/examples/clone_state.rs b/consensus/types/examples/clone_state.rs deleted file mode 100644 index a7e80cf4078..00000000000 --- a/consensus/types/examples/clone_state.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. - -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..100_000 { - let _ = state.clone(); - } -} diff --git a/consensus/types/examples/ssz_encode_state.rs b/consensus/types/examples/ssz_encode_state.rs deleted file mode 100644 index 5d0a2db17c7..00000000000 --- a/consensus/types/examples/ssz_encode_state.rs +++ /dev/null @@ -1,54 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. 
- -use ssz::Encode; -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let state = get_state(validator_count); - - for _ in 0..1_024 { - let state_bytes = state.as_ssz_bytes(); - let _: BeaconState = - BeaconState::from_ssz_bytes(&state_bytes, &E::default_spec()).expect("should decode"); - } -} diff --git a/consensus/types/examples/tree_hash_state.rs b/consensus/types/examples/tree_hash_state.rs deleted file mode 100644 index a421a23ad5a..00000000000 --- a/consensus/types/examples/tree_hash_state.rs +++ /dev/null @@ -1,56 +0,0 @@ -//! These examples only really exist so we can use them for flamegraph. If they get annoying to -//! maintain, feel free to delete. - -use types::{ - test_utils::generate_deterministic_keypair, BeaconState, Eth1Data, EthSpec, Hash256, - MinimalEthSpec, Validator, -}; - -type E = MinimalEthSpec; - -fn get_state(validator_count: usize) -> BeaconState { - let spec = &E::default_spec(); - let eth1_data = Eth1Data { - deposit_root: Hash256::zero(), - deposit_count: 0, - block_hash: Hash256::zero(), - }; - - let mut state = BeaconState::new(0, eth1_data, spec); - - for i in 0..validator_count { - state - .balances_mut() - .push(i as u64) - .expect("should add balance"); - state - .validators_mut() - .push(Validator { - pubkey: generate_deterministic_keypair(i).pk.into(), - withdrawal_credentials: Hash256::from_low_u64_le(i as u64), - effective_balance: i as u64, - slashed: i % 2 == 0, - activation_eligibility_epoch: i.into(), - activation_epoch: i.into(), - exit_epoch: i.into(), - withdrawable_epoch: i.into(), - }) - .expect("should add validator"); - } - - state -} - -fn main() { - let validator_count = 1_024; - let mut state = get_state(validator_count); - state.update_tree_hash_cache().expect("should update cache"); - - actual_thing::(&mut state); -} - -fn actual_thing(state: &mut BeaconState) { - for _ in 0..200_024 { - let _ = state.update_tree_hash_cache().expect("should update cache"); - } -} diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 1b40fe76d4d..8e2c9ef63b1 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -345,7 +345,7 @@ impl> BeaconBlockBase { }; let deposit = Deposit { - proof: FixedVector::from_elem(Hash256::zero()), + proof: ssz_types::FixedVector::from_elem(Hash256::zero()), data: deposit_data, }; diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 4a9da364047..7216d424534 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1,18 +1,21 @@ use 
self::committee_cache::get_active_validator_indices;
 use self::exit_cache::ExitCache;
+use crate::historical_summary::HistoricalSummary;
 use crate::test_utils::TestRandom;
+use crate::validator::ValidatorTrait;
 use crate::*;
 use compare_fields::CompareFields;
 use compare_fields_derive::CompareFields;
 use derivative::Derivative;
 use ethereum_hashing::hash;
 use int_to_bytes::{int_to_bytes4, int_to_bytes8};
-use pubkey_cache::PubkeyCache;
+use metastruct::{metastruct, NumFields};
+pub use pubkey_cache::PubkeyCache;
 use safe_arith::{ArithError, SafeArith};
 use serde_derive::{Deserialize, Serialize};
 use ssz::{ssz_encode, Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
-use ssz_types::{typenum::Unsigned, BitVector, FixedVector};
+use ssz_types::{typenum::Unsigned, BitVector};
 use std::convert::TryInto;
 use std::hash::Hash;
 use std::{fmt, mem, sync::Arc};
@@ -26,24 +29,24 @@ pub use self::committee_cache::{
     compute_committee_index_in_epoch, compute_committee_range_in_epoch, epoch_committee_count,
     CommitteeCache,
 };
-use crate::historical_summary::HistoricalSummary;
-pub use clone_config::CloneConfig;
 pub use eth_spec::*;
 pub use iter::BlockRootsIter;
-pub use tree_hash_cache::BeaconTreeHashCache;
+pub use milhouse::{interface::Interface, List as VList, List, Vector as FixedVector};
 
 #[macro_use]
 mod committee_cache;
-mod clone_config;
+pub mod compact_state;
 mod exit_cache;
 mod iter;
 mod pubkey_cache;
 mod tests;
-mod tree_hash_cache;
 
 pub const CACHED_EPOCHS: usize = 3;
 
 const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1;
 
+pub type Validators<T> = VList<Validator, <T as EthSpec>::ValidatorRegistryLimit>;
+pub type Balances<T> = VList<u64, <T as EthSpec>::ValidatorRegistryLimit>;
+
 #[derive(Debug, PartialEq, Clone)]
 pub enum Error {
     /// A state for a different hard-fork was required -- a severe logic error.
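Annotation, not part of the patch: the import swap above replaces `ssz_types::VariableList`/`FixedVector` with milhouse's persistent `List`/`Vector` (aliased as `VList`/`FixedVector`), and introduces the `Validators`/`Balances` aliases for the two hottest lists in the state. A minimal sketch of the copy-on-write behaviour this buys, using only operations that appear elsewhere in this diff (`default`, `push`, `apply_updates`, `tree_hash_root`); the function name and the `U1024` length bound are illustrative, not from the patch:

use milhouse::List;
use ssz_types::typenum::U1024;
use tree_hash::TreeHash;

fn milhouse_list_sketch() -> Result<(), milhouse::Error> {
    // A milhouse list is a persistent Merkle tree: mutations are staged in an
    // overlay and only merkleized when `apply_updates` is called.
    let mut balances = List::<u64, U1024>::default();
    balances.push(32_000_000_000)?;
    balances.apply_updates()?;
    let root = balances.tree_hash_root();

    // Clones share unmodified subtrees (and their cached hashes) with the
    // original, which is what makes deleting the old `tree_hash_cache` viable.
    let snapshot = balances.clone();
    assert_eq!(snapshot.tree_hash_root(), root);
    Ok(())
}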
@@ -127,6 +130,20 @@ pub enum Error { current_epoch: Epoch, epoch: Epoch, }, + MilhouseError(milhouse::Error), + CommitteeCacheDiffInvalidEpoch { + prev_current_epoch: Epoch, + current_epoch: Epoch, + }, + CommitteeCacheDiffUninitialized { + expected_epoch: Epoch, + }, + DiffAcrossFork { + prev_fork: ForkName, + current_fork: ForkName, + }, + TotalActiveBalanceDiffUninitialized, + MissingImmutableValidator(usize), IndexNotSupported(usize), MerkleTreeError(merkle_proof::MerkleTreeError), } @@ -189,97 +206,151 @@ impl From for Hash256 { TreeHash, TestRandom, CompareFields, - arbitrary::Arbitrary + arbitrary::Arbitrary, ), serde(bound = "T: EthSpec", deny_unknown_fields), - arbitrary(bound = "T: EthSpec"), + arbitrary(bound = "T: EthSpec, GenericValidator: ValidatorTrait"), derivative(Clone), ), + specific_variant_attributes( + Base(metastruct( + mappings( + map_beacon_state_base_fields(), + map_beacon_state_base_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + num_fields(all()), + )), + Altair(metastruct( + mappings( + map_beacon_state_altair_fields(), + map_beacon_state_altair_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + num_fields(all()), + )), + Merge(metastruct( + mappings( + map_beacon_state_bellatrix_fields(), + map_beacon_state_bellatrix_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + num_fields(all()), + )), + Capella(metastruct( + mappings( + map_beacon_state_capella_fields(), + map_beacon_state_capella_tree_list_fields(mutable, fallible, groups(tree_lists)), + ), + num_fields(all()), + )), + ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") )] -#[derive(Debug, PartialEq, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary)] +#[derive( + Debug, PartialEq, Clone, Serialize, Deserialize, Encode, TreeHash, arbitrary::Arbitrary, +)] #[serde(untagged)] #[serde(bound = "T: EthSpec")] -#[arbitrary(bound = "T: EthSpec")] +#[arbitrary(bound = "T: EthSpec, GenericValidator: ValidatorTrait")] #[tree_hash(enum_behaviour = "transparent")] #[ssz(enum_behaviour = "transparent")] -pub struct BeaconState +pub struct BeaconState where T: EthSpec, { // Versioning #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub genesis_time: u64, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub genesis_validators_root: Hash256, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub slot: Slot, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub fork: Fork, // History + #[metastruct(exclude_from(tree_lists))] pub latest_block_header: BeaconBlockHeader, - #[compare_fields(as_slice)] + #[test_random(default)] pub block_roots: FixedVector, - #[compare_fields(as_slice)] + #[test_random(default)] pub state_roots: FixedVector, // Frozen in Capella, replaced by historical_summaries - pub historical_roots: VariableList, + #[test_random(default)] + pub historical_roots: VList, // Ethereum 1.0 chain data + #[metastruct(exclude_from(tree_lists))] pub eth1_data: Eth1Data, - pub eth1_data_votes: VariableList, + #[test_random(default)] + pub eth1_data_votes: VList, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub eth1_deposit_index: u64, // Registry - #[compare_fields(as_slice)] - pub validators: VariableList, - #[compare_fields(as_slice)] + #[test_random(default)] + pub 
validators: VList, #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - pub balances: VariableList, + #[test_random(default)] + pub balances: VList, // Randomness + #[test_random(default)] pub randao_mixes: FixedVector, // Slashings + #[test_random(default)] #[serde(with = "ssz_types::serde_utils::quoted_u64_fixed_vec")] pub slashings: FixedVector, // Attestations (genesis fork only) #[superstruct(only(Base))] - pub previous_epoch_attestations: VariableList, T::MaxPendingAttestations>, + #[test_random(default)] + pub previous_epoch_attestations: VList, T::MaxPendingAttestations>, #[superstruct(only(Base))] - pub current_epoch_attestations: VariableList, T::MaxPendingAttestations>, + #[test_random(default)] + pub current_epoch_attestations: VList, T::MaxPendingAttestations>, // Participation (Altair and later) #[superstruct(only(Altair, Merge, Capella))] - pub previous_epoch_participation: VariableList, + #[test_random(default)] + pub previous_epoch_participation: VList, #[superstruct(only(Altair, Merge, Capella))] - pub current_epoch_participation: VariableList, + #[test_random(default)] + pub current_epoch_participation: VList, // Finality #[test_random(default)] + #[metastruct(exclude_from(tree_lists))] pub justification_bits: BitVector, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub previous_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub current_justified_checkpoint: Checkpoint, #[superstruct(getter(copy))] + #[metastruct(exclude_from(tree_lists))] pub finalized_checkpoint: Checkpoint, // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] #[superstruct(only(Altair, Merge, Capella))] - pub inactivity_scores: VariableList, + #[test_random(default)] + pub inactivity_scores: VList, // Light-client sync committees #[superstruct(only(Altair, Merge, Capella))] + #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, #[superstruct(only(Altair, Merge, Capella))] + #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, // Execution @@ -287,61 +358,54 @@ where only(Merge), partial_getter(rename = "latest_execution_payload_header_merge") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderMerge, #[superstruct( only(Capella), partial_getter(rename = "latest_execution_payload_header_capella") )] + #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderCapella, // Capella #[superstruct(only(Capella), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_index: u64, #[superstruct(only(Capella), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] + #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. 
#[superstruct(only(Capella))] - pub historical_summaries: VariableList, + #[test_random(default)] + pub historical_summaries: VList, // Caching (not in the spec) #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub total_active_balance: Option<(Epoch, u64)>, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub committee_caches: [CommitteeCache; CACHED_EPOCHS], + #[metastruct(exclude)] + pub committee_caches: [Arc; CACHED_EPOCHS], #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub pubkey_cache: PubkeyCache, #[serde(skip_serializing, skip_deserializing)] #[ssz(skip_serializing, skip_deserializing)] #[tree_hash(skip_hashing)] #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] + #[metastruct(exclude)] pub exit_cache: ExitCache, - #[serde(skip_serializing, skip_deserializing)] - #[ssz(skip_serializing, skip_deserializing)] - #[tree_hash(skip_hashing)] - #[test_random(default)] - #[derivative(Clone(clone_with = "clone_default"))] - pub tree_hash_cache: BeaconTreeHashCache, -} - -impl Clone for BeaconState { - fn clone(&self) -> Self { - self.clone_with(CloneConfig::all()) - } } impl BeaconState { @@ -349,6 +413,7 @@ impl BeaconState { /// /// Not a complete genesis state, see `initialize_beacon_state_from_eth1`. pub fn new(genesis_time: u64, eth1_data: Eth1Data, spec: &ChainSpec) -> Self { + let default_committee_cache = Arc::new(CommitteeCache::default()); BeaconState::Base(BeaconStateBase { // Versioning genesis_time, @@ -362,28 +427,28 @@ impl BeaconState { // History latest_block_header: BeaconBlock::::empty(spec).temporary_block_header(), - block_roots: FixedVector::from_elem(Hash256::zero()), - state_roots: FixedVector::from_elem(Hash256::zero()), - historical_roots: VariableList::empty(), + block_roots: FixedVector::default(), + state_roots: FixedVector::default(), + historical_roots: VList::default(), // Eth1 eth1_data, - eth1_data_votes: VariableList::empty(), + eth1_data_votes: VList::default(), eth1_deposit_index: 0, // Validator registry - validators: VariableList::empty(), // Set later. - balances: VariableList::empty(), // Set later. + validators: VList::default(), // Set later. + balances: VList::default(), // Set later. 
// Randomness - randao_mixes: FixedVector::from_elem(Hash256::zero()), + randao_mixes: FixedVector::default(), // Slashings - slashings: FixedVector::from_elem(0), + slashings: FixedVector::default(), // Attestations - previous_epoch_attestations: VariableList::empty(), - current_epoch_attestations: VariableList::empty(), + previous_epoch_attestations: VList::default(), + current_epoch_attestations: VList::default(), // Finality justification_bits: BitVector::new(), @@ -394,13 +459,12 @@ impl BeaconState { // Caching (not in spec) total_active_balance: None, committee_caches: [ - CommitteeCache::default(), - CommitteeCache::default(), - CommitteeCache::default(), + default_committee_cache.clone(), + default_committee_cache.clone(), + default_committee_cache, ], pubkey_cache: PubkeyCache::default(), exit_cache: ExitCache::default(), - tree_hash_cache: <_>::default(), }) } @@ -410,12 +474,7 @@ impl BeaconState { /// dictated by `self.slot()`. pub fn fork_name(&self, spec: &ChainSpec) -> Result { let fork_at_slot = spec.fork_name_at_epoch(self.current_epoch()); - let object_fork = match self { - BeaconState::Base { .. } => ForkName::Base, - BeaconState::Altair { .. } => ForkName::Altair, - BeaconState::Merge { .. } => ForkName::Merge, - BeaconState::Capella { .. } => ForkName::Capella, - }; + let object_fork = self.fork_name_unchecked(); if fork_at_slot == object_fork { Ok(object_fork) @@ -427,28 +486,17 @@ impl BeaconState { } } - /// Specialised deserialisation method that uses the `ChainSpec` as context. - #[allow(clippy::integer_arithmetic)] - pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { - // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). - let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); - let slot_end = slot_start + ::ssz_fixed_len(); - - let slot_bytes = bytes - .get(slot_start..slot_end) - .ok_or(DecodeError::InvalidByteLength { - len: bytes.len(), - expected: slot_end, - })?; - - let slot = Slot::from_ssz_bytes(slot_bytes)?; - let fork_at_slot = spec.fork_name_at_slot::(slot); - - Ok(map_fork_name!( - fork_at_slot, - Self, - <_>::from_ssz_bytes(bytes)? - )) + /// Returns the name of the fork pertaining to `self`. + /// + /// This is not checked for consistency with respect to the actual fork epochs, see `fork_name` + /// for a safer function. + pub fn fork_name_unchecked(&self) -> ForkName { + match self { + BeaconState::Base { .. } => ForkName::Base, + BeaconState::Altair { .. } => ForkName::Altair, + BeaconState::Merge { .. } => ForkName::Merge, + BeaconState::Capella { .. } => ForkName::Capella, + } } /// Returns the `tree_hash_root` of the state. @@ -458,11 +506,15 @@ impl BeaconState { Hash256::from_slice(&self.tree_hash_root()[..]) } - pub fn historical_batch(&self) -> HistoricalBatch { - HistoricalBatch { + pub fn historical_batch(&mut self) -> Result, Error> { + // Updating before cloning makes the clone cheap and saves repeated hashing. + self.block_roots_mut().apply_updates()?; + self.state_roots_mut().apply_updates()?; + + Ok(HistoricalBatch { block_roots: self.block_roots().clone(), state_roots: self.state_roots().clone(), - } + }) } /// This method ensures the state's pubkey cache is fully up-to-date before checking if the validator @@ -473,6 +525,21 @@ impl BeaconState { Ok(self.pubkey_cache().get(pubkey)) } + /// Immutable variant of `get_validator_index` which errors if the cache is not up to date. 
+ pub fn get_validator_index_read_only( + &self, + pubkey: &PublicKeyBytes, + ) -> Result, Error> { + let pubkey_cache = self.pubkey_cache(); + if pubkey_cache.len() != self.validators().len() { + return Err(Error::PubkeyCacheIncomplete { + cache_len: pubkey_cache.len(), + registry_len: self.validators().len(), + }); + } + Ok(pubkey_cache.get(pubkey)) + } + /// The epoch corresponding to `self.slot()`. pub fn current_epoch(&self) -> Epoch { self.slot().epoch(T::slots_per_epoch()) @@ -483,10 +550,8 @@ impl BeaconState { /// If the current epoch is the genesis epoch, the genesis_epoch is returned. pub fn previous_epoch(&self) -> Epoch { let current_epoch = self.current_epoch(); - if current_epoch > T::genesis_epoch() { - current_epoch - .safe_sub(1) - .expect("current epoch greater than genesis implies greater than 0") + if let Ok(prev_epoch) = current_epoch.safe_sub(1) { + prev_epoch } else { current_epoch } @@ -832,14 +897,16 @@ impl BeaconState { &mut self, sync_committee: &SyncCommittee, ) -> Result, Error> { - let mut indices = Vec::with_capacity(sync_committee.pubkeys.len()); - for pubkey in sync_committee.pubkeys.iter() { - indices.push( - self.get_validator_index(pubkey)? - .ok_or(Error::PubkeyCacheInconsistent)?, - ) - } - Ok(indices) + self.update_pubkey_cache()?; + sync_committee + .pubkeys + .iter() + .map(|pubkey| { + self.pubkey_cache() + .get(pubkey) + .ok_or(Error::PubkeyCacheInconsistent) + }) + .collect() } /// Compute the sync committee indices for the next sync committee. @@ -865,7 +932,7 @@ impl BeaconState { .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; let random_byte = Self::shuffling_random_byte(i, seed.as_bytes())?; - let effective_balance = self.get_validator(candidate_index)?.effective_balance; + let effective_balance = self.get_validator(candidate_index)?.effective_balance(); if effective_balance.safe_mul(MAX_RANDOM_BYTE)? >= spec .max_effective_balance @@ -887,7 +954,7 @@ impl BeaconState { .map(|&index| { self.validators() .get(index) - .map(|v| v.pubkey) + .map(|v| *v.pubkey()) .ok_or(Error::UnknownValidator(index)) }) .collect::, _>>()?; @@ -898,7 +965,7 @@ impl BeaconState { let aggregate_pubkey = AggregatePublicKey::aggregate(&decompressed_pubkeys)?; Ok(SyncCommittee { - pubkeys: FixedVector::new(pubkeys)?, + pubkeys: ssz_types::FixedVector::new(pubkeys)?, aggregate_pubkey: aggregate_pubkey.to_public_key().compress(), }) } @@ -918,7 +985,7 @@ impl BeaconState { validator_indices .iter() .map(|&validator_index| { - let pubkey = self.get_validator(validator_index as usize)?.pubkey; + let pubkey = *self.get_validator(validator_index as usize)?.pubkey(); Ok(SyncDuty::from_sync_committee( validator_index, @@ -992,8 +1059,9 @@ impl BeaconState { } /// Fill `randao_mixes` with - pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) { - *self.randao_mixes_mut() = FixedVector::from_elem(index_root); + pub fn fill_randao_mixes_with(&mut self, index_root: Hash256) -> Result<(), Error> { + *self.randao_mixes_mut() = FixedVector::from_elem(index_root)?; + Ok(()) } /// Safely obtains the index for `randao_mixes` @@ -1126,7 +1194,7 @@ impl BeaconState { } /// Get a reference to the entire `slashings` vector. - pub fn get_all_slashings(&self) -> &[u64] { + pub fn get_all_slashings(&self) -> &FixedVector { self.slashings() } @@ -1150,7 +1218,7 @@ impl BeaconState { } /// Convenience accessor for validators and balances simultaneously. 
- pub fn validators_and_balances_mut(&mut self) -> (&mut [Validator], &mut [u64]) { + pub fn validators_and_balances_mut(&mut self) -> (&mut Validators, &mut Balances) { match self { BeaconState::Base(state) => (&mut state.validators, &mut state.balances), BeaconState::Altair(state) => (&mut state.validators, &mut state.balances), @@ -1159,6 +1227,13 @@ impl BeaconState { } } + /// Get a mutable reference to the balance of a single validator. + pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { + self.balances_mut() + .get_mut(validator_index) + .ok_or(Error::BalancesOutOfBounds(validator_index)) + } + /// Generate a seed for the given `epoch`. pub fn get_seed( &self, @@ -1208,10 +1283,20 @@ impl BeaconState { .ok_or(Error::UnknownValidator(validator_index)) } + /// Safe copy-on-write accessor for the `validators` list. + pub fn get_validator_cow( + &mut self, + validator_index: usize, + ) -> Result, Error> { + self.validators_mut() + .get_cow(validator_index) + .ok_or(Error::UnknownValidator(validator_index)) + } + /// Return the effective balance for a validator with the given `validator_index`. pub fn get_effective_balance(&self, validator_index: usize) -> Result { self.get_validator(validator_index) - .map(|v| v.effective_balance) + .map(|v| v.effective_balance()) } /// Get the inactivity score for a single validator. @@ -1233,13 +1318,6 @@ impl BeaconState { .ok_or(Error::InactivityScoresOutOfBounds(validator_index)) } - /// Get a mutable reference to the balance of a single validator. - pub fn get_balance_mut(&mut self, validator_index: usize) -> Result<&mut u64, Error> { - self.balances_mut() - .get_mut(validator_index) - .ok_or(Error::BalancesOutOfBounds(validator_index)) - } - /// Return the epoch at which an activation or exit triggered in ``epoch`` takes effect. /// /// Spec v0.12.1 @@ -1300,6 +1378,28 @@ impl BeaconState { )) } + pub fn compute_total_active_balance( + &self, + epoch: Epoch, + spec: &ChainSpec, + ) -> Result { + if epoch != self.current_epoch() && epoch != self.next_epoch()? { + return Err(Error::EpochOutOfBounds); + } + + let mut total_active_balance = 0; + + for validator in self.validators() { + if validator.is_active_at(epoch) { + total_active_balance.safe_add_assign(validator.effective_balance())?; + } + } + Ok(std::cmp::max( + total_active_balance, + spec.effective_balance_increment, + )) + } + /// Implementation of `get_total_active_balance`, matching the spec. /// /// Requires the total active balance cache to be initialised, which is initialised whenever @@ -1322,17 +1422,14 @@ impl BeaconState { } } + pub fn set_total_active_balance(&mut self, epoch: Epoch, balance: u64) { + *self.total_active_balance_mut() = Some((epoch, balance)); + } + /// Build the total active balance cache. - /// - /// This function requires the current committee cache to be already built. It is called - /// automatically when `build_committee_cache` is called for the current epoch. fn build_total_active_balance_cache(&mut self, spec: &ChainSpec) -> Result<(), Error> { - // Order is irrelevant, so use the cached indices. 
let current_epoch = self.current_epoch(); - let total_active_balance = self.get_total_balance( - self.get_cached_active_validator_indices(RelativeEpoch::Current)?, - spec, - )?; + let total_active_balance = self.compute_total_active_balance(current_epoch, spec)?; *self.total_active_balance_mut() = Some((current_epoch, total_active_balance)); Ok(()) } @@ -1346,7 +1443,7 @@ impl BeaconState { pub fn get_epoch_participation_mut( &mut self, epoch: Epoch, - ) -> Result<&mut VariableList, Error> { + ) -> Result<&mut VList, Error> { if epoch == self.current_epoch() { match self { BeaconState::Base(_) => Err(BeaconStateError::IncorrectStateVariant), @@ -1411,7 +1508,6 @@ impl BeaconState { self.drop_committee_cache(RelativeEpoch::Current)?; self.drop_committee_cache(RelativeEpoch::Next)?; self.drop_pubkey_cache(); - self.drop_tree_hash_cache(); *self.exit_cache_mut() = ExitCache::default(); Ok(()) } @@ -1467,7 +1563,7 @@ impl BeaconState { &self, epoch: Epoch, spec: &ChainSpec, - ) -> Result { + ) -> Result, Error> { CommitteeCache::initialized(self, epoch, spec) } @@ -1477,40 +1573,15 @@ impl BeaconState { /// /// Note: this function will not build any new committee caches, but will build the total /// balance cache if the (new) current epoch cache is initialized. - pub fn advance_caches(&mut self, spec: &ChainSpec) -> Result<(), Error> { + pub fn advance_caches(&mut self, _spec: &ChainSpec) -> Result<(), Error> { self.committee_caches_mut().rotate_left(1); - // Re-compute total active balance for current epoch. - // - // This can only be computed once the state's effective balances have been updated - // for the current epoch. I.e. it is not possible to know this value with the same - // lookahead as the committee shuffling. - let curr = Self::committee_cache_index(RelativeEpoch::Current); - let curr_cache = mem::take(self.committee_cache_at_index_mut(curr)?); - - // If current epoch cache is initialized, compute the total active balance from its - // indices. We check that the cache is initialized at the _next_ epoch because the slot has - // not yet been advanced. - let new_current_epoch = self.next_epoch()?; - if curr_cache.is_initialized_at(new_current_epoch) { - *self.total_active_balance_mut() = Some(( - new_current_epoch, - self.get_total_balance(curr_cache.active_validator_indices(), spec)?, - )); - } - // If the cache is not initialized, then the previous cached value for the total balance is - // wrong, so delete it. - else { - self.drop_total_active_balance_cache(); - } - *self.committee_cache_at_index_mut(curr)? = curr_cache; - let next = Self::committee_cache_index(RelativeEpoch::Next); - *self.committee_cache_at_index_mut(next)? = CommitteeCache::default(); + *self.committee_cache_at_index_mut(next)? = Arc::new(CommitteeCache::default()); Ok(()) } - fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize { + pub(crate) fn committee_cache_index(relative_epoch: RelativeEpoch) -> usize { match relative_epoch { RelativeEpoch::Previous => 0, RelativeEpoch::Current => 1, @@ -1521,21 +1592,24 @@ impl BeaconState { /// Get the committee cache for some `slot`. /// /// Return an error if the cache for the slot's epoch is not initialized. 
- fn committee_cache_at_slot(&self, slot: Slot) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_slot(&self, slot: Slot) -> Result<&Arc, Error> { let epoch = slot.epoch(T::slots_per_epoch()); let relative_epoch = RelativeEpoch::from_epoch(self.current_epoch(), epoch)?; self.committee_cache(relative_epoch) } /// Get the committee cache at a given index. - fn committee_cache_at_index(&self, index: usize) -> Result<&CommitteeCache, Error> { + fn committee_cache_at_index(&self, index: usize) -> Result<&Arc, Error> { self.committee_caches() .get(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) } /// Get a mutable reference to the committee cache at a given index. - fn committee_cache_at_index_mut(&mut self, index: usize) -> Result<&mut CommitteeCache, Error> { + fn committee_cache_at_index_mut( + &mut self, + index: usize, + ) -> Result<&mut Arc, Error> { self.committee_caches_mut() .get_mut(index) .ok_or(Error::CommitteeCachesOutOfBounds(index)) @@ -1543,7 +1617,10 @@ impl BeaconState { /// Returns the cache for some `RelativeEpoch`. Returns an error if the cache has not been /// initialized. - pub fn committee_cache(&self, relative_epoch: RelativeEpoch) -> Result<&CommitteeCache, Error> { + pub fn committee_cache( + &self, + relative_epoch: RelativeEpoch, + ) -> Result<&Arc, Error> { let i = Self::committee_cache_index(relative_epoch); let cache = self.committee_cache_at_index(i)?; @@ -1554,30 +1631,10 @@ impl BeaconState { } } - /// Returns the cache for some `RelativeEpoch`, replacing the existing cache with an - /// un-initialized cache. Returns an error if the existing cache has not been initialized. - pub fn take_committee_cache( - &mut self, - relative_epoch: RelativeEpoch, - ) -> Result { - let i = Self::committee_cache_index(relative_epoch); - let current_epoch = self.current_epoch(); - let cache = self - .committee_caches_mut() - .get_mut(i) - .ok_or(Error::CommitteeCachesOutOfBounds(i))?; - - if cache.is_initialized_at(relative_epoch.into_epoch(current_epoch)) { - Ok(mem::take(cache)) - } else { - Err(Error::CommitteeCacheUninitialized(Some(relative_epoch))) - } - } - /// Drops the cache, leaving it in an uninitialized state. pub fn drop_committee_cache(&mut self, relative_epoch: RelativeEpoch) -> Result<(), Error> { *self.committee_cache_at_index_mut(Self::committee_cache_index(relative_epoch))? = - CommitteeCache::default(); + Arc::new(CommitteeCache::default()); Ok(()) } @@ -1587,13 +1644,11 @@ impl BeaconState { /// never re-add a pubkey. pub fn update_pubkey_cache(&mut self) -> Result<(), Error> { let mut pubkey_cache = mem::take(self.pubkey_cache_mut()); - for (i, validator) in self - .validators() - .iter() - .enumerate() - .skip(pubkey_cache.len()) - { - let success = pubkey_cache.insert(validator.pubkey, i); + let start_index = pubkey_cache.len(); + + for (i, validator) in self.validators().iter_from(start_index)?.enumerate() { + let index = start_index.safe_add(i)?; + let success = pubkey_cache.insert(*validator.pubkey(), index); if !success { return Err(Error::PubkeyCacheInconsistent); } @@ -1608,95 +1663,53 @@ impl BeaconState { *self.pubkey_cache_mut() = PubkeyCache::default() } - /// Initialize but don't fill the tree hash cache, if it isn't already initialized. 
- pub fn initialize_tree_hash_cache(&mut self) { - if !self.tree_hash_cache().is_initialized() { - *self.tree_hash_cache_mut() = BeaconTreeHashCache::new(self) - } + pub fn has_pending_mutations(&self) -> bool { + self.block_roots().has_pending_updates() + || self.state_roots().has_pending_updates() + || self.historical_roots().has_pending_updates() + || self.eth1_data_votes().has_pending_updates() + || self.validators().has_pending_updates() + || self.balances().has_pending_updates() + || self.randao_mixes().has_pending_updates() + || self.slashings().has_pending_updates() + || self + .previous_epoch_attestations() + .map_or(false, VList::has_pending_updates) + || self + .current_epoch_attestations() + .map_or(false, VList::has_pending_updates) + || self + .previous_epoch_participation() + .map_or(false, VList::has_pending_updates) + || self + .current_epoch_participation() + .map_or(false, VList::has_pending_updates) + || self + .inactivity_scores() + .map_or(false, VList::has_pending_updates) } /// Compute the tree hash root of the state using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. pub fn update_tree_hash_cache(&mut self) -> Result { - self.initialize_tree_hash_cache(); - - let cache = self.tree_hash_cache_mut().take(); - - if let Some(mut cache) = cache { - // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as - // None. There's no need to keep a cache that fails. - let root = cache.recalculate_tree_hash_root(self)?; - self.tree_hash_cache_mut().restore(cache); - Ok(root) - } else { - Err(Error::TreeHashCacheNotInitialized) - } + self.apply_pending_mutations()?; + Ok(self.tree_hash_root()) } /// Compute the tree hash root of the validators using the tree hash cache. /// /// Initialize the tree hash cache if it isn't already initialized. pub fn update_validators_tree_hash_cache(&mut self) -> Result { - self.initialize_tree_hash_cache(); - - let cache = self.tree_hash_cache_mut().take(); - - if let Some(mut cache) = cache { - // Note: we return early if the tree hash fails, leaving `self.tree_hash_cache` as - // None. There's no need to keep a cache that fails. - let root = cache.recalculate_validators_tree_hash_root(self.validators())?; - self.tree_hash_cache_mut().restore(cache); - Ok(root) - } else { - Err(Error::TreeHashCacheNotInitialized) - } - } - - /// Completely drops the tree hash cache, replacing it with a new, empty cache. - pub fn drop_tree_hash_cache(&mut self) { - self.tree_hash_cache_mut().uninitialize(); - } - - /// Clone the state whilst preserving only the selected caches. 
- pub fn clone_with(&self, config: CloneConfig) -> Self { - let mut res = match self { - BeaconState::Base(inner) => BeaconState::Base(inner.clone()), - BeaconState::Altair(inner) => BeaconState::Altair(inner.clone()), - BeaconState::Merge(inner) => BeaconState::Merge(inner.clone()), - BeaconState::Capella(inner) => BeaconState::Capella(inner.clone()), - }; - if config.committee_caches { - *res.committee_caches_mut() = self.committee_caches().clone(); - *res.total_active_balance_mut() = *self.total_active_balance(); - } - if config.pubkey_cache { - *res.pubkey_cache_mut() = self.pubkey_cache().clone(); - } - if config.exit_cache { - *res.exit_cache_mut() = self.exit_cache().clone(); - } - if config.tree_hash_cache { - *res.tree_hash_cache_mut() = self.tree_hash_cache().clone(); - } - res - } - - pub fn clone_with_only_committee_caches(&self) -> Self { - self.clone_with(CloneConfig::committee_caches_only()) + self.validators_mut().apply_updates()?; + Ok(self.validators().tree_hash_root()) } /// Passing `previous_epoch` to this function rather than computing it internally provides /// a tangible speed improvement in state processing. - pub fn is_eligible_validator( - &self, - previous_epoch: Epoch, - val_index: usize, - ) -> Result { - self.get_validator(val_index).map(|val| { - val.is_active_at(previous_epoch) - || (val.slashed && previous_epoch + Epoch::new(1) < val.withdrawable_epoch) - }) + pub fn is_eligible_validator(&self, previous_epoch: Epoch, val: &Validator) -> bool { + val.is_active_at(previous_epoch) + || (val.slashed() && previous_epoch + Epoch::new(1) < val.withdrawable_epoch()) } /// Passing `previous_epoch` to this function rather than computing it internally provides @@ -1726,11 +1739,61 @@ impl BeaconState { }; Ok(sync_committee) } +} - pub fn compute_merkle_proof( - &mut self, - generalized_index: usize, - ) -> Result, Error> { +impl BeaconState { + /// The number of fields of the `BeaconState` rounded up to the nearest power of two. + /// + /// This is relevant to tree-hashing of the `BeaconState`. + /// + /// We assume this value is stable across forks. This assumption is checked in the + /// `check_num_fields_pow2` test. + pub const NUM_FIELDS_POW2: usize = BeaconStateMerge::::NUM_FIELDS.next_power_of_two(); + + /// Specialised deserialisation method that uses the `ChainSpec` as context. + #[allow(clippy::integer_arithmetic)] + pub fn from_ssz_bytes(bytes: &[u8], spec: &ChainSpec) -> Result { + // Slot is after genesis_time (u64) and genesis_validators_root (Hash256). + let slot_start = ::ssz_fixed_len() + ::ssz_fixed_len(); + let slot_end = slot_start + ::ssz_fixed_len(); + + let slot_bytes = bytes + .get(slot_start..slot_end) + .ok_or(DecodeError::InvalidByteLength { + len: bytes.len(), + expected: slot_end, + })?; + + let slot = Slot::from_ssz_bytes(slot_bytes)?; + let fork_at_slot = spec.fork_name_at_slot::(slot); + + Ok(map_fork_name!( + fork_at_slot, + Self, + <_>::from_ssz_bytes(bytes)? 
+ )) + } + + #[allow(clippy::integer_arithmetic)] + pub fn apply_pending_mutations(&mut self) -> Result<(), Error> { + match self { + Self::Base(inner) => { + map_beacon_state_base_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Altair(inner) => { + map_beacon_state_altair_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Merge(inner) => { + map_beacon_state_bellatrix_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + Self::Capella(inner) => { + map_beacon_state_capella_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } + } + Ok(()) + } + + pub fn compute_merkle_proof(&self, generalized_index: usize) -> Result, Error> { // 1. Convert generalized index to field index. let field_index = match generalized_index { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX @@ -1740,7 +1803,7 @@ impl BeaconState { // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate generalized_index - .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .checked_sub(Self::NUM_FIELDS_POW2) .ok_or(Error::IndexNotSupported(generalized_index))? } light_client_update::FINALIZED_ROOT_INDEX => { @@ -1750,19 +1813,37 @@ impl BeaconState { // Subtract off the internal nodes. Result should be 105/2 - 32 = 20 which matches // position of `finalized_checkpoint` in `BeaconState`. finalized_checkpoint_generalized_index - .checked_sub(tree_hash_cache::NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES) + .checked_sub(Self::NUM_FIELDS_POW2) .ok_or(Error::IndexNotSupported(generalized_index))? } _ => return Err(Error::IndexNotSupported(generalized_index)), }; // 2. Get all `BeaconState` leaves. - let mut cache = self - .tree_hash_cache_mut() - .take() - .ok_or(Error::TreeHashCacheNotInitialized)?; - let leaves = cache.recalculate_tree_hash_leaves(self)?; - self.tree_hash_cache_mut().restore(cache); + let mut leaves = vec![]; + #[allow(clippy::integer_arithmetic)] + match self { + BeaconState::Base(state) => { + map_beacon_state_base_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Altair(state) => { + map_beacon_state_altair_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Merge(state) => { + map_beacon_state_bellatrix_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + BeaconState::Capella(state) => { + map_beacon_state_capella_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } + }; // 3. Make deposit tree. // Use the depth of the `BeaconState` fields (i.e. `log2(32) = 5`). @@ -1821,9 +1902,10 @@ impl From for Error { } } -/// Helper function for "cloning" a field by using its default value. -fn clone_default(_value: &T) -> T { - T::default() +impl From for Error { + fn from(e: milhouse::Error) -> Self { + Self::MilhouseError(e) + } } impl CompareFields for BeaconState { diff --git a/consensus/types/src/beacon_state/clone_config.rs b/consensus/types/src/beacon_state/clone_config.rs deleted file mode 100644 index e5f050aee69..00000000000 --- a/consensus/types/src/beacon_state/clone_config.rs +++ /dev/null @@ -1,43 +0,0 @@ -/// Configuration struct for controlling which caches of a `BeaconState` should be cloned. 
-#[derive(Debug, Default, PartialEq, Eq, Clone, Copy)]
-pub struct CloneConfig {
-    pub committee_caches: bool,
-    pub pubkey_cache: bool,
-    pub exit_cache: bool,
-    pub tree_hash_cache: bool,
-}
-
-impl CloneConfig {
-    pub fn all() -> Self {
-        Self {
-            committee_caches: true,
-            pubkey_cache: true,
-            exit_cache: true,
-            tree_hash_cache: true,
-        }
-    }
-
-    pub fn none() -> Self {
-        Self::default()
-    }
-
-    pub fn committee_caches_only() -> Self {
-        Self {
-            committee_caches: true,
-            ..Self::none()
-        }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn sanity() {
-        assert!(CloneConfig::all().pubkey_cache);
-        assert!(!CloneConfig::none().tree_hash_cache);
-        assert!(CloneConfig::committee_caches_only().committee_caches);
-        assert!(!CloneConfig::committee_caches_only().exit_cache);
-    }
-}
diff --git a/consensus/types/src/beacon_state/committee_cache.rs b/consensus/types/src/beacon_state/committee_cache.rs
index 8afef1183be..b6781f46c2d 100644
--- a/consensus/types/src/beacon_state/committee_cache.rs
+++ b/consensus/types/src/beacon_state/committee_cache.rs
@@ -3,11 +3,13 @@
 use super::BeaconState;
 use crate::*;
 use core::num::NonZeroUsize;
+use derivative::Derivative;
 use safe_arith::SafeArith;
 use serde_derive::{Deserialize, Serialize};
 use ssz::{four_byte_option_impl, Decode, DecodeError, Encode};
 use ssz_derive::{Decode, Encode};
 use std::ops::Range;
+use std::sync::Arc;
 use swap_or_not_shuffle::shuffle_list;
 
 mod tests;
@@ -19,16 +21,44 @@ four_byte_option_impl!(four_byte_option_non_zero_usize, NonZeroUsize);
 
 /// Computes and stores the shuffling for an epoch. Provides various getters to allow callers to
 /// read the committees for the given epoch.
-#[derive(Debug, Default, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)]
+#[derive(Derivative, Debug, Default, Clone, Serialize, Deserialize, Encode, Decode)]
+#[derivative(PartialEq)]
 pub struct CommitteeCache {
     #[ssz(with = "four_byte_option_epoch")]
     initialized_epoch: Option<Epoch>,
     shuffling: Vec<usize>,
+    #[derivative(PartialEq(compare_with = "compare_shuffling_positions"))]
     shuffling_positions: Vec<NonZeroUsizeOption>,
     committees_per_slot: u64,
     slots_per_epoch: u64,
 }
+
+/// Equivalence function for `shuffling_positions` that ignores trailing `None` entries.
+///
+/// It can happen that states from different epochs computing the same cache have different
+/// numbers of validators in `state.validators()` due to recent deposits. These new validators
+/// cannot be active, however, and will always be omitted from the shuffling. This function checks
+/// that two lists of shuffling positions are equivalent by ensuring that they are identical on all
+/// common entries, and that new entries at the end are all `None`.
+///
+/// In practice this is only used in tests.
+#[allow(clippy::indexing_slicing)]
+fn compare_shuffling_positions(xs: &Vec<NonZeroUsizeOption>, ys: &Vec<NonZeroUsizeOption>) -> bool {
+    use std::cmp::Ordering;
+
+    let (shorter, longer) = match xs.len().cmp(&ys.len()) {
+        Ordering::Equal => {
+            return xs == ys;
+        }
+        Ordering::Less => (xs, ys),
+        Ordering::Greater => (ys, xs),
+    };
+    shorter == &longer[..shorter.len()]
+        && longer[shorter.len()..]
+            .iter()
+            .all(|new| *new == NonZeroUsizeOption(None))
+}
+
 impl CommitteeCache {
     /// Return a new, fully initialized cache.
     ///
@@ -37,7 +67,7 @@ impl CommitteeCache {
         state: &BeaconState<T>,
         epoch: Epoch,
         spec: &ChainSpec,
-    ) -> Result<CommitteeCache, Error> {
+    ) -> Result<Arc<CommitteeCache>, Error> {
         // Check that the cache is being built for an in-range epoch.
// // We allow caches to be constructed for historic epochs, per: @@ -87,13 +117,13 @@ impl CommitteeCache { .ok_or(Error::ShuffleIndexOutOfBounds(v))? = NonZeroUsize::new(i + 1).into(); } - Ok(CommitteeCache { + Ok(Arc::new(CommitteeCache { initialized_epoch: Some(epoch), shuffling, shuffling_positions, committees_per_slot, slots_per_epoch: T::slots_per_epoch(), - }) + })) } /// Returns `true` if the cache has been initialized at the supplied `epoch`. @@ -322,17 +352,21 @@ pub fn epoch_committee_count(committees_per_slot: usize, slots_per_epoch: usize) /// `epoch`. /// /// Spec v0.12.1 -pub fn get_active_validator_indices(validators: &[Validator], epoch: Epoch) -> Vec { - let mut active = Vec::with_capacity(validators.len()); +pub fn get_active_validator_indices<'a, V, I>(validators: V, epoch: Epoch) -> Vec +where + V: IntoIterator, + I: ExactSizeIterator + Iterator, +{ + let iter = validators.into_iter(); - for (index, validator) in validators.iter().enumerate() { + let mut active = Vec::with_capacity(iter.len()); + + for (index, validator) in iter.enumerate() { if validator.is_active_at(epoch) { active.push(index) } } - active.shrink_to_fit(); - active } diff --git a/consensus/types/src/beacon_state/committee_cache/tests.rs b/consensus/types/src/beacon_state/committee_cache/tests.rs index 11cc6095da8..eea6233e357 100644 --- a/consensus/types/src/beacon_state/committee_cache/tests.rs +++ b/consensus/types/src/beacon_state/committee_cache/tests.rs @@ -92,7 +92,7 @@ async fn shuffles_for_the_right_epoch() { .map(|i| Hash256::from_low_u64_be(i as u64)) .collect(); - *state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + *state.randao_mixes_mut() = FixedVector::try_from_iter(distinct_hashes).unwrap(); let previous_seed = state .get_seed(state.previous_epoch(), Domain::BeaconAttester, spec) diff --git a/consensus/types/src/beacon_state/compact_state.rs b/consensus/types/src/beacon_state/compact_state.rs new file mode 100644 index 00000000000..873bd972c96 --- /dev/null +++ b/consensus/types/src/beacon_state/compact_state.rs @@ -0,0 +1,241 @@ +use crate::{ + BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateCapella, BeaconStateError as Error, + BeaconStateMerge, EthSpec, PublicKeyBytes, VList, Validator, ValidatorMutable, +}; +use itertools::process_results; +use std::sync::Arc; + +pub type CompactBeaconState = BeaconState; + +/// Implement the conversion function from BeaconState -> CompactBeaconState. +macro_rules! 
full_to_compact { + ($s:ident, $outer:ident, $variant_name:ident, $struct_name:ident, [$($extra_fields:ident),*]) => { + BeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $s.genesis_time, + genesis_validators_root: $s.genesis_validators_root, + slot: $s.slot, + fork: $s.fork, + + // History + latest_block_header: $s.latest_block_header.clone(), + block_roots: $s.block_roots.clone(), + state_roots: $s.state_roots.clone(), + historical_roots: $s.historical_roots.clone(), + + // Eth1 + eth1_data: $s.eth1_data.clone(), + eth1_data_votes: $s.eth1_data_votes.clone(), + eth1_deposit_index: $s.eth1_deposit_index, + + // Validator registry + validators: VList::try_from_iter( + $s.validators.into_iter().map(|validator| validator.mutable.clone()) + ).expect("fix this"), + balances: $s.balances.clone(), + + // Shuffling + randao_mixes: $s.randao_mixes.clone(), + + // Slashings + slashings: $s.slashings.clone(), + + // Finality + justification_bits: $s.justification_bits.clone(), + previous_justified_checkpoint: $s.previous_justified_checkpoint, + current_justified_checkpoint: $s.current_justified_checkpoint, + finalized_checkpoint: $s.finalized_checkpoint, + + // Caches. + total_active_balance: $s.total_active_balance.clone(), + committee_caches: $s.committee_caches.clone(), + pubkey_cache: $s.pubkey_cache.clone(), + exit_cache: $s.exit_cache.clone(), + + // Variant-specific fields + $( + $extra_fields: $s.$extra_fields.clone() + ),* + }) + } +} + +/// Implement the conversion from CompactBeaconState -> BeaconState. +macro_rules! compact_to_full { + ($inner:ident, $variant_name:ident, $struct_name:ident, $immutable_validators:ident, [$($extra_fields:ident),*]) => { + BeaconState::$variant_name($struct_name { + // Versioning + genesis_time: $inner.genesis_time, + genesis_validators_root: $inner.genesis_validators_root, + slot: $inner.slot, + fork: $inner.fork, + + // History + latest_block_header: $inner.latest_block_header, + block_roots: $inner.block_roots, + state_roots: $inner.state_roots, + historical_roots: $inner.historical_roots, + + // Eth1 + eth1_data: $inner.eth1_data, + eth1_data_votes: $inner.eth1_data_votes, + eth1_deposit_index: $inner.eth1_deposit_index, + + // Validator registry + validators: process_results($inner.validators.into_iter().enumerate().map(|(i, mutable)| { + $immutable_validators(i) + .ok_or(Error::MissingImmutableValidator(i)) + .map(move |pubkey| { + Validator { + pubkey, + mutable: mutable.clone(), + } + }) + }), |iter| VList::try_from_iter(iter))??, + balances: $inner.balances, + + // Shuffling + randao_mixes: $inner.randao_mixes, + + // Slashings + slashings: $inner.slashings, + + // Finality + justification_bits: $inner.justification_bits, + previous_justified_checkpoint: $inner.previous_justified_checkpoint, + current_justified_checkpoint: $inner.current_justified_checkpoint, + finalized_checkpoint: $inner.finalized_checkpoint, + + // Caching + total_active_balance: $inner.total_active_balance, + committee_caches: $inner.committee_caches, + pubkey_cache: $inner.pubkey_cache, + exit_cache: $inner.exit_cache, + + // Variant-specific fields + $( + $extra_fields: $inner.$extra_fields + ),* + }) + } +} + +impl BeaconState { + pub fn into_compact_state(self) -> CompactBeaconState { + match self { + BeaconState::Base(s) => full_to_compact!( + s, + self, + Base, + BeaconStateBase, + [previous_epoch_attestations, current_epoch_attestations] + ), + BeaconState::Altair(s) => full_to_compact!( + s, + self, + Altair, + BeaconStateAltair, + [ + 
previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), + BeaconState::Merge(s) => full_to_compact!( + s, + self, + Merge, + BeaconStateMerge, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + BeaconState::Capella(s) => full_to_compact!( + s, + self, + Capella, + BeaconStateCapella, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + } + } +} + +impl CompactBeaconState { + pub fn try_into_full_state(self, immutable_validators: F) -> Result, Error> + where + F: Fn(usize) -> Option>, + { + let state = match self { + BeaconState::Base(inner) => compact_to_full!( + inner, + Base, + BeaconStateBase, + immutable_validators, + [previous_epoch_attestations, current_epoch_attestations] + ), + BeaconState::Altair(inner) => compact_to_full!( + inner, + Altair, + BeaconStateAltair, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores + ] + ), + BeaconState::Merge(inner) => compact_to_full!( + inner, + Merge, + BeaconStateMerge, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header + ] + ), + BeaconState::Capella(inner) => compact_to_full!( + inner, + Capella, + BeaconStateCapella, + immutable_validators, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + historical_summaries, + next_withdrawal_index, + next_withdrawal_validator_index + ] + ), + }; + Ok(state) + } +} diff --git a/consensus/types/src/beacon_state/exit_cache.rs b/consensus/types/src/beacon_state/exit_cache.rs index b657d62ae62..cbd8598de60 100644 --- a/consensus/types/src/beacon_state/exit_cache.rs +++ b/consensus/types/src/beacon_state/exit_cache.rs @@ -1,27 +1,30 @@ use super::{BeaconStateError, ChainSpec, Epoch, Validator}; +use rpds::HashTrieMapSync as HashTrieMap; use safe_arith::SafeArith; -use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; /// Map from exit epoch to the number of validators with that exit epoch. -#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct ExitCache { initialized: bool, - exit_epoch_counts: HashMap, + exit_epoch_counts: HashTrieMap, } impl ExitCache { /// Initialize a new cache for the given list of validators. - pub fn new(validators: &[Validator], spec: &ChainSpec) -> Result { + pub fn new<'a, V, I>(validators: V, spec: &ChainSpec) -> Result + where + V: IntoIterator, + I: ExactSizeIterator + Iterator, + { let mut exit_cache = ExitCache { initialized: true, ..ExitCache::default() }; // Add all validators with a non-default exit epoch to the cache. 
validators - .iter() - .filter(|validator| validator.exit_epoch != spec.far_future_epoch) - .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch))?; + .into_iter() + .filter(|validator| validator.exit_epoch() != spec.far_future_epoch) + .try_for_each(|validator| exit_cache.record_validator_exit(validator.exit_epoch()))?; Ok(exit_cache) } @@ -37,10 +40,12 @@ impl ExitCache { /// Record the exit epoch of a validator. Must be called only once per exiting validator. pub fn record_validator_exit(&mut self, exit_epoch: Epoch) -> Result<(), BeaconStateError> { self.check_initialized()?; - self.exit_epoch_counts - .entry(exit_epoch) - .or_insert(0) - .safe_add_assign(1)?; + + if let Some(count) = self.exit_epoch_counts.get_mut(&exit_epoch) { + count.safe_add_assign(1)?; + } else { + self.exit_epoch_counts.insert_mut(exit_epoch, 1); + } Ok(()) } diff --git a/consensus/types/src/beacon_state/iter.rs b/consensus/types/src/beacon_state/iter.rs index 2c00913ce96..4b97cae7aed 100644 --- a/consensus/types/src/beacon_state/iter.rs +++ b/consensus/types/src/beacon_state/iter.rs @@ -74,7 +74,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1; } assert_eq!( @@ -122,7 +122,7 @@ mod test { let mut state: BeaconState = BeaconState::new(0, <_>::default(), &spec); for i in 0..state.block_roots().len() { - state.block_roots_mut()[i] = root_slot(i).1; + *state.block_roots_mut().get_mut(i).unwrap() = root_slot(i).1; } assert_eq!( diff --git a/consensus/types/src/beacon_state/pubkey_cache.rs b/consensus/types/src/beacon_state/pubkey_cache.rs index 590ea30f999..10a11493a75 100644 --- a/consensus/types/src/beacon_state/pubkey_cache.rs +++ b/consensus/types/src/beacon_state/pubkey_cache.rs @@ -1,20 +1,20 @@ use crate::*; -use serde_derive::{Deserialize, Serialize}; -use std::collections::HashMap; +use rpds::HashTrieMapSync as HashTrieMap; type ValidatorIndex = usize; -#[derive(Debug, PartialEq, Clone, Default, Serialize, Deserialize)] +#[derive(Debug, PartialEq, Clone, Default)] pub struct PubkeyCache { - /// Maintain the number of keys added to the map. It is not sufficient to just use the HashMap - /// len, as it does not increase when duplicate keys are added. Duplicate keys are used during - /// testing. + /// Maintain the number of keys added to the map. It is not sufficient to just use the + /// HashTrieMap len, as it does not increase when duplicate keys are added. Duplicate keys are + /// used during testing. len: usize, - map: HashMap, + map: HashTrieMap, } impl PubkeyCache { /// Returns the number of validator indices added to the map so far. + #[allow(clippy::len_without_is_empty)] pub fn len(&self) -> ValidatorIndex { self.len } @@ -25,7 +25,7 @@ impl PubkeyCache { /// that an index is never skipped. 
pub fn insert(&mut self, pubkey: PublicKeyBytes, index: ValidatorIndex) -> bool { if index == self.len { - self.map.insert(pubkey, index); + self.map.insert_mut(pubkey, index); self.len = self .len .checked_add(1) diff --git a/consensus/types/src/beacon_state/tests.rs b/consensus/types/src/beacon_state/tests.rs index d63eaafc4b9..0504bc028b8 100644 --- a/consensus/types/src/beacon_state/tests.rs +++ b/consensus/types/src/beacon_state/tests.rs @@ -1,21 +1,17 @@ #![cfg(test)] -use crate::test_utils::*; -use crate::test_utils::{SeedableRng, XorShiftRng}; +use crate::{test_utils::*, ForkName}; use beacon_chain::test_utils::{ interop_genesis_state_with_eth1, test_spec, BeaconChainHarness, EphemeralHarnessType, DEFAULT_ETH1_BLOCK_HASH, }; use beacon_chain::types::{ test_utils::TestRandom, BeaconState, BeaconStateAltair, BeaconStateBase, BeaconStateError, - ChainSpec, CloneConfig, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, MainnetEthSpec, - MinimalEthSpec, RelativeEpoch, Slot, + BeaconStateMerge, ChainSpec, Domain, Epoch, EthSpec, FixedVector, Hash256, Keypair, + MainnetEthSpec, MinimalEthSpec, RelativeEpoch, Slot, }; -use safe_arith::SafeArith; use ssz::Encode; -use state_processing::per_slot_processing; use std::ops::Mul; use swap_or_not_shuffle::compute_shuffled_index; -use tree_hash::TreeHash; pub const MAX_VALIDATOR_COUNT: usize = 129; pub const SLOT_OFFSET: Slot = Slot::new(1); @@ -102,7 +98,12 @@ async fn test_beacon_proposer_index() { // Test with two validators per slot, first validator has zero balance. let mut state = build_state::((T::slots_per_epoch() as usize).mul(2)).await; let slot0_candidate0 = ith_candidate(&state, Slot::new(0), 0, &spec); - state.validators_mut()[slot0_candidate0].effective_balance = 0; + state + .validators_mut() + .get_mut(slot0_candidate0) + .unwrap() + .mutable + .effective_balance = 0; test(&state, Slot::new(0), 1); for i in 1..T::slots_per_epoch() { test(&state, Slot::from(i), 0); @@ -160,83 +161,6 @@ async fn cache_initialization() { test_cache_initialization(&mut state, RelativeEpoch::Next, &spec); } -fn test_clone_config(base_state: &BeaconState, clone_config: CloneConfig) { - let state = base_state.clone_with(clone_config); - if clone_config.committee_caches { - state - .committee_cache(RelativeEpoch::Previous) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Current) - .expect("committee cache exists"); - state - .committee_cache(RelativeEpoch::Next) - .expect("committee cache exists"); - state - .total_active_balance() - .expect("total active balance exists"); - } else { - state - .committee_cache(RelativeEpoch::Previous) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Current) - .expect_err("shouldn't exist"); - state - .committee_cache(RelativeEpoch::Next) - .expect_err("shouldn't exist"); - } - if clone_config.pubkey_cache { - assert_ne!(state.pubkey_cache().len(), 0); - } else { - assert_eq!(state.pubkey_cache().len(), 0); - } - if clone_config.exit_cache { - state - .exit_cache() - .check_initialized() - .expect("exit cache exists"); - } else { - state - .exit_cache() - .check_initialized() - .expect_err("exit cache doesn't exist"); - } - if clone_config.tree_hash_cache { - assert!(state.tree_hash_cache().is_initialized()); - } else { - assert!( - !state.tree_hash_cache().is_initialized(), - "{:?}", - clone_config - ); - } -} - -#[tokio::test] -async fn clone_config() { - let spec = MinimalEthSpec::default_spec(); - - let mut state = build_state::(16).await; - - 
state.build_all_caches(&spec).unwrap(); - state - .update_tree_hash_cache() - .expect("should update tree hash cache"); - - let num_caches = 4; - let all_configs = (0..2u8.pow(num_caches)).map(|i| CloneConfig { - committee_caches: (i & 1) != 0, - pubkey_cache: ((i >> 1) & 1) != 0, - exit_cache: ((i >> 2) & 1) != 0, - tree_hash_cache: ((i >> 3) & 1) != 0, - }); - - for config in all_configs { - test_clone_config(&state, config); - } -} - /// Tests committee-specific components #[cfg(test)] mod committees { @@ -327,10 +251,9 @@ mod committees { let harness = get_harness::(validator_count, slot).await; let mut new_head_state = harness.get_current_state(); - let distinct_hashes: Vec = (0..T::epochs_per_historical_vector()) - .map(|i| Hash256::from_low_u64_be(i as u64)) - .collect(); - *new_head_state.randao_mixes_mut() = FixedVector::from(distinct_hashes); + let distinct_hashes = + (0..T::epochs_per_historical_vector()).map(|i| Hash256::from_low_u64_be(i as u64)); + *new_head_state.randao_mixes_mut() = FixedVector::try_from_iter(distinct_hashes).unwrap(); new_head_state .force_build_committee_cache(RelativeEpoch::Previous, spec) @@ -486,120 +409,19 @@ fn decode_base_and_altair() { } #[test] -fn tree_hash_cache_linear_history() { - let mut rng = XorShiftRng::from_seed([42; 16]); - - let mut state: BeaconState = - BeaconState::Base(BeaconStateBase::random_for_test(&mut rng)); - - let root = state.update_tree_hash_cache().unwrap(); - - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); - - /* - * A cache should hash twice without updating the slot. - */ - - assert_eq!( - state.update_tree_hash_cache().unwrap(), - root, - "tree hash result should be identical on the same slot" - ); - - /* - * A cache should not hash after updating the slot but not updating the state roots. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. - state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - *state.slot_mut() += 1; - - assert_eq!( - state.update_tree_hash_cache(), - Err(BeaconStateError::NonLinearTreeHashCacheHistory), - "should not build hash without updating the state root" - ); - - /* - * The cache should update if the slot and state root are updated. - */ - - // The tree hash cache needs to be rebuilt since it was dropped when it failed. - let root = state - .update_tree_hash_cache() - .expect("should rebuild cache"); - - *state.slot_mut() += 1; - state - .set_state_root(state.slot() - 1, root) - .expect("should set state root"); - - let root = state.update_tree_hash_cache().unwrap(); - assert_eq!(root.as_bytes(), &state.tree_hash_root()[..]); -} - -// Check how the cache behaves when there's a distance larger than `SLOTS_PER_HISTORICAL_ROOT` -// since its last update. -#[test] -fn tree_hash_cache_linear_history_long_skip() { - let validator_count = 128; - let keypairs = generate_deterministic_keypairs(validator_count); - - let spec = &test_spec::(); - - // This state has a cache that advances normally each slot. - let mut state: BeaconState = interop_genesis_state_with_eth1( - &keypairs, - 0, - Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, - spec, - ) - .unwrap(); - - state.update_tree_hash_cache().unwrap(); - - // This state retains its original cache until it is updated after a long skip. 
- let mut original_cache_state = state.clone(); - assert!(original_cache_state.tree_hash_cache().is_initialized()); - - // Advance the states to a slot beyond the historical state root limit, using the state root - // from the first state to avoid touching the original state's cache. - let start_slot = state.slot(); - let target_slot = start_slot - .safe_add(MinimalEthSpec::slots_per_historical_root() as u64 + 1) - .unwrap(); - - let mut prev_state_root; - while state.slot() < target_slot { - prev_state_root = state.update_tree_hash_cache().unwrap(); - per_slot_processing(&mut state, None, spec).unwrap(); - per_slot_processing(&mut original_cache_state, Some(prev_state_root), spec).unwrap(); +fn check_num_fields_pow2() { + use metastruct::NumFields; + pub type E = MainnetEthSpec; + + for fork_name in ForkName::list_all() { + let num_fields = match fork_name { + ForkName::Base => BeaconStateBase::::NUM_FIELDS, + ForkName::Altair => BeaconStateAltair::::NUM_FIELDS, + ForkName::Merge => BeaconStateMerge::::NUM_FIELDS, + }; + assert_eq!( + num_fields.next_power_of_two(), + BeaconState::::NUM_FIELDS_POW2 + ); } - - // The state with the original cache should still be initialized at the starting slot. - assert_eq!( - original_cache_state - .tree_hash_cache() - .initialized_slot() - .unwrap(), - start_slot - ); - - // Updating the tree hash cache should be successful despite the long skip. - assert_eq!( - original_cache_state.update_tree_hash_cache().unwrap(), - state.update_tree_hash_cache().unwrap() - ); - - assert_eq!( - original_cache_state - .tree_hash_cache() - .initialized_slot() - .unwrap(), - target_slot - ); } diff --git a/consensus/types/src/beacon_state/tree_hash_cache.rs b/consensus/types/src/beacon_state/tree_hash_cache.rs deleted file mode 100644 index d1d63e3c806..00000000000 --- a/consensus/types/src/beacon_state/tree_hash_cache.rs +++ /dev/null @@ -1,646 +0,0 @@ -#![allow(clippy::integer_arithmetic)] -#![allow(clippy::disallowed_methods)] -#![allow(clippy::indexing_slicing)] - -use super::Error; -use crate::historical_summary::HistoricalSummaryCache; -use crate::{BeaconState, EthSpec, Hash256, ParticipationList, Slot, Unsigned, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, TreeHashCache}; -use rayon::prelude::*; -use ssz_derive::{Decode, Encode}; -use ssz_types::VariableList; -use std::cmp::Ordering; -use std::iter::ExactSizeIterator; -use tree_hash::{mix_in_length, MerkleHasher, TreeHash}; - -/// The number of leaves (including padding) on the `BeaconState` Merkle tree. -/// -/// ## Note -/// -/// This constant is set with the assumption that there are `> 16` and `<= 32` fields on the -/// `BeaconState`. **Tree hashing will fail if this value is set incorrectly.** -pub const NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES: usize = 32; - -/// The number of nodes in the Merkle tree of a validator record. -const NODES_PER_VALIDATOR: usize = 15; - -/// The number of validator record tree hash caches stored in each arena. -/// -/// This is primarily used for concurrency; if we have 16 validators and set `VALIDATORS_PER_ARENA -/// == 8` then it is possible to do a 2-core concurrent hash. -/// -/// Do not set to 0. -const VALIDATORS_PER_ARENA: usize = 4_096; - -#[derive(Debug, PartialEq, Clone, Encode, Decode)] -pub struct Eth1DataVotesTreeHashCache { - arena: CacheArena, - tree_hash_cache: TreeHashCache, - voting_period: u64, - roots: VariableList, -} - -impl Eth1DataVotesTreeHashCache { - /// Instantiates a new cache. 
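// The `check_num_fields_pow2` test above pins down the invariant the tree hash
// code depends on: an SSZ container's Merkle tree is padded to the next power
// of two leaves, so `NUM_FIELDS_POW2` must equal `NUM_FIELDS` rounded up. The
// field counts below are illustrative, not the real `BeaconState` counts:
fn merkle_leaf_count(num_fields: usize) -> usize {
    num_fields.next_power_of_two()
}

fn leaf_count_sketch() {
    assert_eq!(merkle_leaf_count(21), 32); // any count in 17..=32 pads to 32 leaves
    assert_eq!(merkle_leaf_count(32), 32); // exact powers of two need no padding
}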
- /// - /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. - pub fn new(state: &BeaconState) -> Self { - let mut arena = CacheArena::default(); - let roots: VariableList<_, _> = state - .eth1_data_votes() - .iter() - .map(|eth1_data| eth1_data.tree_hash_root()) - .collect::>() - .into(); - let tree_hash_cache = roots.new_tree_hash_cache(&mut arena); - - Self { - arena, - tree_hash_cache, - voting_period: Self::voting_period(state.slot()), - roots, - } - } - - fn voting_period(slot: Slot) -> u64 { - slot.as_u64() / T::SlotsPerEth1VotingPeriod::to_u64() - } - - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - if state.eth1_data_votes().len() < self.roots.len() - || Self::voting_period(state.slot()) != self.voting_period - { - *self = Self::new(state); - } - - state - .eth1_data_votes() - .iter() - .skip(self.roots.len()) - .try_for_each(|eth1_data| self.roots.push(eth1_data.tree_hash_root()))?; - - self.roots - .recalculate_tree_hash_root(&mut self.arena, &mut self.tree_hash_cache) - .map_err(Into::into) - } -} - -/// A cache that performs a caching tree hash of the entire `BeaconState` struct. -/// -/// This type is a wrapper around the inner cache, which does all the work. -#[derive(Debug, Default, PartialEq, Clone)] -pub struct BeaconTreeHashCache { - inner: Option>, -} - -impl BeaconTreeHashCache { - pub fn new(state: &BeaconState) -> Self { - Self { - inner: Some(BeaconTreeHashCacheInner::new(state)), - } - } - - pub fn is_initialized(&self) -> bool { - self.inner.is_some() - } - - /// Move the inner cache out so that the containing `BeaconState` can be borrowed. - pub fn take(&mut self) -> Option> { - self.inner.take() - } - - /// Restore the inner cache after using `take`. - pub fn restore(&mut self, inner: BeaconTreeHashCacheInner) { - self.inner = Some(inner); - } - - /// Make the cache empty. - pub fn uninitialize(&mut self) { - self.inner = None; - } - - /// Return the slot at which the cache was last updated. - /// - /// This should probably only be used during testing. - pub fn initialized_slot(&self) -> Option { - Some(self.inner.as_ref()?.previous_state?.1) - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct BeaconTreeHashCacheInner { - /// Tracks the previously generated state root to ensure the next state root provided descends - /// directly from this state. - previous_state: Option<(Hash256, Slot)>, - // Validators cache - validators: ValidatorsListTreeHashCache, - // Arenas - fixed_arena: CacheArena, - balances_arena: CacheArena, - slashings_arena: CacheArena, - // Caches - block_roots: TreeHashCache, - state_roots: TreeHashCache, - historical_roots: TreeHashCache, - historical_summaries: OptionalTreeHashCache, - balances: TreeHashCache, - randao_mixes: TreeHashCache, - slashings: TreeHashCache, - eth1_data_votes: Eth1DataVotesTreeHashCache, - inactivity_scores: OptionalTreeHashCache, - // Participation caches - previous_epoch_participation: OptionalTreeHashCache, - current_epoch_participation: OptionalTreeHashCache, -} - -impl BeaconTreeHashCacheInner { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees. Only the leaves are - /// hashed, leaving the internal nodes as all-zeros. 
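// `Eth1DataVotesTreeHashCache` above ties its validity to the eth1 voting
// period: `voting_period` integer-divides the slot, and the cache is rebuilt
// whenever the period changes or the vote list shrinks. With mainnet's 2048
// slots per eth1 voting period (64 epochs of 32 slots):
fn voting_period(slot: u64, slots_per_period: u64) -> u64 {
    slot / slots_per_period
}

fn voting_period_sketch() {
    assert_eq!(voting_period(2047, 2048), 0); // last slot of period 0
    assert_eq!(voting_period(2048, 2048), 1); // period changes; cache is rebuilt
}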
- pub fn new(state: &BeaconState) -> Self { - let mut fixed_arena = CacheArena::default(); - let block_roots = state.block_roots().new_tree_hash_cache(&mut fixed_arena); - let state_roots = state.state_roots().new_tree_hash_cache(&mut fixed_arena); - let historical_roots = state - .historical_roots() - .new_tree_hash_cache(&mut fixed_arena); - let historical_summaries = OptionalTreeHashCache::new( - state - .historical_summaries() - .ok() - .map(HistoricalSummaryCache::new) - .as_ref(), - ); - - let randao_mixes = state.randao_mixes().new_tree_hash_cache(&mut fixed_arena); - - let validators = ValidatorsListTreeHashCache::new::(state.validators()); - - let mut balances_arena = CacheArena::default(); - let balances = state.balances().new_tree_hash_cache(&mut balances_arena); - - let mut slashings_arena = CacheArena::default(); - let slashings = state.slashings().new_tree_hash_cache(&mut slashings_arena); - - let inactivity_scores = OptionalTreeHashCache::new(state.inactivity_scores().ok()); - - let previous_epoch_participation = OptionalTreeHashCache::new( - state - .previous_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - let current_epoch_participation = OptionalTreeHashCache::new( - state - .current_epoch_participation() - .ok() - .map(ParticipationList::new) - .as_ref(), - ); - - Self { - previous_state: None, - validators, - fixed_arena, - balances_arena, - slashings_arena, - block_roots, - state_roots, - historical_roots, - historical_summaries, - balances, - randao_mixes, - slashings, - inactivity_scores, - eth1_data_votes: Eth1DataVotesTreeHashCache::new(state), - previous_epoch_participation, - current_epoch_participation, - } - } - - pub fn recalculate_tree_hash_leaves( - &mut self, - state: &BeaconState, - ) -> Result, Error> { - let mut leaves = vec![ - // Genesis data leaves. - state.genesis_time().tree_hash_root(), - state.genesis_validators_root().tree_hash_root(), - // Current fork data leaves. - state.slot().tree_hash_root(), - state.fork().tree_hash_root(), - state.latest_block_header().tree_hash_root(), - // Roots leaves. - state - .block_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.block_roots)?, - state - .state_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.state_roots)?, - state - .historical_roots() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.historical_roots)?, - // Eth1 Data leaves. - state.eth1_data().tree_hash_root(), - self.eth1_data_votes.recalculate_tree_hash_root(state)?, - state.eth1_deposit_index().tree_hash_root(), - // Validator leaves. 
- self.validators - .recalculate_tree_hash_root(state.validators())?, - state - .balances() - .recalculate_tree_hash_root(&mut self.balances_arena, &mut self.balances)?, - state - .randao_mixes() - .recalculate_tree_hash_root(&mut self.fixed_arena, &mut self.randao_mixes)?, - state - .slashings() - .recalculate_tree_hash_root(&mut self.slashings_arena, &mut self.slashings)?, - ]; - - // Participation - if let BeaconState::Base(state) = state { - leaves.push(state.previous_epoch_attestations.tree_hash_root()); - leaves.push(state.current_epoch_attestations.tree_hash_root()); - } else { - leaves.push( - self.previous_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.previous_epoch_participation()?, - ))?, - ); - leaves.push( - self.current_epoch_participation - .recalculate_tree_hash_root(&ParticipationList::new( - state.current_epoch_participation()?, - ))?, - ); - } - // Checkpoint leaves - leaves.push(state.justification_bits().tree_hash_root()); - leaves.push(state.previous_justified_checkpoint().tree_hash_root()); - leaves.push(state.current_justified_checkpoint().tree_hash_root()); - leaves.push(state.finalized_checkpoint().tree_hash_root()); - // Inactivity & light-client sync committees (Altair and later). - if let Ok(inactivity_scores) = state.inactivity_scores() { - leaves.push( - self.inactivity_scores - .recalculate_tree_hash_root(inactivity_scores)?, - ); - } - if let Ok(current_sync_committee) = state.current_sync_committee() { - leaves.push(current_sync_committee.tree_hash_root()); - } - - if let Ok(next_sync_committee) = state.next_sync_committee() { - leaves.push(next_sync_committee.tree_hash_root()); - } - - // Execution payload (merge and later). - if let Ok(payload_header) = state.latest_execution_payload_header() { - leaves.push(payload_header.tree_hash_root()); - } - - // Withdrawal indices (Capella and later). - if let Ok(next_withdrawal_index) = state.next_withdrawal_index() { - leaves.push(next_withdrawal_index.tree_hash_root()); - } - if let Ok(next_withdrawal_validator_index) = state.next_withdrawal_validator_index() { - leaves.push(next_withdrawal_validator_index.tree_hash_root()); - } - - // Historical roots/summaries (Capella and later). - if let Ok(historical_summaries) = state.historical_summaries() { - leaves.push( - self.historical_summaries.recalculate_tree_hash_root( - &HistoricalSummaryCache::new(historical_summaries), - )?, - ); - } - - Ok(leaves) - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// The provided `state` should be a descendant of the last `state` given to this function, or - /// the `Self::new` function. If the state is more than `SLOTS_PER_HISTORICAL_ROOT` slots - /// after `self.previous_state` then the whole cache will be re-initialized. - pub fn recalculate_tree_hash_root(&mut self, state: &BeaconState) -> Result { - // If this cache has previously produced a root, ensure that it is in the state root - // history of this state. - // - // This ensures that the states applied have a linear history, this - // allows us to make assumptions about how the state changes over times and produce a more - // efficient algorithm. - if let Some((previous_root, previous_slot)) = self.previous_state { - // The previously-hashed state must not be newer than `state`. 
- if previous_slot > state.slot() { - return Err(Error::TreeHashCacheSkippedSlot { - cache: previous_slot, - state: state.slot(), - }); - } - - // If the state is newer, the previous root must be in the history of the given state. - // If the previous slot is out of range of the `state_roots` array (indicating a long - // gap between the cache's last use and the current state) then we re-initialize. - match state.get_state_root(previous_slot) { - Ok(state_previous_root) if *state_previous_root == previous_root => {} - Ok(_) => return Err(Error::NonLinearTreeHashCacheHistory), - Err(Error::SlotOutOfBounds) => { - *self = Self::new(state); - } - Err(e) => return Err(e), - } - } - - let mut hasher = MerkleHasher::with_leaves(NUM_BEACON_STATE_HASH_TREE_ROOT_LEAVES); - - let leaves = self.recalculate_tree_hash_leaves(state)?; - for leaf in leaves { - hasher.write(leaf.as_bytes())?; - } - - let root = hasher.finish()?; - - self.previous_state = Some((root, state.slot())); - - Ok(root) - } - - /// Updates the cache and provides the root of the given `validators`. - pub fn recalculate_validators_tree_hash_root( - &mut self, - validators: &[Validator], - ) -> Result { - self.validators.recalculate_tree_hash_root(validators) - } -} - -/// A specialized cache for computing the tree hash root of `state.validators`. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -struct ValidatorsListTreeHashCache { - list_arena: CacheArena, - list_cache: TreeHashCache, - values: ParallelValidatorTreeHash, -} - -impl ValidatorsListTreeHashCache { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. - fn new(validators: &[Validator]) -> Self { - let mut list_arena = CacheArena::default(); - Self { - list_cache: TreeHashCache::new( - &mut list_arena, - int_log(E::ValidatorRegistryLimit::to_usize()), - validators.len(), - ), - list_arena, - values: ParallelValidatorTreeHash::new(validators), - } - } - - /// Updates the cache and returns the tree hash root for the given `state`. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. - fn recalculate_tree_hash_root(&mut self, validators: &[Validator]) -> Result { - let mut list_arena = std::mem::take(&mut self.list_arena); - - let leaves = self.values.leaves(validators)?; - let num_leaves = leaves.iter().map(|arena| arena.len()).sum(); - - let leaves_iter = ForcedExactSizeIterator { - iter: leaves.into_iter().flatten().map(|h| h.to_fixed_bytes()), - len: num_leaves, - }; - - let list_root = self - .list_cache - .recalculate_merkle_root(&mut list_arena, leaves_iter)?; - - self.list_arena = list_arena; - - Ok(mix_in_length(&list_root, validators.len())) - } -} - -/// Provides a wrapper around some `iter` if the number of items in the iterator is known to the -/// programmer but not the compiler. This allows use of `ExactSizeIterator` in some occasions. -/// -/// Care should be taken to ensure `len` is accurate. -struct ForcedExactSizeIterator { - iter: I, - len: usize, -} - -impl> Iterator for ForcedExactSizeIterator { - type Item = V; - - fn next(&mut self) -> Option { - self.iter.next() - } -} - -impl> ExactSizeIterator for ForcedExactSizeIterator { - fn len(&self) -> usize { - self.len - } -} - -/// Provides a cache for each of the `Validator` objects in `state.validators` and computes the -/// roots of these using Rayon parallelization. 
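// `ParallelValidatorTreeHash` (just below) shards validator caches into arenas
// of `VALIDATORS_PER_ARENA` so Rayon can hash arenas on separate cores. Its
// constructor sizes the shard count with a ceiling division clamped to at
// least one arena:
fn num_arenas(validators: usize, per_arena: usize) -> usize {
    std::cmp::max(1, (validators + per_arena - 1) / per_arena)
}

fn num_arenas_sketch() {
    assert_eq!(num_arenas(0, 4096), 1); // an empty registry still gets one arena
    assert_eq!(num_arenas(4096, 4096), 1); // exact fit
    assert_eq!(num_arenas(4097, 4096), 2); // one extra validator opens a new arena
}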
-#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct ParallelValidatorTreeHash { - /// Each arena and its associated sub-trees. - arenas: Vec<(CacheArena, Vec)>, -} - -impl ParallelValidatorTreeHash { - /// Instantiates a new cache. - /// - /// Allocates the necessary memory to store all of the cached Merkle trees but does perform any - /// hashing. - fn new(validators: &[Validator]) -> Self { - let num_arenas = std::cmp::max( - 1, - (validators.len() + VALIDATORS_PER_ARENA - 1) / VALIDATORS_PER_ARENA, - ); - - let mut arenas = (1..=num_arenas) - .map(|i| { - let num_validators = if i == num_arenas { - validators.len() % VALIDATORS_PER_ARENA - } else { - VALIDATORS_PER_ARENA - }; - NODES_PER_VALIDATOR * num_validators - }) - .map(|capacity| (CacheArena::with_capacity(capacity), vec![])) - .collect::>(); - - validators.iter().enumerate().for_each(|(i, v)| { - let (arena, caches) = &mut arenas[i / VALIDATORS_PER_ARENA]; - caches.push(v.new_tree_hash_cache(arena)) - }); - - Self { arenas } - } - - /// Returns the number of validators stored in self. - fn len(&self) -> usize { - self.arenas.last().map_or(0, |last| { - // Subtraction cannot underflow because `.last()` ensures the `.len() > 0`. - (self.arenas.len() - 1) * VALIDATORS_PER_ARENA + last.1.len() - }) - } - - /// Updates the caches for each `Validator` in `validators` and returns a list that maps 1:1 - /// with `validators` to the hash of each validator. - /// - /// This function makes assumptions that the `validators` list will only change in accordance - /// with valid per-block/per-slot state transitions. - fn leaves(&mut self, validators: &[Validator]) -> Result>, Error> { - match self.len().cmp(&validators.len()) { - Ordering::Less => validators.iter().skip(self.len()).for_each(|v| { - if self - .arenas - .last() - .map_or(true, |last| last.1.len() >= VALIDATORS_PER_ARENA) - { - let mut arena = CacheArena::default(); - let cache = v.new_tree_hash_cache(&mut arena); - self.arenas.push((arena, vec![cache])) - } else { - let (arena, caches) = &mut self - .arenas - .last_mut() - .expect("Cannot reach this block if arenas is empty."); - caches.push(v.new_tree_hash_cache(arena)) - } - }), - Ordering::Greater => { - return Err(Error::ValidatorRegistryShrunk); - } - Ordering::Equal => (), - } - - self.arenas - .par_iter_mut() - .enumerate() - .map(|(arena_index, (arena, caches))| { - caches - .iter_mut() - .enumerate() - .map(move |(cache_index, cache)| { - let val_index = (arena_index * VALIDATORS_PER_ARENA) + cache_index; - - let validator = validators - .get(val_index) - .ok_or(Error::TreeHashCacheInconsistent)?; - - validator - .recalculate_tree_hash_root(arena, cache) - .map_err(Error::CachedTreeHashError) - }) - .collect() - }) - .collect() - } -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCache { - inner: Option, -} - -#[derive(Debug, PartialEq, Clone)] -pub struct OptionalTreeHashCacheInner { - arena: CacheArena, - tree_hash_cache: TreeHashCache, -} - -impl OptionalTreeHashCache { - /// Initialize a new cache if `item.is_some()`. - fn new>(item: Option<&C>) -> Self { - let inner = item.map(OptionalTreeHashCacheInner::new); - Self { inner } - } - - /// Compute the tree hash root for the given `item`. - /// - /// This function will initialize the inner cache if necessary (e.g. when crossing the fork). 
-    fn recalculate_tree_hash_root<C: CachedTreeHash<TreeHashCache>>(
-        &mut self,
-        item: &C,
-    ) -> Result<Hash256, Error> {
-        let cache = self
-            .inner
-            .get_or_insert_with(|| OptionalTreeHashCacheInner::new(item));
-        item.recalculate_tree_hash_root(&mut cache.arena, &mut cache.tree_hash_cache)
-            .map_err(Into::into)
-    }
-}
-
-impl OptionalTreeHashCacheInner {
-    fn new<C: CachedTreeHash<TreeHashCache>>(item: &C) -> Self {
-        let mut arena = CacheArena::default();
-        let tree_hash_cache = item.new_tree_hash_cache(&mut arena);
-        OptionalTreeHashCacheInner {
-            arena,
-            tree_hash_cache,
-        }
-    }
-}
-
-impl<T: EthSpec> arbitrary::Arbitrary<'_> for BeaconTreeHashCache<T> {
-    fn arbitrary(_u: &mut arbitrary::Unstructured<'_>) -> arbitrary::Result<Self> {
-        Ok(Self::default())
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use crate::{MainnetEthSpec, ParticipationFlags};
-
-    #[test]
-    fn validator_node_count() {
-        let mut arena = CacheArena::default();
-        let v = Validator::default();
-        let _cache = v.new_tree_hash_cache(&mut arena);
-        assert_eq!(arena.backing_len(), NODES_PER_VALIDATOR);
-    }
-
-    #[test]
-    fn participation_flags() {
-        type N = <MainnetEthSpec as EthSpec>::ValidatorRegistryLimit;
-        let len = 65;
-        let mut test_flag = ParticipationFlags::default();
-        test_flag.add_flag(0).unwrap();
-        let epoch_participation = VariableList::<_, N>::new(vec![test_flag; len]).unwrap();
-
-        let mut cache = OptionalTreeHashCache { inner: None };
-
-        let cache_root = cache
-            .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation))
-            .unwrap();
-        let recalc_root = cache
-            .recalculate_tree_hash_root(&ParticipationList::new(&epoch_participation))
-            .unwrap();
-
-        assert_eq!(cache_root, recalc_root, "recalculated root should match");
-        assert_eq!(
-            cache_root,
-            epoch_participation.tree_hash_root(),
-            "cached root should match uncached"
-        );
-    }
-}
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 5253dcc4b03..3ccac58f5ef 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -257,6 +257,13 @@ impl ChainSpec {
         }
     }
 
+    /// Return the name of the fork activated at `slot`, if any.
+    pub fn fork_activated_at_slot<E: EthSpec>(&self, slot: Slot) -> Option<ForkName> {
+        let prev_slot_fork = self.fork_name_at_slot::<E>(slot - 1);
+        let slot_fork = self.fork_name_at_slot::<E>(slot);
+        (slot_fork != prev_slot_fork).then_some(slot_fork)
+    }
+
     /// Returns the fork version for a named fork.
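// Usage sketch for the `fork_activated_at_slot` helper added above. The caller
// and logging here are hypothetical, with `MainnetEthSpec` standing in for a
// concrete `EthSpec`; note the `slot - 1` inside the helper means it expects a
// post-genesis slot:
use types::{ChainSpec, MainnetEthSpec, Slot};

fn log_fork_transition(spec: &ChainSpec, slot: Slot) {
    if let Some(fork) = spec.fork_activated_at_slot::<MainnetEthSpec>(slot) {
        println!("fork {:?} activated at slot {}", fork, slot);
    }
}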
pub fn fork_version_for_name(&self, fork_name: ForkName) -> [u8; 4] { match fork_name { diff --git a/consensus/types/src/deposit.rs b/consensus/types/src/deposit.rs index bbc3bd9fb89..af9f6211528 100644 --- a/consensus/types/src/deposit.rs +++ b/consensus/types/src/deposit.rs @@ -2,7 +2,7 @@ use crate::test_utils::TestRandom; use crate::*; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::U33; +use ssz_types::{typenum::U33, FixedVector}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 77ef6407e87..7761c432abc 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -6,6 +6,9 @@ use ssz_derive::{Decode, Encode}; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +// FIXME(sproul): try milhouse FixedVector +use ssz_types::FixedVector; + pub type Transaction = VariableList; pub type Transactions = VariableList< Transaction<::MaxBytesPerTransaction>, diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 1fb29db9d3a..a8baedcdd9c 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -8,6 +8,8 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use BeaconStateError; +use ssz_types::{FixedVector, VariableList}; + #[superstruct( variants(Merge, Capella), variant_attributes( @@ -30,7 +32,8 @@ use BeaconStateError; ), ref_attributes(derive(PartialEq, TreeHash), tree_hash(enum_behaviour = "transparent")), cast_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), - partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant") + partial_getter_error(ty = "Error", expr = "BeaconStateError::IncorrectStateVariant"), + map_ref_into(ExecutionPayloadHeader) )] #[derive( Debug, Clone, Serialize, Deserialize, Encode, TreeHash, Derivative, arbitrary::Arbitrary, @@ -194,6 +197,16 @@ impl<'a, T: EthSpec> From> for ExecutionPayloadHeader } } +impl<'a, T: EthSpec> From> for ExecutionPayloadHeader { + fn from(header_ref: ExecutionPayloadHeaderRef<'a, T>) -> Self { + map_execution_payload_header_ref_into_execution_payload_header!( + &'a _, + header_ref, + |inner, cons| cons(inner.clone()) + ) + } +} + impl TryFrom> for ExecutionPayloadHeaderMerge { type Error = BeaconStateError; fn try_from(header: ExecutionPayloadHeader) -> Result { @@ -215,6 +228,21 @@ impl TryFrom> for ExecutionPayloadHeaderCa } } +impl<'a, T: EthSpec> ExecutionPayloadHeaderRefMut<'a, T> { + /// Mutate through + pub fn replace(self, header: ExecutionPayloadHeader) -> Result<(), BeaconStateError> { + match self { + ExecutionPayloadHeaderRefMut::Merge(mut_ref) => { + *mut_ref = header.try_into()?; + } + ExecutionPayloadHeaderRefMut::Capella(mut_ref) => { + *mut_ref = header.try_into()?; + } + } + Ok(()) + } +} + impl ForkVersionDeserialize for ExecutionPayloadHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/historical_batch.rs b/consensus/types/src/historical_batch.rs index e75b64cae93..adf401eddb9 100644 --- a/consensus/types/src/historical_batch.rs +++ b/consensus/types/src/historical_batch.rs @@ -1,9 +1,7 @@ use crate::test_utils::TestRandom; use crate::*; - use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use 
ssz_types::FixedVector; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; @@ -24,7 +22,9 @@ use tree_hash_derive::TreeHash; )] #[arbitrary(bound = "T: EthSpec")] pub struct HistoricalBatch { + #[test_random(default)] pub block_roots: FixedVector, + #[test_random(default)] pub state_roots: FixedVector, } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index aefb45490a8..73e2b7f8244 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -89,7 +89,6 @@ pub mod sync_committee_contribution; pub mod sync_committee_message; pub mod sync_selection_proof; pub mod sync_subnet_id; -mod tree_hash_impls; pub mod validator_registration_data; pub mod withdrawal; @@ -114,7 +113,7 @@ pub use crate::beacon_block_body::{ }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; -pub use crate::beacon_state::{BeaconTreeHashCache, Error as BeaconStateError, *}; +pub use crate::beacon_state::{compact_state::CompactBeaconState, Error as BeaconStateError, *}; pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; @@ -184,7 +183,7 @@ pub use crate::sync_committee_subscription::SyncCommitteeSubscription; pub use crate::sync_duty::SyncDuty; pub use crate::sync_selection_proof::SyncSelectionProof; pub use crate::sync_subnet_id::SyncSubnetId; -pub use crate::validator::Validator; +pub use crate::validator::{Validator, ValidatorMutable}; pub use crate::validator_registration_data::*; pub use crate::validator_subscription::ValidatorSubscription; pub use crate::voluntary_exit::VoluntaryExit; @@ -203,5 +202,6 @@ pub use bls::{ AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey, Signature, SignatureBytes, }; -pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, FixedVector, VariableList}; +pub use milhouse::{self, Vector as FixedVector}; +pub use ssz_types::{typenum, typenum::Unsigned, BitList, BitVector, VariableList}; pub use superstruct::superstruct; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 1a5eed2205d..e99afd45a14 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -1,7 +1,8 @@ -use super::{BeaconBlockHeader, BeaconState, EthSpec, FixedVector, Hash256, SyncCommittee}; +use super::{BeaconBlockHeader, BeaconState, EthSpec, Hash256, SyncCommittee}; use crate::{light_client_update::*, test_utils::TestRandom}; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use ssz_types::FixedVector; use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash::TreeHash; diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index 08069c93084..c8b2ca97507 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -29,6 +29,7 @@ pub struct LightClientFinalityUpdate { /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). pub finalized_header: BeaconBlockHeader, /// Merkle proof attesting finalized header. 
+ #[test_random(default)] pub finality_branch: FixedVector, /// current sync aggreggate pub sync_aggregate: SyncAggregate, diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index ca35f96802b..f3d64332466 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,9 +1,12 @@ -use super::{BeaconBlockHeader, EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; +use super::{BeaconBlockHeader, EthSpec, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::{beacon_state, test_utils::TestRandom, BeaconBlock, BeaconState, ChainSpec}; use safe_arith::ArithError; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::{U5, U6}; +use ssz_types::{ + typenum::{U5, U6}, + FixedVector, +}; use std::sync::Arc; use test_random_derive::TestRandom; use tree_hash::TreeHash; @@ -23,6 +26,7 @@ pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), + MilhouseError(milhouse::Error), BeaconStateError(beacon_state::Error), ArithError(ArithError), AltairForkNotActive, @@ -49,6 +53,12 @@ impl From for Error { } } +impl From for Error { + fn from(e: milhouse::Error) -> Error { + Error::MilhouseError(e) + } +} + /// A LightClientUpdate is the update we request solely to either complete the bootstraping process, /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. diff --git a/consensus/types/src/sync_committee.rs b/consensus/types/src/sync_committee.rs index 43ba23f121c..2ae645604d2 100644 --- a/consensus/types/src/sync_committee.rs +++ b/consensus/types/src/sync_committee.rs @@ -1,6 +1,6 @@ use crate::test_utils::TestRandom; use crate::typenum::Unsigned; -use crate::{EthSpec, FixedVector, SyncSubnetId}; +use crate::{EthSpec, SyncSubnetId}; use bls::PublicKeyBytes; use safe_arith::{ArithError, SafeArith}; use serde_derive::{Deserialize, Serialize}; @@ -9,6 +9,9 @@ use std::collections::HashMap; use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; +// Use flat `FixedVector` regardless of whether or not tree states are used. +use ssz_types::FixedVector; + #[derive(Debug, PartialEq)] pub enum Error { ArithError(ArithError), diff --git a/consensus/types/src/test_utils/test_random.rs b/consensus/types/src/test_utils/test_random.rs index 43396dedc0d..519387bd589 100644 --- a/consensus/types/src/test_utils/test_random.rs +++ b/consensus/types/src/test_utils/test_random.rs @@ -86,7 +86,7 @@ where } } -impl TestRandom for FixedVector +impl TestRandom for ssz_types::FixedVector where T: TestRandom, { diff --git a/consensus/types/src/tree_hash_impls.rs b/consensus/types/src/tree_hash_impls.rs deleted file mode 100644 index 34043c0e83f..00000000000 --- a/consensus/types/src/tree_hash_impls.rs +++ /dev/null @@ -1,166 +0,0 @@ -//! This module contains custom implementations of `CachedTreeHash` for ETH2-specific types. -//! -//! It makes some assumptions about the layouts and update patterns of other structs in this -//! crate, and should be updated carefully whenever those structs are changed. 
-use crate::{Epoch, Hash256, PublicKeyBytes, Validator}; -use cached_tree_hash::{int_log, CacheArena, CachedTreeHash, Error, TreeHashCache}; -use int_to_bytes::int_to_fixed_bytes32; -use tree_hash::merkle_root; - -/// Number of struct fields on `Validator`. -const NUM_VALIDATOR_FIELDS: usize = 8; - -impl CachedTreeHash for Validator { - fn new_tree_hash_cache(&self, arena: &mut CacheArena) -> TreeHashCache { - TreeHashCache::new(arena, int_log(NUM_VALIDATOR_FIELDS), NUM_VALIDATOR_FIELDS) - } - - /// Efficiently tree hash a `Validator`, assuming it was updated by a valid state transition. - /// - /// Specifically, we assume that the `pubkey` field is constant. - fn recalculate_tree_hash_root( - &self, - arena: &mut CacheArena, - cache: &mut TreeHashCache, - ) -> Result { - // Otherwise just check the fields which might have changed. - let dirty_indices = cache - .leaves() - .iter_mut(arena)? - .enumerate() - .flat_map(|(i, leaf)| { - // Pubkey field (index 0) is constant. - if i == 0 && cache.initialized { - None - } else if process_field_by_index(self, i, leaf, !cache.initialized) { - Some(i) - } else { - None - } - }) - .collect(); - - cache.update_merkle_root(arena, dirty_indices) - } -} - -fn process_field_by_index( - v: &Validator, - field_idx: usize, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - match field_idx { - 0 => process_pubkey_bytes_field(&v.pubkey, leaf, force_update), - 1 => process_slice_field(v.withdrawal_credentials.as_bytes(), leaf, force_update), - 2 => process_u64_field(v.effective_balance, leaf, force_update), - 3 => process_bool_field(v.slashed, leaf, force_update), - 4 => process_epoch_field(v.activation_eligibility_epoch, leaf, force_update), - 5 => process_epoch_field(v.activation_epoch, leaf, force_update), - 6 => process_epoch_field(v.exit_epoch, leaf, force_update), - 7 => process_epoch_field(v.withdrawable_epoch, leaf, force_update), - _ => panic!( - "Validator type only has {} fields, {} out of bounds", - NUM_VALIDATOR_FIELDS, field_idx - ), - } -} - -fn process_pubkey_bytes_field( - val: &PublicKeyBytes, - leaf: &mut Hash256, - force_update: bool, -) -> bool { - let new_tree_hash = merkle_root(val.as_serialized(), 0); - process_slice_field(new_tree_hash.as_bytes(), leaf, force_update) -} - -fn process_slice_field(new_tree_hash: &[u8], leaf: &mut Hash256, force_update: bool) -> bool { - if force_update || leaf.as_bytes() != new_tree_hash { - leaf.assign_from_slice(new_tree_hash); - true - } else { - false - } -} - -fn process_u64_field(val: u64, leaf: &mut Hash256, force_update: bool) -> bool { - let new_tree_hash = int_to_fixed_bytes32(val); - process_slice_field(&new_tree_hash[..], leaf, force_update) -} - -fn process_epoch_field(val: Epoch, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val.as_u64(), leaf, force_update) -} - -fn process_bool_field(val: bool, leaf: &mut Hash256, force_update: bool) -> bool { - process_u64_field(val as u64, leaf, force_update) -} - -#[cfg(test)] -mod test { - use super::*; - use crate::test_utils::TestRandom; - use crate::Epoch; - use rand::SeedableRng; - use rand_xorshift::XorShiftRng; - use tree_hash::TreeHash; - - fn test_validator_tree_hash(v: &Validator) { - let arena = &mut CacheArena::default(); - - let mut cache = v.new_tree_hash_cache(arena); - // With a fresh cache - assert_eq!( - &v.tree_hash_root()[..], - v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - // With a completely up-to-date cache - assert_eq!( - &v.tree_hash_root()[..], 
- v.recalculate_tree_hash_root(arena, &mut cache) - .unwrap() - .as_bytes(), - "{:?}", - v - ); - } - - #[test] - fn default_validator() { - test_validator_tree_hash(&Validator::default()); - } - - #[test] - fn zeroed_validator() { - let v = Validator { - activation_eligibility_epoch: Epoch::from(0u64), - activation_epoch: Epoch::from(0u64), - ..Default::default() - }; - test_validator_tree_hash(&v); - } - - #[test] - fn random_validators() { - let mut rng = XorShiftRng::from_seed([0xf1; 16]); - let num_validators = 1000; - (0..num_validators) - .map(|_| Validator::random_for_test(&mut rng)) - .for_each(|v| test_validator_tree_hash(&v)); - } - - #[test] - #[allow(clippy::assertions_on_constants)] - pub fn smallvec_size_check() { - // If this test fails we need to go and reassess the length of the `SmallVec` in - // `cached_tree_hash::TreeHashCache`. If the size of the `SmallVec` is too slow we're going - // to start doing heap allocations for each validator, this will fragment memory and slow - // us down. - assert!(NUM_VALIDATOR_FIELDS <= 8,); - } -} diff --git a/consensus/types/src/validator.rs b/consensus/types/src/validator.rs index 6860397fb5b..7be8143f6fb 100644 --- a/consensus/types/src/validator.rs +++ b/consensus/types/src/validator.rs @@ -2,28 +2,34 @@ use crate::{ test_utils::TestRandom, Address, BeaconState, ChainSpec, Epoch, EthSpec, Hash256, PublicKeyBytes, }; +use arbitrary::Arbitrary; use serde_derive::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; +use std::sync::Arc; use test_random_derive::TestRandom; +use tree_hash::TreeHash; use tree_hash_derive::TreeHash; +const NUM_FIELDS: usize = 8; + /// Information about a `BeaconChain` validator. /// /// Spec v0.12.1 #[derive( - arbitrary::Arbitrary, - Debug, - Clone, - PartialEq, - Serialize, - Deserialize, - Encode, - Decode, - TestRandom, - TreeHash, + Arbitrary, Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TestRandom, )] +#[serde(deny_unknown_fields)] pub struct Validator { - pub pubkey: PublicKeyBytes, + pub pubkey: Arc, + #[serde(flatten)] + pub mutable: ValidatorMutable, +} + +/// The mutable fields of a validator. +#[derive( + Debug, Clone, PartialEq, Serialize, Deserialize, Encode, Decode, TreeHash, TestRandom, Arbitrary, +)] +pub struct ValidatorMutable { pub withdrawal_credentials: Hash256, #[serde(with = "serde_utils::quoted_u64")] pub effective_balance: u64, @@ -34,52 +40,153 @@ pub struct Validator { pub withdrawable_epoch: Epoch, } +pub trait ValidatorTrait: + std::fmt::Debug + + PartialEq + + Clone + + serde::Serialize + + Send + + Sync + + serde::de::DeserializeOwned + + ssz::Encode + + ssz::Decode + + TreeHash + + TestRandom + + for<'a> arbitrary::Arbitrary<'a> +{ +} + +impl ValidatorTrait for Validator {} +impl ValidatorTrait for ValidatorMutable {} + impl Validator { + pub fn pubkey(&self) -> &PublicKeyBytes { + &self.pubkey + } + + pub fn pubkey_clone(&self) -> Arc { + self.pubkey.clone() + } + + /// Replace the validator's pubkey (should only be used during testing). 
+ pub fn replace_pubkey(&mut self, pubkey: PublicKeyBytes) { + self.pubkey = Arc::new(pubkey); + } + + #[inline] + pub fn withdrawal_credentials(&self) -> Hash256 { + self.mutable.withdrawal_credentials + } + + #[inline] + pub fn effective_balance(&self) -> u64 { + self.mutable.effective_balance + } + + #[inline] + pub fn slashed(&self) -> bool { + self.mutable.slashed + } + + #[inline] + pub fn activation_eligibility_epoch(&self) -> Epoch { + self.mutable.activation_eligibility_epoch + } + + #[inline] + pub fn activation_epoch(&self) -> Epoch { + self.mutable.activation_epoch + } + + #[inline] + pub fn activation_epoch_mut(&mut self) -> &mut Epoch { + &mut self.mutable.activation_epoch + } + + #[inline] + pub fn exit_epoch(&self) -> Epoch { + self.mutable.exit_epoch + } + + pub fn exit_epoch_mut(&mut self) -> &mut Epoch { + &mut self.mutable.exit_epoch + } + + #[inline] + pub fn withdrawable_epoch(&self) -> Epoch { + self.mutable.withdrawable_epoch + } + /// Returns `true` if the validator is considered active at some epoch. + #[inline] pub fn is_active_at(&self, epoch: Epoch) -> bool { - self.activation_epoch <= epoch && epoch < self.exit_epoch + self.activation_epoch() <= epoch && epoch < self.exit_epoch() } /// Returns `true` if the validator is slashable at some epoch. + #[inline] pub fn is_slashable_at(&self, epoch: Epoch) -> bool { - !self.slashed && self.activation_epoch <= epoch && epoch < self.withdrawable_epoch + !self.slashed() && self.activation_epoch() <= epoch && epoch < self.withdrawable_epoch() } /// Returns `true` if the validator is considered exited at some epoch. + #[inline] pub fn is_exited_at(&self, epoch: Epoch) -> bool { - self.exit_epoch <= epoch + self.exit_epoch() <= epoch } /// Returns `true` if the validator is able to withdraw at some epoch. + #[inline] pub fn is_withdrawable_at(&self, epoch: Epoch) -> bool { - epoch >= self.withdrawable_epoch + epoch >= self.withdrawable_epoch() } /// Returns `true` if the validator is eligible to join the activation queue. /// /// Spec v0.12.1 + #[inline] pub fn is_eligible_for_activation_queue(&self, spec: &ChainSpec) -> bool { - self.activation_eligibility_epoch == spec.far_future_epoch - && self.effective_balance == spec.max_effective_balance + self.activation_eligibility_epoch() == spec.far_future_epoch + && self.effective_balance() == spec.max_effective_balance } /// Returns `true` if the validator is eligible to be activated. 
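// Splitting `Validator` into an `Arc<PublicKeyBytes>` plus `ValidatorMutable`
// makes cloning a validator record cheap: the 48-byte pubkey is shared by
// reference count rather than copied. Illustration with a stand-in payload
// (not the real `PublicKeyBytes` type):
use std::sync::Arc;

fn shared_pubkey_sketch() {
    let pubkey: Arc<[u8; 48]> = Arc::new([0u8; 48]);
    let shared = Arc::clone(&pubkey); // bumps a refcount; no byte copy
    assert!(Arc::ptr_eq(&pubkey, &shared)); // both handles alias one allocation
}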
/// /// Spec v0.12.1 + #[inline] pub fn is_eligible_for_activation( &self, state: &BeaconState, spec: &ChainSpec, ) -> bool { // Placement in queue is finalized - self.activation_eligibility_epoch <= state.finalized_checkpoint().epoch + self.activation_eligibility_epoch() <= state.finalized_checkpoint().epoch // Has not yet been activated - && self.activation_epoch == spec.far_future_epoch + && self.activation_epoch() == spec.far_future_epoch + } + + fn tree_hash_root_internal(&self) -> Result { + let mut hasher = tree_hash::MerkleHasher::with_leaves(NUM_FIELDS); + + hasher.write(self.pubkey().tree_hash_root().as_bytes())?; + hasher.write(self.withdrawal_credentials().tree_hash_root().as_bytes())?; + hasher.write(self.effective_balance().tree_hash_root().as_bytes())?; + hasher.write(self.slashed().tree_hash_root().as_bytes())?; + hasher.write( + self.activation_eligibility_epoch() + .tree_hash_root() + .as_bytes(), + )?; + hasher.write(self.activation_epoch().tree_hash_root().as_bytes())?; + hasher.write(self.exit_epoch().tree_hash_root().as_bytes())?; + hasher.write(self.withdrawable_epoch().tree_hash_root().as_bytes())?; + + hasher.finish() } /// Returns `true` if the validator has eth1 withdrawal credential. pub fn has_eth1_withdrawal_credential(&self, spec: &ChainSpec) -> bool { - self.withdrawal_credentials + self.withdrawal_credentials() .as_bytes() .first() .map(|byte| *byte == spec.eth1_address_withdrawal_prefix_byte) @@ -90,7 +197,7 @@ impl Validator { pub fn get_eth1_withdrawal_address(&self, spec: &ChainSpec) -> Option
{ self.has_eth1_withdrawal_credential(spec) .then(|| { - self.withdrawal_credentials + self.withdrawal_credentials() .as_bytes() .get(12..) .map(Address::from_slice) @@ -105,28 +212,29 @@ impl Validator { let mut bytes = [0u8; 32]; bytes[0] = spec.eth1_address_withdrawal_prefix_byte; bytes[12..].copy_from_slice(execution_address.as_bytes()); - self.withdrawal_credentials = Hash256::from(bytes); + self.mutable.withdrawal_credentials = Hash256::from(bytes); } /// Returns `true` if the validator is fully withdrawable at some epoch. pub fn is_fully_withdrawable_at(&self, balance: u64, epoch: Epoch, spec: &ChainSpec) -> bool { - self.has_eth1_withdrawal_credential(spec) && self.withdrawable_epoch <= epoch && balance > 0 + self.has_eth1_withdrawal_credential(spec) + && self.withdrawable_epoch() <= epoch + && balance > 0 } /// Returns `true` if the validator is partially withdrawable. pub fn is_partially_withdrawable_validator(&self, balance: u64, spec: &ChainSpec) -> bool { self.has_eth1_withdrawal_credential(spec) - && self.effective_balance == spec.max_effective_balance + && self.effective_balance() == spec.max_effective_balance && balance > spec.max_effective_balance } } -impl Default for Validator { - /// Yields a "default" `Validator`. Primarily used for testing. +/* +impl Default for ValidatorMutable { fn default() -> Self { - Self { - pubkey: PublicKeyBytes::empty(), - withdrawal_credentials: Hash256::default(), + ValidatorMutable { + withdrawal_credentials: Hash256::zero(), activation_eligibility_epoch: Epoch::from(std::u64::MAX), activation_epoch: Epoch::from(std::u64::MAX), exit_epoch: Epoch::from(std::u64::MAX), @@ -136,6 +244,26 @@ impl Default for Validator { } } } +*/ + +impl TreeHash for Validator { + fn tree_hash_type() -> tree_hash::TreeHashType { + tree_hash::TreeHashType::Container + } + + fn tree_hash_packed_encoding(&self) -> tree_hash::PackedEncoding { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_packing_factor() -> usize { + unreachable!("Struct should never be packed.") + } + + fn tree_hash_root(&self) -> Hash256 { + self.tree_hash_root_internal() + .expect("Validator tree_hash_root should not fail") + } +} #[cfg(test)] mod tests { @@ -150,7 +278,7 @@ mod tests { assert!(!v.is_active_at(epoch)); assert!(!v.is_exited_at(epoch)); assert!(!v.is_withdrawable_at(epoch)); - assert!(!v.slashed); + assert!(!v.slashed()); } #[test] @@ -158,7 +286,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - activation_epoch: epoch, + mutable: ValidatorMutable { + activation_epoch: epoch, + ..Default::default() + }, ..Validator::default() }; @@ -172,7 +303,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - exit_epoch: epoch, + mutable: ValidatorMutable { + exit_epoch: epoch, + ..ValidatorMutable::default() + }, ..Validator::default() }; @@ -186,7 +320,10 @@ mod tests { let epoch = Epoch::new(10); let v = Validator { - withdrawable_epoch: epoch, + mutable: ValidatorMutable { + withdrawable_epoch: epoch, + ..ValidatorMutable::default() + }, ..Validator::default() }; diff --git a/crypto/bls/Cargo.toml b/crypto/bls/Cargo.toml index a610f257cdb..74940b7870b 100644 --- a/crypto/bls/Cargo.toml +++ b/crypto/bls/Cargo.toml @@ -7,8 +7,8 @@ edition = "2021" [dependencies] ethereum_ssz = "0.5.0" tree_hash = "0.5.0" -milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.4.2", optional = true } -rand = "0.7.3" +milagro_bls = { git = "https://github.com/sigp/milagro_bls", tag = "v1.5.1", optional = true } +rand = "0.8.5" serde = 
"1.0.116" serde_derive = "1.0.116" ethereum_serde_utils = "0.5.0" @@ -26,3 +26,10 @@ milagro = ["milagro_bls"] supranational = ["blst"] supranational-portable = ["supranational", "blst/portable"] supranational-force-adx = ["supranational", "blst/force-adx"] + +[dev-dependencies] +criterion = "0.3.3" + +[[bench]] +name = "compress_decompress" +harness = false diff --git a/crypto/bls/benches/compress_decompress.rs b/crypto/bls/benches/compress_decompress.rs new file mode 100644 index 00000000000..3053cf1f9a3 --- /dev/null +++ b/crypto/bls/benches/compress_decompress.rs @@ -0,0 +1,64 @@ +use bls::{PublicKey, SecretKey}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion}; + +pub fn compress(c: &mut Criterion) { + let private_key = SecretKey::random(); + let public_key = private_key.public_key(); + c.bench_with_input( + BenchmarkId::new("compress", 1), + &public_key, + |b, public_key| { + b.iter(|| public_key.compress()); + }, + ); +} + +pub fn decompress(c: &mut Criterion) { + let private_key = SecretKey::random(); + let public_key_bytes = private_key.public_key().compress(); + c.bench_with_input( + BenchmarkId::new("decompress", 1), + &public_key_bytes, + |b, public_key_bytes| { + b.iter(|| public_key_bytes.decompress().unwrap()); + }, + ); +} + +pub fn deserialize_uncompressed(c: &mut Criterion) { + let private_key = SecretKey::random(); + let public_key_bytes = private_key.public_key().serialize_uncompressed(); + c.bench_with_input( + BenchmarkId::new("deserialize_uncompressed", 1), + &public_key_bytes, + |b, public_key_bytes| { + b.iter(|| PublicKey::deserialize_uncompressed(public_key_bytes).unwrap()); + }, + ); +} + +pub fn compress_all(c: &mut Criterion) { + let n = 500_000; + let keys = (0..n) + .map(|_| { + let private_key = SecretKey::random(); + private_key.public_key() + }) + .collect::>(); + c.bench_with_input(BenchmarkId::new("compress", n), &keys, |b, keys| { + b.iter(|| { + for key in keys { + key.compress(); + } + }); + }); +} + +criterion_group!( + benches, + compress, + decompress, + deserialize_uncompressed, + compress_all +); +criterion_main!(benches); diff --git a/crypto/bls/src/generic_public_key.rs b/crypto/bls/src/generic_public_key.rs index 462e4cb2cb0..80b42dfa714 100644 --- a/crypto/bls/src/generic_public_key.rs +++ b/crypto/bls/src/generic_public_key.rs @@ -11,6 +11,9 @@ use tree_hash::TreeHash; /// The byte-length of a BLS public key when serialized in compressed form. pub const PUBLIC_KEY_BYTES_LEN: usize = 48; +/// The byte-length of a BLS public key when serialized in uncompressed form. +pub const PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN: usize = 96; + /// Represents the public key at infinity. pub const INFINITY_PUBLIC_KEY: [u8; PUBLIC_KEY_BYTES_LEN] = [ 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -23,8 +26,17 @@ pub trait TPublicKey: Sized + Clone { /// Serialize `self` as compressed bytes. fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN]; + /// Serialize `self` as uncompressed bytes. + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN]; + /// Deserialize `self` from compressed bytes. fn deserialize(bytes: &[u8]) -> Result; + + /// Deserialize `self` from uncompressed bytes. + /// + /// This function *does not* perform thorough checks of the input bytes and should only be + /// used with bytes output from `Self::serialize_uncompressed`. + fn deserialize_uncompressed(bytes: &[u8]) -> Result; } /// A BLS public key that is generic across some BLS point (`Pub`). 
@@ -65,6 +77,11 @@ where
         self.point.serialize()
     }
 
+    /// Serialize `self` as uncompressed bytes.
+    pub fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] {
+        self.point.serialize_uncompressed()
+    }
+
     /// Deserialize `self` from compressed bytes.
     pub fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
         if bytes == &INFINITY_PUBLIC_KEY[..] {
@@ -75,6 +92,13 @@
         }
     }
+
+    /// Deserialize `self` from uncompressed bytes.
+    pub fn deserialize_uncompressed(bytes: &[u8]) -> Result<Self, Error> {
+        Ok(Self {
+            point: Pub::deserialize_uncompressed(bytes)?,
+        })
+    }
 }
 
 impl<Pub: TPublicKey> Eq for GenericPublicKey<Pub> {}
diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs
index bd28abff9fb..10a073c6c86 100644
--- a/crypto/bls/src/impls/blst.rs
+++ b/crypto/bls/src/impls/blst.rs
@@ -1,10 +1,12 @@
 use crate::{
     generic_aggregate_public_key::TAggregatePublicKey,
     generic_aggregate_signature::TAggregateSignature,
-    generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN},
+    generic_public_key::{
+        GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN,
+    },
     generic_secret_key::TSecretKey,
     generic_signature::{TSignature, SIGNATURE_BYTES_LEN},
-    Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE,
+    BlstError, Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE,
 };
 pub use blst::min_pk as blst_core;
 use blst::{blst_scalar, BLST_ERROR};
@@ -123,6 +125,10 @@ impl TPublicKey for blst_core::PublicKey {
         self.compress()
     }
 
+    fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] {
+        blst_core::PublicKey::serialize(self)
+    }
+
     fn deserialize(bytes: &[u8]) -> Result<Self, Error> {
         // key_validate accepts uncompressed bytes too so enforce byte length here.
         // It also does subgroup checks, noting infinity check is done in `generic_public_key.rs`.
@@ -134,6 +140,19 @@
         }
         Self::key_validate(bytes).map_err(Into::into)
     }
+
+    fn deserialize_uncompressed(bytes: &[u8]) -> Result<Self, Error> {
+        if bytes.len() != PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN {
+            return Err(Error::InvalidByteLength {
+                got: bytes.len(),
+                expected: PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN,
+            });
+        }
+        // Ensure we use the `blst` function rather than the one from this trait.
+        let result: Result<Self, BlstError> = Self::deserialize(bytes);
+        let key = result?;
+        Ok(key)
+    }
 }
 
 /// A wrapper that allows for `PartialEq` and `Clone` impls.
diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index f2d8b79b986..a09fb347e6b 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -1,7 +1,9 @@ use crate::{ generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, + generic_public_key::{ + GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }, generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, @@ -46,11 +48,19 @@ impl TPublicKey for PublicKey { self.0 } + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { + panic!("fake_crypto does not support uncompressed keys") + } + fn deserialize(bytes: &[u8]) -> Result { let mut pubkey = Self::infinity(); pubkey.0[..].copy_from_slice(&bytes[0..PUBLIC_KEY_BYTES_LEN]); Ok(pubkey) } + + fn deserialize_uncompressed(_: &[u8]) -> Result { + panic!("fake_crypto does not support uncompressed keys") + } } impl Eq for PublicKey {} diff --git a/crypto/bls/src/impls/milagro.rs b/crypto/bls/src/impls/milagro.rs index eb4767d3c70..4c659331038 100644 --- a/crypto/bls/src/impls/milagro.rs +++ b/crypto/bls/src/impls/milagro.rs @@ -1,7 +1,9 @@ use crate::{ generic_aggregate_public_key::TAggregatePublicKey, generic_aggregate_signature::TAggregateSignature, - generic_public_key::{GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN}, + generic_public_key::{ + GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, + }, generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, Error, Hash256, ZeroizeHash, @@ -76,14 +78,20 @@ pub fn verify_signature_sets<'a>( impl TPublicKey for milagro::PublicKey { fn serialize(&self) -> [u8; PUBLIC_KEY_BYTES_LEN] { - let mut bytes = [0; PUBLIC_KEY_BYTES_LEN]; - bytes[..].copy_from_slice(&self.as_bytes()); - bytes + self.as_bytes() + } + + fn serialize_uncompressed(&self) -> [u8; PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN] { + self.as_uncompressed_bytes() } fn deserialize(bytes: &[u8]) -> Result { Self::from_bytes(bytes).map_err(Into::into) } + + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Self::from_uncompressed_bytes(bytes).map_err(Into::into) + } } impl TAggregatePublicKey for milagro::AggregatePublicKey { diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 750e1bd5b80..e588b3b3550 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -35,7 +35,9 @@ mod zeroize_hash; pub mod impls; -pub use generic_public_key::{INFINITY_PUBLIC_KEY, PUBLIC_KEY_BYTES_LEN}; +pub use generic_public_key::{ + INFINITY_PUBLIC_KEY, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, +}; pub use generic_secret_key::SECRET_KEY_BYTES_LEN; pub use generic_signature::{INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN}; pub use get_withdrawal_credentials::get_withdrawal_credentials; diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index ad498dbfa87..40c0a4a7937 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -341,6 +341,11 @@ macro_rules! test_suite { .assert_single_message_verify(true) } + #[test] + fn deserialize_infinity_public_key() { + PublicKey::deserialize(&bls::INFINITY_PUBLIC_KEY).unwrap_err(); + } + /// A helper struct to make it easer to deal with `SignatureSet` lifetimes. 
struct OwnedSignatureSet { signature: AggregateSignature, diff --git a/database_manager/Cargo.toml b/database_manager/Cargo.toml index f715528138a..06f141613ea 100644 --- a/database_manager/Cargo.toml +++ b/database_manager/Cargo.toml @@ -16,3 +16,5 @@ tempfile = "3.1.0" types = { path = "../consensus/types" } slog = "2.5.2" strum = { version = "0.24.0", features = ["derive"] } +hex = "0.4.2" +ethereum_ssz = { version = "0.5.0" } diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index ce0b094b772..755edd4f304 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -2,7 +2,7 @@ use beacon_chain::{ builder::Witness, eth1_chain::CachingEth1Backend, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; -use beacon_node::{get_data_dir, get_slots_per_restore_point, ClientConfig}; +use beacon_node::{get_data_dir, ClientConfig}; use clap::{App, Arg, ArgMatches}; use environment::{Environment, RuntimeContext}; use slog::{info, Logger}; @@ -15,7 +15,7 @@ use store::{ DBColumn, HotColdDB, KeyValueStore, LevelDB, }; use strum::{EnumString, EnumVariantNames, VariantNames}; -use types::EthSpec; +use types::{EthSpec, VList}; pub const CMD: &str = "database_manager"; @@ -60,6 +60,24 @@ pub fn inspect_cli_app<'a, 'b>() -> App<'a, 'b> { .default_value("sizes") .possible_values(InspectTarget::VARIANTS), ) + .arg( + Arg::with_name("skip") + .long("skip") + .value_name("N") + .help("Skip over the first N keys"), + ) + .arg( + Arg::with_name("limit") + .long("limit") + .value_name("N") + .help("Output at most N keys"), + ) + .arg( + Arg::with_name("freezer") + .long("freezer") + .help("Inspect the freezer DB rather than the hot DB") + .takes_value(false), + ) .arg( Arg::with_name("output-dir") .long("output-dir") @@ -75,6 +93,26 @@ pub fn prune_payloads_app<'a, 'b>() -> App<'a, 'b> { .about("Prune finalized execution payloads") } +pub fn diff_app<'a, 'b>() -> App<'a, 'b> { + App::new("diff") + .setting(clap::AppSettings::ColoredHelp) + .about("Diff SSZ balances") + .arg( + Arg::with_name("first") + .long("first") + .value_name("PATH") + .takes_value(true) + .required(true), + ) + .arg( + Arg::with_name("second") + .long("second") + .value_name("PATH") + .takes_value(true) + .required(true), + ) +} + pub fn cli_app<'a, 'b>() -> App<'a, 'b> { App::new(CMD) .visible_aliases(&["db"]) @@ -102,6 +140,7 @@ pub fn cli_app<'a, 'b>() -> App<'a, 'b> { .subcommand(version_cli_app()) .subcommand(inspect_cli_app()) .subcommand(prune_payloads_app()) + .subcommand(diff_app()) } fn parse_client_config( @@ -116,10 +155,6 @@ fn parse_client_config( client_config.freezer_db_path = Some(freezer_dir); } - let (sprp, sprp_explicit) = get_slots_per_restore_point::(cli_args)?; - client_config.store.slots_per_restore_point = sprp; - client_config.store.slots_per_restore_point_set_explicitly = sprp_explicit; - Ok(client_config) } @@ -158,7 +193,7 @@ pub fn display_db_version( Ok(()) } -#[derive(Debug, EnumString, EnumVariantNames)] +#[derive(Debug, PartialEq, Eq, EnumString, EnumVariantNames)] pub enum InspectTarget { #[strum(serialize = "sizes")] ValueSizes, @@ -166,11 +201,16 @@ pub enum InspectTarget { ValueTotal, #[strum(serialize = "values")] Values, + #[strum(serialize = "gaps")] + Gaps, } pub struct InspectConfig { column: DBColumn, target: InspectTarget, + skip: Option, + limit: Option, + freezer: bool, /// Configures where the inspect output should be stored. 
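// The `gaps` inspection below decodes the trailing 8 bytes of each database
// key as a big-endian u64 and flags jumps greater than one. This assumes the
// inspected column's keys end in a numeric index and, as the expect message in
// the code notes, are at least 8 bytes long. Decoding sketch:
fn key_index(key: &[u8]) -> u64 {
    u64::from_be_bytes(key[key.len() - 8..].try_into().expect("key is at least 8 bytes"))
}

fn key_index_sketch() {
    let mut key = b"chunk_".to_vec(); // hypothetical column prefix
    key.extend_from_slice(&42u64.to_be_bytes());
    assert_eq!(key_index(&key), 42);
}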
     output_dir: PathBuf,
 }
 
@@ -178,11 +218,18 @@ pub struct InspectConfig {
 fn parse_inspect_config(cli_args: &ArgMatches) -> Result<InspectConfig, String> {
     let column = clap_utils::parse_required(cli_args, "column")?;
     let target = clap_utils::parse_required(cli_args, "output")?;
+    let skip = clap_utils::parse_optional(cli_args, "skip")?;
+    let limit = clap_utils::parse_optional(cli_args, "limit")?;
+    let freezer = cli_args.is_present("freezer");
+
     let output_dir: PathBuf =
         clap_utils::parse_optional(cli_args, "output-dir")?.unwrap_or_else(PathBuf::new);
     Ok(InspectConfig {
         column,
         target,
+        skip,
+        limit,
+        freezer,
         output_dir,
     })
 }
@@ -208,6 +255,20 @@ pub fn inspect_db<E: EthSpec>(
         .map_err(|e| format!("{:?}", e))?;
 
     let mut total = 0;
+    let mut num_keys = 0;
+
+    let sub_db = if inspect_config.freezer {
+        &db.cold_db
+    } else {
+        &db.hot_db
+    };
+
+    let skip = inspect_config.skip.unwrap_or(0);
+    let limit = inspect_config.limit.unwrap_or(usize::MAX);
+
+    let mut prev_key = 0;
+    let mut found_gaps = false;
+
     let base_path = &inspect_config.output_dir;
 
     if let InspectTarget::Values = inspect_config.target {
@@ -215,20 +276,41 @@ pub fn inspect_db<E: EthSpec>(
             .map_err(|e| format!("Unable to create import directory: {:?}", e))?;
     }
 
-    for res in db.hot_db.iter_column(inspect_config.column) {
+    for res in sub_db
+        .iter_column::<Vec<u8>>(inspect_config.column)
+        .skip(skip)
+        .take(limit)
+    {
         let (key, value) = res.map_err(|e| format!("{:?}", e))?;
 
         match inspect_config.target {
             InspectTarget::ValueSizes => {
-                println!("{:?}: {} bytes", key, value.len());
-                total += value.len();
+                println!("{}: {} bytes", hex::encode(&key), value.len());
             }
-            InspectTarget::ValueTotal => {
-                total += value.len();
+            InspectTarget::Gaps => {
+                // Convert last 8 bytes of key to u64.
+                let numeric_key = u64::from_be_bytes(
+                    key[key.len() - 8..]
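+                        // Keys iterate in lexicographic order; with a
+                        // fixed-width big-endian suffix that is also numeric
+                        // order, so a single pass can detect every gap.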
+                        .try_into()
+                        .expect("key is at least 8 bytes"),
+                );
+
+                if numeric_key > prev_key + 1 {
+                    println!(
+                        "gap between keys {} and {} (offset: {})",
+                        prev_key, numeric_key, num_keys,
+                    );
+                    found_gaps = true;
+                }
+                prev_key = numeric_key;
             }
+            InspectTarget::ValueTotal => (),
             InspectTarget::Values => {
-                let file_path =
-                    base_path.join(format!("{}_{}.ssz", inspect_config.column.as_str(), key));
+                let file_path = base_path.join(format!(
+                    "{}_{}.ssz",
+                    inspect_config.column.as_str(),
+                    hex::encode(&key)
+                ));
 
                 let write_result = fs::OpenOptions::new()
                     .create(true)
@@ -248,14 +330,17 @@ pub fn inspect_db<E: EthSpec>(
                 total += value.len();
             }
         }
+        total += value.len();
+        num_keys += 1;
     }
 
-    match inspect_config.target {
-        InspectTarget::ValueSizes | InspectTarget::ValueTotal | InspectTarget::Values => {
-            println!("Total: {} bytes", total);
-        }
+    if inspect_config.target == InspectTarget::Gaps && !found_gaps {
+        println!("No gaps found!");
     }
 
+    println!("Num keys: {}", num_keys);
+    println!("Total: {} bytes", total);
+
     Ok(())
 }
 
@@ -310,6 +395,57 @@ pub fn migrate_db<E: EthSpec>(
     )
 }
 
+pub struct DiffConfig {
+    first: PathBuf,
+    second: PathBuf,
+}
+
+fn parse_diff_config(cli_args: &ArgMatches) -> Result<DiffConfig, String> {
+    let first = clap_utils::parse_required(cli_args, "first")?;
+    let second = clap_utils::parse_required(cli_args, "second")?;
+
+    Ok(DiffConfig { first, second })
+}
+
+pub fn diff<E: EthSpec>(diff_config: &DiffConfig, log: Logger) -> Result<(), Error> {
+    use ssz::{Decode, Encode};
+    use std::fs::File;
+    use std::io::Read;
+    use store::StoreConfig;
+
+    let mut first_file = File::open(&diff_config.first).unwrap();
+    let mut second_file = File::open(&diff_config.second).unwrap();
+
+    let mut first_bytes = vec![];
+    first_file.read_to_end(&mut first_bytes).unwrap();
+    let first: VList<u64, E::ValidatorRegistryLimit> = VList::from_ssz_bytes(&first_bytes).unwrap();
+
+    let mut second_bytes = vec![];
+    second_file.read_to_end(&mut second_bytes).unwrap();
+    let second: VList<u64, E::ValidatorRegistryLimit> =
+        VList::from_ssz_bytes(&second_bytes).unwrap();
+
+    let mut diff_balances = Vec::with_capacity(second.len());
+
+    for (i, new_balance) in second.iter().enumerate() {
+        let old_balance = first.get(i).copied().unwrap_or(0);
+        let diff = new_balance.wrapping_sub(old_balance);
+        diff_balances.push(diff);
+    }
+
+    let diff_ssz_bytes = diff_balances.as_ssz_bytes();
+    let config = StoreConfig::default();
+    let compressed_diff_bytes = config.compress_bytes(&diff_ssz_bytes).unwrap();
+
+    info!(
+        log,
+        "Compressed diff to {} bytes (from {})",
+        compressed_diff_bytes.len(),
+        diff_ssz_bytes.len()
+    );
+
+    Ok(())
+}
+
 pub fn prune_payloads<E: EthSpec>(
     client_config: ClientConfig,
     runtime_context: &RuntimeContext,
@@ -356,6 +492,10 @@ pub fn run<E: EthSpec>(cli_args: &ArgMatches<'_>, env: Environment<E>) -> Result<(), String> {
         ("prune_payloads", Some(_)) => {
             prune_payloads(client_config, &context, log).map_err(format_err)
         }
+        ("diff", Some(cli_args)) => {
+            let diff_config = parse_diff_config(cli_args)?;
+            diff::<E>(&diff_config, log).map_err(format_err)
+        }
         _ => Err("Unknown subcommand, for help `lighthouse database_manager --help`".into()),
     }
 }
diff --git a/lcli/src/main.rs b/lcli/src/main.rs
index d072beaa4e1..76963f067ef 100644
--- a/lcli/src/main.rs
+++ b/lcli/src/main.rs
@@ -15,6 +15,7 @@ mod new_testnet;
 mod parse_ssz;
 mod replace_state_pubkeys;
 mod skip_slots;
+mod state_diff;
 mod transition_blocks;
 
 use clap::{App, Arg, ArgMatches, SubCommand};
@@ -841,6 +842,22 @@
                 .help("Number of repeat runs, useful for benchmarking."),
             )
         )
+        .subcommand(
+            SubCommand::with_name("state-diff")
+                .about("Compute a state diff for a pair of states")
states") + .arg( + Arg::with_name("state1") + .value_name("STATE1") + .takes_value(true) + .help("Path to first SSZ state"), + ) + .arg( + Arg::with_name("state2") + .value_name("STATE2") + .takes_value(true) + .help("Path to second SSZ state"), + ) + ) .get_matches(); let result = matches @@ -934,6 +951,8 @@ fn run( .map_err(|e| format!("Failed to run indexed-attestations command: {}", e)), ("block-root", Some(matches)) => block_root::run::(env, matches) .map_err(|e| format!("Failed to run block-root command: {}", e)), + ("state-diff", Some(matches)) => state_diff::run::(env, matches) + .map_err(|e| format!("Failed to run state-diff command: {}", e)), (other, _) => Err(format!("Unknown subcommand {}. See --help.", other)), } } diff --git a/lcli/src/replace_state_pubkeys.rs b/lcli/src/replace_state_pubkeys.rs index e9e3388c065..fd7e23fa196 100644 --- a/lcli/src/replace_state_pubkeys.rs +++ b/lcli/src/replace_state_pubkeys.rs @@ -42,7 +42,8 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), let mut deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); let mut deposit_root = Hash256::zero(); - for (index, validator) in state.validators_mut().iter_mut().enumerate() { + let validators = state.validators_mut(); + for index in 0..validators.len() { let (secret, _) = recover_validator_secret_from_mnemonic(seed.as_bytes(), index as u32, KeyType::Voting) .map_err(|e| format!("Unable to generate validator key: {:?}", e))?; @@ -52,11 +53,14 @@ pub fn run(testnet_dir: PathBuf, matches: &ArgMatches) -> Result<(), eprintln!("{}: {}", index, keypair.pk); - validator.pubkey = keypair.pk.into(); + validators + .get_mut(index) + .unwrap() + .replace_pubkey(keypair.pk.into()); // Update the deposit tree. let mut deposit_data = DepositData { - pubkey: validator.pubkey, + pubkey: *validators.get(index).unwrap().pubkey(), // Set this to a junk value since it's very time consuming to generate the withdrawal // keys and it's not useful for the time being. 
             withdrawal_credentials: Hash256::zero(),
diff --git a/lcli/src/skip_slots.rs b/lcli/src/skip_slots.rs
index 49d1dd424da..8b81af8024e 100644
--- a/lcli/src/skip_slots.rs
+++ b/lcli/src/skip_slots.rs
@@ -55,7 +55,7 @@ use std::fs::File;
 use std::io::prelude::*;
 use std::path::PathBuf;
 use std::time::{Duration, Instant};
-use types::{BeaconState, CloneConfig, EthSpec, Hash256};
+use types::{BeaconState, EthSpec, Hash256};
 
 const HTTP_TIMEOUT: Duration = Duration::from_secs(10);
 
@@ -121,7 +121,7 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
     };
 
     for i in 0..runs {
-        let mut state = state.clone_with(CloneConfig::all());
+        let mut state = state.clone();
 
         let start = Instant::now();
 
diff --git a/lcli/src/state_diff.rs b/lcli/src/state_diff.rs
new file mode 100644
index 00000000000..5087fad35be
--- /dev/null
+++ b/lcli/src/state_diff.rs
@@ -0,0 +1,44 @@
+use crate::transition_blocks::load_from_ssz_with;
+use clap::ArgMatches;
+use clap_utils::{parse_optional, parse_required};
+use environment::Environment;
+use eth2::{types::BlockId, BeaconNodeHttpClient, SensitiveUrl, Timeouts};
+use std::path::PathBuf;
+use std::time::{Duration, Instant};
+use store::hdiff::{HDiff, HDiffBuffer};
+use types::{BeaconState, EthSpec, FullPayload, SignedBeaconBlock};
+
+pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
+    let state1_path: PathBuf = parse_required(matches, "state1")?;
+    let state2_path: PathBuf = parse_required(matches, "state2")?;
+    let spec = &T::default_spec();
+
+    let state1 = load_from_ssz_with(&state1_path, spec, BeaconState::<T>::from_ssz_bytes)?;
+    let state2 = load_from_ssz_with(&state2_path, spec, BeaconState::<T>::from_ssz_bytes)?;
+
+    let buffer1 = HDiffBuffer::from_state(state1.clone());
+    let buffer2 = HDiffBuffer::from_state(state2.clone());
+
+    let t = std::time::Instant::now();
+    let diff = HDiff::compute(&buffer1, &buffer2).unwrap();
+    let elapsed = t.elapsed();
+
+    println!("Diff size");
+    println!("- state: {} bytes", diff.state_diff_len());
+    println!("- balances: {} bytes", diff.balances_diff_len());
+    println!("Computation time: {}ms", elapsed.as_millis());
+
+    // Re-apply.
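+    // Rebuilding a buffer from `state1` and applying the diff should
+    // reproduce `state2` exactly; the `assert_eq!` below checks this.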
+    let mut recon_buffer = HDiffBuffer::from_state(state1);
+
+    let t = std::time::Instant::now();
+    diff.apply(&mut recon_buffer).unwrap();
+
+    println!("Diff application time: {}ms", t.elapsed().as_millis());
+
+    let recon = recon_buffer.into_state(spec).unwrap();
+
+    assert_eq!(state2, recon);
+
+    Ok(())
+}
diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs
index cf971c69f00..b33f4b75a5c 100644
--- a/lcli/src/transition_blocks.rs
+++ b/lcli/src/transition_blocks.rs
@@ -83,7 +83,7 @@ use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::{Duration, Instant};
 use store::HotColdDB;
-use types::{BeaconState, ChainSpec, CloneConfig, EthSpec, Hash256, SignedBeaconBlock};
+use types::{BeaconState, ChainSpec, EthSpec, Hash256, SignedBeaconBlock};
 
 const HTTP_TIMEOUT: Duration = Duration::from_secs(10);
 
@@ -195,7 +195,10 @@ pub fn run<T: EthSpec>(env: Environment<T>, matches: &ArgMatches) -> Result<(), String> {
     let store = Arc::new(store);
 
     debug!("Building pubkey cache (might take some time)");
-    let validator_pubkey_cache = ValidatorPubkeyCache::new(&pre_state, store)
+    let validator_pubkey_cache = store.immutable_validators.clone();
+    validator_pubkey_cache
+        .write()
+        .import_new_pubkeys(&pre_state)
         .map_err(|e| format!("Failed to create pubkey cache: {:?}", e))?;
 
     /*
@@ -226,8 +229,9 @@
      */
     let mut output_post_state = None;
+    let mut saved_ctxt = None;
     for i in 0..runs {
-        let pre_state = pre_state.clone_with(CloneConfig::all());
+        let pre_state = pre_state.clone();
         let block = block.clone();
 
         let start = Instant::now();
@@ -238,7 +242,8 @@
             block,
             state_root_opt,
             &config,
-            &validator_pubkey_cache,
+            &*validator_pubkey_cache.read(),
+            &mut saved_ctxt,
             spec,
         )?;
 
@@ -288,9 +293,12 @@
             .map_err(|e| format!("Unable to write to output file: {:?}", e))?;
     }
 
+    drop(pre_state);
+
     Ok(())
 }
 
+#[allow(clippy::too_many_arguments)]
 fn do_transition<T: EthSpec>(
     mut pre_state: BeaconState<T>,
     block_root: Hash256,
@@ -298,6 +306,7 @@ fn do_transition<T: EthSpec>(
     mut state_root_opt: Option<Hash256>,
     config: &Config,
     validator_pubkey_cache: &ValidatorPubkeyCache<EphemeralHarnessType<T>>,
+    saved_ctxt: &mut Option<ConsensusContext<T>>,
     spec: &ChainSpec,
 ) -> Result<BeaconState<T>, String> {
     if !config.exclude_cache_builds {
@@ -339,9 +348,22 @@
         .map_err(|e| format!("Unable to build caches: {:?}", e))?;
     debug!("Build all caches (again): {:?}", t.elapsed());
 
-    let mut ctxt = ConsensusContext::new(pre_state.slot())
-        .set_current_block_root(block_root)
-        .set_proposer_index(block.message().proposer_index());
+    let mut ctxt = if let Some(ctxt) = saved_ctxt {
+        ctxt.clone()
+    } else {
+        let mut ctxt = ConsensusContext::new(pre_state.slot())
+            .set_current_block_root(block_root)
+            .set_proposer_index(block.message().proposer_index());
+
+        if config.exclude_cache_builds {
+            ctxt = ctxt.set_epoch_cache(
+                EpochCache::new(&pre_state, spec)
+                    .map_err(|e| format!("unable to build epoch cache: {e:?}"))?,
+            );
+            *saved_ctxt = Some(ctxt.clone());
+        }
+        ctxt
+    };
 
     if !config.no_signature_verification {
         let get_pubkey = move |validator_index| {
diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml
index bbde006efc5..d0672b50c3b 100644
--- a/lighthouse/Cargo.toml
+++ b/lighthouse/Cargo.toml
@@ -21,7 +21,7 @@ spec-minimal = []
 # Support Gnosis spec and Gnosis Beacon Chain.
 gnosis = []
 # Support slasher MDBX backend.
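 # (Commented out below; the slasher's own `mdbx` feature and its pinned git
 # dependency are disabled in tandem in slasher/Cargo.toml.)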
-slasher-mdbx = ["slasher/mdbx"]
+# slasher-mdbx = ["slasher/mdbx"]
 # Support slasher LMDB backend.
 slasher-lmdb = ["slasher/lmdb"]
 # Use jemalloc.
@@ -54,6 +54,7 @@ task_executor = { path = "../common/task_executor" }
 malloc_utils = { path = "../common/malloc_utils" }
 directory = { path = "../common/directory" }
 unused_port = { path = "../common/unused_port" }
+store = { path = "../beacon_node/store" }
 database_manager = { path = "../database_manager" }
 slasher = { path = "../slasher" }
 
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 65d7bd08b28..bd3e783d70b 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -1765,6 +1765,42 @@ fn no_reconstruct_historic_states_flag() {
         .run_with_zero_port()
         .with_config(|config| assert!(!config.chain.reconstruct_historic_states));
 }
+#[test]
+fn db_migration_period_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.store_migrator.epochs_per_run,
+                beacon_node::beacon_chain::migrate::DEFAULT_EPOCHS_PER_RUN
+            )
+        });
+}
+#[test]
+fn db_migration_period_override() {
+    CommandLineTest::new()
+        .flag("db-migration-period", Some("128"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.store_migrator.epochs_per_run, 128));
+}
+#[test]
+fn epochs_per_state_diff_default() {
+    CommandLineTest::new()
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.store.epochs_per_state_diff,
+                beacon_node::beacon_chain::store::config::DEFAULT_EPOCHS_PER_STATE_DIFF
+            )
+        });
+}
+#[test]
+fn epochs_per_state_diff_override() {
+    CommandLineTest::new()
+        .flag("epochs-per-state-diff", Some("1"))
+        .run_with_zero_port()
+        .with_config(|config| assert_eq!(config.store.epochs_per_state_diff, 1));
+}
 
 // Tests for Slasher flags.
 // Using `--slasher-max-db-size` to work around https://github.com/sigp/lighthouse/issues/2342
diff --git a/slasher/Cargo.toml b/slasher/Cargo.toml
index bfa7b5f64c5..85e0bdc88d9 100644
--- a/slasher/Cargo.toml
+++ b/slasher/Cargo.toml
@@ -6,7 +6,7 @@ edition = "2021"
 
 [features]
 default = ["lmdb"]
-mdbx = ["dep:mdbx"]
+# mdbx = ["dep:mdbx"]
 lmdb = ["lmdb-rkv", "lmdb-rkv-sys"]
 
 [dependencies]
@@ -32,7 +32,7 @@ types = { path = "../consensus/types" }
 strum = { version = "0.24.1", features = ["derive"] }
 
 # MDBX is pinned at the last version with Windows and macOS support.
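 # The dependency is commented out in tandem with the `mdbx` feature above
 # (`mdbx = ["dep:mdbx"]`); both must be restored together to re-enable the
 # backend.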
-mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true }
+# mdbx = { package = "libmdbx", git = "https://github.com/sigp/libmdbx-rs", tag = "v0.1.4", optional = true }
 lmdb-rkv = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true }
 lmdb-rkv-sys = { git = "https://github.com/sigp/lmdb-rs", rev = "f33845c6469b94265319aac0ed5085597862c27e", optional = true }
diff --git a/testing/ef_tests/Cargo.toml b/testing/ef_tests/Cargo.toml
index 11283052f07..81a739c2b39 100644
--- a/testing/ef_tests/Cargo.toml
+++ b/testing/ef_tests/Cargo.toml
@@ -35,4 +35,6 @@ fs2 = "0.4.3"
 beacon_chain = { path = "../../beacon_node/beacon_chain" }
 store = { path = "../../beacon_node/store" }
 fork_choice = { path = "../../consensus/fork_choice" }
+malloc_utils = { path = "../../common/malloc_utils" }
+logging = { path = "../../common/logging" }
 execution_layer = { path = "../../beacon_node/execution_layer" }
diff --git a/testing/ef_tests/src/case_result.rs b/testing/ef_tests/src/case_result.rs
index 4982bf94c1f..e0ab308418f 100644
--- a/testing/ef_tests/src/case_result.rs
+++ b/testing/ef_tests/src/case_result.rs
@@ -2,7 +2,7 @@ use super::*;
 use compare_fields::{CompareFields, Comparison, FieldComparison};
 use std::fmt::Debug;
 use std::path::{Path, PathBuf};
-use types::BeaconState;
+use types::{beacon_state::BeaconStateDiff, milhouse::diff::Diff, BeaconState};
 
 pub const MAX_VALUE_STRING_LEN: usize = 500;
 
@@ -39,6 +39,9 @@ pub fn compare_beacon_state_results_without_caches<E: EthSpec>(
     if let (Ok(ref mut result), Some(ref mut expected)) = (result.as_mut(), expected.as_mut()) {
         result.drop_all_caches().unwrap();
         expected.drop_all_caches().unwrap();
+
+        result.apply_pending_mutations().unwrap();
+        expected.apply_pending_mutations().unwrap();
     }
 
     compare_result_detailed(result, expected)
@@ -115,6 +118,23 @@ where
     }
 }
 
+pub fn check_state_diff<E: EthSpec>(
+    pre_state: &BeaconState<E>,
+    opt_post_state: &Option<BeaconState<E>>,
+) -> Result<(), Error> {
+    if let Some(post_state) = opt_post_state {
+        let diff = BeaconStateDiff::compute_diff(pre_state, post_state)
+            .expect("BeaconStateDiff should compute");
+        let mut diffed_state = pre_state.clone();
+        diff.apply_diff(&mut diffed_state)
+            .expect("BeaconStateDiff should apply");
+
+        compare_result_detailed::<_, ()>(&Ok(diffed_state), opt_post_state)
+    } else {
+        Ok(())
+    }
+}
+
 fn fmt_val<T: Debug>(val: T) -> String {
     let mut string = format!("{:?}", val);
     string.truncate(MAX_VALUE_STRING_LEN);
diff --git a/testing/ef_tests/src/cases/bls_verify_msg.rs b/testing/ef_tests/src/cases/bls_verify_msg.rs
index 779b3cf75f7..ac81c2a9bd5 100644
--- a/testing/ef_tests/src/cases/bls_verify_msg.rs
+++ b/testing/ef_tests/src/cases/bls_verify_msg.rs
@@ -1,7 +1,7 @@
 use super::*;
 use crate::case_result::compare_result;
 use crate::impl_bls_load_case;
-use bls::{PublicKeyBytes, Signature, SignatureBytes};
+use bls::{PublicKey, PublicKeyBytes, Signature, SignatureBytes};
 use serde_derive::Deserialize;
 use std::convert::TryInto;
 use types::Hash256;
@@ -30,6 +30,13 @@ impl Case for BlsVerify {
             .try_into()
             .and_then(|signature: Signature| {
                 let pk = self.input.pubkey.decompress()?;
+
+                // Check serialization roundtrip.
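+                // (Every valid pubkey in the BLS test vectors is pushed
+                // through the new uncompressed encode/decode pair and must
+                // compare equal to the original key.)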
+                let pk_uncompressed = pk.serialize_uncompressed();
+                let pk_from_uncompressed = PublicKey::deserialize_uncompressed(&pk_uncompressed)
+                    .expect("uncompressed serialization should round-trip");
+                assert_eq!(pk_from_uncompressed, pk);
+
                 Ok(signature.verify(&pk, Hash256::from_slice(&message)))
             })
             .unwrap_or(false);
diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs
index 6095e1be6b1..ce68c1865a5 100644
--- a/testing/ef_tests/src/cases/epoch_processing.rs
+++ b/testing/ef_tests/src/cases/epoch_processing.rs
@@ -1,6 +1,6 @@
 use super::*;
 use crate::bls_setting::BlsSetting;
-use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches};
 use crate::decode::{ssz_decode_state, yaml_decode_file};
 use crate::type_name;
 use crate::type_name::TypeName;
@@ -147,16 +147,17 @@ impl<E: EthSpec> EpochTransition<E> for Slashings {
                 validator_statuses.process_attestations(state)?;
                 process_slashings(
                     state,
+                    None,
                     validator_statuses.total_balances.current_epoch(),
                     spec,
                 )?;
             }
             BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
+                let mut cache = altair::ParticipationCache::new(state, spec).unwrap();
                 process_slashings(
                     state,
-                    altair::ParticipationCache::new(state, spec)
-                        .unwrap()
-                        .current_epoch_total_active_balance(),
+                    Some(cache.process_slashings_indices()),
+                    cache.current_epoch_total_active_balance(),
                     spec,
                 )?;
             }
@@ -237,7 +238,7 @@ impl<E: EthSpec> EpochTransition<E> for InactivityUpdates {
             BeaconState::Altair(_) | BeaconState::Merge(_) | BeaconState::Capella(_) => {
                 altair::process_inactivity_updates(
                     state,
-                    &altair::ParticipationCache::new(state, spec).unwrap(),
+                    &mut altair::ParticipationCache::new(state, spec).unwrap(),
                     spec,
                 )
             }
@@ -312,18 +313,22 @@ impl<E: EthSpec, T: EpochTransition<E>> Case for EpochProcessing<E, T> {
     fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> {
         self.metadata.bls_setting.unwrap_or_default().check()?;
 
-        let mut state = self.pre.clone();
-        let mut expected = self.post.clone();
-
         let spec = &testing_spec::<E>(fork_name);
 
+        let mut pre_state = self.pre.clone();
+
+        // Processing requires the committee caches.
+        pre_state.build_all_committee_caches(spec).unwrap();
 
-        let mut result = (|| {
-            // Processing requires the committee caches.
-            state.build_all_committee_caches(spec)?;
+        let mut state = pre_state.clone();
+        let mut expected = self.post.clone();
+
+        if let Some(post_state) = expected.as_mut() {
+            post_state.build_all_committee_caches(spec).unwrap();
+        }
 
-            T::run(&mut state, spec).map(|_| state)
-        })();
+        let mut result = T::run(&mut state, spec).map(|_| state);
 
+        check_state_diff(&pre_state, &expected)?;
         compare_beacon_state_results_without_caches(&mut result, &mut expected)
     }
 }
diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs
index c180774bb64..c5d32be5c0b 100644
--- a/testing/ef_tests/src/cases/merkle_proof_validity.rs
+++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs
@@ -50,7 +50,7 @@ impl<E: EthSpec> LoadCase for MerkleProofValidity<E> {
 impl<E: EthSpec> Case for MerkleProofValidity<E> {
     fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> {
         let mut state = self.state.clone();
-        state.initialize_tree_hash_cache();
+        state.update_tree_hash_cache().unwrap();
 
         let proof = match state.compute_merkle_proof(self.merkle_proof.leaf_index) {
             Ok(proof) => proof,
             Err(_) => {
@@ -79,9 +79,6 @@
             }
         }
 
-        // Tree hash cache should still be initialized (not dropped).
-        assert!(state.tree_hash_cache().is_initialized());
-
         Ok(())
     }
 }
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index 5fd00285aaa..aabbed69a5e 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -1,6 +1,6 @@
 use super::*;
 use crate::bls_setting::BlsSetting;
-use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches};
 use crate::decode::{ssz_decode_file, ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
 use crate::testing_spec;
 use serde_derive::Deserialize;
@@ -452,9 +452,8 @@ impl<E: EthSpec, O: Operation<E>> Case for Operations<E, O> {
     fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> {
         let spec = &testing_spec::<E>(fork_name);
-        let mut state = self.pre.clone();
-        let mut expected = self.post.clone();
+        let mut pre_state = self.pre.clone();
 
         // Processing requires the committee caches.
         // NOTE: some of the withdrawals tests have 0 active validators, do not try
         // to build the committee cache in this case.
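The same pre/post pattern recurs in each harness here: build the committee caches once on the pre-state, clone it for processing, and then verify that a computed state diff round-trips. A minimal sketch of that invariant as a standalone helper (hedged: `assert_diff_round_trip` is illustrative, and it assumes `BeaconState` implements `PartialEq` and that the `Diff` trait is in scope, as in `case_result.rs`):

    use types::{beacon_state::BeaconStateDiff, milhouse::diff::Diff, BeaconState, EthSpec};

    fn assert_diff_round_trip<E: EthSpec>(pre: &BeaconState<E>, post: &BeaconState<E>) {
        // Compute the structural diff between the two states...
        let diff = BeaconStateDiff::compute_diff(pre, post).expect("diff should compute");
        // ...apply it back onto a clone of the pre-state...
        let mut reconstructed = pre.clone();
        diff.apply_diff(&mut reconstructed).expect("diff should apply");
        // ...and require an exact reproduction of the post-state.
        assert_eq!(&reconstructed, post);
    }

The harness itself uses `compare_result_detailed` instead of a bare `assert_eq!` so that mismatches are reported field by field.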
@@ -462,6 +461,13 @@ impl<E: EthSpec, O: Operation<E>> Case for Operations<E, O> {
             state.build_all_committee_caches(spec).unwrap();
         }
 
+        let mut state = pre_state.clone();
+        let mut expected = self.post.clone();
+
+        if let Some(post_state) = expected.as_mut() {
+            post_state.build_all_committee_caches(spec).unwrap();
+        }
+
         let mut result = self
             .operation
             .as_ref()
             .apply_to(&mut state, spec, self)
             .map(|()| state);
 
+        check_state_diff(&pre_state, &expected)?;
         compare_beacon_state_results_without_caches(&mut result, &mut expected)
     }
 }
diff --git a/testing/ef_tests/src/cases/sanity_blocks.rs b/testing/ef_tests/src/cases/sanity_blocks.rs
index e51fed1907f..e233b0b0bb0 100644
--- a/testing/ef_tests/src/cases/sanity_blocks.rs
+++ b/testing/ef_tests/src/cases/sanity_blocks.rs
@@ -1,6 +1,6 @@
 use super::*;
 use crate::bls_setting::BlsSetting;
-use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches};
 use crate::decode::{ssz_decode_file_with, ssz_decode_state, yaml_decode_file};
 use serde_derive::Deserialize;
 use state_processing::{
@@ -128,6 +128,16 @@ impl<E: EthSpec> Case for SanityBlocks<E> {
             Ok(res) => (Ok(res.0), Ok(res.1)),
         };
 
         compare_beacon_state_results_without_caches(&mut indiv_result, &mut expected)?;
-        compare_beacon_state_results_without_caches(&mut bulk_result, &mut expected)
+        compare_beacon_state_results_without_caches(&mut bulk_result, &mut expected)?;
+
+        // Check state diff (requires fully built committee caches).
+        let mut pre = self.pre.clone();
+        pre.build_all_committee_caches(spec).unwrap();
+        let post = self.post.clone().map(|mut post| {
+            post.build_all_committee_caches(spec).unwrap();
+            post
+        });
+        check_state_diff(&pre, &post)?;
+
+        Ok(())
     }
 }
diff --git a/testing/ef_tests/src/cases/sanity_slots.rs b/testing/ef_tests/src/cases/sanity_slots.rs
index a38a8930a0e..bc4fe8d0b14 100644
--- a/testing/ef_tests/src/cases/sanity_slots.rs
+++ b/testing/ef_tests/src/cases/sanity_slots.rs
@@ -1,6 +1,6 @@
 use super::*;
 use crate::bls_setting::BlsSetting;
-use crate::case_result::compare_beacon_state_results_without_caches;
+use crate::case_result::{check_state_diff, compare_beacon_state_results_without_caches};
 use crate::decode::{ssz_decode_state, yaml_decode_file};
 use serde_derive::Deserialize;
 use state_processing::per_slot_processing;
@@ -67,6 +67,15 @@
             .try_for_each(|_| per_slot_processing(&mut state, None, spec).map(|_| ()))
             .map(|_| state);
 
-        compare_beacon_state_results_without_caches(&mut result, &mut expected)
+        compare_beacon_state_results_without_caches(&mut result, &mut expected)?;
+
+        // Check state diff (requires fully built committee caches).
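+        // (Mirrors the SanityBlocks change above; `check_state_diff` needs
+        // the committee caches present on both states.)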
+        let mut pre = self.pre.clone();
+        pre.build_all_committee_caches(spec).unwrap();
+        let post = self.post.clone().map(|mut post| {
+            post.build_all_committee_caches(spec).unwrap();
+            post
+        });
+        check_state_diff(&pre, &post)
     }
 }
diff --git a/testing/ef_tests/src/cases/ssz_static.rs b/testing/ef_tests/src/cases/ssz_static.rs
index d0cc5f9eac0..f5b0e4bad88 100644
--- a/testing/ef_tests/src/cases/ssz_static.rs
+++ b/testing/ef_tests/src/cases/ssz_static.rs
@@ -42,7 +42,7 @@ fn load_from_dir(path: &Path) -> Result<(SszStaticRoots, Vec Case for SszStaticTHC> {
         check_tree_hash(&self.roots.root, self.value.tree_hash_root().as_bytes())?;
 
         let mut state = self.value.clone();
-        state.initialize_tree_hash_cache();
         let cached_tree_hash_root = state.update_tree_hash_cache().unwrap();
         check_tree_hash(&self.roots.root, cached_tree_hash_root.as_bytes())?;
 
diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs
index 33f8d67ec00..9ff13b3c45b 100644
--- a/testing/ef_tests/tests/tests.rs
+++ b/testing/ef_tests/tests/tests.rs
@@ -366,6 +366,7 @@ mod ssz_static {
     }
 }
 
+/*
 #[test]
 fn ssz_generic() {
     SszGenericHandler::::default().run();
@@ -375,6 +376,7 @@
     SszGenericHandler::::default().run();
     SszGenericHandler::::default().run();
 }
+*/
 
 #[test]
 fn epoch_processing_justification_and_finalization() {
diff --git a/testing/state_transition_vectors/src/exit.rs b/testing/state_transition_vectors/src/exit.rs
index 7e7fd23e0d0..2e33f8a7a2b 100644
--- a/testing/state_transition_vectors/src/exit.rs
+++ b/testing/state_transition_vectors/src/exit.rs
@@ -171,7 +171,7 @@ vectors_and_tests!(
     invalid_exit_already_initiated,
     ExitTest {
         state_modifier: Box::new(|state| {
-            state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH + 1;
+            *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH + 1;
         }),
         expected: Err(BlockProcessingError::ExitInvalid {
             index: 0,
@@ -190,8 +190,11 @@
     invalid_not_active_before_activation_epoch,
     ExitTest {
         state_modifier: Box::new(|state| {
-            state.validators_mut().get_mut(0).unwrap().activation_epoch =
-                E::default_spec().far_future_epoch;
+            *state
+                .validators_mut()
+                .get_mut(0)
+                .unwrap()
+                .activation_epoch_mut() = E::default_spec().far_future_epoch;
         }),
         expected: Err(BlockProcessingError::ExitInvalid {
             index: 0,
@@ -210,7 +213,7 @@
     invalid_not_active_after_exit_epoch,
     ExitTest {
         state_modifier: Box::new(|state| {
-            state.validators_mut().get_mut(0).unwrap().exit_epoch = STATE_EPOCH;
+            *state.validators_mut().get_mut(0).unwrap().exit_epoch_mut() = STATE_EPOCH;
         }),
         expected: Err(BlockProcessingError::ExitInvalid {
             index: 0,