From 32c9b7bdd1097ceff2c016e1dbea56bacfe3fc1c Mon Sep 17 00:00:00 2001
From: LEE HAJIN <87962518+betterthanhajin@users.noreply.github.com>
Date: Mon, 2 Dec 2024 11:34:48 +0900
Subject: [PATCH 01/16] Documentation Fix: Typo and URL Correction (#73397)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What?

This pull request addresses two minor documentation issues in the Next.js project:

1. A typo fix in the API reference for the `cacheLife` function.
2. A correction of an incorrect URL for `experimental.typedRoutes` in the Turbopack documentation.

### Why?

Accurate documentation is crucial for developers to effectively use the features of Next.js. By ensuring the clarity and correctness of these documents, we improve the user experience and help prevent confusion when navigating and implementing the API features.

### How?

- **Typo Correction**: In `docs/01-app/03-api-reference/04-functions/cacheLife.mdx`, the `cacheLife` function documentation had a capitalization error in the phrase “The `cacheLife` function”. This has been corrected to the lowercase styling: “the `cacheLife` function”.
- **URL Correction**: In `docs/01-app/03-api-reference/08-turbopack.mdx`, the link pointing to `experimental.typedRoutes` used an incorrect absolute URL. It has been updated to the correct relative path: `/docs/app/api-reference/config/next-config-js/typedRoutes`.

These changes ensure the accuracy and readability of the documentation, aligning it with Next.js standards.

---

---------

Co-authored-by: Sam Ko
---
 docs/01-app/03-api-reference/04-functions/cacheLife.mdx | 2 +-
 docs/01-app/03-api-reference/08-turbopack.mdx           | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/01-app/03-api-reference/04-functions/cacheLife.mdx b/docs/01-app/03-api-reference/04-functions/cacheLife.mdx
index 18952d72a2cad..5e3cb133ffe0b 100644
--- a/docs/01-app/03-api-reference/04-functions/cacheLife.mdx
+++ b/docs/01-app/03-api-reference/04-functions/cacheLife.mdx
@@ -40,7 +40,7 @@ const nextConfig = {
 export default nextConfig
 ```
 
-Then, import and invoke The `cacheLife` function within the scope of the function or component:
+Then, import and invoke the `cacheLife` function within the scope of the function or component:
 
 ```tsx filename="app/page.tsx" highlight={5} switcher
 'use cache'
diff --git a/docs/01-app/03-api-reference/08-turbopack.mdx b/docs/01-app/03-api-reference/08-turbopack.mdx
index f5b6fabb9dc86..2100fb38f26ac 100644
--- a/docs/01-app/03-api-reference/08-turbopack.mdx
+++ b/docs/01-app/03-api-reference/08-turbopack.mdx
@@ -56,7 +56,7 @@ These features are currently not supported:
   - Blocking `.css` imports in `pages/_document.tsx`
     - Currently with webpack Next.js blocks importing `.css` files in `pages/_document.tsx`
     - We are planning to implement this warning in the future.
-- [`experimental.typedRoutes`](https://nextjs.org/docs/app/api-reference/config/next-config-js/typedRoutes)
+- [`experimental.typedRoutes`](/docs/app/api-reference/config/next-config-js/typedRoutes)
   - We are planning to implement this in the future.
 - `experimental.nextScriptWorkers`
   - We are planning to implement this in the future.

From b5443b27c4a8305b7c790c89d00bd292f9619dd7 Mon Sep 17 00:00:00 2001
From: Tobias Koppers
Date: Mon, 2 Dec 2024 11:40:43 +0100
Subject: [PATCH 02/16] [Turbopack] Custom persistence layer (#73029)

This adds a new custom database implementation for Persistent Caching.

Why is this better than an existing database?
It can be implemented especially for our use case and can come with assumptions and restrictions that other databases can't have.

What is special about our use case?

* We only do one write at a time, but in a single very large transaction that can potentially push GBs into the database.
* We want to fill that large transaction from multiple threads.
* The initial cold build is very important from a performance perspective.

How do we tackle that?

* We only allow a single WriteBatch at a time, but we start writing to disk while filling that write batch.
* When we commit the WriteBatch, we write a sequence number to make these writes visible.
* Once written and committed, files are immutable (but they can be deleted).
* Every WriteBatch writes additional files that logically override the values from earlier files. (Deletions are stored as tombstones.)
* When the average number of files to read reaches a threshold, we do a compaction.
* A compaction runs a merge on multiple files to create new sorted files. This reduces that metric.
* We limit the number of merged files to avoid long compactions.
* In every file we store a key range and an AQMF to quickly find out if a key can be in that file. The false positive rate per file is 0.1%.
* When we need to look up a key in a file, we do a binary search, as keys are stored in sorted order (sorted by their hash).
* Files are split into blocks that are stored compressed with LZ4, with two shared compression dictionaries per file (one for keys and one for values).
* We have an additional index block to find the right key block without a search.
* We support multiple key families to split the database for different kinds of data.
* Depending on the size of the value, it will be stored: 1. in a block with other small values, 2. in its own block, 3. in a separate file.
* We have a block cache to cache decompressed blocks.
* We have an AQMF cache to cache deserialized filters.
* Files are memory mapped for reading to leverage the OS cache and memory.

See more details in the added README.md file.
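A minimal usage sketch of the intended API, based on the `TurboPersistence` methods visible in this diff (`open`, `write_batch`, `commit_write_batch`, `compact`). The `put`/`delete` shapes and the `::<Vec<u8>, 1>` key type and family count are assumptions, since `write_batch.rs` is not fully shown in this excerpt:

```rust
use std::path::PathBuf;

use anyhow::Result;
use turbo_persistence::TurboPersistence;

fn example() -> Result<()> {
    // Opens the directory and cleans up left-over uncommitted files.
    let db = TurboPersistence::open(PathBuf::from("/tmp/turbo-db"))?;

    // Only one write batch may be active at a time, but it may be filled
    // from multiple threads. Key type and family count are assumed here.
    let batch = db.write_batch::<Vec<u8>, 1>()?;
    // Hypothetical put/delete signatures: (family, key, value).
    batch.put(0, b"some-key".to_vec(), b"some-value".to_vec())?;
    batch.delete(0, b"stale-key".to_vec())?;

    // fsyncs the new files and bumps the sequence number in CURRENT,
    // making the writes visible to readers.
    db.commit_write_batch(batch)?;

    // Compact when the average number of SST files touched per missing-key
    // lookup ("coverage") exceeds 10.0, merging at most 4 files at once.
    db.compact(10.0, 4)?;
    Ok(())
}
```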
--- Cargo.lock | 133 ++- Cargo.toml | 1 + turbopack/crates/turbo-persistence/Cargo.toml | 33 + turbopack/crates/turbo-persistence/README.md | 251 +++++ .../crates/turbo-persistence/src/arc_slice.rs | 93 ++ .../crates/turbo-persistence/src/collector.rs | 113 +++ .../turbo-persistence/src/collector_entry.rs | 85 ++ .../turbo-persistence/src/compaction/mod.rs | 1 + .../src/compaction/selector.rs | 371 +++++++ .../crates/turbo-persistence/src/constants.rs | 34 + turbopack/crates/turbo-persistence/src/db.rs | 910 ++++++++++++++++++ turbopack/crates/turbo-persistence/src/key.rs | 205 ++++ turbopack/crates/turbo-persistence/src/lib.rs | 24 + .../turbo-persistence/src/lookup_entry.rs | 66 ++ .../turbo-persistence/src/merge_iter.rs | 79 ++ .../src/static_sorted_file.rs | 717 ++++++++++++++ .../src/static_sorted_file_builder.rs | 532 ++++++++++ .../crates/turbo-persistence/src/tests.rs | 347 +++++++ .../turbo-persistence/src/write_batch.rs | 296 ++++++ .../crates/turbo-tasks-backend/Cargo.toml | 4 +- .../turbo-tasks-backend/src/backend/mod.rs | 10 + .../src/backing_storage.rs | 4 + .../src/database/key_value_database.rs | 4 + .../turbo-tasks-backend/src/database/mod.rs | 15 +- .../turbo-tasks-backend/src/database/turbo.rs | 147 +++ .../src/kv_backing_storage.rs | 4 + .../crates/turbo-tasks-backend/src/lib.rs | 35 +- 27 files changed, 4471 insertions(+), 43 deletions(-) create mode 100644 turbopack/crates/turbo-persistence/Cargo.toml create mode 100644 turbopack/crates/turbo-persistence/README.md create mode 100644 turbopack/crates/turbo-persistence/src/arc_slice.rs create mode 100644 turbopack/crates/turbo-persistence/src/collector.rs create mode 100644 turbopack/crates/turbo-persistence/src/collector_entry.rs create mode 100644 turbopack/crates/turbo-persistence/src/compaction/mod.rs create mode 100644 turbopack/crates/turbo-persistence/src/compaction/selector.rs create mode 100644 turbopack/crates/turbo-persistence/src/constants.rs create mode 100644 turbopack/crates/turbo-persistence/src/db.rs create mode 100644 turbopack/crates/turbo-persistence/src/key.rs create mode 100644 turbopack/crates/turbo-persistence/src/lib.rs create mode 100644 turbopack/crates/turbo-persistence/src/lookup_entry.rs create mode 100644 turbopack/crates/turbo-persistence/src/merge_iter.rs create mode 100644 turbopack/crates/turbo-persistence/src/static_sorted_file.rs create mode 100644 turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs create mode 100644 turbopack/crates/turbo-persistence/src/tests.rs create mode 100644 turbopack/crates/turbo-persistence/src/write_batch.rs create mode 100644 turbopack/crates/turbo-tasks-backend/src/database/turbo.rs diff --git a/Cargo.lock b/Cargo.lock index 222d9cbed901d..29cdeb27ed348 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -340,7 +340,7 @@ checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" dependencies = [ "async-task", "concurrent-queue", - "fastrand 2.0.0", + "fastrand 2.2.0", "futures-lite 2.3.0", "slab", ] @@ -393,7 +393,7 @@ dependencies = [ "futures-lite 2.3.0", "parking", "polling 3.7.2", - "rustix 0.38.31", + "rustix 0.38.41", "slab", "tracing", "windows-sys 0.52.0", @@ -441,7 +441,7 @@ dependencies = [ "cfg-if", "event-listener 3.1.0", "futures-lite 1.13.0", - "rustix 0.38.31", + "rustix 0.38.41", "windows-sys 0.48.0", ] @@ -457,7 +457,7 @@ dependencies = [ "cfg-if", "futures-core", "futures-io", - "rustix 0.38.31", + "rustix 0.38.41", "signal-hook-registry", "slab", "windows-sys 0.52.0", @@ -2148,9 +2148,9 @@ dependencies = [ 
[[package]] name = "fastrand" -version = "2.0.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "486f806e73c5707928240ddc295403b1b93c96a02038563881c4a2fd84b81ac4" [[package]] name = "fdeflate" @@ -2320,7 +2320,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ - "fastrand 2.0.0", + "fastrand 2.2.0", "futures-core", "futures-io", "parking", @@ -3343,9 +3343,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.164" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "433bfe06b8c75da9b2e3fbea6e5329ff87748f0b144ef75306e674c3f6f7c13f" [[package]] name = "libfuzzer-sys" @@ -3479,9 +3479,9 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litrs" @@ -3594,7 +3594,16 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" dependencies = [ - "twox-hash", + "twox-hash 1.6.3", +] + +[[package]] +name = "lzzzz" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac94cca0c9c2ac03c63092f1377df5b83e4c35441f9d83a53ca214c58685f7bd" +dependencies = [ + "cc", ] [[package]] @@ -3695,6 +3704,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.7.1" @@ -4711,7 +4729,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae1d5c74c9876f070d3e8fd503d748c7d974c3e48da8f41350fa5222ef9b4391" dependencies = [ "atomic-waker", - "fastrand 2.0.0", + "fastrand 2.2.0", "futures-io", ] @@ -4799,7 +4817,7 @@ dependencies = [ "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", - "rustix 0.38.31", + "rustix 0.38.41", "tracing", "windows-sys 0.52.0", ] @@ -5015,6 +5033,17 @@ dependencies = [ "unicase", ] +[[package]] +name = "qfilter" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b36883275f761fe4c69f0ba982d18b36208b72d647ad9d468afcad70fb08a4e" +dependencies = [ + "serde", + "serde_bytes", + "xxhash-rust", +] + [[package]] name = "qstring" version = "0.7.2" @@ -5030,6 +5059,18 @@ version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" +[[package]] +name = "quick_cache" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d7c94f8935a9df96bb6380e8592c70edf497a643f94bd23b2f76b399385dbf4" +dependencies = [ + "ahash 0.8.11", + "equivalent", + "hashbrown 0.14.5", + "parking_lot", +] + [[package]] name = "quote" version = "1.0.36" @@ -5488,14 +5529,14 @@ dependencies = [ 
[[package]] name = "rustix" -version = "0.38.31" +version = "0.38.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "d7f649912bc1495e167a6edee79151c84b1bad49748cb4f1f1167f459f6224f6" dependencies = [ "bitflags 2.5.0", "errno", "libc", - "linux-raw-sys 0.4.13", + "linux-raw-sys 0.4.14", "windows-sys 0.52.0", ] @@ -7700,15 +7741,15 @@ checksum = "8ae9980cab1db3fceee2f6c6f643d5d8de2997c58ee8d25fb0cc8a9e9e7348e5" [[package]] name = "tempfile" -version = "3.8.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" dependencies = [ "cfg-if", - "fastrand 2.0.0", - "redox_syscall", - "rustix 0.38.31", - "windows-sys 0.48.0", + "fastrand 2.2.0", + "once_cell", + "rustix 0.38.41", + "windows-sys 0.59.0", ] [[package]] @@ -7726,7 +7767,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" dependencies = [ - "rustix 0.38.31", + "rustix 0.38.41", "windows-sys 0.48.0", ] @@ -8313,6 +8354,28 @@ dependencies = [ "utf-8", ] +[[package]] +name = "turbo-persistence" +version = "0.1.0" +dependencies = [ + "anyhow", + "byteorder", + "lzzzz", + "memmap2 0.9.5", + "parking_lot", + "pot", + "qfilter", + "quick_cache", + "rand", + "rayon", + "rustc-hash 1.1.0", + "serde", + "tempfile", + "thread_local", + "twox-hash 2.0.1", + "zstd", +] + [[package]] name = "turbo-prehash" version = "0.1.0" @@ -8411,6 +8474,7 @@ dependencies = [ "tokio", "tokio-scoped", "tracing", + "turbo-persistence", "turbo-prehash", "turbo-rcstr", "turbo-tasks", @@ -8524,7 +8588,7 @@ name = "turbo-tasks-hash" version = "0.1.0" dependencies = [ "turbo-tasks-macros", - "twox-hash", + "twox-hash 1.6.3", ] [[package]] @@ -9234,6 +9298,15 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "twox-hash" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6db6856664807f43c17fbaf2718e2381ac1476a449aa104f5f64622defa1245" +dependencies = [ + "rand", +] + [[package]] name = "typed-arena" version = "2.0.2" @@ -10238,7 +10311,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix 0.38.31", + "rustix 0.38.41", ] [[package]] @@ -10676,8 +10749,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f" dependencies = [ "libc", - "linux-raw-sys 0.4.13", - "rustix 0.38.31", + "linux-raw-sys 0.4.14", + "rustix 0.38.41", ] [[package]] @@ -10734,9 +10807,9 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] diff --git a/Cargo.toml b/Cargo.toml index aed0d1881c6b4..b90470bdcc40e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ auto-hash-map = { path = "turbopack/crates/turbo-tasks-auto-hash-map" } swc-ast-explorer = { path = "turbopack/crates/turbopack-swc-ast-explorer" } turbo-prehash = { path = "turbopack/crates/turbo-prehash" } turbo-rcstr = { path = "turbopack/crates/turbo-rcstr" } +turbo-persistence = { path = 
"turbopack/crates/turbo-persistence" } turbo-tasks-malloc = { path = "turbopack/crates/turbo-tasks-malloc", default-features = false } turbo-tasks = { path = "turbopack/crates/turbo-tasks" } turbo-tasks-backend = { path = "turbopack/crates/turbo-tasks-backend" } diff --git a/turbopack/crates/turbo-persistence/Cargo.toml b/turbopack/crates/turbo-persistence/Cargo.toml new file mode 100644 index 0000000000000..8783c62d456fb --- /dev/null +++ b/turbopack/crates/turbo-persistence/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "turbo-persistence" +version = "0.1.0" +edition = "2021" +license = "MIT" + +[features] +verify_sst_content = [] +strict_checks = [] +stats = ["quick_cache/stats"] + +[dependencies] +anyhow = { workspace = true } +pot = "3.0.0" +byteorder = "1.5.0" +lzzzz = "1.1.0" +memmap2 = "0.9.5" +parking_lot = { workspace = true } +qfilter = { version = "0.2.1", features = ["serde"] } +quick_cache = { version = "0.6.9" } +rayon = { workspace = true } +rustc-hash = { workspace = true } +serde = { workspace = true } +thread_local = { workspace = true } +twox-hash = { version = "2.0.1", features = ["xxhash64"] } +zstd = { version = "0.13.2", features = ["zdict_builder"] } + +[dev-dependencies] +rand = { workspace = true, features = ["small_rng"] } +tempfile = "3.14.0" + +[lints] +workspace = true diff --git a/turbopack/crates/turbo-persistence/README.md b/turbopack/crates/turbo-persistence/README.md new file mode 100644 index 0000000000000..dedd7ddd96a1c --- /dev/null +++ b/turbopack/crates/turbo-persistence/README.md @@ -0,0 +1,251 @@ +# turbo-persistence + +This crate provides a way to persist key value pairs into a folder and restore them later. + +The API only allows a single write transaction at a time, but multiple threads can fill the transaction with (non-conflicting) data concurrently. + +When pushing data into the WriteBatch it is already persisted to disk, but only becomes visible after the transaction is committed. On startup left-over uncommitted files on disk are automatically cleaned up. + +The architecture is optimized for pushing a lot data to disk in a single transaction, while still allowing for fast random reads. + +It supports having multiple key families, which are stored in separate files, but a write batch can contain keys from multiple families. Each key family defines a separate key space. Entries in different key families doesn't influence each other (also not performance-wise). + +## On disk format + +There is a single `CURRENT` file which stores the latest committed sequence number. + +All other files have a sequence number as file name, e. g. `0000123.sst`. All files are immutable once there sequence number is <= the committed sequence number. But they might be deleted when they are superseeded by other committed files. + +There are two different file types: + +* Static Sorted Table (SST, `*.sst`): These files contain key value pairs. +* Blob files (`*.blob`): These files contain large values. + +Therefore there are there value types: + +* INLINE: Small values that are stored directly in the `*.sst` files. +* BLOB: Large values that are stored in `*.blob` files. +* DELETED: Values that are deleted. (Tombstone) +* Future: + * MERGE: An application specific update operation that is applied on the old value. 
+
+### SST file
+
+* Headers
+  * 4 bytes magic number and version
+  * 4 bytes key family
+  * 8 bytes min hash
+  * 8 bytes max hash
+  * 3 bytes AQMF length
+  * 2 bytes key Compression Dictionary length
+  * 2 bytes value Compression Dictionary length
+  * 2 bytes block count
+* serialized AQMF
+* serialized key Compression Dictionary
+* serialized value Compression Dictionary
+* foreach block
+  * 4 bytes end of block offset relative to start of all blocks
+* foreach block
+  * 4 bytes uncompressed block length
+  * compressed data
+
+#### Index Block
+
+* 1 byte block type (0: index block)
+* 2 bytes block index
+* `n` times
+  * 8 bytes hash
+  * 2 bytes block index
+
+An index block contains `n` 8-byte hashes, which specify `n - 1` hash ranges (an equal hash goes into the previous range, except for the first key). Between these `n` hashes there are `n - 1` 2-byte block indices that point to the block that contains the hash range.
+
+The hashes are sorted.
+
+`n` is `(block size + 1) / 10`
+
+#### Key Block
+
+* 1 byte block type (1: key block)
+* 3 bytes entry count
+* foreach entry
+  * 1 byte type
+  * 3 bytes position in block after header
+* Max block size: 16 MB
+
+A key block contains `n` keys, which specify `n` key-value pairs.
+
+Depending on the `type` field, each entry has a different format:
+
+* 0: normal key (small value)
+  * 8 bytes key hash
+  * key data
+  * 2 bytes block index
+  * 2 bytes size
+  * 4 bytes position in block
+* 1: blob reference
+  * 8 bytes key hash
+  * key data
+  * 4 bytes sequence number
+* 2: deleted key / tombstone (no data)
+  * 8 bytes key hash
+  * key data
+* 3: normal key (medium sized value)
+  * 8 bytes key hash
+  * key data
+  * 2 bytes block index
+* 7: merge key (future)
+  * key data
+  * 2 bytes block index
+  * 3 bytes size
+  * 4 bytes position in block
+* 8..255: inlined key (future)
+  * 8 bytes key hash
+  * key data
+  * `type - 8` bytes value data
+
+The entries are sorted by key hash and key.
+
+TODO: An 8-byte key hash is a bit inefficient for small keys.
+
+#### Value Block
+
+* no header, all bytes are data referenced by other blocks
+* max block size: 4 GB
+
+### Blob file
+
+The plain value, compressed with dynamic compression.
+
+## Reading
+
+Reading starts from the current sequence number and goes downwards.
+
+* We have all SST files memory mapped
+* for i = CURRENT sequence number .. 0
+  * Check the AQMF of the SST file for key existence -> if not present, continue
+  * let block = 0
+  * loop
+    * Index Block: find the key range that contains the key by binary search
+      * found -> set block, continue
+      * not found -> break
+    * Key Block: find the key by binary search
+      * found -> look up the value from the value block, return
+      * not found -> break
+
+## Writing
+
+Writing starts by creating a new WriteBatch. It maintains an atomic counter of the next free sequence number.
+
+The WriteBatch has a thread-local buffer that accumulates operations until a certain threshold is reached. Then the buffer is sorted and written to a new SST file (and maybe some blob files).
+
+When the WriteBatch is committed, all thread-local buffers are merged into a single global buffer and written into new SST files (potentially multiple files when a threshold is reached).
+
+fsync! The new sequence number is written to the `CURRENT` file.
+
+After that, optimization might take place.
+
+## Compaction
+
+For compaction we compute the "coverage" of the SST files. The coverage is the average number of SST files that need to be touched to figure out that a key is missing.
+The coverage can be computed by looking at the min_hash and max_hash of the SST files only.
+
+For a single SST file we can compute `(max_hash - min_hash) / u64::MAX` as the coverage of the SST file. We sum up all these coverages to get the total coverage.
+
+Compaction chooses a few SST files and runs the merge step of merge sort on them to create a few new SST files with sorted ranges.
+
+Example:
+
+```
+key hash range: | 0 ...                      u64::MAX |
+SST 1:            |----------------|
+SST 2:                    |----------------|
+SST 3:                                   |-----|
+```
+
+can be compacted into:
+
+```
+key hash range: | 0 ...                      u64::MAX |
+SST 1':           |-------|
+SST 2':                   |------|
+SST 3':                           |-----|
+```
+
+The merge operation decreases the total coverage, since the new SST files will have a coverage of < 1.
+
+But we need to be careful to insert the new SST files in the correct location again, since items in these SST files might be overridden in later SST files and we don't want to change that.
+
+Since SST files whose sequence number is <= the committed sequence number are immutable, we can't change the files and we can't insert new files at these sequence numbers.
+Instead we need to insert the new SST files after the current sequence number and copy all SST files that come after the original SST files to positions after them. (Actually we only need to copy SST files with overlapping key hash ranges, and we can hardlink them instead of copying.) Later we write the new current sequence number and delete the original and all copied SST files.
+
+We can run multiple merge operations concurrently when the key hash ranges are not overlapping or they are from different key families. The copy operation needs to happen strictly after all merge operations.
+
+There must not be another SST file with an overlapping key hash range between the files of a merge operation.
+
+During the merge operation we eliminate duplicate keys. When blob references are eliminated, we delete the blob file after the current sequence number was updated.
+
+Since the process might exit unexpectedly, to avoid "forgetting" to delete the SST files we keep track of them in a `*.del` file. This file contains the sequence numbers of SST and blob files that should be deleted. We write that file before the current sequence number is updated. On restart we execute the deletes again.
+
+We limit the number of SST files that are merged at once to avoid long compactions.
+
+Full example:
+
+```
+key hash range: | 0 ...                      u64::MAX | Family
+SST 1:            |-|                                    1
+SST 2:               |----------------|                  1
+SST 3:                  |----------------|               1
+SST 4:                       |-----|                     2
+SST 5:                          |-----|                  2
+SST 6:                            |-------|              1
+SST 7:                                 |-------|         1
+SST 8:                                   |--------|      2
+SST 9:                                     |--------|    2
+CURRENT: 9
+```
+
+Compaction could select SST 2, 3, 6 and SST 4, 5, 8 for merging (we limit it to 3 SST files per merge operation). This also selects SST 7, 9 for copying. The current sequence number is 9.
+
+We merge SST 2, 3, 6 into new SST files 10, 12, 14 and SST 4, 5, 8 into new SST files 11, 13. Both operations are done concurrently, so they might pick free sequence numbers in random order. The operation might result in fewer SST files due to duplicate keys.
+
+After that we copy SST files 7, 9 to new SST files 15, 16.
+
+We write a "del" file at sequence number 17.
+
+After that we write the new current sequence number 17.
+
+Then we delete SST files 2, 3, 6, 4, 5, 8, 7 and 9.
+
+SST file 1 stays unchanged.
+
+```
+key hash range: | 0 ...                      u64::MAX | Family
+SST 1:            |-|                                    1
+SST 10:              |-----|                             1
+SST 12:                     |-----|                      1
+SST 11:                      |------|                    2
+SST 14:                            |-------|             1
+SST 13:                                 |-----|          2
+SST 15:                                |-------|         1
+SST 16:                                   |--------|     2
+DEL 17: (2, 3, 4, 5, 6, 7, 8, 9)
+CURRENT: 17
+```
+
+Configuration options for compactions are:
+
+* max number of SST files that are merged at once
+* coverage when compaction is triggered (otherwise calling compact is a noop)
+
+## Opening
+
+* Read the `CURRENT` file.
+* Delete all files with a higher sequence number than the one in the `CURRENT` file.
+* Read all `*.del` files and delete the files that are listed in there.
+* Read all `*.sst` files and memory map them.
+
+## Closing
+
+* fsync!
+* (this also deletes enqueued files)
diff --git a/turbopack/crates/turbo-persistence/src/arc_slice.rs b/turbopack/crates/turbo-persistence/src/arc_slice.rs
new file mode 100644
index 0000000000000..785331a9262fc
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/arc_slice.rs
@@ -0,0 +1,93 @@
+use std::{
+    borrow::Borrow,
+    fmt::{self, Debug, Formatter},
+    hash::{Hash, Hasher},
+    ops::{Deref, Range},
+    sync::Arc,
+};
+
+/// An owned slice that is backed by an `Arc`.
+#[derive(Clone)]
+pub struct ArcSlice<T> {
+    data: *const [T],
+    arc: Arc<[T]>,
+}
+
+unsafe impl<T: Send + Sync> Send for ArcSlice<T> {}
+unsafe impl<T: Send + Sync> Sync for ArcSlice<T> {}
+
+impl<T> From<Arc<[T]>> for ArcSlice<T> {
+    fn from(arc: Arc<[T]>) -> Self {
+        Self {
+            data: &*arc as *const [T],
+            arc,
+        }
+    }
+}
+
+impl<T> From<Box<[T]>> for ArcSlice<T> {
+    fn from(b: Box<[T]>) -> Self {
+        Self::from(Arc::from(b))
+    }
+}
+
+impl<T> Deref for ArcSlice<T> {
+    type Target = [T];
+
+    fn deref(&self) -> &Self::Target {
+        unsafe { &*self.data }
+    }
+}
+
+impl<T> Borrow<[T]> for ArcSlice<T> {
+    fn borrow(&self) -> &[T] {
+        self
+    }
+}
+
+impl<T: Hash> Hash for ArcSlice<T> {
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        self.deref().hash(state)
+    }
+}
+
+impl<T: PartialEq> PartialEq for ArcSlice<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.deref().eq(other.deref())
+    }
+}
+
+impl<T: Debug> Debug for ArcSlice<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Debug::fmt(&**self, f)
+    }
+}
+
+impl<T: Eq> Eq for ArcSlice<T> {}
+
+impl<T> ArcSlice<T> {
+    /// Creates a new `ArcSlice` from a pointer to a slice and an `Arc`.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that the pointer is pointing to a valid slice that is kept alive by
+    /// the `Arc`.
+    pub unsafe fn new_unchecked(data: *const [T], arc: Arc<[T]>) -> Self {
+        Self { data, arc }
+    }
+
+    /// Get the backing arc
+    pub fn full_arc(this: &ArcSlice<T>) -> Arc<[T]> {
+        this.arc.clone()
+    }
+
+    /// Returns a new `ArcSlice` that points to a slice of the current slice.
+    pub fn slice(self, range: Range<usize>) -> ArcSlice<T> {
+        let data = &*self;
+        let data = &data[range] as *const [T];
+        Self {
+            data,
+            arc: self.arc,
+        }
+    }
+}
diff --git a/turbopack/crates/turbo-persistence/src/collector.rs b/turbopack/crates/turbo-persistence/src/collector.rs
new file mode 100644
index 0000000000000..bfd507be294e0
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/collector.rs
@@ -0,0 +1,113 @@
+use crate::{
+    collector_entry::{CollectorEntry, CollectorEntryValue, EntryKey},
+    constants::{
+        DATA_THRESHOLD_PER_INITIAL_FILE, MAX_ENTRIES_PER_INITIAL_FILE, MAX_SMALL_VALUE_SIZE,
+    },
+    key::{hash_key, StoreKey},
+};
+
+/// A collector accumulates entries that should eventually be written to a file. It keeps track of
+/// the count and size of the entries to decide when it's "full". Accessing the entries sorts them.
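+/// A collector counts as "full" once it reaches `MAX_ENTRIES_PER_INITIAL_FILE` entries or more
+/// than `DATA_THRESHOLD_PER_INITIAL_FILE` bytes of combined key and value data (see `is_full`).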
+pub struct Collector<K: StoreKey> {
+    total_key_size: usize,
+    total_value_size: usize,
+    entries: Vec<CollectorEntry<K>>,
+}
+
+impl<K: StoreKey> Collector<K> {
+    /// Creates a new collector. Note that this allocates the full capacity for the entries.
+    pub fn new() -> Self {
+        Self {
+            total_key_size: 0,
+            total_value_size: 0,
+            entries: Vec::with_capacity(MAX_ENTRIES_PER_INITIAL_FILE),
+        }
+    }
+
+    /// Returns true if the collector has no entries.
+    pub fn is_empty(&self) -> bool {
+        self.entries.is_empty()
+    }
+
+    /// Returns true if the collector is full.
+    pub fn is_full(&self) -> bool {
+        self.entries.len() >= MAX_ENTRIES_PER_INITIAL_FILE
+            || self.total_key_size + self.total_value_size > DATA_THRESHOLD_PER_INITIAL_FILE
+    }
+
+    /// Adds a normal key-value pair to the collector.
+    pub fn put(&mut self, key: K, value: Vec<u8>) {
+        let key = EntryKey {
+            hash: hash_key(&key),
+            data: key,
+        };
+        let value = if value.len() > MAX_SMALL_VALUE_SIZE {
+            CollectorEntryValue::Medium { value }
+        } else {
+            CollectorEntryValue::Small { value }
+        };
+        self.total_key_size += key.len();
+        self.total_value_size += value.len();
+        self.entries.push(CollectorEntry { key, value });
+    }
+
+    /// Adds a blob key-value pair to the collector.
+    pub fn put_blob(&mut self, key: K, blob: u32) {
+        let key = EntryKey {
+            hash: hash_key(&key),
+            data: key,
+        };
+        self.total_key_size += key.len();
+        self.entries.push(CollectorEntry {
+            key,
+            value: CollectorEntryValue::Large { blob },
+        });
+    }
+
+    /// Adds a tombstone for a key to the collector.
+    pub fn delete(&mut self, key: K) {
+        let key = EntryKey {
+            hash: hash_key(&key),
+            data: key,
+        };
+        self.total_key_size += key.len();
+        self.entries.push(CollectorEntry {
+            key,
+            value: CollectorEntryValue::Deleted,
+        });
+    }
+
+    /// Adds an entry from another collector to this collector.
+    pub fn add_entry(&mut self, entry: CollectorEntry<K>) {
+        self.total_key_size += entry.key.len();
+        self.total_value_size += entry.value.len();
+        self.entries.push(entry);
+    }
+
+    /// Sorts the entries and returns them along with the total key and value sizes. This doesn't
+    /// clear the entries.
+    pub fn sorted(&mut self) -> (&[CollectorEntry<K>], usize, usize) {
+        self.entries.sort_by(|a, b| a.key.cmp(&b.key));
+        (&self.entries, self.total_key_size, self.total_value_size)
+    }
+
+    /// Clears the collector.
+    pub fn clear(&mut self) {
+        self.entries.clear();
+        self.total_key_size = 0;
+        self.total_value_size = 0;
+    }
+
+    /// Drains all entries from the collector in unsorted order. This can be used to move the
+    /// entries into another collector.
+    pub fn drain(&mut self) -> impl Iterator<Item = CollectorEntry<K>> + '_ {
+        self.total_key_size = 0;
+        self.total_value_size = 0;
+        self.entries.drain(..)
+    }
+
+    /// Returns the number of entries in the collector.
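+    /// This includes tombstone (deleted) entries.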
+    pub fn len(&self) -> usize {
+        self.entries.len()
+    }
+}
diff --git a/turbopack/crates/turbo-persistence/src/collector_entry.rs b/turbopack/crates/turbo-persistence/src/collector_entry.rs
new file mode 100644
index 0000000000000..a27db6d1119dc
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/collector_entry.rs
@@ -0,0 +1,85 @@
+use std::cmp::Ordering;
+
+use crate::{
+    key::StoreKey,
+    static_sorted_file_builder::{Entry, EntryValue},
+};
+
+pub struct CollectorEntry<K: StoreKey> {
+    pub key: EntryKey<K>,
+    pub value: CollectorEntryValue,
+}
+
+pub enum CollectorEntryValue {
+    Small { value: Vec<u8> },
+    Medium { value: Vec<u8> },
+    Large { blob: u32 },
+    Deleted,
+}
+
+impl CollectorEntryValue {
+    pub fn len(&self) -> usize {
+        match self {
+            CollectorEntryValue::Small { value } => value.len(),
+            CollectorEntryValue::Medium { value } => value.len(),
+            CollectorEntryValue::Large { blob: _ } => 0,
+            CollectorEntryValue::Deleted => 0,
+        }
+    }
+}
+
+pub struct EntryKey<K: StoreKey> {
+    pub hash: u64,
+    pub data: K,
+}
+
+impl<K: StoreKey> EntryKey<K> {
+    pub fn len(&self) -> usize {
+        std::mem::size_of::<u64>() + self.data.len()
+    }
+}
+
+impl<K: StoreKey> PartialEq for EntryKey<K> {
+    fn eq(&self, other: &Self) -> bool {
+        self.hash == other.hash && self.data == other.data
+    }
+}
+
+impl<K: StoreKey> Eq for EntryKey<K> {}
+
+impl<K: StoreKey> PartialOrd for EntryKey<K> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<K: StoreKey> Ord for EntryKey<K> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.hash
+            .cmp(&other.hash)
+            .then_with(|| self.data.cmp(&other.data))
+    }
+}
+
+impl<K: StoreKey> Entry for CollectorEntry<K> {
+    fn key_hash(&self) -> u64 {
+        self.key.hash
+    }
+
+    fn key_len(&self) -> usize {
+        self.key.data.len()
+    }
+
+    fn write_key_to(&self, buf: &mut Vec<u8>) {
+        self.key.data.write_to(buf);
+    }
+
+    fn value(&self) -> EntryValue<'_> {
+        match &self.value {
+            CollectorEntryValue::Small { value } => EntryValue::Small { value },
+            CollectorEntryValue::Medium { value } => EntryValue::Medium { value },
+            CollectorEntryValue::Large { blob } => EntryValue::Large { blob: *blob },
+            CollectorEntryValue::Deleted => EntryValue::Deleted,
+        }
+    }
+}
diff --git a/turbopack/crates/turbo-persistence/src/compaction/mod.rs b/turbopack/crates/turbo-persistence/src/compaction/mod.rs
new file mode 100644
index 0000000000000..199a414becc3f
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/compaction/mod.rs
@@ -0,0 +1 @@
+pub mod selector;
diff --git a/turbopack/crates/turbo-persistence/src/compaction/selector.rs b/turbopack/crates/turbo-persistence/src/compaction/selector.rs
new file mode 100644
index 0000000000000..2a67cab2acd18
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/compaction/selector.rs
@@ -0,0 +1,371 @@
+/// The merge and move jobs that the compaction algorithm has computed. It's expected that all
+/// merge jobs are executed in parallel, and when they have finished, the move jobs are executed
+/// in parallel.
+#[derive(Debug)]
+pub struct CompactionJobs {
+    pub merge_jobs: Vec<Vec<usize>>,
+    pub move_jobs: Vec<usize>,
+}
+
+impl CompactionJobs {
+    #[cfg(test)]
+    pub(self) fn is_empty(&self) -> bool {
+        self.merge_jobs.is_empty() && self.move_jobs.is_empty()
+    }
+}
+
+type Range = (u64, u64);
+
+/// The trait for the input of the compaction algorithm.
+pub trait Compactable {
+    /// Returns the range of the compactable.
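+    /// The range is the `(min_hash, max_hash)` pair covering all key hashes in the compactable.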
+    fn range(&self) -> Range;
+}
+
+fn is_overlapping(a: &Range, b: &Range) -> bool {
+    a.0 <= b.1 && b.0 <= a.1
+}
+
+fn spread(range: &Range) -> u64 {
+    range.1 - range.0
+}
+
+/// Extends the range `a` to include the range `b`, returns `true` if the range was extended.
+fn extend_range(a: &mut Range, b: &Range) -> bool {
+    let mut extended = false;
+    if b.0 < a.0 {
+        a.0 = b.0;
+        extended = true;
+    }
+    if b.1 > a.1 {
+        a.1 = b.1;
+        extended = true;
+    }
+    extended
+}
+
+/// Computes the total coverage of the compactables.
+pub fn total_coverage<T: Compactable>(compactables: &[T], full_range: Range) -> f32 {
+    let mut coverage = 0.0f32;
+    for c in compactables {
+        let range = c.range();
+        coverage += spread(&range) as f32;
+    }
+    coverage / spread(&full_range) as f32
+}
+
+/// Configuration for the compaction algorithm.
+pub struct CompactConfig {
+    /// The maximum number of files to merge at once.
+    pub max_merge: usize,
+
+    /// The minimum number of files to merge at once.
+    pub min_merge: usize,
+}
+
+/// For a list of compactables, computes merge and move jobs that are expected to perform best.
+pub fn get_compaction_jobs<T: Compactable>(
+    compactables: &[T],
+    config: &CompactConfig,
+) -> CompactionJobs {
+    let (jobs, _) = get_compaction_jobs_internal(compactables, config, 0);
+    jobs
+}
+
+fn get_compaction_jobs_internal<T: Compactable>(
+    compactables: &[T],
+    config: &CompactConfig,
+    start_index: usize,
+) -> (CompactionJobs, f32) {
+    let len = compactables.len();
+    let mut used_compactables = vec![false; len];
+    let mut need_move = vec![false; len];
+    let mut merge_jobs = Vec::new();
+    let mut merge_jobs_reduction = 0.0f32;
+    let mut move_jobs = Vec::new();
+
+    let age = |i| (len - 1 - i) as f32;
+
+    loop {
+        // Find the first unused compactable.
+        let Some(start) = used_compactables
+            .iter()
+            .skip(start_index)
+            .position(|&used| !used)
+            .map(|i| i + start_index)
+        else {
+            break;
+        };
+        if start >= len - 1 {
+            break;
+        }
+        used_compactables[start] = true;
+        let start_range = compactables[start].range();
+        let mut range = start_range;
+
+        let mut merge_job = Vec::new();
+        merge_job.push(start);
+        let mut merge_job_input_spread = spread(&start_range) as f32;
+
+        'outer: loop {
+            // Find the next overlapping unused compactable and extend the range to cover it.
+            // If the range already covers it, add it to the current set.
+            let mut i = start + 1;
+            loop {
+                if !used_compactables[i] {
+                    let range_for_i = compactables[i].range();
+                    if is_overlapping(&range, &range_for_i) {
+                        let mut extended_range = range;
+                        if !extend_range(&mut extended_range, &range_for_i) {
+                            used_compactables[i] = true;
+                            merge_job.push(i);
+                            merge_job_input_spread += spread(&range_for_i) as f32;
+                        } else {
+                            let s = spread(&range);
+                            // Disallow doubling the range spread
+                            if merge_job.len() >= config.min_merge
+                                && spread(&extended_range) - s > s
+                            {
+                                break 'outer;
+                            }
+                            range = extended_range;
+                            // Need to restart the search from the beginning as the extended
+                            // range may overlap with compactables that were already processed.
+                            break;
+                        }
+                    }
+                }
+                i += 1;
+                if i >= compactables.len() {
+                    break 'outer;
+                }
+                if merge_job.len() >= config.max_merge {
+                    break 'outer;
+                }
+            }
+        }
+
+        if merge_job.len() < config.min_merge {
+            continue;
+        }
+        let mut merge_range = compactables[start].range();
+        if !merge_job
+            .iter()
+            .skip(1)
+            .any(|&i| is_overlapping(&merge_range, &compactables[i].range()))
+        {
+            // No overlapping ranges, skip that merge job.
+            continue;
+        }
+
+        for &i in merge_job.iter().skip(1) {
+            extend_range(&mut merge_range, &compactables[i].range());
+        }
+        merge_jobs_reduction = (merge_job_input_spread - spread(&merge_range) as f32) * age(start);
+
+        for (i, compactable) in compactables
+            .iter()
+            .enumerate()
+            .skip(merge_job.last().unwrap() + 1)
+        {
+            if used_compactables[i] {
+                continue;
+            }
+            let range = compactable.range();
+            if is_overlapping(&merge_range, &range) && !need_move[i] {
+                need_move[i] = true;
+                used_compactables[i] = true;
+                move_jobs.push(i);
+            }
+        }
+
+        merge_jobs.push(merge_job);
+    }
+
+    // Check if there is an alternative with better reduction.
+    if !move_jobs.is_empty() {
+        let offset = move_jobs[0];
+        let (result, estimated_reduction) =
+            get_compaction_jobs_internal(compactables, config, offset);
+        if estimated_reduction > merge_jobs_reduction {
+            return (result, estimated_reduction);
+        }
+    }
+
+    move_jobs.sort_unstable();
+
+    (
+        CompactionJobs {
+            merge_jobs,
+            move_jobs,
+        },
+        merge_jobs_reduction,
+    )
+}
+
+#[cfg(test)]
+mod tests {
+    use std::{
+        fmt::Debug,
+        mem::{swap, take},
+    };
+
+    use rand::{Rng, SeedableRng};
+
+    use super::*;
+
+    struct TestCompactable {
+        range: Range,
+    }
+
+    impl Compactable for TestCompactable {
+        fn range(&self) -> Range {
+            self.range
+        }
+    }
+
+    fn compact<const N: usize>(ranges: [(u64, u64); N], max_merge: usize) -> CompactionJobs {
+        let compactables = ranges
+            .iter()
+            .map(|&range| TestCompactable { range })
+            .collect::<Vec<_>>();
+        let config = CompactConfig {
+            max_merge,
+            min_merge: 2,
+        };
+        get_compaction_jobs(&compactables, &config)
+    }
+
+    #[test]
+    fn test_compaction_jobs() {
+        let CompactionJobs {
+            merge_jobs,
+            move_jobs,
+            ..
+        } = compact(
+            [
+                (0, 10),
+                (10, 30),
+                (9, 13),
+                (0, 30),
+                (40, 44),
+                (41, 42),
+                (41, 47),
+                (90, 100),
+                (30, 40),
+            ],
+            3,
+        );
+        assert_eq!(merge_jobs, vec![vec![0, 1, 2], vec![4, 5, 6]]);
+        assert_eq!(move_jobs, vec![3, 8]);
+    }
+
+    #[test]
+    fn simulate_compactions() {
+        let mut rnd = rand::rngs::SmallRng::from_seed([0; 32]);
+        let mut keys = (0..1000)
+            .map(|_| rnd.gen_range(0..10000))
+            .collect::<Vec<_>>();
+
+        let mut containers = keys
+            .chunks(100)
+            .map(|keys| Container::new(keys.to_vec()))
+            .collect::<Vec<_>>();
+
+        let mut warm_keys = (0..100)
+            .map(|_| {
+                let i = rnd.gen_range(0..keys.len());
+                keys.swap_remove(i)
+            })
+            .collect::<Vec<_>>();
+
+        let mut number_of_compactions = 0;
+
+        for _ in 0..100 {
+            let coverage = total_coverage(&containers, (0, 10000));
+            println!(
+                "{containers:#?} coverage: {}, items: {}",
+                coverage,
+                containers.len()
+            );
+
+            if coverage > 10.0 {
+                let config = CompactConfig {
+                    max_merge: 4,
+                    min_merge: 2,
+                };
+                let jobs = get_compaction_jobs(&containers, &config);
+                if !jobs.is_empty() {
+                    println!("{jobs:?}");
+
+                    do_compact(&mut containers, jobs);
+                    number_of_compactions += 1;
+                }
+            } else {
+                println!("No compaction needed");
+            }
+
+            // Modify warm keys
+            containers.push(Container::new(warm_keys.clone()));
+
+            // Change some warm keys
+            for _ in 0..10 {
+                let i = rnd.gen_range(0..warm_keys.len());
+                let j = rnd.gen_range(0..keys.len());
+                swap(&mut warm_keys[i], &mut keys[j]);
+            }
+        }
+        println!("Number of compactions: {}", number_of_compactions);
+
+        assert!(containers.len() < 40);
+        let coverage = total_coverage(&containers, (0, 10000));
+        assert!(coverage < 12.0);
+    }
+
+    struct Container {
+        keys: Vec<u64>,
+    }
+
+    impl Container {
+        fn new(mut keys: Vec<u64>) -> Self {
+            keys.sort_unstable();
+            Self { keys }
+        }
+    }
+
+    impl Compactable for Container {
+        fn range(&self) -> Range {
+            (self.keys[0], *self.keys.last().unwrap())
+        }
+    }
+
+    impl Debug for Container {
+        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+            let (l, r) = self.range();
+            write!(f, "{} {l} - {r} ({})", self.keys.len(), r - l)
+        }
+    }
+
+    fn do_compact(containers: &mut Vec<Container>, jobs: CompactionJobs) {
+        for merge_job in jobs.merge_jobs {
+            let mut keys = Vec::new();
+            for i in merge_job {
+                keys.append(&mut containers[i].keys);
+            }
+            keys.sort_unstable();
+            keys.dedup();
+            containers.extend(keys.chunks(100).map(|keys| Container {
+                keys: keys.to_vec(),
+            }));
+        }
+
+        for i in jobs.move_jobs {
+            let moved_container = Container {
+                keys: take(&mut containers[i].keys),
+            };
+            containers.push(moved_container);
+        }
+
+        containers.retain(|c| !c.keys.is_empty());
+    }
+}
diff --git a/turbopack/crates/turbo-persistence/src/constants.rs b/turbopack/crates/turbo-persistence/src/constants.rs
new file mode 100644
index 0000000000000..af103a4bc95c1
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/constants.rs
@@ -0,0 +1,34 @@
+/// Values larger than this become blob files
+pub const MAX_MEDIUM_VALUE_SIZE: usize = 64 * 1024 * 1024;
+
+/// Values larger than this become separate value blocks
+// Note this must fit into 2 bytes length
+pub const MAX_SMALL_VALUE_SIZE: usize = 64 * 1024 - 1;
+
+/// Maximum number of entries per initial SST file
+pub const MAX_ENTRIES_PER_INITIAL_FILE: usize = 1024 * 1024;
+
+/// Maximum number of entries per compacted SST file
+pub const MAX_ENTRIES_PER_COMPACTED_FILE: usize = 1024 * 1024;
+
+/// Finish an initial file when the total amount of data exceeds this
+pub const DATA_THRESHOLD_PER_INITIAL_FILE: usize = 256 * 1024 * 1024;
+
+/// Finish a compacted file when the total amount of data exceeds this
+pub const DATA_THRESHOLD_PER_COMPACTED_FILE: usize = 256 * 1024 * 1024;
+
+/// Maximum RAM bytes for AQMF cache
+pub const AQMF_CACHE_SIZE: u64 = 300 * 1024 * 1024;
+pub const AQMF_AVG_SIZE: usize = 37399;
+
+/// Maximum RAM bytes for index block cache
+pub const INDEX_BLOCK_CACHE_SIZE: u64 = 100 * 1024 * 1024;
+pub const INDEX_BLOCK_AVG_SIZE: usize = 152000;
+
+/// Maximum RAM bytes for key block cache
+pub const KEY_BLOCK_CACHE_SIZE: u64 = 300 * 1024 * 1024;
+pub const KEY_BLOCK_AVG_SIZE: usize = 16 * 1024;
+
+/// Maximum RAM bytes for value block cache
+pub const VALUE_BLOCK_CACHE_SIZE: u64 = 300 * 1024 * 1024;
+pub const VALUE_BLOCK_AVG_SIZE: usize = 132000;
diff --git a/turbopack/crates/turbo-persistence/src/db.rs b/turbopack/crates/turbo-persistence/src/db.rs
new file mode 100644
index 0000000000000..2366e5c7f2b65
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/db.rs
@@ -0,0 +1,910 @@
+use std::{
+    any::{Any, TypeId},
+    collections::HashSet,
+    fs::{self, File, OpenOptions, ReadDir},
+    io::Write,
+    mem::{swap, transmute, MaybeUninit},
+    path::{Path, PathBuf},
+    sync::{
+        atomic::{AtomicBool, AtomicU32, Ordering},
+        Arc,
+    },
+};
+
+use anyhow::{bail, Context, Result};
+use byteorder::{ReadBytesExt, WriteBytesExt, BE};
+use lzzzz::lz4::decompress;
+use parking_lot::{Mutex, RwLock};
+use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
+
+use crate::{
+    arc_slice::ArcSlice,
+    compaction::selector::{
+        get_compaction_jobs, total_coverage, CompactConfig, Compactable, CompactionJobs,
+    },
+    constants::{
+        AQMF_AVG_SIZE, AQMF_CACHE_SIZE, DATA_THRESHOLD_PER_COMPACTED_FILE, INDEX_BLOCK_AVG_SIZE,
+        INDEX_BLOCK_CACHE_SIZE, KEY_BLOCK_AVG_SIZE, KEY_BLOCK_CACHE_SIZE,
+        MAX_ENTRIES_PER_COMPACTED_FILE, VALUE_BLOCK_AVG_SIZE, VALUE_BLOCK_CACHE_SIZE,
+    },
+    key::{hash_key, StoreKey},
+    lookup_entry::LookupEntry,
+    merge_iter::MergeIter,
+    static_sorted_file::{
+        AqmfCache, BlockCache, LookupResult, StaticSortedFile, StaticSortedFileRange,
+    },
+    static_sorted_file_builder::StaticSortedFileBuilder,
+    write_batch::WriteBatch,
+    QueryKey,
+};
+
+#[cfg(feature = "stats")]
+#[derive(Debug)]
+pub struct CacheStatistics {
+    pub hit_rate: f32,
+    pub fill: f32,
+    pub items: usize,
+    pub size: u64,
+    pub hits: u64,
+    pub misses: u64,
+}
+
+#[cfg(feature = "stats")]
+impl CacheStatistics {
+    fn new<Key, Val, We, B, L>(cache: &quick_cache::sync::Cache<Key, Val, We, B, L>) -> Self
+    where
+        Key: Eq + std::hash::Hash,
+        Val: Clone,
+        We: quick_cache::Weighter<Key, Val> + Clone,
+        B: std::hash::BuildHasher + Clone,
+        L: quick_cache::Lifecycle<Key, Val> + Clone,
+    {
+        let size = cache.weight();
+        let hits = cache.hits();
+        let misses = cache.misses();
+        Self {
+            hit_rate: hits as f32 / (hits + misses) as f32,
+            fill: size as f32 / cache.capacity() as f32,
+            items: cache.len(),
+            size,
+            hits,
+            misses,
+        }
+    }
+}
+
+#[cfg(feature = "stats")]
+#[derive(Debug)]
+pub struct Statistics {
+    pub sst_files: usize,
+    pub index_block_cache: CacheStatistics,
+    pub key_block_cache: CacheStatistics,
+    pub value_block_cache: CacheStatistics,
+    pub aqmf_cache: CacheStatistics,
+    pub hits: u64,
+    pub misses: u64,
+    pub miss_range: u64,
+    pub miss_aqmf: u64,
+    pub miss_key: u64,
+}
+
+#[cfg(feature = "stats")]
+#[derive(Default)]
+struct TrackedStats {
+    hits_deleted: std::sync::atomic::AtomicU64,
+    hits_small: std::sync::atomic::AtomicU64,
+    hits_blob: std::sync::atomic::AtomicU64,
+    miss_range: std::sync::atomic::AtomicU64,
+    miss_aqmf: std::sync::atomic::AtomicU64,
+    miss_key: std::sync::atomic::AtomicU64,
+    miss_global: std::sync::atomic::AtomicU64,
+}
+
+/// TurboPersistence is a persistent key-value store. It is limited to a single writer at a time
+/// using a single write batch. It allows for concurrent reads.
+pub struct TurboPersistence {
+    /// The path to the directory where the database is stored
+    path: PathBuf,
+    /// The inner state of the database. Writing will update that.
+    inner: RwLock<Inner>,
+    /// A cache for the last WriteBatch. It is used to avoid reallocation of buffers for the
+    /// WriteBatch.
+    idle_write_batch: Mutex<Option<(TypeId, Box<dyn Any + Send + Sync>)>>,
+    /// A flag to indicate if a write operation is currently active. Prevents multiple concurrent
+    /// write operations.
+    active_write_operation: AtomicBool,
+    /// A cache for deserialized AQMF filters.
+    aqmf_cache: AqmfCache,
+    /// A cache for decompressed index blocks.
+    index_block_cache: BlockCache,
+    /// A cache for decompressed key blocks.
+    key_block_cache: BlockCache,
+    /// A cache for decompressed value blocks.
+    value_block_cache: BlockCache,
+    /// Statistics for the database.
+    #[cfg(feature = "stats")]
+    stats: TrackedStats,
+}
+
+/// The inner state of the database.
+struct Inner {
+    /// The list of SST files in the database in order.
+    static_sorted_files: Vec<StaticSortedFile>,
+    /// The current sequence number for the database.
+    current_sequence_number: u32,
+}
+
+impl TurboPersistence {
+    /// Open a TurboPersistence database at the given path.
+    /// This will read the directory and might perform cleanup when the database was not closed
+    /// properly. Cleanup only requires reading a few bytes from a few files and deleting
+    /// files, so it's fast.
+    pub fn open(path: PathBuf) -> Result<Self> {
+        let mut db = Self {
+            path,
+            inner: RwLock::new(Inner {
+                static_sorted_files: Vec::new(),
+                current_sequence_number: 0,
+            }),
+            idle_write_batch: Mutex::new(None),
+            active_write_operation: AtomicBool::new(false),
+            aqmf_cache: AqmfCache::with(
+                AQMF_CACHE_SIZE as usize / AQMF_AVG_SIZE,
+                AQMF_CACHE_SIZE,
+                Default::default(),
+                Default::default(),
+                Default::default(),
+            ),
+            index_block_cache: BlockCache::with(
+                INDEX_BLOCK_CACHE_SIZE as usize / INDEX_BLOCK_AVG_SIZE,
+                INDEX_BLOCK_CACHE_SIZE,
+                Default::default(),
+                Default::default(),
+                Default::default(),
+            ),
+            key_block_cache: BlockCache::with(
+                KEY_BLOCK_CACHE_SIZE as usize / KEY_BLOCK_AVG_SIZE,
+                KEY_BLOCK_CACHE_SIZE,
+                Default::default(),
+                Default::default(),
+                Default::default(),
+            ),
+            value_block_cache: BlockCache::with(
+                VALUE_BLOCK_CACHE_SIZE as usize / VALUE_BLOCK_AVG_SIZE,
+                VALUE_BLOCK_CACHE_SIZE,
+                Default::default(),
+                Default::default(),
+                Default::default(),
+            ),
+            #[cfg(feature = "stats")]
+            stats: TrackedStats::default(),
+        };
+        db.open_directory()?;
+        Ok(db)
+    }
+
+    /// Performs the initial check on the database directory.
+    fn open_directory(&mut self) -> Result<()> {
+        match fs::read_dir(&self.path) {
+            Ok(entries) => {
+                if !self
+                    .load_directory(entries)
+                    .context("Loading persistence directory failed")?
+                {
+                    self.init_directory()
+                        .context("Initializing persistence directory failed")?;
+                }
+                Ok(())
+            }
+            Err(e) => {
+                if e.kind() == std::io::ErrorKind::NotFound {
+                    self.create_and_init_directory()
+                        .context("Creating and initializing persistence directory failed")?;
+                    Ok(())
+                } else {
+                    Err(e).context("Failed to open database")
+                }
+            }
+        }
+    }
+
+    /// Creates the directory and initializes it.
+    fn create_and_init_directory(&mut self) -> Result<()> {
+        fs::create_dir_all(&self.path)?;
+        self.init_directory()
+    }
+
+    /// Initializes the directory by creating the CURRENT file.
+    fn init_directory(&mut self) -> Result<()> {
+        let mut current = File::create(self.path.join("CURRENT"))?;
+        current.write_u32::<BE>(0)?;
+        current.flush()?;
+        Ok(())
+    }
+
+    /// Loads an existing database directory and performs cleanup if necessary.
+    fn load_directory(&mut self, entries: ReadDir) -> Result<bool> {
+        let mut sst_files = Vec::new();
+        let mut current_file = match File::open(self.path.join("CURRENT")) {
+            Ok(file) => file,
+            Err(e) => {
+                if e.kind() == std::io::ErrorKind::NotFound {
+                    return Ok(false);
+                } else {
+                    return Err(e).context("Failed to open CURRENT file");
+                }
+            }
+        };
+        let current = current_file.read_u32::<BE>()?;
+        drop(current_file);
+
+        let mut deleted_files = HashSet::new();
+        for entry in entries {
+            let entry = entry?;
+            let path = entry.path();
+            if let Some(ext) = path.extension().and_then(|s| s.to_str()) {
+                let seq: u32 = path
+                    .file_stem()
+                    .context("File has no file stem")?
+                    .to_str()
+                    .context("File stem is not valid utf-8")?
+                    .parse()?;
+                if deleted_files.contains(&seq) {
+                    continue;
+                }
+                if seq > current {
+                    fs::remove_file(&path)?;
+                } else {
+                    match ext {
+                        "sst" => {
+                            sst_files.push(seq);
+                        }
+                        "del" => {
+                            let mut content = &*fs::read(&path)?;
+                            let mut no_existing_files = true;
+                            while !content.is_empty() {
+                                let seq = content.read_u32::<BE>()?;
+                                deleted_files.insert(seq);
+                                let sst_file = self.path.join(format!("{:08}.sst", seq));
+                                let blob_file = self.path.join(format!("{:08}.blob", seq));
+                                for path in [sst_file, blob_file] {
+                                    if fs::exists(&path)? {
+                                        fs::remove_file(path)?;
+                                        no_existing_files = false;
+                                    }
+                                }
+                            }
+                            if no_existing_files {
+                                fs::remove_file(&path)?;
+                            }
+                        }
+                        "blob" => {
+                            // ignore blobs, they are read when needed
+                        }
+                        _ => {
+                            bail!("Unexpected file in persistence directory: {:?}", path);
+                        }
+                    }
+                }
+            } else {
+                match path.file_stem().and_then(|s| s.to_str()) {
+                    Some("CURRENT") => {
+                        // Already read
+                    }
+                    _ => {
+                        bail!("Unexpected file in persistence directory: {:?}", path);
+                    }
+                }
+            }
+        }
+
+        sst_files.retain(|seq| !deleted_files.contains(seq));
+        sst_files.sort();
+        let sst_files = sst_files
+            .into_iter()
+            .map(|seq| self.open_sst(seq))
+            .collect::<Result<Vec<_>>>()?;
+        #[cfg(feature = "stats")]
+        {
+            for sst in sst_files.iter() {
+                let crate::static_sorted_file::StaticSortedFileRange {
+                    family,
+                    min_hash,
+                    max_hash,
+                } = sst.range()?;
+                println!(
+                    "SST {} {} {:016x} - {:016x} {:016x}",
+                    sst.sequence_number(),
+                    family,
+                    min_hash,
+                    max_hash,
+                    max_hash - min_hash
+                );
+            }
+        }
+        let inner = self.inner.get_mut();
+        inner.static_sorted_files = sst_files;
+        inner.current_sequence_number = current;
+        Ok(true)
+    }
+
+    /// Opens a single SST file. This memory maps the file, but doesn't read it yet.
+    fn open_sst(&self, seq: u32) -> Result<StaticSortedFile> {
+        let path = self.path.join(format!("{:08}.sst", seq));
+        StaticSortedFile::open(seq, path)
+            .with_context(|| format!("Unable to open sst file {:08}.sst", seq))
+    }
+
+    /// Reads and decompresses a blob file. This is not backed by any cache.
+    fn read_blob(&self, seq: u32) -> Result<ArcSlice<u8>> {
+        let path = self.path.join(format!("{:08}.blob", seq));
+        let compressed =
+            fs::read(path).with_context(|| format!("Unable to read blob file {:08}.blob", seq))?;
+        let mut compressed = &compressed[..];
+        let uncompressed_length = compressed.read_u32::<BE>()? as usize;
+
+        let buffer = Arc::new_zeroed_slice(uncompressed_length);
+        // Safety: MaybeUninit<u8> can be safely transmuted to u8.
+        let mut buffer = unsafe { transmute::<Arc<[MaybeUninit<u8>]>, Arc<[u8]>>(buffer) };
+        // Safety: We know that the buffer is not shared yet.
+        let decompressed = unsafe { Arc::get_mut_unchecked(&mut buffer) };
+        decompress(compressed, decompressed)?;
+        Ok(ArcSlice::from(buffer))
+    }
+
+    /// Returns true if the database is empty.
+    pub fn is_empty(&self) -> bool {
+        self.inner.read().static_sorted_files.is_empty()
+    }
+
+    /// Starts a new WriteBatch for the database. Only a single write operation is allowed at a
+    /// time. The WriteBatch needs to be committed with [`TurboPersistence::commit_write_batch`].
+    /// Note that the WriteBatch might start writing data to disk while it's filled up with data.
+    /// This data will only become visible after the WriteBatch is committed.
+    pub fn write_batch<K: StoreKey + Send + Sync, const FAMILIES: usize>(
+        &self,
+    ) -> Result<WriteBatch<K, FAMILIES>> {
+        if self
+            .active_write_operation
+            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
+            .is_err()
+        {
+            bail!(
+                "Another write batch or compaction is already active (Only a single write \
+                 operation is allowed at a time)"
+            );
+        }
+        let current = self.inner.read().current_sequence_number;
+        if let Some((ty, any)) = self.idle_write_batch.lock().take() {
+            if ty == TypeId::of::<WriteBatch<K, FAMILIES>>() {
+                let mut write_batch = *any.downcast::<WriteBatch<K, FAMILIES>>().unwrap();
+                write_batch.reset(current);
+                return Ok(write_batch);
+            }
+        }
+        Ok(WriteBatch::new(self.path.clone(), current))
+    }
+
+    /// Commits a WriteBatch to the database. This will finish writing the data to disk and make it
+    /// visible to readers.
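+    /// Afterwards the active write operation flag is released and the WriteBatch buffers are
+    /// cached for reuse by the next [`TurboPersistence::write_batch`] call.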
+    pub fn commit_write_batch<K: StoreKey + Send + Sync, const FAMILIES: usize>(
+        &self,
+        mut write_batch: WriteBatch<K, FAMILIES>,
+    ) -> Result<()> {
+        let (seq, new_sst_files) = write_batch.finish()?;
+        self.commit(new_sst_files, vec![], seq)?;
+        self.active_write_operation.store(false, Ordering::Release);
+        self.idle_write_batch.lock().replace((
+            TypeId::of::<WriteBatch<K, FAMILIES>>(),
+            Box::new(write_batch),
+        ));
+        Ok(())
+    }
+
+    /// fsyncs the new files and updates the CURRENT file. Updates the database state to include
+    /// the new files.
+    fn commit(
+        &self,
+        new_sst_files: Vec<(u32, File)>,
+        mut indicies_to_delete: Vec<usize>,
+        mut seq: u32,
+    ) -> Result<(), anyhow::Error> {
+        let mut new_sst_files = new_sst_files
+            .into_iter()
+            .map(|(seq, file)| {
+                file.sync_all()?;
+                self.open_sst(seq)
+            })
+            .collect::<Result<Vec<_>>>()?;
+
+        if !indicies_to_delete.is_empty() {
+            seq += 1;
+        }
+
+        let removed_ssts;
+
+        {
+            let mut inner = self.inner.write();
+            inner.current_sequence_number = seq;
+            indicies_to_delete.sort();
+            removed_ssts = remove_indicies(&mut inner.static_sorted_files, &indicies_to_delete);
+            inner.static_sorted_files.append(&mut new_sst_files);
+        }
+
+        let mut removed_ssts = removed_ssts
+            .into_iter()
+            .map(|sst| sst.sequence_number())
+            .collect::<Vec<_>>();
+        removed_ssts.sort();
+
+        if !indicies_to_delete.is_empty() {
+            // Write the *.del file, marking the selected files as to be deleted
+            let mut buf = Vec::with_capacity(removed_ssts.len() * 4);
+            for seq in removed_ssts.iter() {
+                buf.write_u32::<BE>(*seq)?;
+            }
+            let mut file = File::create(self.path.join(format!("{:08}.del", seq)))?;
+            file.write_all(&buf)?;
+            file.sync_all()?;
+        }
+
+        let mut current_file = OpenOptions::new()
+            .write(true)
+            .truncate(false)
+            .read(false)
+            .open(self.path.join("CURRENT"))?;
+        current_file.write_u32::<BE>(seq)?;
+        current_file.sync_all()?;
+
+        for seq in removed_ssts {
+            fs::remove_file(self.path.join(format!("{seq:08}.sst")))?;
+        }
+
+        Ok(())
+    }
+
+    /// Runs a full compaction on the database. This will rewrite all SST files, removing all
+    /// duplicate keys and separating all key ranges into unique files.
+    pub fn full_compact(&self) -> Result<()> {
+        self.compact(0.0, usize::MAX)?;
+        Ok(())
+    }
+
+    /// Runs a (partial) compaction. Compaction will only be performed if the coverage of the SST
+    /// files is above the given threshold. The coverage is the average number of SST files that
+    /// need to be read to find a key. It also limits the maximum number of SST files that are
+    /// merged at once, which is the main factor for the runtime of the compaction.
+    pub fn compact(&self, max_coverage: f32, max_merge_sequence: usize) -> Result<()> {
+        if self
+            .active_write_operation
+            .compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire)
+            .is_err()
+        {
+            bail!(
+                "Another write batch or compaction is already active (Only a single write \
+                 operation is allowed at a time)"
+            );
+        }
+
+        let mut sequence_number;
+        let mut new_sst_files = Vec::new();
+        let mut indicies_to_delete = Vec::new();
+
+        {
+            let inner = self.inner.read();
+            sequence_number = AtomicU32::new(inner.current_sequence_number);
+            self.compact_internal(
+                &inner.static_sorted_files,
+                &sequence_number,
+                &mut new_sst_files,
+                &mut indicies_to_delete,
+                max_coverage,
+                max_merge_sequence,
+            )?;
+        }
+
+        self.commit(
+            new_sst_files,
+            indicies_to_delete,
+            *sequence_number.get_mut(),
+        )?;
+
+        self.active_write_operation.store(false, Ordering::Release);
+
+        Ok(())
+    }
+
+    /// Internal function to perform a compaction.
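+    /// New SST files are appended to `new_sst_files` and the indices of merged or moved files
+    /// are appended to `indicies_to_delete`; committing both is left to the caller.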
+ fn compact_internal( + &self, + static_sorted_files: &[StaticSortedFile], + sequence_number: &AtomicU32, + new_sst_files: &mut Vec<(u32, File)>, + indicies_to_delete: &mut Vec, + max_coverage: f32, + max_merge_sequence: usize, + ) -> Result { + if static_sorted_files.is_empty() { + return Ok(false); + } + + struct SstWithRange { + index: usize, + range: StaticSortedFileRange, + } + + impl Compactable for SstWithRange { + fn range(&self) -> (u64, u64) { + (self.range.min_hash, self.range.max_hash) + } + } + + let ssts_with_ranges = static_sorted_files + .iter() + .enumerate() + .flat_map(|(index, sst)| sst.range().ok().map(|range| SstWithRange { index, range })) + .collect::>(); + + let families = ssts_with_ranges + .iter() + .map(|s| s.range.family) + .max() + .unwrap() as usize + + 1; + + let mut sst_by_family = Vec::with_capacity(families); + sst_by_family.resize_with(families, Vec::new); + + for sst in ssts_with_ranges { + sst_by_family[sst.range.family as usize].push(sst); + } + + let key_block_cache = &self.key_block_cache; + let value_block_cache = &self.value_block_cache; + let path = &self.path; + + let result = sst_by_family + .into_par_iter() + .with_min_len(1) + .enumerate() + .map(|(family, ssts_with_ranges)| { + let coverage = total_coverage(&ssts_with_ranges, (0, u64::MAX)); + if coverage <= max_coverage { + return Ok((Vec::new(), Vec::new())); + } + + let CompactionJobs { + merge_jobs, + move_jobs, + } = get_compaction_jobs( + &ssts_with_ranges, + &CompactConfig { + max_merge: max_merge_sequence, + min_merge: 2, + }, + ); + + // Later we will remove the merged and moved files + let indicies_to_delete = merge_jobs + .iter() + .flat_map(|l| l.iter().copied()) + .chain(move_jobs.iter().copied()) + .map(|index| ssts_with_ranges[index].index) + .collect::>(); + + // Merge SST files + let merge_result = merge_jobs + .into_par_iter() + .with_min_len(1) + .map(|indicies| { + fn create_sst_file( + family: u32, + entries: &[LookupEntry], + total_key_size: usize, + total_value_size: usize, + path: &Path, + seq: u32, + ) -> Result<(u32, File)> { + let builder = StaticSortedFileBuilder::new( + family, + entries, + total_key_size, + total_value_size, + )?; + Ok((seq, builder.write(&path.join(format!("{:08}.sst", seq)))?)) + } + + let mut new_sst_files = Vec::new(); + + // Iterate all SST files + let iters = indicies + .iter() + .map(|&index| { + let index = ssts_with_ranges[index].index; + let sst = &static_sorted_files[index]; + sst.iter(key_block_cache, value_block_cache) + }) + .collect::>>()?; + + let iter = MergeIter::new(iters.into_iter())?; + + let mut total_key_size = 0; + let mut total_value_size = 0; + let mut current: Option = None; + let mut entries = Vec::new(); + let mut last_entries = Vec::new(); + let mut last_entries_total_sizes = (0, 0); + for entry in iter { + let entry = entry?; + + // Remove duplicates + if let Some(current) = current.take() { + if current.key != entry.key { + let key_size = current.key.len(); + let value_size = current.value.size_in_sst(); + total_key_size += key_size; + total_value_size += value_size; + + if total_key_size + total_value_size + > DATA_THRESHOLD_PER_COMPACTED_FILE + || entries.len() >= MAX_ENTRIES_PER_COMPACTED_FILE + { + let (selected_total_key_size, selected_total_value_size) = + last_entries_total_sizes; + swap(&mut entries, &mut last_entries); + last_entries_total_sizes = ( + total_key_size - key_size, + total_value_size - value_size, + ); + total_key_size = key_size; + total_value_size = value_size; + + if !entries.is_empty() { 
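+                                            // After the swap above, `entries` holds the
+                                            // previously held-back chunk; flush it to its own
+                                            // SST file. The just-completed chunk stays in
+                                            // `last_entries` so the tail of the merge can still
+                                            // be rebalanced below.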
+                                            let seq =
+                                                sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
+
+                                            new_sst_files.push(create_sst_file(
+                                                family as u32,
+                                                &entries,
+                                                selected_total_key_size,
+                                                selected_total_value_size,
+                                                path,
+                                                seq,
+                                            )?);
+
+                                            entries.clear();
+                                        }
+                                    }
+
+                                    entries.push(current);
+                                } else {
+                                    // Override value
+                                }
+                            }
+                            current = Some(entry);
+                        }
+                        if let Some(entry) = current {
+                            total_key_size += entry.key.len();
+                            total_value_size += entry.value.size_in_sst();
+                            entries.push(entry);
+                        }
+
+                        // If we have one set of entries left, write them to a new SST file
+                        if last_entries.is_empty() && !entries.is_empty() {
+                            let seq = sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
+
+                            new_sst_files.push(create_sst_file(
+                                family as u32,
+                                &entries,
+                                total_key_size,
+                                total_value_size,
+                                path,
+                                seq,
+                            )?);
+                        } else
+                        // If we have two sets of entries left, merge them and
+                        // split them into two SST files, to avoid having a
+                        // single SST file that is very small.
+                        if !last_entries.is_empty() {
+                            last_entries.append(&mut entries);
+
+                            last_entries_total_sizes.0 += total_key_size;
+                            last_entries_total_sizes.1 += total_value_size;
+
+                            let (part1, part2) = last_entries.split_at(last_entries.len() / 2);
+
+                            let seq1 = sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
+                            let seq2 = sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
+
+                            new_sst_files.push(create_sst_file(
+                                family as u32,
+                                part1,
+                                // We don't know the exact sizes so we estimate them
+                                last_entries_total_sizes.0 / 2,
+                                last_entries_total_sizes.1 / 2,
+                                path,
+                                seq1,
+                            )?);
+
+                            new_sst_files.push(create_sst_file(
+                                family as u32,
+                                part2,
+                                last_entries_total_sizes.0 / 2,
+                                last_entries_total_sizes.1 / 2,
+                                path,
+                                seq2,
+                            )?);
+                        }
+                        Ok(new_sst_files)
+                    })
+                    .collect::<Result<Vec<_>>>()?;
+
+                // Move SST files
+                let mut new_sst_files = move_jobs
+                    .into_par_iter()
+                    .with_min_len(1)
+                    .map(|index| {
+                        let index = ssts_with_ranges[index].index;
+                        let sst = &static_sorted_files[index];
+                        let seq = sequence_number.fetch_add(1, Ordering::SeqCst) + 1;
+                        let src_path = self.path.join(format!("{:08}.sst", sst.sequence_number()));
+                        let dst_path = self.path.join(format!("{:08}.sst", seq));
+                        if fs::hard_link(&src_path, &dst_path).is_err() {
+                            fs::copy(src_path, &dst_path)?;
+                        }
+                        Ok((seq, File::open(dst_path)?))
+                    })
+                    .collect::<Result<Vec<_>>>()?;
+
+                new_sst_files.extend(merge_result.into_iter().flatten());
+                Ok((new_sst_files, indicies_to_delete))
+            })
+            .collect::<Result<Vec<_>>>()?;
+
+        for (mut inner_new_sst_files, mut inner_indicies_to_delete) in result {
+            new_sst_files.append(&mut inner_new_sst_files);
+            indicies_to_delete.append(&mut inner_indicies_to_delete);
+        }
+
+        Ok(true)
+    }
+
+    /// Get a value from the database. Returns None if the key is not found. The returned value
+    /// might hold onto a block of the database and it should not be held long-term.
+    pub fn get<K: QueryKey>(&self, family: usize, key: &K) -> Result<Option<ArcSlice<u8>>> {
+        let hash = hash_key(key);
+        let inner = self.inner.read();
+        for sst in inner.static_sorted_files.iter().rev() {
+            match sst.lookup(
+                family as u32,
+                hash,
+                key,
+                &self.aqmf_cache,
+                &self.index_block_cache,
+                &self.key_block_cache,
+                &self.value_block_cache,
+            )? {
+                LookupResult::Deleted => {
+                    #[cfg(feature = "stats")]
+                    self.stats.hits_deleted.fetch_add(1, Ordering::Relaxed);
+                    return Ok(None);
+                }
+                LookupResult::Slice { value } => {
+                    #[cfg(feature = "stats")]
+                    self.stats.hits_small.fetch_add(1, Ordering::Relaxed);
+                    return Ok(Some(value));
+                }
+                LookupResult::Blob { sequence_number } => {
+                    #[cfg(feature = "stats")]
+                    self.stats.hits_blob.fetch_add(1, Ordering::Relaxed);
+                    let blob = self.read_blob(sequence_number)?;
+                    return Ok(Some(blob));
+                }
+                LookupResult::RangeMiss => {
+                    #[cfg(feature = "stats")]
+                    self.stats.miss_range.fetch_add(1, Ordering::Relaxed);
+                }
+                LookupResult::QuickFilterMiss => {
+                    #[cfg(feature = "stats")]
+                    self.stats.miss_aqmf.fetch_add(1, Ordering::Relaxed);
+                }
+                LookupResult::KeyMiss => {
+                    #[cfg(feature = "stats")]
+                    self.stats.miss_key.fetch_add(1, Ordering::Relaxed);
+                }
+            }
+        }
+        #[cfg(feature = "stats")]
+        self.stats.miss_global.fetch_add(1, Ordering::Relaxed);
+        Ok(None)
+    }
+
+    /// Returns database statistics.
+    #[cfg(feature = "stats")]
+    pub fn statistics(&self) -> Statistics {
+        let inner = self.inner.read();
+        Statistics {
+            sst_files: inner.static_sorted_files.len(),
+            index_block_cache: CacheStatistics::new(&self.index_block_cache),
+            key_block_cache: CacheStatistics::new(&self.key_block_cache),
+            value_block_cache: CacheStatistics::new(&self.value_block_cache),
+            aqmf_cache: CacheStatistics::new(&self.aqmf_cache),
+            hits: self.stats.hits_deleted.load(Ordering::Relaxed)
+                + self.stats.hits_small.load(Ordering::Relaxed)
+                + self.stats.hits_blob.load(Ordering::Relaxed),
+            misses: self.stats.miss_global.load(Ordering::Relaxed),
+            miss_range: self.stats.miss_range.load(Ordering::Relaxed),
+            miss_aqmf: self.stats.miss_aqmf.load(Ordering::Relaxed),
+            miss_key: self.stats.miss_key.load(Ordering::Relaxed),
+        }
+    }
+
+    /// Shuts down the database. This will print statistics if the `stats` feature is enabled.
+    pub fn shutdown(&self) -> Result<()> {
+        #[cfg(feature = "stats")]
+        println!("{:#?}", self.statistics());
+        Ok(())
+    }
+}
+
+/// Helper method to remove certain indices from a list while keeping the order.
+/// This is similar to the `remove` method on Vec, but it allows removing multiple indices at
+/// once. It returns the removed elements in unspecified order.
+///
+/// Note: The `sorted_indicies` list needs to be sorted.
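+///
+/// A small example of the intended behavior (mirroring the unit tests below):
+///
+/// ```ignore
+/// let mut list = vec![1, 2, 3, 4, 5];
+/// let removed = remove_indicies(&mut list, &[1, 3]);
+/// assert_eq!(list, vec![1, 3, 5]);
+/// // `removed` contains 2 and 4, in unspecified order.
+/// ```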
+fn remove_indicies(list: &mut Vec, sorted_indicies: &[usize]) -> Vec { + let mut r = 0; + let mut w = 0; + let mut i = 0; + while r < list.len() { + if i < sorted_indicies.len() { + let idx = sorted_indicies[i]; + if r != idx { + list.swap(w, r); + w += 1; + r += 1; + } else { + r += 1; + i += 1; + } + } else { + list.swap(w, r); + w += 1; + r += 1; + } + } + list.split_off(w) +} + +#[cfg(test)] +mod tests { + use crate::db::remove_indicies; + + #[test] + fn test_remove_indicies() { + let mut list = vec![1, 2, 3, 4, 5, 6, 7, 8, 9]; + let sorted_indicies = vec![1, 3, 5, 7]; + let removed = remove_indicies(&mut list, &sorted_indicies); + assert_eq!(list, vec![1, 3, 5, 7, 9]); + assert!(removed.contains(&2)); + assert!(removed.contains(&4)); + assert!(removed.contains(&6)); + assert!(removed.contains(&8)); + assert_eq!(removed.len(), 4); + } + + #[test] + fn test_remove_indicies2() { + let mut list = vec![1, 2, 3, 4, 5, 6, 7, 8, 9]; + let sorted_indicies = vec![0, 1, 2, 6, 7, 8]; + let removed = remove_indicies(&mut list, &sorted_indicies); + assert_eq!(list, vec![4, 5, 6]); + assert!(removed.contains(&1)); + assert!(removed.contains(&2)); + assert!(removed.contains(&3)); + assert!(removed.contains(&7)); + assert!(removed.contains(&8)); + assert!(removed.contains(&9)); + assert_eq!(removed.len(), 6); + } +} diff --git a/turbopack/crates/turbo-persistence/src/key.rs b/turbopack/crates/turbo-persistence/src/key.rs new file mode 100644 index 0000000000000..d88a93396abda --- /dev/null +++ b/turbopack/crates/turbo-persistence/src/key.rs @@ -0,0 +1,205 @@ +use std::{cmp::min, hash::Hasher}; + +/// A trait for keys that can be used for hashing. +pub trait KeyBase { + /// Returns the length of the key in bytes. + fn len(&self) -> usize; + /// Hashes the key. It should not include the structure of the key, only the data. E.g. `([1, + /// 2], [3, 4])` should hash the same as `[1, 2, 3, 4]`. + fn hash(&self, state: &mut H); +} + +impl KeyBase for &'_ [u8] { + fn len(&self) -> usize { + <[u8]>::len(self) + } + + fn hash(&self, state: &mut H) { + for item in *self { + state.write_u8(*item); + } + } +} + +impl KeyBase for [u8; N] { + fn len(&self) -> usize { + self[..].len() + } + + fn hash(&self, state: &mut H) { + for item in self { + state.write_u8(*item); + } + } +} + +impl KeyBase for Vec { + fn len(&self) -> usize { + self.len() + } + + fn hash(&self, state: &mut H) { + for item in self { + state.write_u8(*item); + } + } +} + +impl KeyBase for u8 { + fn len(&self) -> usize { + 1 + } + + fn hash(&self, state: &mut H) { + state.write_u8(*self); + } +} + +impl KeyBase for (A, B) { + fn len(&self) -> usize { + let (a, b) = self; + a.len() + b.len() + } + + fn hash(&self, state: &mut H) { + let (a, b) = self; + KeyBase::hash(a, state); + KeyBase::hash(b, state); + } +} + +impl KeyBase for &'_ T { + fn len(&self) -> usize { + (*self).len() + } + + fn hash(&self, state: &mut H) { + (*self).hash(state) + } +} + +/// A trait for keys that can be used to query the database. They need to allow hashing and +/// comparison with a byte slice (total order). 
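+///
+/// Note that composite keys compare as if their parts were concatenated: the
+/// tuple key `(&[1, 2], &[3, 4])` compares equal to the stored byte slice
+/// `[1, 2, 3, 4]` (see the `tuple` test at the bottom of this file).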
+pub trait QueryKey: KeyBase { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering; +} + +impl QueryKey for &'_ [u8] { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering { + Ord::cmp(self, &key) + } +} + +impl QueryKey for [u8; N] { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering { + Ord::cmp(&self[..], key) + } +} + +impl QueryKey for Vec { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering { + Ord::cmp(&**self, key) + } +} + +impl QueryKey for u8 { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering { + Ord::cmp(&[*self][..], key) + } +} + +impl QueryKey for (A, B) { + fn cmp(&self, mut key: &[u8]) -> std::cmp::Ordering { + let (a, b) = self; + let len = a.len(); + let key_len = key.len(); + let key_part = &key[..min(key_len, len)]; + match a.cmp(key_part) { + std::cmp::Ordering::Equal => { + key = &key[len..]; + b.cmp(key) + } + ord => ord, + } + } +} + +impl QueryKey for &'_ T { + fn cmp(&self, key: &[u8]) -> std::cmp::Ordering { + (*self).cmp(key) + } +} + +/// A trait for keys that can be stored in the database. They need to allow hashing and comparison. +pub trait StoreKey: KeyBase + Ord { + fn write_to(&self, buf: &mut Vec); +} + +impl StoreKey for Vec { + fn write_to(&self, buf: &mut Vec) { + buf.extend_from_slice(self); + } +} + +impl StoreKey for &'_ [u8] { + fn write_to(&self, buf: &mut Vec) { + buf.extend_from_slice(self); + } +} + +impl StoreKey for u8 { + fn write_to(&self, buf: &mut Vec) { + buf.push(*self); + } +} + +impl StoreKey for (A, B) { + fn write_to(&self, buf: &mut Vec) { + self.0.write_to(buf); + self.1.write_to(buf); + } +} + +impl StoreKey for &'_ T { + fn write_to(&self, buf: &mut Vec) { + (*self).write_to(buf); + } +} + +/// Hashes a key with a fast, deterministic hash function. +pub fn hash_key(key: &impl KeyBase) -> u64 { + let mut hasher = twox_hash::XxHash64::with_seed(0); + key.hash(&mut hasher); + hasher.finish() +} + +#[cfg(test)] +mod tests { + use std::cmp::Ordering; + + use crate::{key::hash_key, QueryKey}; + + #[test] + fn tuple() { + let key = (&[1, 2], &[3, 4]); + assert_eq!(QueryKey::cmp(&key, &[1, 2, 3, 4]), Ordering::Equal); + assert_eq!(QueryKey::cmp(&key, &[1, 2, 3, 3]), Ordering::Greater); + assert_eq!(QueryKey::cmp(&key, &[1, 2, 3, 5]), Ordering::Less); + assert_eq!(QueryKey::cmp(&key, &[0, 2, 3, 4]), Ordering::Greater); + assert_eq!(QueryKey::cmp(&key, &[2, 2, 3, 4]), Ordering::Less); + assert_eq!(QueryKey::cmp(&key, &[1, 2, 3, 4, 5]), Ordering::Less); + assert_eq!(QueryKey::cmp(&key, &[1, 2, 3]), Ordering::Greater); + assert_eq!(QueryKey::cmp(&key, &[1, 2]), Ordering::Greater); + assert_eq!(QueryKey::cmp(&key, &[1]), Ordering::Greater); + assert_eq!(QueryKey::cmp(&key, &[]), Ordering::Greater); + } + + #[test] + fn hash() { + let h1 = hash_key(&[1, 2, 3, 4]); + let h2 = hash_key(&(&[1, 2], &[3, 4])); + let h3 = hash_key(&(vec![1, 2, 3], 4u8)); + assert_eq!(h2, h1); + assert_eq!(h3, h1); + } +} diff --git a/turbopack/crates/turbo-persistence/src/lib.rs b/turbopack/crates/turbo-persistence/src/lib.rs new file mode 100644 index 0000000000000..3069de7069418 --- /dev/null +++ b/turbopack/crates/turbo-persistence/src/lib.rs @@ -0,0 +1,24 @@ +#![feature(once_cell_try)] +#![feature(new_zeroed_alloc)] +#![feature(get_mut_unchecked)] + +mod arc_slice; +mod collector; +mod collector_entry; +mod compaction; +mod constants; +mod db; +mod key; +mod lookup_entry; +mod merge_iter; +mod static_sorted_file; +mod static_sorted_file_builder; +mod write_batch; + +#[cfg(test)] +mod tests; + +pub use arc_slice::ArcSlice; +pub use db::TurboPersistence; +pub use 
key::{QueryKey, StoreKey};
+pub use write_batch::WriteBatch;
diff --git a/turbopack/crates/turbo-persistence/src/lookup_entry.rs b/turbopack/crates/turbo-persistence/src/lookup_entry.rs
new file mode 100644
index 0000000000000..7095fd57fc9bb
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/lookup_entry.rs
@@ -0,0 +1,66 @@
+use crate::{
+    constants::MAX_SMALL_VALUE_SIZE,
+    static_sorted_file_builder::{Entry, EntryValue},
+    ArcSlice,
+};
+
+/// A value from an SST file lookup.
+pub enum LookupValue {
+    /// The value was deleted.
+    Deleted,
+    /// The value is stored in the SST file.
+    Slice { value: ArcSlice<u8> },
+    /// The value is stored in a blob file.
+    Blob { sequence_number: u32 },
+}
+
+impl LookupValue {
+    /// Returns the size of the value in the SST file.
+    pub fn size_in_sst(&self) -> usize {
+        match self {
+            LookupValue::Slice { value } => value.len(),
+            LookupValue::Deleted => 0,
+            LookupValue::Blob { .. } => 0,
+        }
+    }
+}
+
+/// An entry from an SST file lookup.
+pub struct LookupEntry {
+    /// The hash of the key.
+    pub hash: u64,
+    /// The key.
+    pub key: ArcSlice<u8>,
+    /// The value.
+    pub value: LookupValue,
+}
+
+impl Entry for LookupEntry {
+    fn key_hash(&self) -> u64 {
+        self.hash
+    }
+
+    fn key_len(&self) -> usize {
+        self.key.len()
+    }
+
+    fn write_key_to(&self, buf: &mut Vec<u8>) {
+        buf.extend_from_slice(&self.key);
+    }
+
+    fn value(&self) -> EntryValue<'_> {
+        match &self.value {
+            LookupValue::Deleted => EntryValue::Deleted,
+            LookupValue::Slice { value } => {
+                if value.len() > MAX_SMALL_VALUE_SIZE {
+                    EntryValue::Medium { value }
+                } else {
+                    EntryValue::Small { value }
+                }
+            }
+            LookupValue::Blob { sequence_number } => EntryValue::Large {
+                blob: *sequence_number,
+            },
+        }
+    }
+}
diff --git a/turbopack/crates/turbo-persistence/src/merge_iter.rs b/turbopack/crates/turbo-persistence/src/merge_iter.rs
new file mode 100644
index 0000000000000..251ef32c26db5
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/merge_iter.rs
@@ -0,0 +1,79 @@
+use std::{cmp::Ordering, collections::BinaryHeap};
+
+use anyhow::Result;
+
+use crate::lookup_entry::LookupEntry;
+
+/// An active iterator that is being merged. It has peeked the next element and can be compared
+/// according to that element. The `order` is used when multiple iterators have the same key.
+struct ActiveIterator<T: Iterator<Item = Result<LookupEntry>>> {
+    iter: T,
+    order: usize,
+    entry: LookupEntry,
+}
+
+impl<T: Iterator<Item = Result<LookupEntry>>> PartialEq for ActiveIterator<T> {
+    fn eq(&self, other: &Self) -> bool {
+        self.entry.hash == other.entry.hash && *self.entry.key == *other.entry.key
+    }
+}
+
+impl<T: Iterator<Item = Result<LookupEntry>>> Eq for ActiveIterator<T> {}
+
+impl<T: Iterator<Item = Result<LookupEntry>>> PartialOrd for ActiveIterator<T> {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl<T: Iterator<Item = Result<LookupEntry>>> Ord for ActiveIterator<T> {
+    fn cmp(&self, other: &Self) -> Ordering {
+        self.entry
+            .hash
+            .cmp(&other.entry.hash)
+            .then_with(|| (*self.entry.key).cmp(&other.entry.key))
+            .then_with(|| self.order.cmp(&other.order))
+            .reverse()
+    }
+}
+
+/// An iterator that merges multiple sorted iterators into a single sorted iterator. Internally it
+/// uses a heap of iterators to iterate over them in order.
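+///
+/// A minimal sketch of the intended use (illustrative only; the iterators
+/// would typically come from `StaticSortedFile::iter`):
+///
+/// ```ignore
+/// let merged = MergeIter::new(iters.into_iter())?;
+/// for entry in merged {
+///     let entry = entry?;
+///     // Entries arrive sorted by (hash, key); ties between iterators are
+///     // broken by their position in `iters`.
+/// }
+/// ```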
+pub struct MergeIter>> { + heap: BinaryHeap>, +} + +impl>> MergeIter { + pub fn new(iters: impl Iterator) -> Result { + let mut heap = BinaryHeap::new(); + for (order, mut iter) in iters.enumerate() { + if let Some(entry) = iter.next() { + let entry = entry?; + heap.push(ActiveIterator { iter, order, entry }); + } + } + Ok(Self { heap }) + } +} + +impl>> Iterator for MergeIter { + type Item = Result; + + fn next(&mut self) -> Option { + let ActiveIterator { + mut iter, + order, + entry, + } = self.heap.pop()?; + match iter.next() { + None => {} + Some(Err(e)) => return Some(Err(e)), + Some(Ok(next)) => self.heap.push(ActiveIterator { + iter, + order, + entry: next, + }), + } + Some(Ok(entry)) + } +} diff --git a/turbopack/crates/turbo-persistence/src/static_sorted_file.rs b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs new file mode 100644 index 0000000000000..1de537c69ee3d --- /dev/null +++ b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs @@ -0,0 +1,717 @@ +use std::{ + cmp::Ordering, + fs::File, + hash::BuildHasherDefault, + mem::{transmute, MaybeUninit}, + path::PathBuf, + sync::{Arc, OnceLock}, +}; + +use anyhow::{bail, Result}; +use byteorder::{ReadBytesExt, BE}; +use lzzzz::lz4::decompress_with_dict; +use memmap2::Mmap; +use quick_cache::sync::GuardResult; +use rustc_hash::FxHasher; + +use crate::{ + arc_slice::ArcSlice, + lookup_entry::{LookupEntry, LookupValue}, + QueryKey, +}; + +/// The block header for an index block. +pub const BLOCK_TYPE_INDEX: u8 = 0; +/// The block header for a key block. +pub const BLOCK_TYPE_KEY: u8 = 1; + +/// The tag for a small-sized value. +pub const KEY_BLOCK_ENTRY_TYPE_SMALL: u8 = 0; +/// The tag for the blob value. +pub const KEY_BLOCK_ENTRY_TYPE_BLOB: u8 = 1; +/// The tag for the deleted value. +pub const KEY_BLOCK_ENTRY_TYPE_DELETED: u8 = 2; +/// The tag for a medium-sized value. +pub const KEY_BLOCK_ENTRY_TYPE_MEDIUM: u8 = 3; + +/// The result of a lookup operation. +pub enum LookupResult { + /// The key was deleted. + Deleted, + /// The key was found and the value is a slice. + Slice { value: ArcSlice }, + /// The key was found and the value is a blob. + Blob { sequence_number: u32 }, + /// The key was not found because it is out of the range of this SST file. + RangeMiss, + /// The key was not found because it was not in the AQMF filter. But it was in the range. + QuickFilterMiss, + /// The key was not found. But it was in the range and the AQMF filter. + KeyMiss, +} + +impl From for LookupResult { + fn from(value: LookupValue) -> Self { + match value { + LookupValue::Deleted => LookupResult::Deleted, + LookupValue::Slice { value } => LookupResult::Slice { value }, + LookupValue::Blob { sequence_number } => LookupResult::Blob { sequence_number }, + } + } +} + +/// A byte range in the SST file. +struct LocationInFile { + start: usize, + end: usize, +} + +/// The read and parsed header of an SST file. +struct Header { + /// The key family stored in this file. + family: u32, + /// The minimum hash value in this file. + min_hash: u64, + /// The maximum hash value in this file. + max_hash: u64, + /// The location of the AQMF filter in the file. + aqmf: LocationInFile, + /// The location of the key compression dictionary in the file. + key_compression_dictionary: LocationInFile, + /// The location of the value compression dictionary in the file. + value_compression_dictionary: LocationInFile, + /// The byte offset where the block offsets start. + block_offsets_start: usize, + /// The byte offset where the blocks start. 
+    blocks_start: usize,
+    /// The number of blocks in this file.
+    block_count: u16,
+}
+
+/// The key family and hash range of an SST file.
+#[derive(Clone, Copy)]
+pub struct StaticSortedFileRange {
+    pub family: u32,
+    pub min_hash: u64,
+    pub max_hash: u64,
+}
+
+#[derive(Clone, Default)]
+pub struct AqmfWeighter;
+
+impl quick_cache::Weighter<u32, Arc<qfilter::Filter>> for AqmfWeighter {
+    fn weight(&self, _key: &u32, filter: &Arc<qfilter::Filter>) -> u64 {
+        filter.capacity() + 1
+    }
+}
+
+#[derive(Clone, Default)]
+pub struct BlockWeighter;
+
+impl quick_cache::Weighter<(u32, u16), ArcSlice<u8>> for BlockWeighter {
+    fn weight(&self, _key: &(u32, u16), val: &ArcSlice<u8>) -> u64 {
+        val.len() as u64 + 8
+    }
+}
+
+pub type AqmfCache =
+    quick_cache::sync::Cache<u32, Arc<qfilter::Filter>, AqmfWeighter, BuildHasherDefault<FxHasher>>;
+pub type BlockCache =
+    quick_cache::sync::Cache<(u32, u16), ArcSlice<u8>, BlockWeighter, BuildHasherDefault<FxHasher>>;
+
+/// A memory mapped SST file.
+pub struct StaticSortedFile {
+    /// The sequence number of this file.
+    sequence_number: u32,
+    /// The memory mapped file.
+    mmap: Mmap,
+    /// The parsed header of this file.
+    header: OnceLock<Header>,
+    /// The AQMF filter of this file. This is only used if the range is very large. Smaller ranges
+    /// use the AQMF cache instead.
+    aqmf: OnceLock<qfilter::Filter>,
+}
+
+impl StaticSortedFile {
+    /// The sequence number of this file.
+    pub fn sequence_number(&self) -> u32 {
+        self.sequence_number
+    }
+
+    /// Opens an SST file at the given path. This memory maps the file, but does not read it yet.
+    /// It's read lazily on demand.
+    pub fn open(sequence_number: u32, path: PathBuf) -> Result<Self> {
+        let mmap = unsafe { Mmap::map(&File::open(&path)?)? };
+        let file = Self {
+            sequence_number,
+            mmap,
+            header: OnceLock::new(),
+            aqmf: OnceLock::new(),
+        };
+        Ok(file)
+    }
+
+    /// Reads and parses the header of this file if it hasn't been read yet.
+    fn header(&self) -> Result<&Header> {
+        self.header.get_or_try_init(|| {
+            let mut file = &*self.mmap;
+            let magic = file.read_u32::<BE>()?;
+            if magic != 0x53535401 {
+                bail!("Invalid magic number or version");
+            }
+            let family = file.read_u32::<BE>()?;
+            let min_hash = file.read_u64::<BE>()?;
+            let max_hash = file.read_u64::<BE>()?;
+            let aqmf_length = file.read_u24::<BE>()? as usize;
+            let key_compression_dictionary_length = file.read_u16::<BE>()? as usize;
+            let value_compression_dictionary_length = file.read_u16::<BE>()? as usize;
+            let block_count = file.read_u16::<BE>()?;
+            const HEADER_SIZE: usize = 33;
+            let mut current_offset = HEADER_SIZE;
+            let aqmf = LocationInFile {
+                start: current_offset,
+                end: current_offset + aqmf_length,
+            };
+            current_offset += aqmf_length;
+            let key_compression_dictionary = LocationInFile {
+                start: current_offset,
+                end: current_offset + key_compression_dictionary_length,
+            };
+            current_offset += key_compression_dictionary_length;
+            let value_compression_dictionary = LocationInFile {
+                start: current_offset,
+                end: current_offset + value_compression_dictionary_length,
+            };
+            current_offset += value_compression_dictionary_length;
+            let block_offsets_start = current_offset;
+            let blocks_start = block_offsets_start + block_count as usize * 4;
+
+            Ok(Header {
+                family,
+                min_hash,
+                max_hash,
+                aqmf,
+                key_compression_dictionary,
+                value_compression_dictionary,
+                block_offsets_start,
+                blocks_start,
+                block_count,
+            })
+        })
+    }
+
+    /// Returns the key family and hash range of this file.
+    pub fn range(&self) -> Result<StaticSortedFileRange> {
+        let header = self.header()?;
+        Ok(StaticSortedFileRange {
+            family: header.family,
+            min_hash: header.min_hash,
+            max_hash: header.max_hash,
+        })
+    }
+
+    /// Iterates over all entries in this file in sorted order.
+    pub fn iter<'l>(
+        &'l self,
+        key_block_cache: &'l BlockCache,
+        value_block_cache: &'l BlockCache,
+    ) -> Result<StaticSortedFileIter<'l>> {
+        let header = self.header()?;
+        let mut iter = StaticSortedFileIter {
+            this: self,
+            key_block_cache,
+            value_block_cache,
+            header,
+            stack: Vec::new(),
+            current_key_block: None,
+        };
+        iter.enter_block(header.block_count - 1)?;
+        Ok(iter)
+    }
+
+    /// Looks up a key in this file.
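+    ///
+    /// The lookup runs through several levels of filtering: first the
+    /// (family, hash) range check, then the AQMF filter, then a binary search
+    /// down from the index block to a key block, and finally the value is
+    /// resolved from a value block or blob reference. Each level reports its
+    /// own miss variant in the returned [`LookupResult`].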
+    pub fn lookup<K: QueryKey>(
+        &self,
+        key_family: u32,
+        key_hash: u64,
+        key: &K,
+        aqmf_cache: &AqmfCache,
+        index_block_cache: &BlockCache,
+        key_block_cache: &BlockCache,
+        value_block_cache: &BlockCache,
+    ) -> Result<LookupResult> {
+        let header = self.header()?;
+        if key_family != header.family || key_hash < header.min_hash || key_hash > header.max_hash {
+            return Ok(LookupResult::RangeMiss);
+        }
+
+        let use_aqmf_cache = header.max_hash - header.min_hash < 1 << 62;
+        if use_aqmf_cache {
+            let aqmf = match aqmf_cache.get_value_or_guard(&self.sequence_number, None) {
+                GuardResult::Value(aqmf) => aqmf,
+                GuardResult::Guard(guard) => {
+                    let aqmf = &self.mmap[header.aqmf.start..header.aqmf.end];
+                    let aqmf: Arc<qfilter::Filter> = Arc::new(pot::from_slice(aqmf)?);
+                    let _ = guard.insert(aqmf.clone());
+                    aqmf
+                }
+                GuardResult::Timeout => unreachable!(),
+            };
+            if !aqmf.contains_fingerprint(key_hash) {
+                return Ok(LookupResult::QuickFilterMiss);
+            }
+        } else {
+            let aqmf = self.aqmf.get_or_try_init(|| {
+                let aqmf = &self.mmap[header.aqmf.start..header.aqmf.end];
+                anyhow::Ok(pot::from_slice(aqmf)?)
+            })?;
+            if !aqmf.contains_fingerprint(key_hash) {
+                return Ok(LookupResult::QuickFilterMiss);
+            }
+        }
+        let mut current_block = header.block_count - 1;
+        let mut cache = index_block_cache;
+        loop {
+            let block = self.get_key_block(header, current_block, cache)?;
+            cache = key_block_cache;
+            let mut block = &block[..];
+            let block_type = block.read_u8()?;
+            match block_type {
+                BLOCK_TYPE_INDEX => {
+                    current_block = self.lookup_index_block(block, key_hash)?;
+                }
+                BLOCK_TYPE_KEY => {
+                    return self.lookup_key_block(block, key_hash, key, header, value_block_cache);
+                }
+                _ => {
+                    bail!("Invalid block type");
+                }
+            }
+        }
+    }
+
+    /// Looks up a hash in an index block.
+    fn lookup_index_block(&self, mut block: &[u8], hash: u64) -> Result<u16> {
+        let first_block = block.read_u16::<BE>()?;
+        let entry_count = block.len() / 10;
+        if entry_count == 0 {
+            return Ok(first_block);
+        }
+        let entries = block;
+        fn get_hash(entries: &[u8], index: usize) -> Result<u64> {
+            Ok((&entries[index * 10..]).read_u64::<BE>()?)
+        }
+        fn get_block(entries: &[u8], index: usize) -> Result<u16> {
+            Ok((&entries[index * 10 + 8..]).read_u16::<BE>()?)
+        }
+        let first_hash = get_hash(entries, 0)?;
+        match hash.cmp(&first_hash) {
+            Ordering::Less => {
+                return Ok(first_block);
+            }
+            Ordering::Equal => {
+                return get_block(entries, 0);
+            }
+            Ordering::Greater => {}
+        }
+
+        let mut l = 1;
+        let mut r = entry_count;
+        // binary search for the range
+        while l < r {
+            let m = (l + r) / 2;
+            let mid_hash = get_hash(entries, m)?;
+            match hash.cmp(&mid_hash) {
+                Ordering::Less => {
+                    r = m;
+                }
+                Ordering::Equal => {
+                    return get_block(entries, m);
+                }
+                Ordering::Greater => {
+                    l = m + 1;
+                }
+            }
+        }
+        get_block(entries, l - 1)
+    }
+
+    /// Looks up a key in a key block and the value in a value block.
+    fn lookup_key_block<K: QueryKey>(
+        &self,
+        mut block: &[u8],
+        key_hash: u64,
+        key: &K,
+        header: &Header,
+        value_block_cache: &BlockCache,
+    ) -> Result<LookupResult> {
+        let entry_count = block.read_u24::<BE>()?
as usize; + let offsets = &block[..entry_count * 4]; + let entries = &block[entry_count * 4..]; + + let mut l = 0; + let mut r = entry_count; + // binary search for the key + while l < r { + let m = (l + r) / 2; + let GetKeyEntryResult { + hash: mid_hash, + key: mid_key, + ty, + val: mid_val, + } = get_key_entry(offsets, entries, entry_count, m)?; + match key_hash.cmp(&mid_hash).then_with(|| key.cmp(mid_key)) { + Ordering::Less => { + r = m; + } + Ordering::Equal => { + return Ok(self + .handle_key_match(ty, mid_val, header, value_block_cache)? + .into()); + } + Ordering::Greater => { + l = m + 1; + } + } + } + Ok(LookupResult::KeyMiss) + } + + /// Handles a key match by looking up the value. + fn handle_key_match( + &self, + ty: u8, + mut val: &[u8], + header: &Header, + value_block_cache: &BlockCache, + ) -> Result { + Ok(match ty { + KEY_BLOCK_ENTRY_TYPE_SMALL => { + let block = val.read_u16::()?; + let size = val.read_u16::()? as usize; + let position = val.read_u32::()? as usize; + let value = self + .get_value_block(header, block, value_block_cache)? + .slice(position..position + size); + LookupValue::Slice { value } + } + KEY_BLOCK_ENTRY_TYPE_MEDIUM => { + let block = val.read_u16::()?; + let value = self.read_value_block(header, block)?; + LookupValue::Slice { value } + } + KEY_BLOCK_ENTRY_TYPE_BLOB => { + let sequence_number = val.read_u32::()?; + LookupValue::Blob { sequence_number } + } + KEY_BLOCK_ENTRY_TYPE_DELETED => LookupValue::Deleted, + _ => { + bail!("Invalid key block entry type"); + } + }) + } + + /// Gets a key block from the cache or reads it from the file. + fn get_key_block( + &self, + header: &Header, + block: u16, + key_block_cache: &BlockCache, + ) -> Result, anyhow::Error> { + Ok( + match key_block_cache.get_value_or_guard(&(self.sequence_number, block), None) { + GuardResult::Value(block) => block, + GuardResult::Guard(guard) => { + let block = self.read_key_block(header, block)?; + let _ = guard.insert(block.clone()); + block + } + GuardResult::Timeout => unreachable!(), + }, + ) + } + + /// Gets a value block from the cache or reads it from the file. + fn get_value_block( + &self, + header: &Header, + block: u16, + value_block_cache: &BlockCache, + ) -> Result> { + let block = match value_block_cache.get_value_or_guard(&(self.sequence_number, block), None) + { + GuardResult::Value(block) => block, + GuardResult::Guard(guard) => { + let block = self.read_value_block(header, block)?; + let _ = guard.insert(block.clone()); + block + } + GuardResult::Timeout => unreachable!(), + }; + Ok(block) + } + + /// Reads a key block from the file. + fn read_key_block(&self, header: &Header, block_index: u16) -> Result> { + self.read_block( + header, + block_index, + &self.mmap + [header.key_compression_dictionary.start..header.key_compression_dictionary.end], + ) + } + + /// Reads a value block from the file. + fn read_value_block(&self, header: &Header, block_index: u16) -> Result> { + self.read_block( + header, + block_index, + &self.mmap[header.value_compression_dictionary.start + ..header.value_compression_dictionary.end], + ) + } + + /// Reads a block from the file. 
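+    ///
+    /// Blocks are stored as a 4 byte (big endian) uncompressed length followed
+    /// by the LZ4 compressed payload. The block offsets table at
+    /// `block_offsets_start` is consulted to find the compressed byte range,
+    /// and decompression uses the per-file dictionary passed in by the
+    /// key/value specific wrappers above.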
+ fn read_block( + &self, + header: &Header, + block_index: u16, + compression_dictionary: &[u8], + ) -> Result> { + #[cfg(feature = "strict_checks")] + if block_index >= header.block_count { + bail!( + "Corrupted file seq:{} block:{} > number of blocks {} (block_offsets: {:x}, \ + blocks: {:x})", + self.sequence_number, + block_index, + header.block_count, + header.block_offsets_start, + header.blocks_start + ); + } + let offset = header.block_offsets_start + block_index as usize * 4; + #[cfg(feature = "strict_checks")] + if offset + 4 > self.mmap.len() { + bail!( + "Corrupted file seq:{} block:{} block offset locations {} + 4 bytes > file end {} \ + (block_offsets: {:x}, blocks: {:x})", + self.sequence_number, + block_index, + offset, + self.mmap.len(), + header.block_offsets_start, + header.blocks_start + ); + } + let block_start = if block_index == 0 { + header.blocks_start + } else { + header.blocks_start + (&self.mmap[offset - 4..offset]).read_u32::()? as usize + }; + let block_end = + header.blocks_start + (&self.mmap[offset..offset + 4]).read_u32::()? as usize; + #[cfg(feature = "strict_checks")] + if block_end > self.mmap.len() || block_start > self.mmap.len() { + bail!( + "Corrupted file seq:{} block:{} block {} - {} > file end {} (block_offsets: {:x}, \ + blocks: {:x})", + self.sequence_number, + block_index, + block_start, + block_end, + self.mmap.len(), + header.block_offsets_start, + header.blocks_start + ); + } + let uncompressed_length = + (&self.mmap[block_start..block_start + 4]).read_u32::()? as usize; + let block = self.mmap[block_start + 4..block_end].to_vec(); + + let buffer = Arc::new_zeroed_slice(uncompressed_length); + // Safety: MaybeUninit can be safely transmuted to u8. + let mut buffer = unsafe { transmute::]>, Arc<[u8]>>(buffer) }; + // Safety: We know that the buffer is not shared yet. + let decompressed = unsafe { Arc::get_mut_unchecked(&mut buffer) }; + decompress_with_dict(&block, decompressed, compression_dictionary)?; + Ok(ArcSlice::from(buffer)) + } +} + +/// An iterator over all entries in a SST file in sorted order. +pub struct StaticSortedFileIter<'l> { + this: &'l StaticSortedFile, + key_block_cache: &'l BlockCache, + value_block_cache: &'l BlockCache, + header: &'l Header, + + stack: Vec, + current_key_block: Option, +} + +struct CurrentKeyBlock { + offsets: ArcSlice, + entries: ArcSlice, + entry_count: usize, + index: usize, +} + +struct CurrentIndexBlock { + entries: ArcSlice, + block_indicies_count: usize, + index: usize, +} + +impl Iterator for StaticSortedFileIter<'_> { + type Item = Result; + + fn next(&mut self) -> Option { + self.next_internal().transpose() + } +} + +impl StaticSortedFileIter<'_> { + /// Enters a block at the given index. + fn enter_block(&mut self, block_index: u16) -> Result<()> { + let block_arc = self + .this + .get_key_block(self.header, block_index, self.key_block_cache)?; + let mut block = &*block_arc; + let block_type = block.read_u8()?; + match block_type { + BLOCK_TYPE_INDEX => { + let block_indicies_count = (block.len() + 8) / 10; + let range = 1..block_arc.len(); + self.stack.push(CurrentIndexBlock { + entries: block_arc.slice(range), + block_indicies_count, + index: 0, + }); + } + BLOCK_TYPE_KEY => { + let entry_count = block.read_u24::()? 
as usize; + let offsets_range = 4..4 + entry_count * 4; + let entries_range = 4 + entry_count * 4..block_arc.len(); + let offsets = block_arc.clone().slice(offsets_range); + let entries = block_arc.slice(entries_range); + self.current_key_block = Some(CurrentKeyBlock { + offsets, + entries, + entry_count, + index: 0, + }); + } + _ => { + bail!("Invalid block type"); + } + } + Ok(()) + } + + /// Gets the next entry in the file and moves the cursor. + fn next_internal(&mut self) -> Result> { + loop { + if let Some(CurrentKeyBlock { + offsets, + entries, + entry_count, + index, + }) = self.current_key_block.take() + { + let GetKeyEntryResult { hash, key, ty, val } = + get_key_entry(&offsets, &entries, entry_count, index)?; + let value = + self.this + .handle_key_match(ty, val, self.header, self.value_block_cache)?; + let entry = LookupEntry { + hash, + // Safety: The key is a valid slice of the entries. + key: unsafe { ArcSlice::new_unchecked(key, ArcSlice::full_arc(&entries)) }, + value, + }; + if index + 1 < entry_count { + self.current_key_block = Some(CurrentKeyBlock { + offsets, + entries, + entry_count, + index: index + 1, + }); + } + return Ok(Some(entry)); + } + if let Some(CurrentIndexBlock { + entries, + block_indicies_count, + index, + }) = self.stack.pop() + { + let block_index = (&entries[index * 10..]).read_u16::()?; + if index + 1 < block_indicies_count { + self.stack.push(CurrentIndexBlock { + entries, + block_indicies_count, + index: index + 1, + }); + } + self.enter_block(block_index)?; + } else { + return Ok(None); + } + } + } +} + +struct GetKeyEntryResult<'l> { + hash: u64, + key: &'l [u8], + ty: u8, + val: &'l [u8], +} + +/// Reads a key entry from a key block. +fn get_key_entry<'l>( + offsets: &[u8], + entries: &'l [u8], + entry_count: usize, + index: usize, +) -> Result> { + let mut offset = &offsets[index * 4..]; + let ty = offset.read_u8()?; + let start = offset.read_u24::()? as usize; + let end = if index == entry_count - 1 { + entries.len() + } else { + (&offsets[(index + 1) * 4 + 1..]).read_u24::()? 
as usize
+    };
+    let hash = (&entries[start..start + 8]).read_u64::<BE>()?;
+    Ok(match ty {
+        KEY_BLOCK_ENTRY_TYPE_SMALL => GetKeyEntryResult {
+            hash,
+            key: &entries[start + 8..end - 8],
+            ty,
+            val: &entries[end - 8..end],
+        },
+        KEY_BLOCK_ENTRY_TYPE_MEDIUM => GetKeyEntryResult {
+            hash,
+            key: &entries[start + 8..end - 2],
+            ty,
+            val: &entries[end - 2..end],
+        },
+        KEY_BLOCK_ENTRY_TYPE_BLOB => GetKeyEntryResult {
+            hash,
+            key: &entries[start + 8..end - 4],
+            ty,
+            val: &entries[end - 4..end],
+        },
+        KEY_BLOCK_ENTRY_TYPE_DELETED => GetKeyEntryResult {
+            hash,
+            key: &entries[start + 8..end],
+            ty,
+            val: &[],
+        },
+        _ => {
+            bail!("Invalid key block entry type");
+        }
+    })
+}
diff --git a/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs b/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs
new file mode 100644
index 0000000000000..03dfc6ef21c0d
--- /dev/null
+++ b/turbopack/crates/turbo-persistence/src/static_sorted_file_builder.rs
@@ -0,0 +1,532 @@
+use std::{
+    cmp::min,
+    fs::File,
+    io::{self, BufWriter, Write},
+    path::Path,
+};
+
+use anyhow::{Context, Result};
+use byteorder::{ByteOrder, WriteBytesExt, BE};
+use lzzzz::lz4::{max_compressed_size, ACC_LEVEL_DEFAULT};
+
+use crate::static_sorted_file::{
+    BLOCK_TYPE_INDEX, BLOCK_TYPE_KEY, KEY_BLOCK_ENTRY_TYPE_BLOB, KEY_BLOCK_ENTRY_TYPE_DELETED,
+    KEY_BLOCK_ENTRY_TYPE_MEDIUM, KEY_BLOCK_ENTRY_TYPE_SMALL,
+};
+
+/// The maximum number of entries that should go into a single key block
+const MAX_KEY_BLOCK_ENTRIES: usize = 100 * 1024;
+/// The maximum bytes that should go into a single key block
+// Note this must fit into 3 bytes length
+const MAX_KEY_BLOCK_SIZE: usize = 16 * 1024;
+/// Overhead of bytes that should be counted for entries in a key block in addition to the key size
+const KEY_BLOCK_ENTRY_META_OVERHEAD: usize = 8;
+/// The maximum number of entries that should go into a single small value block
+const MAX_SMALL_VALUE_BLOCK_ENTRIES: usize = 100 * 1024;
+/// The maximum bytes that should go into a single small value block
+const MAX_SMALL_VALUE_BLOCK_SIZE: usize = 16 * 1024;
+/// The aimed false positive rate for the AQMF
+const AQMF_FALSE_POSITIVE_RATE: f64 = 0.01;
+
+/// The maximum compression dictionary size for value blocks
+const VALUE_COMPRESSION_DICTIONARY_SIZE: usize = 64 * 1024 - 1;
+/// The maximum compression dictionary size for key and index blocks
+const KEY_COMPRESSION_DICTIONARY_SIZE: usize = 64 * 1024 - 1;
+/// The maximum bytes that should be selected as value samples to create a compression dictionary
+const VALUE_COMPRESSION_SAMPLES_SIZE: usize = 256 * 1024;
+/// The maximum bytes that should be selected as key samples to create a compression dictionary
+const KEY_COMPRESSION_SAMPLES_SIZE: usize = 256 * 1024;
+/// The minimum bytes that should be selected as value samples. Below that no compression dictionary
+/// is used.
+const MIN_VALUE_COMPRESSION_SAMPLES_SIZE: usize = 1024;
+/// The minimum bytes that should be selected as key samples. Below that no compression dictionary
+/// is used.
+const MIN_KEY_COMPRESSION_SAMPLES_SIZE: usize = 1024;
+/// The bytes that are used per key/value entry for a sample.
+const COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY: usize = 100;
+
+/// Trait for entries from which SST files can be created
+pub trait Entry {
+    /// Returns the hash of the key
+    fn key_hash(&self) -> u64;
+    /// Returns the length of the key
+    fn key_len(&self) -> usize;
+    /// Writes the key to a buffer
+    fn write_key_to(&self, buf: &mut Vec<u8>);
+
+    /// Returns the value
+    fn value(&self) -> EntryValue<'_>;
+}
+
+/// Reference to a value
+#[derive(Copy, Clone)]
+pub enum EntryValue<'l> {
+    /// Small-sized value. They are stored in shared value blocks.
+    Small { value: &'l [u8] },
+    /// Medium-sized value. They are stored in their own value block.
+    Medium { value: &'l [u8] },
+    /// Large-sized value. They are stored in a blob file.
+    Large { blob: u32 },
+    /// Tombstone. The value was removed.
+    Deleted,
+}
+
+#[derive(Debug, Default)]
+pub struct StaticSortedFileBuilder {
+    family: u32,
+    aqmf: Vec<u8>,
+    key_compression_dictionary: Vec<u8>,
+    value_compression_dictionary: Vec<u8>,
+    blocks: Vec<(u32, Vec<u8>)>,
+    min_hash: u64,
+    max_hash: u64,
+}
+
+impl StaticSortedFileBuilder {
+    pub fn new<E: Entry>(
+        family: u32,
+        entries: &[E],
+        total_key_size: usize,
+        total_value_size: usize,
+    ) -> Result<Self> {
+        debug_assert!(entries.iter().map(|e| e.key_hash()).is_sorted());
+        let mut builder = Self {
+            family,
+            min_hash: entries.first().map(|e| e.key_hash()).unwrap_or(u64::MAX),
+            max_hash: entries.last().map(|e| e.key_hash()).unwrap_or(0),
+            ..Default::default()
+        };
+        builder.compute_aqmf(entries);
+        builder.compute_compression_dictionary(entries, total_key_size, total_value_size)?;
+        builder.compute_blocks(entries);
+        Ok(builder)
+    }
+
+    /// Computes an AQMF from the keys of all entries.
+    fn compute_aqmf<E: Entry>(&mut self, entries: &[E]) {
+        let mut filter = qfilter::Filter::new(entries.len() as u64, AQMF_FALSE_POSITIVE_RATE)
+            // This won't fail as we limit the number of entries per SST file
+            .expect("Filter can't be constructed");
+        for entry in entries {
+            filter
+                .insert_fingerprint(false, entry.key_hash())
+                // This can't fail as we allocated enough capacity
+                .expect("AQMF insert failed");
+        }
+        self.aqmf = pot::to_vec(&filter).expect("AQMF serialization failed");
+    }
+
+    /// Computes compression dictionaries from keys and values of all entries
+    fn compute_compression_dictionary<E: Entry>(
+        &mut self,
+        entries: &[E],
+        total_key_size: usize,
+        total_value_size: usize,
+    ) -> Result<()> {
+        if total_key_size < MIN_KEY_COMPRESSION_SAMPLES_SIZE
+            && total_value_size < MIN_VALUE_COMPRESSION_SAMPLES_SIZE
+        {
+            return Ok(());
+        }
+        let key_compression_samples_size = min(KEY_COMPRESSION_SAMPLES_SIZE, total_key_size / 10);
+        let value_compression_samples_size =
+            min(VALUE_COMPRESSION_SAMPLES_SIZE, total_value_size / 10);
+        let mut value_samples = Vec::with_capacity(value_compression_samples_size);
+        let mut value_sample_sizes = Vec::new();
+        let mut key_samples = Vec::with_capacity(key_compression_samples_size);
+        let mut key_sample_sizes = Vec::new();
+        let mut i = 12345678 % entries.len();
+        let mut j = 0;
+        loop {
+            let entry = &entries[i];
+            let value_remaining = value_compression_samples_size - value_samples.len();
+            let key_remaining = key_compression_samples_size - key_samples.len();
+            if value_remaining > 0 {
+                if let EntryValue::Small { value } | EntryValue::Medium { value } = entry.value() {
+                    let value = if value.len() <= COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY {
+                        value
+                    } else {
+                        j = (j + 12345678)
+                            % (value.len() - COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY);
+                        &value[j..j + COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY]
+                    };
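+                    // Clamp the sample to the remaining budget so that the
+                    // collected samples never exceed the configured total
+                    // sample size.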
+ if value.len() <= value_remaining { + value_sample_sizes.push(value.len()); + value_samples.extend_from_slice(value); + } else { + value_sample_sizes.push(value_remaining); + value_samples.extend_from_slice(&value[..value_remaining]); + } + } + } + if key_remaining > 0 { + let used_len = min(key_remaining, COMPRESSION_DICTIONARY_SAMPLE_PER_ENTRY); + if entry.key_len() <= used_len { + key_sample_sizes.push(entry.key_len()); + entry.write_key_to(&mut key_samples); + } else { + let mut temp = Vec::with_capacity(entry.key_len()); + entry.write_key_to(&mut temp); + debug_assert!(temp.len() == entry.key_len()); + + j = (j + 12345678) % (temp.len() - used_len); + key_sample_sizes.push(used_len); + key_samples.extend_from_slice(&temp[j..j + used_len]); + } + } + if key_remaining == 0 && value_remaining == 0 { + break; + } + i = (i + 12345678) % entries.len(); + } + assert!(key_samples.len() == key_sample_sizes.iter().sum::()); + assert!(value_samples.len() == value_sample_sizes.iter().sum::()); + if key_samples.len() > MIN_KEY_COMPRESSION_SAMPLES_SIZE && key_sample_sizes.len() > 5 { + self.key_compression_dictionary = zstd::dict::from_continuous( + &key_samples, + &key_sample_sizes, + KEY_COMPRESSION_DICTIONARY_SIZE, + ) + .context("Key dictionary creation failed")?; + } + if value_samples.len() > MIN_VALUE_COMPRESSION_SAMPLES_SIZE && value_sample_sizes.len() > 5 + { + self.value_compression_dictionary = zstd::dict::from_continuous( + &value_samples, + &value_sample_sizes, + VALUE_COMPRESSION_DICTIONARY_SIZE, + ) + .context("Value dictionary creation failed")?; + } + Ok(()) + } + + /// Compute index, key and value blocks. + fn compute_blocks(&mut self, entries: &[E]) { + // TODO implement multi level index + // TODO place key and value block near to each other + + // For now we use something simple to implement: + // Start with Value blocks + // And then Key blocks + // Last block is Index block + + // Store the locations of the values + let mut value_locations: Vec<(usize, usize)> = Vec::with_capacity(entries.len()); + + // Split the values into blocks + let mut current_block_start = 0; + let mut current_block_count = 0; + let mut current_block_size = 0; + for (i, entry) in entries.iter().enumerate() { + match entry.value() { + EntryValue::Small { value } => { + if current_block_size + value.len() > MAX_SMALL_VALUE_BLOCK_SIZE + || current_block_count + 1 >= MAX_SMALL_VALUE_BLOCK_ENTRIES + { + let block_index = self.blocks.len(); + let mut block = Vec::with_capacity(current_block_size); + for j in current_block_start..i { + if let EntryValue::Small { value } = &entries[j].value() { + block.extend_from_slice(value); + value_locations[j].0 = block_index; + } + } + self.blocks.push(self.compress_value_block(&block)); + current_block_start = i; + current_block_size = 0; + current_block_count = 0; + } + value_locations.push((0, current_block_size)); + current_block_size += value.len(); + current_block_count += 1; + } + EntryValue::Medium { value } => { + value_locations.push((self.blocks.len(), value.len())); + self.blocks.push(self.compress_value_block(value)); + } + _ => { + value_locations.push((0, 0)); + } + } + } + if current_block_count > 0 { + let block_index = self.blocks.len(); + let mut block = Vec::with_capacity(current_block_size); + for j in current_block_start..entries.len() { + if let EntryValue::Small { value } = &entries[j].value() { + block.extend_from_slice(value); + value_locations[j].0 = block_index; + } + } + self.blocks.push(self.compress_value_block(&block)); + } + + let mut 
key_block_boundaries = Vec::new(); + + // Split the keys into blocks + fn add_entry_to_block( + entry: &E, + value_location: &(usize, usize), + block: &mut KeyBlockBuilder, + ) { + match entry.value() { + EntryValue::Small { value } => { + block.put_small( + entry, + value_location.0.try_into().unwrap(), + value_location.1.try_into().unwrap(), + value.len().try_into().unwrap(), + ); + } + EntryValue::Medium { .. } => { + block.put_medium(entry, value_location.0.try_into().unwrap()); + } + EntryValue::Large { blob } => { + block.put_blob(entry, blob); + } + EntryValue::Deleted => { + block.delete(entry); + } + } + } + let mut current_block_start = 0; + let mut current_block_size = 0; + for (i, entry) in entries.iter().enumerate() { + if current_block_size > 0 + && (current_block_size + entry.key_len() + KEY_BLOCK_ENTRY_META_OVERHEAD + > MAX_KEY_BLOCK_SIZE + || i - current_block_start >= MAX_KEY_BLOCK_ENTRIES) && + // avoid breaking the block in the middle of a hash conflict + entries[i - 1].key_hash() != entry.key_hash() + { + let mut block = KeyBlockBuilder::new((i - current_block_start) as u32); + for j in current_block_start..i { + let entry = &entries[j]; + let value_location = &value_locations[j]; + add_entry_to_block(entry, value_location, &mut block); + } + key_block_boundaries + .push((entries[current_block_start].key_hash(), self.blocks.len())); + self.blocks.push(self.compress_key_block(&block.finish())); + current_block_size = 0; + current_block_start = i; + } + current_block_size += entry.key_len() + KEY_BLOCK_ENTRY_META_OVERHEAD; + } + if current_block_size > 0 { + let mut block = KeyBlockBuilder::new((entries.len() - current_block_start) as u32); + for j in current_block_start..entries.len() { + let entry = &entries[j]; + let value_location = &value_locations[j]; + add_entry_to_block(entry, value_location, &mut block); + } + key_block_boundaries.push((entries[current_block_start].key_hash(), self.blocks.len())); + self.blocks.push(self.compress_key_block(&block.finish())); + } + + // Compute the index + let mut index_block = IndexBlockBuilder::new( + key_block_boundaries.len() as u16, + key_block_boundaries[0].1 as u16, + ); + for (hash, block) in &key_block_boundaries[1..] { + index_block.put(*hash, *block as u16); + } + self.blocks + .push(self.compress_key_block(&index_block.finish())); + } + + /// Compresses a block with a compression dictionary. + fn compress_block(&self, block: &[u8], dict: &[u8]) -> (u32, Vec) { + let mut compressor = + lzzzz::lz4::Compressor::with_dict(dict).expect("LZ4 compressor creation failed"); + let mut compressed = Vec::with_capacity(max_compressed_size(block.len())); + compressor + .next_to_vec(block, &mut compressed, ACC_LEVEL_DEFAULT) + .expect("Compression failed"); + if compressed.capacity() > compressed.len() * 2 { + compressed.shrink_to_fit(); + } + (block.len().try_into().unwrap(), compressed) + } + + /// Compresses an index or key block. + fn compress_key_block(&self, block: &[u8]) -> (u32, Vec) { + self.compress_block(block, &self.key_compression_dictionary) + } + + /// Compresses a value block. + fn compress_value_block(&self, block: &[u8]) -> (u32, Vec) { + self.compress_block(block, &self.value_compression_dictionary) + } + + /// Writes the SST file. 
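+    ///
+    /// The on-disk layout written below is: the fixed header (magic/version,
+    /// family, min/max hash, AQMF and dictionary lengths, block count),
+    /// followed by the AQMF, the two compression dictionaries, the table of
+    /// block end offsets, and finally the blocks themselves, each prefixed
+    /// with its uncompressed size.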
+ pub fn write(&self, file: &Path) -> io::Result { + let mut file = BufWriter::new(File::create(file)?); + // magic number and version + file.write_u32::(0x53535401)?; + // family + file.write_u32::(self.family)?; + // min hash + file.write_u64::(self.min_hash)?; + // max hash + file.write_u64::(self.max_hash)?; + // AQMF length + file.write_u24::(self.aqmf.len().try_into().unwrap())?; + // Key compression dictionary length + file.write_u16::(self.key_compression_dictionary.len().try_into().unwrap())?; + // Value compression dictionary length + file.write_u16::(self.value_compression_dictionary.len().try_into().unwrap())?; + // Number of blocks + file.write_u16::(self.blocks.len().try_into().unwrap())?; + + // Write the AQMF + file.write_all(&self.aqmf)?; + // Write the key compression dictionary + file.write_all(&self.key_compression_dictionary)?; + // Write the value compression dictionary + file.write_all(&self.value_compression_dictionary)?; + + // Write the blocks + let mut offset = 0; + for (_, block) in &self.blocks { + // Block length (including the uncompressed length field) + let len = block.len() + 4; + offset += len; + file.write_u32::(offset.try_into().unwrap())?; + } + for (uncompressed_size, block) in &self.blocks { + // Uncompressed size + file.write_u32::(*uncompressed_size)?; + // Compressed block + file.write_all(block)?; + } + Ok(file.into_inner()?) + } +} + +/// Builder for a single key block +pub struct KeyBlockBuilder { + current_entry: usize, + header_size: usize, + data: Vec, +} + +/// The size of the key block header. +const KEY_BLOCK_HEADER_SIZE: usize = 4; + +impl KeyBlockBuilder { + /// Creates a new key block builder for the number of entries. + pub fn new(entry_count: u32) -> Self { + debug_assert!(entry_count < (1 << 24)); + + const ESTIMATED_KEY_SIZE: usize = 16; + let mut data = Vec::with_capacity(entry_count as usize * ESTIMATED_KEY_SIZE); + data.write_u8(BLOCK_TYPE_KEY).unwrap(); + data.write_u24::(entry_count).unwrap(); + for _ in 0..entry_count { + data.write_u32::(0).unwrap(); + } + Self { + current_entry: 0, + header_size: data.len(), + data, + } + } + + /// Writes a small-sized value to the buffer. + pub fn put_small( + &mut self, + entry: &E, + value_block: u16, + value_offset: u32, + value_size: u16, + ) { + let pos = self.data.len() - self.header_size; + let header_offset = KEY_BLOCK_HEADER_SIZE + self.current_entry * 4; + let header = (pos as u32) | ((KEY_BLOCK_ENTRY_TYPE_SMALL as u32) << 24); + BE::write_u32(&mut self.data[header_offset..header_offset + 4], header); + + self.data.write_u64::(entry.key_hash()).unwrap(); + entry.write_key_to(&mut self.data); + self.data.write_u16::(value_block).unwrap(); + self.data.write_u16::(value_size).unwrap(); + self.data.write_u32::(value_offset).unwrap(); + + self.current_entry += 1; + } + + /// Writes a medium-sized value to the buffer. + pub fn put_medium(&mut self, entry: &E, value_block: u16) { + let pos = self.data.len() - self.header_size; + let header_offset = KEY_BLOCK_HEADER_SIZE + self.current_entry * 4; + let header = (pos as u32) | ((KEY_BLOCK_ENTRY_TYPE_MEDIUM as u32) << 24); + BE::write_u32(&mut self.data[header_offset..header_offset + 4], header); + + self.data.write_u64::(entry.key_hash()).unwrap(); + entry.write_key_to(&mut self.data); + self.data.write_u16::(value_block).unwrap(); + + self.current_entry += 1; + } + + /// Writes a tombstone to the buffer. 
+ pub fn delete(&mut self, entry: &E) { + let pos = self.data.len() - self.header_size; + let header_offset = KEY_BLOCK_HEADER_SIZE + self.current_entry * 4; + let header = (pos as u32) | ((KEY_BLOCK_ENTRY_TYPE_DELETED as u32) << 24); + BE::write_u32(&mut self.data[header_offset..header_offset + 4], header); + + self.data.write_u64::(entry.key_hash()).unwrap(); + entry.write_key_to(&mut self.data); + + self.current_entry += 1; + } + + /// Writes a blob value to the buffer. + pub fn put_blob(&mut self, entry: &E, blob: u32) { + let pos = self.data.len() - self.header_size; + let header_offset = KEY_BLOCK_HEADER_SIZE + self.current_entry * 4; + let header = (pos as u32) | ((KEY_BLOCK_ENTRY_TYPE_BLOB as u32) << 24); + BE::write_u32(&mut self.data[header_offset..header_offset + 4], header); + + self.data.write_u64::(entry.key_hash()).unwrap(); + entry.write_key_to(&mut self.data); + self.data.write_u32::(blob).unwrap(); + + self.current_entry += 1; + } + + /// Returns the key block buffer + pub fn finish(self) -> Vec { + self.data + } +} + +/// Builder for a single index block. +pub struct IndexBlockBuilder { + data: Vec, +} + +impl IndexBlockBuilder { + /// Creates a new builder for an index block with the specified number of entries and a pointer + /// to the first block. + pub fn new(entry_count: u16, first_block: u16) -> Self { + let mut data = Vec::with_capacity(entry_count as usize * 10 + 3); + data.write_u8(BLOCK_TYPE_INDEX).unwrap(); + data.write_u16::(first_block).unwrap(); + Self { data } + } + + /// Adds a hash boundary to the index block. + pub fn put(&mut self, hash: u64, block: u16) { + self.data.write_u64::(hash).unwrap(); + self.data.write_u16::(block).unwrap(); + } + + /// Returns the index block buffer + fn finish(self) -> Vec { + self.data + } +} diff --git a/turbopack/crates/turbo-persistence/src/tests.rs b/turbopack/crates/turbo-persistence/src/tests.rs new file mode 100644 index 0000000000000..6dee6cd81721a --- /dev/null +++ b/turbopack/crates/turbo-persistence/src/tests.rs @@ -0,0 +1,347 @@ +use std::time::Instant; + +use anyhow::Result; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +use crate::{db::TurboPersistence, write_batch::WriteBatch}; + +#[test] +fn full_cycle() -> Result<()> { + let mut test_cases = Vec::new(); + type TestCases = Vec<( + &'static str, + Box, 16>) -> Result<()>>, + Box Result<()>>, + )>; + + fn test_case( + test_cases: &mut TestCases, + name: &'static str, + write: impl Fn(&mut WriteBatch, 16>) -> Result<()> + 'static, + read: impl Fn(&TurboPersistence) -> Result<()> + 'static, + ) { + test_cases.push(( + name, + Box::new(write) as Box, 16>) -> Result<()>>, + Box::new(read) as Box Result<()>>, + )); + } + + test_case( + &mut test_cases, + "Simple", + |batch| { + for i in 10..100u8 { + batch.put(0, vec![i], vec![i].into())?; + } + Ok(()) + }, + |db| { + let Some(value) = db.get(0, &[42u8])? else { + panic!("Value not found"); + }; + assert_eq!(&*value, &[42]); + assert_eq!(db.get(0, &[42u8, 42])?, None); + assert_eq!(db.get(0, &[1u8])?, None); + assert_eq!(db.get(0, &[255u8])?, None); + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Families", + |batch| { + for i in 0..16u8 { + batch.put(i as usize, vec![i], vec![i].into())?; + } + Ok(()) + }, + |db| { + let Some(value) = db.get(8, &[8u8])? 
else { + panic!("Value not found"); + }; + assert_eq!(&*value, &[8]); + assert!(db.get(8, &[8u8, 8])?.is_none()); + assert!(db.get(8, &[0u8])?.is_none()); + assert!(db.get(8, &[255u8])?.is_none()); + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Medium keys and values", + |batch| { + for i in 0..200u8 { + batch.put(0, vec![i; 10 * 1024], vec![i; 100 * 1024].into())?; + } + Ok(()) + }, + |db| { + for i in 0..200u8 { + let Some(value) = db.get(0, &vec![i; 10 * 1024])? else { + panic!("Value not found"); + }; + assert_eq!(&*value, &vec![i; 100 * 1024]); + } + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Large keys and values (blob files)", + |batch| { + for i in 0..20u8 { + batch.put( + 0, + vec![i; 10 * 1024 * 1024], + vec![i; 10 * 1024 * 1024].into(), + )?; + } + Ok(()) + }, + |db| { + for i in 0..20u8 { + let Some(value) = db.get(0, &vec![i; 10 * 1024 * 1024])? else { + panic!("Value not found"); + }; + assert_eq!(&*value, &vec![i; 10 * 1024 * 1024]); + } + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Different sizes keys and values", + |batch| { + for i in 100..200u8 { + batch.put(0, vec![i; i as usize], vec![i; i as usize].into())?; + } + Ok(()) + }, + |db| { + for i in 100..200u8 { + let Some(value) = db.get(0, &vec![i; i as usize])? else { + panic!("Value not found"); + }; + assert_eq!(&*value, &vec![i; i as usize]); + } + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Many items (1% read)", + |batch| { + for i in 0..1000 * 1024u32 { + batch.put(0, i.to_be_bytes().into(), i.to_be_bytes().to_vec().into())?; + } + Ok(()) + }, + |db| { + for i in 0..10 * 1024u32 { + let i = i * 100; + let Some(value) = db.get(0, &i.to_be_bytes())? else { + panic!("Value not found"); + }; + assert_eq!(&*value, &i.to_be_bytes()); + } + Ok(()) + }, + ); + + test_case( + &mut test_cases, + "Many items (1% read, multi-threaded)", + |batch| { + (0..10 * 1024 * 1024u32).into_par_iter().for_each(|i| { + batch + .put(0, i.to_be_bytes().into(), i.to_be_bytes().to_vec().into()) + .unwrap(); + }); + Ok(()) + }, + |db| { + (0..100 * 1024u32).into_par_iter().for_each(|i| { + let i = i * 100; + let Some(value) = db.get(0, &i.to_be_bytes()).unwrap() else { + panic!("Value not found"); + }; + assert_eq!(&*value, &i.to_be_bytes()); + }); + Ok(()) + }, + ); + + // Run each test case standalone + for (name, write, read) in test_cases.iter() { + let tempdir = tempfile::tempdir()?; + let path = tempdir.path(); + + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + let mut batch = db.write_batch()?; + write(&mut batch)?; + db.commit_write_batch(batch)?; + println!("{name} write time: {:?}", start.elapsed()); + + let start = Instant::now(); + read(&db)?; + println!("{name} read time: {:?}", start.elapsed()); + + let start = Instant::now(); + drop(db); + println!("{name} drop time: {:?}", start.elapsed()); + } + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + println!("{name} restore time: {:?}", start.elapsed()); + let start = Instant::now(); + read(&db)?; + println!("{name} read time after restore: {:?}", start.elapsed()); + let start = Instant::now(); + read(&db)?; + println!("{name} read time after read: {:?}", start.elapsed()); + + #[cfg(feature = "stats")] + println!("{name} stats: {:#?}", db.statistics()); + + let start = Instant::now(); + db.full_compact()?; + println!("{name} compact time: {:?}", start.elapsed()); + + let start = Instant::now(); + read(&db)?; + println!("{name} read time after compact: {:?}", 
start.elapsed()); + + let start = Instant::now(); + drop(db); + println!("{name} drop time after compact: {:?}", start.elapsed()); + } + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + println!("{name} restore time after compact: {:?}", start.elapsed()); + let start = Instant::now(); + read(&db)?; + println!( + "{name} read time after compact + restore: {:?}", + start.elapsed() + ); + let start = Instant::now(); + read(&db)?; + println!( + "{name} read time after compact + restore + read: {:?}", + start.elapsed() + ); + + #[cfg(feature = "stats")] + println!("{name} stats (compacted): {:#?}", db.statistics()); + + let start = Instant::now(); + drop(db); + println!( + "{name} drop time after compact + restore: {:?}", + start.elapsed() + ); + } + } + + // Run all test cases in a single db + { + let tempdir = tempfile::tempdir()?; + let path = tempdir.path(); + + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + let mut batch = db.write_batch()?; + for (_, write, _) in test_cases.iter() { + write(&mut batch)?; + } + db.commit_write_batch(batch)?; + println!("All write time: {:?}", start.elapsed()); + + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!("{name} read time: {:?}", start.elapsed()); + } + + let start = Instant::now(); + drop(db); + println!("All drop time: {:?}", start.elapsed()); + } + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + println!("All restore time: {:?}", start.elapsed()); + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!("{name} read time after restore: {:?}", start.elapsed()); + } + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!("{name} read time after read: {:?}", start.elapsed()); + } + #[cfg(feature = "stats")] + println!("All stats: {:#?}", db.statistics()); + + let start = Instant::now(); + db.full_compact()?; + println!("All compact time: {:?}", start.elapsed()); + + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!("{name} read time after compact: {:?}", start.elapsed()); + } + + let start = Instant::now(); + drop(db); + println!("All drop time after compact: {:?}", start.elapsed()); + } + + { + let start = Instant::now(); + let db = TurboPersistence::open(path.to_path_buf())?; + println!("All restore time after compact: {:?}", start.elapsed()); + + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!( + "{name} read time after compact + restore: {:?}", + start.elapsed() + ); + } + for (name, _, read) in test_cases.iter() { + let start = Instant::now(); + read(&db)?; + println!( + "{name} read time after compact + restore + read: {:?}", + start.elapsed() + ); + } + + #[cfg(feature = "stats")] + println!("All stats (compacted): {:#?}", db.statistics()); + + let start = Instant::now(); + drop(db); + println!( + "All drop time after compact + restore: {:?}", + start.elapsed() + ); + } + } + Ok(()) +} diff --git a/turbopack/crates/turbo-persistence/src/write_batch.rs b/turbopack/crates/turbo-persistence/src/write_batch.rs new file mode 100644 index 0000000000000..97490c73e41e5 --- /dev/null +++ b/turbopack/crates/turbo-persistence/src/write_batch.rs @@ -0,0 +1,296 @@ +use std::{ + borrow::Cow, + cell::UnsafeCell, + fs::File, + mem::{replace, swap}, + path::PathBuf, + sync::atomic::{AtomicU32, Ordering}, 
+};
+
+use anyhow::{Context, Result};
+use byteorder::{WriteBytesExt, BE};
+use lzzzz::lz4::{self, ACC_LEVEL_DEFAULT};
+use parking_lot::Mutex;
+use rayon::{
+    iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator},
+    scope, Scope,
+};
+use thread_local::ThreadLocal;
+
+use crate::{
+    collector::Collector, collector_entry::CollectorEntry, constants::MAX_MEDIUM_VALUE_SIZE,
+    key::StoreKey, static_sorted_file_builder::StaticSortedFileBuilder,
+};
+
+/// The thread local state of a `WriteBatch`.
+struct ThreadLocalState<K: StoreKey, const FAMILIES: usize> {
+    /// The collectors for each family.
+    collectors: [Option<Collector<K>>; FAMILIES],
+    /// The list of new SST files that have been created.
+    new_sst_files: Vec<(u32, File)>,
+}
+
+/// A write batch.
+pub struct WriteBatch<K: StoreKey + Send, const FAMILIES: usize> {
+    /// The database path
+    path: PathBuf,
+    /// The current sequence number counter. Increased for every new SST file or blob file.
+    current_sequence_number: AtomicU32,
+    /// The thread local state.
+    thread_locals: ThreadLocal<UnsafeCell<ThreadLocalState<K, FAMILIES>>>,
+    /// Collectors that are currently unused, but have memory preallocated.
+    idle_collectors: Mutex<Vec<Collector<K>>>,
+}
+
+impl<K: StoreKey + Send + Sync, const FAMILIES: usize> WriteBatch<K, FAMILIES> {
+    /// Creates a new write batch for a database.
+    pub(crate) fn new(path: PathBuf, current: u32) -> Self {
+        assert!(FAMILIES <= u32::MAX as usize);
+        Self {
+            path,
+            current_sequence_number: AtomicU32::new(current),
+            thread_locals: ThreadLocal::new(),
+            idle_collectors: Mutex::new(Vec::new()),
+        }
+    }
+
+    /// Resets the write batch to a new sequence number. This is called when the WriteBatch is
+    /// reused.
+    pub(crate) fn reset(&mut self, current: u32) {
+        self.current_sequence_number
+            .store(current, Ordering::SeqCst);
+    }
+
+    /// Returns the collector for a family for the current thread.
+    fn collector_mut(&self, family: usize) -> Result<&mut Collector<K>> {
+        debug_assert!(family < FAMILIES);
+        let cell = self.thread_locals.get_or(|| {
+            UnsafeCell::new(ThreadLocalState {
+                collectors: [const { None }; FAMILIES],
+                new_sst_files: Vec::new(),
+            })
+        });
+        // Safety: We know that the cell is only accessed from the current thread.
+        let state = unsafe { &mut *cell.get() };
+        let collector = state.collectors[family].get_or_insert_with(|| {
+            self.idle_collectors
+                .lock()
+                .pop()
+                .unwrap_or_else(|| Collector::new())
+        });
+        if collector.is_full() {
+            let sst = self.create_sst_file(family, collector.sorted())?;
+            collector.clear();
+            state.new_sst_files.push(sst);
+        }
+        Ok(collector)
+    }
+
+    /// Puts a key-value pair into the write batch.
+    pub fn put(&self, family: usize, key: K, value: Cow<'_, [u8]>) -> Result<()> {
+        let collector = self.collector_mut(family)?;
+        if value.len() <= MAX_MEDIUM_VALUE_SIZE {
+            collector.put(key, value.into_owned());
+        } else {
+            let blob = self.create_blob(&value)?;
+            collector.put_blob(key, blob);
+        }
+        Ok(())
+    }
+
+    /// Puts a delete operation into the write batch.
+    pub fn delete(&self, family: usize, key: K) -> Result<()> {
+        let collector = self.collector_mut(family)?;
+        collector.delete(key);
+        Ok(())
+    }
+
+    /// Finishes the write batch by returning the new sequence number and the new SST files. This
+    /// writes all outstanding thread local data to disk.
+ pub(crate) fn finish(&mut self) -> Result<(u32, Vec<(u32, File)>)> { + let mut new_sst_files = Vec::new(); + let mut all_collectors = [(); FAMILIES].map(|_| Vec::new()); + for cell in self.thread_locals.iter_mut() { + let state = cell.get_mut(); + new_sst_files.append(&mut state.new_sst_files); + for (family, global_collector) in all_collectors.iter_mut().enumerate() { + if let Some(collector) = state.collectors[family].take() { + if !collector.is_empty() { + global_collector.push(Some(collector)); + } + } + } + } + let shared_new_sst_files = Mutex::new(&mut new_sst_files); + let shared_error = Mutex::new(Ok(())); + scope(|scope| { + fn handle_done_collector<'scope, K: StoreKey + Send + Sync, const FAMILIES: usize>( + this: &'scope WriteBatch, + scope: &Scope<'scope>, + family: usize, + mut collector: Collector, + shared_new_sst_files: &'scope Mutex<&mut Vec<(u32, File)>>, + shared_error: &'scope Mutex>, + ) { + scope.spawn( + move |_| match this.create_sst_file(family, collector.sorted()) { + Ok(sst) => { + collector.clear(); + this.idle_collectors.lock().push(collector); + shared_new_sst_files.lock().push(sst); + } + Err(err) => { + *shared_error.lock() = Err(err); + } + }, + ); + } + + all_collectors + .into_par_iter() + .enumerate() + .for_each(|(family, collectors)| { + let final_collector = collectors.into_par_iter().reduce( + || None, + |a, b| match (a, b) { + (Some(mut a), Some(mut b)) => { + if a.len() < b.len() { + swap(&mut a, &mut b); + } + for entry in b.drain() { + if a.is_full() { + let full_collector = replace( + &mut a, + self.idle_collectors + .lock() + .pop() + .unwrap_or_else(|| Collector::new()), + ); + handle_done_collector( + self, + scope, + family, + full_collector, + &shared_new_sst_files, + &shared_error, + ); + } + a.add_entry(entry); + } + self.idle_collectors.lock().push(b); + Some(a) + } + (Some(a), None) => Some(a), + (None, Some(b)) => Some(b), + (None, None) => None, + }, + ); + if let Some(collector) = final_collector { + handle_done_collector( + self, + scope, + family, + collector, + &shared_new_sst_files, + &shared_error, + ); + } + }); + }); + shared_error.into_inner()?; + let seq = self.current_sequence_number.load(Ordering::SeqCst); + new_sst_files.sort_by_key(|(seq, _)| *seq); + Ok((seq, new_sst_files)) + } + + /// Creates a new blob file with the given value. + fn create_blob(&self, value: &[u8]) -> Result { + let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1; + let mut buffer = Vec::new(); + buffer.write_u32::(value.len() as u32)?; + lz4::compress_to_vec(value, &mut buffer, ACC_LEVEL_DEFAULT) + .context("Compression of value for blob file failed")?; + + let file = self.path.join(format!("{:08}.blob", seq)); + std::fs::write(file, &buffer).context("Unable to write blob file")?; + Ok(seq) + } + + /// Creates a new SST file with the given collector data. 
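+    // SST files are named by their sequence number as `{seq:08}.sst`, mirroring
+    // the `{seq:08}.blob` naming used by `create_blob` above.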
+ fn create_sst_file( + &self, + family: usize, + collector_data: (&[CollectorEntry], usize, usize), + ) -> Result<(u32, File)> { + let (entries, total_key_size, total_value_size) = collector_data; + let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1; + + let builder = + StaticSortedFileBuilder::new(family as u32, entries, total_key_size, total_value_size)?; + + let path = self.path.join(format!("{:08}.sst", seq)); + let file = builder + .write(&path) + .with_context(|| format!("Unable to write SST file {:08}.sst", seq))?; + + #[cfg(feature = "verify_sst_content")] + { + use core::panic; + + use crate::{ + collector_entry::CollectorEntryValue, + key::hash_key, + static_sorted_file::{AqmfCache, BlockCache, LookupResult, StaticSortedFile}, + }; + + file.sync_all()?; + let sst = StaticSortedFile::open(seq, path)?; + let cache1 = AqmfCache::with( + 10, + u64::MAX, + Default::default(), + Default::default(), + Default::default(), + ); + let cache2 = BlockCache::with( + 10, + u64::MAX, + Default::default(), + Default::default(), + Default::default(), + ); + let cache3 = BlockCache::with( + 10, + u64::MAX, + Default::default(), + Default::default(), + Default::default(), + ); + for entry in entries { + let mut key = Vec::with_capacity(entry.key.len()); + entry.key.write_to(&mut key); + let result = sst + .lookup(hash_key(&key), &key, &cache1, &cache2, &cache3) + .expect("key found"); + match result { + LookupResult::Deleted => {} + LookupResult::Small { value: val } => { + if let EntryValue::Small { value } | EntryValue::Medium { value } = + entry.value + { + assert_eq!(&*val, &*value); + } else { + panic!("Unexpected value"); + } + } + LookupResult::Blob { sequence_number } => {} + LookupResult::QuickFilterMiss => panic!("aqmf must include"), + LookupResult::RangeMiss => panic!("Index must cover"), + LookupResult::KeyMiss => panic!("All keys must exist"), + } + } + } + + Ok((seq, file)) + } +} diff --git a/turbopack/crates/turbo-tasks-backend/Cargo.toml b/turbopack/crates/turbo-tasks-backend/Cargo.toml index 1def3ef47f75b..96252b3c51869 100644 --- a/turbopack/crates/turbo-tasks-backend/Cargo.toml +++ b/turbopack/crates/turbo-tasks-backend/Cargo.toml @@ -16,6 +16,7 @@ workspace = true default = [] verify_serialization = [] trace_aggregation_update = [] +lmdb = ["dep:lmdb-rkv"] [dependencies] anyhow = { workspace = true } @@ -27,7 +28,7 @@ dashmap = { workspace = true, features = ["raw-api"]} either = { workspace = true } hashbrown = { workspace = true, features = ["raw"] } indexmap = { workspace = true } -lmdb-rkv = "0.14.0" +lmdb-rkv = { version = "0.14.0", optional = true } once_cell = { workspace = true } parking_lot = { workspace = true } pot = "3.0.0" @@ -41,6 +42,7 @@ tokio = { workspace = true } tokio-scoped = "0.2.0" tracing = { workspace = true } thread_local = { workspace = true } +turbo-persistence = { workspace = true } turbo-prehash = { workspace = true } turbo-rcstr = { workspace = true } turbo-tasks = { workspace = true } diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs index 1110be4bf3781..e6c3d88e59288 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs @@ -784,6 +784,12 @@ impl TurboTasksBackendInner { self.stopping_event.notify(usize::MAX); } + fn stop(&self) { + if let Err(err) = self.backing_storage.shutdown() { + println!("Shutting down failed: {}", err); + } + } + fn idle_start(&self) { 
self.idle_start_event.notify(usize::MAX); } @@ -1813,6 +1819,10 @@ impl Backend for TurboTasksBackend { self.0.stopping(); } + fn stop(&self, _turbo_tasks: &dyn TurboTasksBackendApi) { + self.0.stop(); + } + fn idle_start(&self, _turbo_tasks: &dyn TurboTasksBackendApi) { self.0.idle_start(); } diff --git a/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs b/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs index 1db2d7f2ada0e..257c13f585731 100644 --- a/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs +++ b/turbopack/crates/turbo-tasks-backend/src/backing_storage.rs @@ -51,4 +51,8 @@ pub trait BackingStorage: 'static + Send + Sync { task_id: TaskId, category: TaskDataCategory, ) -> Vec; + + fn shutdown(&self) -> Result<()> { + Ok(()) + } } diff --git a/turbopack/crates/turbo-tasks-backend/src/database/key_value_database.rs b/turbopack/crates/turbo-tasks-backend/src/database/key_value_database.rs index eb300c00624e4..f41f9db6cc0da 100644 --- a/turbopack/crates/turbo-tasks-backend/src/database/key_value_database.rs +++ b/turbopack/crates/turbo-tasks-backend/src/database/key_value_database.rs @@ -51,4 +51,8 @@ pub trait KeyValueDatabase { fn write_batch( &self, ) -> Result, Self::ConcurrentWriteBatch<'_>>>; + + fn shutdown(&self) -> Result<()> { + Ok(()) + } } diff --git a/turbopack/crates/turbo-tasks-backend/src/database/mod.rs b/turbopack/crates/turbo-tasks-backend/src/database/mod.rs index 9e23e919fa679..78ce1a99a6791 100644 --- a/turbopack/crates/turbo-tasks-backend/src/database/mod.rs +++ b/turbopack/crates/turbo-tasks-backend/src/database/mod.rs @@ -1,16 +1,15 @@ +#[cfg(feature = "lmdb")] mod by_key_space; pub mod db_versioning; +#[cfg(feature = "lmdb")] pub mod fresh_db_optimization; pub mod key_value_database; +#[cfg(feature = "lmdb")] pub mod lmdb; pub mod noop_kv; +#[cfg(feature = "lmdb")] pub mod read_transaction_cache; -mod startup_cache; +#[cfg(feature = "lmdb")] +pub mod startup_cache; +pub mod turbo; pub mod write_batch; - -pub use db_versioning::handle_db_versioning; -pub use fresh_db_optimization::{is_fresh, FreshDbOptimization}; -#[allow(unused_imports)] -pub use noop_kv::NoopKvDb; -pub use read_transaction_cache::ReadTransactionCache; -pub use startup_cache::StartupCacheLayer; diff --git a/turbopack/crates/turbo-tasks-backend/src/database/turbo.rs b/turbopack/crates/turbo-tasks-backend/src/database/turbo.rs new file mode 100644 index 0000000000000..3d1274689eb12 --- /dev/null +++ b/turbopack/crates/turbo-tasks-backend/src/database/turbo.rs @@ -0,0 +1,147 @@ +use std::{ + borrow::Cow, + path::PathBuf, + sync::Arc, + thread::{spawn, JoinHandle}, +}; + +use anyhow::Result; +use parking_lot::Mutex; +use turbo_persistence::{ArcSlice, TurboPersistence}; + +use crate::database::{ + key_value_database::{KeySpace, KeyValueDatabase}, + write_batch::{BaseWriteBatch, ConcurrentWriteBatch, WriteBatch}, +}; + +const COMPACT_MAX_COVERAGE: f32 = 20.0; +const COMPACT_MAX_MERGE_SEQUENCE: usize = 8; + +pub struct TurboKeyValueDatabase { + db: Arc, + compact_join_handle: Mutex>>>, +} + +impl TurboKeyValueDatabase { + pub fn new(path: PathBuf) -> Result { + let db = Arc::new(TurboPersistence::open(path.to_path_buf())?); + let mut this = Self { + db: db.clone(), + compact_join_handle: Mutex::new(None), + }; + // start compaction in background if the database is not empty + if !db.is_empty() { + let handle = + spawn(move || db.compact(COMPACT_MAX_COVERAGE, COMPACT_MAX_MERGE_SEQUENCE)); + this.compact_join_handle.get_mut().replace(handle); + } + Ok(this) + } +} + 
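+// Compaction lifecycle: a background compaction thread is spawned when the
+// database is opened non-empty (see `TurboKeyValueDatabase::new` above) and
+// again after every committed write batch (see `TurboWriteBatch::commit`
+// below). `write_batch()` and `shutdown()` join any in-flight compaction
+// first, so a write batch never runs concurrently with a compaction.
+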
+impl KeyValueDatabase for TurboKeyValueDatabase { + type ReadTransaction<'l> + = () + where + Self: 'l; + + fn lower_read_transaction<'l: 'i + 'r, 'i: 'r, 'r>( + tx: &'r Self::ReadTransaction<'l>, + ) -> &'r Self::ReadTransaction<'i> { + tx + } + + fn is_empty(&self) -> bool { + self.db.is_empty() + } + + fn begin_read_transaction(&self) -> Result> { + Ok(()) + } + + type ValueBuffer<'l> + = ArcSlice + where + Self: 'l; + + fn get<'l, 'db: 'l>( + &'l self, + _transaction: &'l Self::ReadTransaction<'db>, + key_space: KeySpace, + key: &[u8], + ) -> Result>> { + self.db.get(key_space as usize, &key) + } + + type ConcurrentWriteBatch<'l> + = TurboWriteBatch<'l> + where + Self: 'l; + + fn write_batch( + &self, + ) -> Result, Self::ConcurrentWriteBatch<'_>>> { + // Wait for the compaction to finish + if let Some(join_handle) = self.compact_join_handle.lock().take() { + join_handle.join().unwrap()?; + } + // Start a new write batch + Ok(WriteBatch::concurrent(TurboWriteBatch { + batch: self.db.write_batch()?, + db: &self.db, + compact_join_handle: &self.compact_join_handle, + })) + } + + fn shutdown(&self) -> Result<()> { + // Wait for the compaction to finish + if let Some(join_handle) = self.compact_join_handle.lock().take() { + join_handle.join().unwrap()?; + } + // Shutdown the database + self.db.shutdown() + } +} + +pub struct TurboWriteBatch<'a> { + batch: turbo_persistence::WriteBatch, 5>, + db: &'a Arc, + compact_join_handle: &'a Mutex>>>, +} + +impl<'a> BaseWriteBatch<'a> for TurboWriteBatch<'a> { + type ValueBuffer<'l> + = ArcSlice + where + Self: 'l, + 'a: 'l; + + fn get<'l>(&'l self, key_space: KeySpace, key: &[u8]) -> Result>> + where + 'a: 'l, + { + self.db.get(key_space as usize, &key) + } + + fn commit(self) -> Result<()> { + // Commit the write batch + self.db.commit_write_batch(self.batch)?; + + // Start a new compaction in the background + let db = self.db.clone(); + let handle = spawn(move || db.compact(COMPACT_MAX_COVERAGE, COMPACT_MAX_MERGE_SEQUENCE)); + self.compact_join_handle.lock().replace(handle); + + Ok(()) + } +} + +impl<'a> ConcurrentWriteBatch<'a> for TurboWriteBatch<'a> { + fn put(&self, key_space: KeySpace, key: Cow<[u8]>, value: Cow<[u8]>) -> Result<()> { + self.batch.put(key_space as usize, key.into_owned(), value) + } + + fn delete(&self, key_space: KeySpace, key: Cow<[u8]>) -> Result<()> { + self.batch.delete(key_space as usize, key.into_owned()) + } +} diff --git a/turbopack/crates/turbo-tasks-backend/src/kv_backing_storage.rs b/turbopack/crates/turbo-tasks-backend/src/kv_backing_storage.rs index 009fdf5e2dc11..50d7aedd83a4f 100644 --- a/turbopack/crates/turbo-tasks-backend/src/kv_backing_storage.rs +++ b/turbopack/crates/turbo-tasks-backend/src/kv_backing_storage.rs @@ -435,6 +435,10 @@ impl BackingStorage .inspect_err(|err| println!("Looking up data for {task_id} failed: {err:?}")) .unwrap_or_default() } + + fn shutdown(&self) -> Result<()> { + self.database.shutdown() + } } fn get_next_free_task_id<'a, S, C>( diff --git a/turbopack/crates/turbo-tasks-backend/src/lib.rs b/turbopack/crates/turbo-tasks-backend/src/lib.rs index 333cf6596ab4f..3af8287607746 100644 --- a/turbopack/crates/turbo-tasks-backend/src/lib.rs +++ b/turbopack/crates/turbo-tasks-backend/src/lib.rs @@ -18,32 +18,59 @@ pub use self::{ kv_backing_storage::KeyValueDatabaseBackingStorage, }; use crate::database::{ - handle_db_versioning, is_fresh, lmdb::LmbdKeyValueDatabase, FreshDbOptimization, NoopKvDb, - ReadTransactionCache, StartupCacheLayer, + db_versioning::handle_db_versioning, 
noop_kv::NoopKvDb, turbo::TurboKeyValueDatabase, }; +#[cfg(feature = "lmdb")] pub type LmdbBackingStorage = KeyValueDatabaseBackingStorage< - ReadTransactionCache>>, + ReadTransactionCache< + StartupCacheLayer>, + >, >; +#[cfg(feature = "lmdb")] pub fn lmdb_backing_storage(path: &Path) -> Result { + use crate::database::{ + fresh_db_optimization::{is_fresh, FreshDbOptimization}, + read_transaction_cache::ReadTransactionCache, + startup_cache::StartupCacheLayer, + }; + let path = handle_db_versioning(path)?; let fresh_db = is_fresh(&path); - let database = LmbdKeyValueDatabase::new(&path)?; + let database = crate::database::lmdb::LmbdKeyValueDatabase::new(&path)?; let database = FreshDbOptimization::new(database, fresh_db); let database = StartupCacheLayer::new(database, path.join("startup.cache"), fresh_db)?; let database = ReadTransactionCache::new(database); Ok(KeyValueDatabaseBackingStorage::new(database)) } +pub type TurboBackingStorage = KeyValueDatabaseBackingStorage; + +pub fn turbo_backing_storage(path: &Path) -> Result { + let path = handle_db_versioning(path)?; + let database = TurboKeyValueDatabase::new(path)?; + Ok(KeyValueDatabaseBackingStorage::new(database)) +} + pub type NoopBackingStorage = KeyValueDatabaseBackingStorage; pub fn noop_backing_storage() -> NoopBackingStorage { KeyValueDatabaseBackingStorage::new(NoopKvDb) } +#[cfg(feature = "lmdb")] pub type DefaultBackingStorage = LmdbBackingStorage; +#[cfg(feature = "lmdb")] pub fn default_backing_storage(path: &Path) -> Result { lmdb_backing_storage(path) } + +#[cfg(not(feature = "lmdb"))] +pub type DefaultBackingStorage = TurboBackingStorage; + +#[cfg(not(feature = "lmdb"))] +pub fn default_backing_storage(path: &Path) -> Result { + turbo_backing_storage(path) +} From c95ed85b329a975e4a0a0f5f7d2ce8de31d2121a Mon Sep 17 00:00:00 2001 From: vercel-release-bot Date: Mon, 2 Dec 2024 10:45:44 +0000 Subject: [PATCH 03/16] v15.0.4-canary.34 --- lerna.json | 2 +- packages/create-next-app/package.json | 2 +- packages/eslint-config-next/package.json | 4 ++-- packages/eslint-plugin-next/package.json | 2 +- packages/font/package.json | 2 +- packages/next-bundle-analyzer/package.json | 2 +- packages/next-codemod/package.json | 2 +- packages/next-env/package.json | 2 +- packages/next-mdx/package.json | 2 +- packages/next-plugin-storybook/package.json | 2 +- packages/next-polyfill-module/package.json | 2 +- packages/next-polyfill-nomodule/package.json | 2 +- packages/next-swc/package.json | 2 +- packages/next/package.json | 14 +++++++------- packages/react-refresh-utils/package.json | 2 +- packages/third-parties/package.json | 4 ++-- pnpm-lock.yaml | 16 ++++++++-------- 17 files changed, 32 insertions(+), 32 deletions(-) diff --git a/lerna.json b/lerna.json index 27876a22c069a..da567605812cb 100644 --- a/lerna.json +++ b/lerna.json @@ -16,5 +16,5 @@ "registry": "https://registry.npmjs.org/" } }, - "version": "15.0.4-canary.33" + "version": "15.0.4-canary.34" } diff --git a/packages/create-next-app/package.json b/packages/create-next-app/package.json index fab091511fd04..cbba3ea335c9f 100644 --- a/packages/create-next-app/package.json +++ b/packages/create-next-app/package.json @@ -1,6 +1,6 @@ { "name": "create-next-app", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "keywords": [ "react", "next", diff --git a/packages/eslint-config-next/package.json b/packages/eslint-config-next/package.json index 21c9bae7bbe24..e54cd245603e6 100644 --- a/packages/eslint-config-next/package.json +++ 
b/packages/eslint-config-next/package.json @@ -1,6 +1,6 @@ { "name": "eslint-config-next", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "ESLint configuration used by Next.js.", "main": "index.js", "license": "MIT", @@ -10,7 +10,7 @@ }, "homepage": "https://nextjs.org/docs/app/api-reference/config/eslint#eslint-config", "dependencies": { - "@next/eslint-plugin-next": "15.0.4-canary.33", + "@next/eslint-plugin-next": "15.0.4-canary.34", "@rushstack/eslint-patch": "^1.10.3", "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", diff --git a/packages/eslint-plugin-next/package.json b/packages/eslint-plugin-next/package.json index 7a0661e73efe5..c1a1604dcef01 100644 --- a/packages/eslint-plugin-next/package.json +++ b/packages/eslint-plugin-next/package.json @@ -1,6 +1,6 @@ { "name": "@next/eslint-plugin-next", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "ESLint plugin for Next.js.", "main": "dist/index.js", "license": "MIT", diff --git a/packages/font/package.json b/packages/font/package.json index d39b1af4bfd4c..e69b563f5883c 100644 --- a/packages/font/package.json +++ b/packages/font/package.json @@ -1,7 +1,7 @@ { "name": "@next/font", "private": true, - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "repository": { "url": "vercel/next.js", "directory": "packages/font" diff --git a/packages/next-bundle-analyzer/package.json b/packages/next-bundle-analyzer/package.json index 869f32c0f5c75..d4b7dedb6786d 100644 --- a/packages/next-bundle-analyzer/package.json +++ b/packages/next-bundle-analyzer/package.json @@ -1,6 +1,6 @@ { "name": "@next/bundle-analyzer", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "main": "index.js", "types": "index.d.ts", "license": "MIT", diff --git a/packages/next-codemod/package.json b/packages/next-codemod/package.json index 6c32e8d0e366a..d678f82269e94 100644 --- a/packages/next-codemod/package.json +++ b/packages/next-codemod/package.json @@ -1,6 +1,6 @@ { "name": "@next/codemod", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "license": "MIT", "repository": { "type": "git", diff --git a/packages/next-env/package.json b/packages/next-env/package.json index eb06a11fb32a4..37075900404c6 100644 --- a/packages/next-env/package.json +++ b/packages/next-env/package.json @@ -1,6 +1,6 @@ { "name": "@next/env", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "keywords": [ "react", "next", diff --git a/packages/next-mdx/package.json b/packages/next-mdx/package.json index 83e3be60dea93..f149e67f85b6b 100644 --- a/packages/next-mdx/package.json +++ b/packages/next-mdx/package.json @@ -1,6 +1,6 @@ { "name": "@next/mdx", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "main": "index.js", "license": "MIT", "repository": { diff --git a/packages/next-plugin-storybook/package.json b/packages/next-plugin-storybook/package.json index d99d7f494c6ab..4ddeec70cb2a3 100644 --- a/packages/next-plugin-storybook/package.json +++ b/packages/next-plugin-storybook/package.json @@ -1,6 +1,6 @@ { "name": "@next/plugin-storybook", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "repository": { "url": "vercel/next.js", "directory": "packages/next-plugin-storybook" diff --git a/packages/next-polyfill-module/package.json b/packages/next-polyfill-module/package.json index 87bbda1190a49..687f5deb81239 100644 --- 
a/packages/next-polyfill-module/package.json +++ b/packages/next-polyfill-module/package.json @@ -1,6 +1,6 @@ { "name": "@next/polyfill-module", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "A standard library polyfill for ES Modules supporting browsers (Edge 16+, Firefox 60+, Chrome 61+, Safari 10.1+)", "main": "dist/polyfill-module.js", "license": "MIT", diff --git a/packages/next-polyfill-nomodule/package.json b/packages/next-polyfill-nomodule/package.json index 3c80d33040982..eaf35ab089c38 100644 --- a/packages/next-polyfill-nomodule/package.json +++ b/packages/next-polyfill-nomodule/package.json @@ -1,6 +1,6 @@ { "name": "@next/polyfill-nomodule", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "A polyfill for non-dead, nomodule browsers.", "main": "dist/polyfill-nomodule.js", "license": "MIT", diff --git a/packages/next-swc/package.json b/packages/next-swc/package.json index 1d9c12c9ec2a8..9423495f0ea56 100644 --- a/packages/next-swc/package.json +++ b/packages/next-swc/package.json @@ -1,6 +1,6 @@ { "name": "@next/swc", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "private": true, "scripts": { "clean": "node ../../scripts/rm.mjs native", diff --git a/packages/next/package.json b/packages/next/package.json index 31774b65e1199..95ae5b7488c59 100644 --- a/packages/next/package.json +++ b/packages/next/package.json @@ -1,6 +1,6 @@ { "name": "next", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "The React Framework", "main": "./dist/server/next.js", "license": "MIT", @@ -97,7 +97,7 @@ ] }, "dependencies": { - "@next/env": "15.0.4-canary.33", + "@next/env": "15.0.4-canary.34", "@swc/counter": "0.1.3", "@swc/helpers": "0.5.13", "busboy": "1.6.0", @@ -161,11 +161,11 @@ "@jest/types": "29.5.0", "@mswjs/interceptors": "0.23.0", "@napi-rs/triples": "1.2.0", - "@next/font": "15.0.4-canary.33", - "@next/polyfill-module": "15.0.4-canary.33", - "@next/polyfill-nomodule": "15.0.4-canary.33", - "@next/react-refresh-utils": "15.0.4-canary.33", - "@next/swc": "15.0.4-canary.33", + "@next/font": "15.0.4-canary.34", + "@next/polyfill-module": "15.0.4-canary.34", + "@next/polyfill-nomodule": "15.0.4-canary.34", + "@next/react-refresh-utils": "15.0.4-canary.34", + "@next/swc": "15.0.4-canary.34", "@opentelemetry/api": "1.6.0", "@playwright/test": "1.41.2", "@swc/core": "1.9.2-nightly-20241111.1", diff --git a/packages/react-refresh-utils/package.json b/packages/react-refresh-utils/package.json index 4d84603dd9d92..5a5e105b92cb4 100644 --- a/packages/react-refresh-utils/package.json +++ b/packages/react-refresh-utils/package.json @@ -1,6 +1,6 @@ { "name": "@next/react-refresh-utils", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "description": "An experimental package providing utilities for React Refresh.", "repository": { "url": "vercel/next.js", diff --git a/packages/third-parties/package.json b/packages/third-parties/package.json index 43ecfec69a1bf..8359ee5e20b3f 100644 --- a/packages/third-parties/package.json +++ b/packages/third-parties/package.json @@ -1,6 +1,6 @@ { "name": "@next/third-parties", - "version": "15.0.4-canary.33", + "version": "15.0.4-canary.34", "repository": { "url": "vercel/next.js", "directory": "packages/third-parties" @@ -26,7 +26,7 @@ "third-party-capital": "1.0.20" }, "devDependencies": { - "next": "15.0.4-canary.33", + "next": "15.0.4-canary.34", "outdent": "0.8.0", "prettier": "2.5.1", "typescript": "5.6.3" diff --git 
a/pnpm-lock.yaml b/pnpm-lock.yaml index f51edef6c2c9c..38f58b7ac1e60 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -795,7 +795,7 @@ importers: packages/eslint-config-next: dependencies: '@next/eslint-plugin-next': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../eslint-plugin-next '@rushstack/eslint-patch': specifier: ^1.10.3 @@ -859,7 +859,7 @@ importers: packages/next: dependencies: '@next/env': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../next-env '@swc/counter': specifier: 0.1.3 @@ -987,19 +987,19 @@ importers: specifier: 1.2.0 version: 1.2.0 '@next/font': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../font '@next/polyfill-module': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../next-polyfill-module '@next/polyfill-nomodule': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../next-polyfill-nomodule '@next/react-refresh-utils': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../react-refresh-utils '@next/swc': - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../next-swc '@opentelemetry/api': specifier: 1.6.0 @@ -1633,7 +1633,7 @@ importers: version: 1.0.20 devDependencies: next: - specifier: 15.0.4-canary.33 + specifier: 15.0.4-canary.34 version: link:../next outdent: specifier: 0.8.0 From 7c24763befc58b6803cdb279d7d96429d0b1efcf Mon Sep 17 00:00:00 2001 From: Tobias Koppers Date: Mon, 2 Dec 2024 16:02:49 +0100 Subject: [PATCH 04/16] [Turbopack] Persistent Caching fixups (#73423) ### What? * sync blob files on commit * This could cause a database corruption * add print_stats feature to print stats on shutdown * use memory map to read blob files * shared index and key cache --- turbopack/crates/turbo-persistence/Cargo.toml | 1 + .../crates/turbo-persistence/src/constants.rs | 6 +- turbopack/crates/turbo-persistence/src/db.rs | 52 +++++++++-------- .../src/static_sorted_file.rs | 5 +- .../turbo-persistence/src/write_batch.rs | 56 +++++++++++++++---- 5 files changed, 76 insertions(+), 44 deletions(-) diff --git a/turbopack/crates/turbo-persistence/Cargo.toml b/turbopack/crates/turbo-persistence/Cargo.toml index 8783c62d456fb..6e4b023f9cbd2 100644 --- a/turbopack/crates/turbo-persistence/Cargo.toml +++ b/turbopack/crates/turbo-persistence/Cargo.toml @@ -8,6 +8,7 @@ license = "MIT" verify_sst_content = [] strict_checks = [] stats = ["quick_cache/stats"] +print_stats = ["stats"] [dependencies] anyhow = { workspace = true } diff --git a/turbopack/crates/turbo-persistence/src/constants.rs b/turbopack/crates/turbo-persistence/src/constants.rs index af103a4bc95c1..de67de1b8e084 100644 --- a/turbopack/crates/turbo-persistence/src/constants.rs +++ b/turbopack/crates/turbo-persistence/src/constants.rs @@ -21,12 +21,8 @@ pub const DATA_THRESHOLD_PER_COMPACTED_FILE: usize = 256 * 1024 * 1024; pub const AQMF_CACHE_SIZE: u64 = 300 * 1024 * 1024; pub const AQMF_AVG_SIZE: usize = 37399; -/// Maximum RAM bytes for index block cache -pub const INDEX_BLOCK_CACHE_SIZE: u64 = 100 * 1024 * 1024; -pub const INDEX_BLOCK_AVG_SIZE: usize = 152000; - /// Maximum RAM bytes for key block cache -pub const KEY_BLOCK_CACHE_SIZE: u64 = 300 * 1024 * 1024; +pub const KEY_BLOCK_CACHE_SIZE: u64 = 400 * 1024 * 1024; pub const KEY_BLOCK_AVG_SIZE: usize = 16 * 1024; /// Maximum RAM bytes for value block cache diff --git a/turbopack/crates/turbo-persistence/src/db.rs b/turbopack/crates/turbo-persistence/src/db.rs index 
2366e5c7f2b65..ba3a0b1cbc972 100644 --- a/turbopack/crates/turbo-persistence/src/db.rs +++ b/turbopack/crates/turbo-persistence/src/db.rs @@ -14,6 +14,7 @@ use std::{ use anyhow::{bail, Context, Result}; use byteorder::{ReadBytesExt, WriteBytesExt, BE}; use lzzzz::lz4::decompress; +use memmap2::{Advice, Mmap}; use parking_lot::{Mutex, RwLock}; use rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}; @@ -23,9 +24,9 @@ use crate::{ get_compaction_jobs, total_coverage, CompactConfig, Compactable, CompactionJobs, }, constants::{ - AQMF_AVG_SIZE, AQMF_CACHE_SIZE, DATA_THRESHOLD_PER_COMPACTED_FILE, INDEX_BLOCK_AVG_SIZE, - INDEX_BLOCK_CACHE_SIZE, KEY_BLOCK_AVG_SIZE, KEY_BLOCK_CACHE_SIZE, - MAX_ENTRIES_PER_COMPACTED_FILE, VALUE_BLOCK_AVG_SIZE, VALUE_BLOCK_CACHE_SIZE, + AQMF_AVG_SIZE, AQMF_CACHE_SIZE, DATA_THRESHOLD_PER_COMPACTED_FILE, KEY_BLOCK_AVG_SIZE, + KEY_BLOCK_CACHE_SIZE, MAX_ENTRIES_PER_COMPACTED_FILE, VALUE_BLOCK_AVG_SIZE, + VALUE_BLOCK_CACHE_SIZE, }, key::{hash_key, StoreKey}, lookup_entry::LookupEntry, @@ -34,7 +35,7 @@ use crate::{ AqmfCache, BlockCache, LookupResult, StaticSortedFile, StaticSortedFileRange, }, static_sorted_file_builder::StaticSortedFileBuilder, - write_batch::WriteBatch, + write_batch::{FinishResult, WriteBatch}, QueryKey, }; @@ -77,7 +78,6 @@ impl CacheStatistics { #[derive(Debug)] pub struct Statistics { pub sst_files: usize, - pub index_block_cache: CacheStatistics, pub key_block_cache: CacheStatistics, pub value_block_cache: CacheStatistics, pub aqmf_cache: CacheStatistics, @@ -115,8 +115,6 @@ pub struct TurboPersistence { active_write_operation: AtomicBool, /// A cache for deserialized AQMF filters. aqmf_cache: AqmfCache, - /// A cache for decompressed index blocks. - index_block_cache: BlockCache, /// A cache for decompressed key blocks. key_block_cache: BlockCache, /// A cache for decompressed value blocks. @@ -155,13 +153,6 @@ impl TurboPersistence { Default::default(), Default::default(), ), - index_block_cache: BlockCache::with( - INDEX_BLOCK_CACHE_SIZE as usize / INDEX_BLOCK_AVG_SIZE, - INDEX_BLOCK_CACHE_SIZE, - Default::default(), - Default::default(), - Default::default(), - ), key_block_cache: BlockCache::with( KEY_BLOCK_CACHE_SIZE as usize / KEY_BLOCK_AVG_SIZE, KEY_BLOCK_CACHE_SIZE, @@ -338,9 +329,16 @@ impl TurboPersistence { /// Reads and decompresses a blob file. This is not backed by any cache. fn read_blob(&self, seq: u32) -> Result> { let path = self.path.join(format!("{:08}.blob", seq)); - let compressed = - fs::read(path).with_context(|| format!("Unable to read blob file {:08}.blob", seq))?; - let mut compressed = &compressed[..]; + let mmap = unsafe { Mmap::map(&File::open(&path)?)? }; + #[cfg(unix)] + mmap.advise(Advice::Sequential)?; + #[cfg(unix)] + mmap.advise(Advice::WillNeed)?; + #[cfg(target_os = "linux")] + mmap.advise(Advice::DontFork)?; + #[cfg(target_os = "linux")] + mmap.advise(Advice::Unmergeable)?; + let mut compressed = &mmap[..]; let uncompressed_length = compressed.read_u32::()? 
as usize; let buffer = Arc::new_zeroed_slice(uncompressed_length); @@ -391,8 +389,12 @@ impl TurboPersistence { &self, mut write_batch: WriteBatch, ) -> Result<()> { - let (seq, new_sst_files) = write_batch.finish()?; - self.commit(new_sst_files, vec![], seq)?; + let FinishResult { + sequence_number, + new_sst_files, + new_blob_files, + } = write_batch.finish()?; + self.commit(new_sst_files, new_blob_files, vec![], sequence_number)?; self.active_write_operation.store(false, Ordering::Release); self.idle_write_batch.lock().replace(( TypeId::of::>(), @@ -406,6 +408,7 @@ impl TurboPersistence { fn commit( &self, new_sst_files: Vec<(u32, File)>, + new_blob_files: Vec, mut indicies_to_delete: Vec, mut seq: u32, ) -> Result<(), anyhow::Error> { @@ -417,6 +420,10 @@ impl TurboPersistence { }) .collect::>>()?; + for file in new_blob_files { + file.sync_all()?; + } + if !indicies_to_delete.is_empty() { seq += 1; } @@ -505,6 +512,7 @@ impl TurboPersistence { self.commit( new_sst_files, + Vec::new(), indicies_to_delete, *sequence_number.get_mut(), )?; @@ -780,7 +788,6 @@ impl TurboPersistence { hash, key, &self.aqmf_cache, - &self.index_block_cache, &self.key_block_cache, &self.value_block_cache, )? { @@ -825,7 +832,6 @@ impl TurboPersistence { let inner = self.inner.read(); Statistics { sst_files: inner.static_sorted_files.len(), - index_block_cache: CacheStatistics::new(&self.index_block_cache), key_block_cache: CacheStatistics::new(&self.key_block_cache), value_block_cache: CacheStatistics::new(&self.value_block_cache), aqmf_cache: CacheStatistics::new(&self.aqmf_cache), @@ -839,9 +845,9 @@ impl TurboPersistence { } } - /// Shuts down the database. This will print statistics if the `stats` feature is enabled. + /// Shuts down the database. This will print statistics if the `print_stats` feature is enabled. pub fn shutdown(&self) -> Result<()> { - #[cfg(feature = "stats")] + #[cfg(feature = "print_stats")] println!("{:#?}", self.statistics()); Ok(()) } diff --git a/turbopack/crates/turbo-persistence/src/static_sorted_file.rs b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs index 1de537c69ee3d..1d31a292c0091 100644 --- a/turbopack/crates/turbo-persistence/src/static_sorted_file.rs +++ b/turbopack/crates/turbo-persistence/src/static_sorted_file.rs @@ -236,7 +236,6 @@ impl StaticSortedFile { key_hash: u64, key: &K, aqmf_cache: &AqmfCache, - index_block_cache: &BlockCache, key_block_cache: &BlockCache, value_block_cache: &BlockCache, ) -> Result { @@ -270,10 +269,8 @@ impl StaticSortedFile { } } let mut current_block = header.block_count - 1; - let mut cache = index_block_cache; loop { - let block = self.get_key_block(header, current_block, cache)?; - cache = key_block_cache; + let block = self.get_key_block(header, current_block, key_block_cache)?; let mut block = &block[..]; let block_type = block.read_u8()?; match block_type { diff --git a/turbopack/crates/turbo-persistence/src/write_batch.rs b/turbopack/crates/turbo-persistence/src/write_batch.rs index 97490c73e41e5..19bb575ea0528 100644 --- a/turbopack/crates/turbo-persistence/src/write_batch.rs +++ b/turbopack/crates/turbo-persistence/src/write_batch.rs @@ -2,6 +2,7 @@ use std::{ borrow::Cow, cell::UnsafeCell, fs::File, + io::Write, mem::{replace, swap}, path::PathBuf, sync::atomic::{AtomicU32, Ordering}, @@ -28,6 +29,15 @@ struct ThreadLocalState { collectors: [Option>; FAMILIES], /// The list of new SST files that have been created. new_sst_files: Vec<(u32, File)>, + /// The list of new blob files that have been created. 
+ new_blob_files: Vec, +} + +/// The result of a `WriteBatch::finish` operation. +pub(crate) struct FinishResult { + pub(crate) sequence_number: u32, + pub(crate) new_sst_files: Vec<(u32, File)>, + pub(crate) new_blob_files: Vec, } /// A write batch. @@ -61,17 +71,27 @@ impl WriteBatch { .store(current, Ordering::SeqCst); } - /// Returns the collector for a family for the current thread. - fn collector_mut(&self, family: usize) -> Result<&mut Collector> { - debug_assert!(family < FAMILIES); + /// Returns the thread local state for the current thread. + #[allow(clippy::mut_from_ref)] + fn thread_local_state(&self) -> &mut ThreadLocalState { let cell = self.thread_locals.get_or(|| { UnsafeCell::new(ThreadLocalState { collectors: [const { None }; FAMILIES], new_sst_files: Vec::new(), + new_blob_files: Vec::new(), }) }); // Safety: We know that the cell is only accessed from the current thread. - let state = unsafe { &mut *cell.get() }; + unsafe { &mut *cell.get() } + } + + /// Returns the collector for a family for the current thread. + fn collector_mut<'l>( + &self, + state: &'l mut ThreadLocalState, + family: usize, + ) -> Result<&'l mut Collector> { + debug_assert!(family < FAMILIES); let collector = state.collectors[family].get_or_insert_with(|| { self.idle_collectors .lock() @@ -88,31 +108,36 @@ impl WriteBatch { /// Puts a key-value pair into the write batch. pub fn put(&self, family: usize, key: K, value: Cow<'_, [u8]>) -> Result<()> { - let collector = self.collector_mut(family)?; + let state = self.thread_local_state(); + let collector = self.collector_mut(state, family)?; if value.len() <= MAX_MEDIUM_VALUE_SIZE { collector.put(key, value.into_owned()); } else { - let blob = self.create_blob(&value)?; + let (blob, file) = self.create_blob(&value)?; collector.put_blob(key, blob); + state.new_blob_files.push(file); } Ok(()) } /// Puts a delete operation into the write batch. pub fn delete(&self, family: usize, key: K) -> Result<()> { - let collector = self.collector_mut(family)?; + let state = self.thread_local_state(); + let collector = self.collector_mut(state, family)?; collector.delete(key); Ok(()) } /// Finishes the write batch by returning the new sequence number and the new SST files. This /// writes all outstanding thread local data to disk. - pub(crate) fn finish(&mut self) -> Result<(u32, Vec<(u32, File)>)> { + pub(crate) fn finish(&mut self) -> Result { let mut new_sst_files = Vec::new(); + let mut new_blob_files = Vec::new(); let mut all_collectors = [(); FAMILIES].map(|_| Vec::new()); for cell in self.thread_locals.iter_mut() { let state = cell.get_mut(); new_sst_files.append(&mut state.new_sst_files); + new_blob_files.append(&mut state.new_blob_files); for (family, global_collector) in all_collectors.iter_mut().enumerate() { if let Some(collector) = state.collectors[family].take() { if !collector.is_empty() { @@ -200,11 +225,15 @@ impl WriteBatch { shared_error.into_inner()?; let seq = self.current_sequence_number.load(Ordering::SeqCst); new_sst_files.sort_by_key(|(seq, _)| *seq); - Ok((seq, new_sst_files)) + Ok(FinishResult { + sequence_number: seq, + new_sst_files, + new_blob_files, + }) } /// Creates a new blob file with the given value. 
- fn create_blob(&self, value: &[u8]) -> Result { + fn create_blob(&self, value: &[u8]) -> Result<(u32, File)> { let seq = self.current_sequence_number.fetch_add(1, Ordering::SeqCst) + 1; let mut buffer = Vec::new(); buffer.write_u32::(value.len() as u32)?; @@ -212,8 +241,11 @@ impl WriteBatch { .context("Compression of value for blob file failed")?; let file = self.path.join(format!("{:08}.blob", seq)); - std::fs::write(file, &buffer).context("Unable to write blob file")?; - Ok(seq) + let mut file = File::create(&file).context("Unable to create blob file")?; + file.write_all(&buffer) + .context("Unable to write blob file")?; + file.flush().context("Unable to flush blob file")?; + Ok((seq, file)) } /// Creates a new SST file with the given collector data. From 953a8c9f1dd533c681bf684d97b2f248c93cc736 Mon Sep 17 00:00:00 2001 From: Lee Robinson Date: Mon, 2 Dec 2024 11:58:21 -0600 Subject: [PATCH 05/16] docs: Fix codeblock switcher (#73436) https://nextjs.org/docs/app/api-reference/components/link#prefetching-links-in-middleware --- docs/01-app/03-api-reference/02-components/link.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/01-app/03-api-reference/02-components/link.mdx b/docs/01-app/03-api-reference/02-components/link.mdx index 762d8ccd95971..f46ef387576f3 100644 --- a/docs/01-app/03-api-reference/02-components/link.mdx +++ b/docs/01-app/03-api-reference/02-components/link.mdx @@ -1092,7 +1092,7 @@ It's common to use [Middleware](/docs/app/building-your-application/routing/midd For example, if you want to serve a `/dashboard` route that has authenticated and visitor views, you can add the following in your Middleware to redirect the user to the correct page: -```ts filename="middleware.ts" +```ts filename="middleware.ts" switcher import { NextResponse } from 'next/server' export function middleware(request: Request) { @@ -1107,7 +1107,7 @@ export function middleware(request: Request) { } ``` -```js filename="middleware.js" +```js filename="middleware.js" switcher import { NextResponse } from 'next/server' export function middleware(request) { From 5405f66fc78d02cc30afd0284630d91f676c3f38 Mon Sep 17 00:00:00 2001 From: Janka Uryga Date: Mon, 2 Dec 2024 19:24:59 +0100 Subject: [PATCH 06/16] remove usage of "@vercel/request-context" (#71383) Removes usage of `@vercel/request-context`, because now we have `@next/request-context` and can get `waitUntil` from there instead. 
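For reference, the remaining lookup path reduces to roughly the following (a simplified sketch; the real implementation is in `builtin-request-context.ts`, shown in the diff below):

```ts
// The platform exposes the request context on globalThis under a
// well-known symbol; Next.js reads it and calls `.get()` to obtain the
// per-request value (which can provide `waitUntil`).
const NEXT_REQUEST_CONTEXT_SYMBOL = Symbol.for('@next/request-context')

function getBuiltinRequestContext() {
  const ctx = (globalThis as Record<symbol, any>)[NEXT_REQUEST_CONTEXT_SYMBOL]
  return ctx?.get()
}
```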
Dependent on builder changes from https://github.com/vercel/vercel/pull/12286, which were [released in `vercel@39.1.2` / `@vercel/next@4.4.0`](https://github.com/vercel/vercel/pull/12648) ### Testing We can verify that, when using an older builder (which doesn't provide `@next/request-context`), the deploy tests for `after` fail, because they can't access `waitUntil`: ``` # these will fail, except some edge runtime tests # because they can use FetchEventResult.waitUntil as a fallback VERCEL_CLI_VERSION='vercel@39.1.1' \ NEXT_TEST_VERSION='https://vercel-packages.vercel.app/next/commits/b1b97b8bbaa2f9d400de42957869d138a54da304/next' \ pnpm test-deploy test/e2e/app-dir/next-after-app-deploy ``` To see the errors, need to inspect the logs and look for this error message: https://github.com/vercel/next.js/blob/d1e554be4c0bac8bf28c84a05065c230cb6f9d80/packages/next/src/server/after/after-context.ts#L145 But using `39.1.2`, they will pass, because `@next/request-context` is present: ``` # these will pass VERCEL_CLI_VERSION='vercel@39.1.2' \ NEXT_TEST_VERSION='https://vercel-packages.vercel.app/next/commits/b1b97b8bbaa2f9d400de42957869d138a54da304/next' \ pnpm test-deploy test/e2e/app-dir/next-after-app-deploy ``` --- .../next/src/server/after/builtin-request-context.ts | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/packages/next/src/server/after/builtin-request-context.ts b/packages/next/src/server/after/builtin-request-context.ts index a2e2015ae9381..dbf6e168bd625 100644 --- a/packages/next/src/server/after/builtin-request-context.ts +++ b/packages/next/src/server/after/builtin-request-context.ts @@ -4,23 +4,15 @@ export function getBuiltinRequestContext(): | BuiltinRequestContextValue | undefined { const _globalThis = globalThis as GlobalThisWithRequestContext - const ctx = - _globalThis[NEXT_REQUEST_CONTEXT_SYMBOL] ?? - _globalThis[VERCEL_REQUEST_CONTEXT_SYMBOL] + const ctx = _globalThis[NEXT_REQUEST_CONTEXT_SYMBOL] return ctx?.get() } /** This should be considered unstable until `unstable_after` is stablized. */ const NEXT_REQUEST_CONTEXT_SYMBOL = Symbol.for('@next/request-context') -// TODO(after): this is a temporary workaround. -// Remove this when vercel builder is updated to provide '@next/request-context'. -const VERCEL_REQUEST_CONTEXT_SYMBOL = Symbol.for('@vercel/request-context') - type GlobalThisWithRequestContext = typeof globalThis & { [NEXT_REQUEST_CONTEXT_SYMBOL]?: BuiltinRequestContext - /** @deprecated */ - [VERCEL_REQUEST_CONTEXT_SYMBOL]?: BuiltinRequestContext } /** A request context provided by the platform. From 965fe24d91d08567751339756e51f2cf9d0e3188 Mon Sep 17 00:00:00 2001 From: Tim Neutkens Date: Mon, 2 Dec 2024 23:05:03 +0100 Subject: [PATCH 07/16] Add ensure-page span for development tracing (#73346) ## What Adds a tracing span for `ensure-page` which is the function called whenever a route needs to be compiled. The span doesn't distinguish already compiled vs new compilation, which means that we'll have to filter out the lowest numbers in order to get reliable results on unique compile times. 
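The rough shape of the change in both hot reloaders (a sketch; see the diffs below for the full wiring):

```ts
ensurePage({ page /* , ... */ }) {
  return this.hotReloaderSpan
    .traceChild('ensure-page', { inputPage: page })
    .traceAsyncFn(async () => {
      // existing ensure/compile logic, now measured by the span
    })
}
```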
--- .../src/server/dev/hot-reloader-turbopack.ts | 235 +++++++++--------- .../src/server/dev/hot-reloader-webpack.ts | 40 +-- 2 files changed, 145 insertions(+), 130 deletions(-) diff --git a/packages/next/src/server/dev/hot-reloader-turbopack.ts b/packages/next/src/server/dev/hot-reloader-turbopack.ts index f1332e1e1220c..15092ff62bf14 100644 --- a/packages/next/src/server/dev/hot-reloader-turbopack.ts +++ b/packages/next/src/server/dev/hot-reloader-turbopack.ts @@ -872,132 +872,141 @@ export async function createHotReloaderTurbopack( isApp, url: requestUrl, }) { - if (BLOCKED_PAGES.includes(inputPage) && inputPage !== '/_error') { - return - } - - await currentEntriesHandling - - // TODO We shouldn't look into the filesystem again. This should use the information from entrypoints - let routeDef: Pick = - definition ?? - (await findPagePathData( - dir, + return hotReloaderSpan + .traceChild('ensure-page', { inputPage, - nextConfig.pageExtensions, - opts.pagesDir, - opts.appDir - )) - - // If the route is actually an app page route, then we should have access - // to the app route definition, and therefore, the appPaths from it. - if (!appPaths && definition && isAppPageRouteDefinition(definition)) { - appPaths = definition.appPaths - } + }) + .traceAsyncFn(async () => { + if (BLOCKED_PAGES.includes(inputPage) && inputPage !== '/_error') { + return + } - let page = routeDef.page - if (appPaths) { - const normalizedPage = normalizeAppPath(page) + await currentEntriesHandling + + // TODO We shouldn't look into the filesystem again. This should use the information from entrypoints + let routeDef: Pick< + RouteDefinition, + 'filename' | 'bundlePath' | 'page' + > = + definition ?? + (await findPagePathData( + dir, + inputPage, + nextConfig.pageExtensions, + opts.pagesDir, + opts.appDir + )) + + // If the route is actually an app page route, then we should have access + // to the app route definition, and therefore, the appPaths from it. + if (!appPaths && definition && isAppPageRouteDefinition(definition)) { + appPaths = definition.appPaths + } - // filter out paths that are not exact matches (e.g. catchall) - const matchingAppPaths = appPaths.filter( - (path) => normalizeAppPath(path) === normalizedPage - ) + let page = routeDef.page + if (appPaths) { + const normalizedPage = normalizeAppPath(page) - // the last item in the array is the root page, if there are parallel routes - page = matchingAppPaths[matchingAppPaths.length - 1] - } + // filter out paths that are not exact matches (e.g. catchall) + const matchingAppPaths = appPaths.filter( + (path) => normalizeAppPath(path) === normalizedPage + ) - const pathname = definition?.pathname ?? 
inputPage - - if (page === '/_error') { - let finishBuilding = startBuilding(pathname, requestUrl, false) - try { - await handlePagesErrorRoute({ - dev: true, - currentEntryIssues, - entrypoints: currentEntrypoints, - manifestLoader, - devRewrites: opts.fsChecker.rewrites, - productionRewrites: undefined, - logErrors: true, - - hooks: { - subscribeToChanges, - handleWrittenEndpoint: (id, result) => { - clearRequireCache(id, result) - currentWrittenEntrypoints.set(id, result) - assetMapper.setPathsForKey(id, result.clientPaths) - }, - }, - }) - } finally { - finishBuilding() - } - return - } + // the last item in the array is the root page, if there are parallel routes + page = matchingAppPaths[matchingAppPaths.length - 1] + } - const isInsideAppDir = routeDef.bundlePath.startsWith('app/') - const isEntryMetadataRouteFile = isMetadataRouteFile( - routeDef.filename.replace(opts.appDir || '', ''), - nextConfig.pageExtensions, - true - ) - const normalizedAppPage = isEntryMetadataRouteFile - ? normalizedPageToTurbopackStructureRoute( - page, - extname(routeDef.filename) - ) - : page + const pathname = definition?.pathname ?? inputPage + + if (page === '/_error') { + let finishBuilding = startBuilding(pathname, requestUrl, false) + try { + await handlePagesErrorRoute({ + dev: true, + currentEntryIssues, + entrypoints: currentEntrypoints, + manifestLoader, + devRewrites: opts.fsChecker.rewrites, + productionRewrites: undefined, + logErrors: true, + + hooks: { + subscribeToChanges, + handleWrittenEndpoint: (id, result) => { + clearRequireCache(id, result) + currentWrittenEntrypoints.set(id, result) + assetMapper.setPathsForKey(id, result.clientPaths) + }, + }, + }) + } finally { + finishBuilding() + } + return + } - const route = isInsideAppDir - ? currentEntrypoints.app.get(normalizedAppPage) - : currentEntrypoints.page.get(page) + const isInsideAppDir = routeDef.bundlePath.startsWith('app/') + const isEntryMetadataRouteFile = isMetadataRouteFile( + routeDef.filename.replace(opts.appDir || '', ''), + nextConfig.pageExtensions, + true + ) + const normalizedAppPage = isEntryMetadataRouteFile + ? normalizedPageToTurbopackStructureRoute( + page, + extname(routeDef.filename) + ) + : page - if (!route) { - // TODO: why is this entry missing in turbopack? - if (page === '/middleware') return - if (page === '/src/middleware') return - if (page === '/instrumentation') return - if (page === '/src/instrumentation') return + const route = isInsideAppDir + ? currentEntrypoints.app.get(normalizedAppPage) + : currentEntrypoints.page.get(page) - throw new PageNotFoundError(`route not found ${page}`) - } + if (!route) { + // TODO: why is this entry missing in turbopack? + if (page === '/middleware') return + if (page === '/src/middleware') return + if (page === '/instrumentation') return + if (page === '/src/instrumentation') return - // We don't throw on ensureOpts.isApp === true for page-api - // since this can happen when app pages make - // api requests to page API routes. 
- if (isApp && route.type === 'page') { - throw new Error(`mis-matched route type: isApp && page for ${page}`) - } + throw new PageNotFoundError(`route not found ${page}`) + } - const finishBuilding = startBuilding(pathname, requestUrl, false) - try { - await handleRouteType({ - dev, - page, - pathname, - route, - currentEntryIssues, - entrypoints: currentEntrypoints, - manifestLoader, - readyIds, - devRewrites: opts.fsChecker.rewrites, - productionRewrites: undefined, - logErrors: true, + // We don't throw on ensureOpts.isApp === true for page-api + // since this can happen when app pages make + // api requests to page API routes. + if (isApp && route.type === 'page') { + throw new Error(`mis-matched route type: isApp && page for ${page}`) + } - hooks: { - subscribeToChanges, - handleWrittenEndpoint: (id, result) => { - currentWrittenEntrypoints.set(id, result) - clearRequireCache(id, result) - assetMapper.setPathsForKey(id, result.clientPaths) - }, - }, + const finishBuilding = startBuilding(pathname, requestUrl, false) + try { + await handleRouteType({ + dev, + page, + pathname, + route, + currentEntryIssues, + entrypoints: currentEntrypoints, + manifestLoader, + readyIds, + devRewrites: opts.fsChecker.rewrites, + productionRewrites: undefined, + logErrors: true, + + hooks: { + subscribeToChanges, + handleWrittenEndpoint: (id, result) => { + currentWrittenEntrypoints.set(id, result) + clearRequireCache(id, result) + assetMapper.setPathsForKey(id, result.clientPaths) + }, + }, + }) + } finally { + finishBuilding() + } }) - } finally { - finishBuilding() - } }, } diff --git a/packages/next/src/server/dev/hot-reloader-webpack.ts b/packages/next/src/server/dev/hot-reloader-webpack.ts index 7d0b1827c8c33..7993ac29b5838 100644 --- a/packages/next/src/server/dev/hot-reloader-webpack.ts +++ b/packages/next/src/server/dev/hot-reloader-webpack.ts @@ -1594,23 +1594,29 @@ export default class HotReloaderWebpack implements NextJsHotReloaderInterface { definition?: RouteDefinition url?: string }): Promise { - // Make sure we don't re-build or dispose prebuilt pages - if (page !== '/_error' && BLOCKED_PAGES.indexOf(page) !== -1) { - return - } - const error = clientOnly - ? this.clientError - : this.serverError || this.clientError - if (error) { - throw error - } + return this.hotReloaderSpan + .traceChild('ensure-page', { + inputPage: page, + }) + .traceAsyncFn(async () => { + // Make sure we don't re-build or dispose prebuilt pages + if (page !== '/_error' && BLOCKED_PAGES.indexOf(page) !== -1) { + return + } + const error = clientOnly + ? this.clientError + : this.serverError || this.clientError + if (error) { + throw error + } - return this.onDemandEntries?.ensurePage({ - page, - appPaths, - definition, - isApp, - url, - }) + return this.onDemandEntries?.ensurePage({ + page, + appPaths, + definition, + isApp, + url, + }) + }) } } From 920b4e9c28028d1a0dbb2511725d29c48142a042 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EB=A3=A8=EB=B0=80LuMir?= Date: Tue, 3 Dec 2024 07:21:08 +0900 Subject: [PATCH 08/16] docs: add missing punctuation in `developing-using-local-app.md` (#73163) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Improving Documentation Hello, I’ve added missing punctuation and an inline code block. The `package.json` file has an inline code block on line 42, but the one in the heading was missing. 
Co-authored-by: JJ Kasper --- contributing/core/developing-using-local-app.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/contributing/core/developing-using-local-app.md b/contributing/core/developing-using-local-app.md index f61d462348708..2e643ad01ed61 100644 --- a/contributing/core/developing-using-local-app.md +++ b/contributing/core/developing-using-local-app.md @@ -12,9 +12,9 @@ If you already have an app and it has dependencies, you can follow these steps: 1. Move your app inside of the Next.js monorepo. -2. Run with `pnpm next-with-deps ./app-path-in-monorepo` +2. Run with `pnpm next-with-deps ./app-path-in-monorepo`. -## Set as a local dependency in package.json +## Set as a local dependency in `package.json` 1. Run `pnpm dev` in the background in the Next.js monorepo. @@ -38,7 +38,7 @@ If you already have an app and it has dependencies, you can follow these steps: Failed to load SWC binary, see more info here: https://nextjs.org/docs/messages/failed-loading-swc ``` -Try to add the below section to your `package.json`, then run again +Try to add the below section to your `package.json`, then run again. ```json { From 8a4b1d3ce68a188f3dfa3884652a75d1d895a118 Mon Sep 17 00:00:00 2001 From: Steven Date: Mon, 2 Dec 2024 17:27:18 -0500 Subject: [PATCH 09/16] chore(image-optimizer): cleanup unused code paths, refactor opts (#73373) - cleanup unused code paths since `optimizedBuffer` and `upstreamBuffer` are always defined - refactor additional options in to `opts` object - add `opts.silent` option to disable logs This is easiest to review with whitespace disabled: https://github.com/vercel/next.js/pull/73373/files?diff=split&w=1 --- packages/next/src/server/image-optimizer.ts | 91 +++++++++++---------- packages/next/src/server/next-server.ts | 11 +-- 2 files changed, 53 insertions(+), 49 deletions(-) diff --git a/packages/next/src/server/image-optimizer.ts b/packages/next/src/server/image-optimizer.ts index 607bd9636b8dd..b478b35d56135 100644 --- a/packages/next/src/server/image-optimizer.ts +++ b/packages/next/src/server/image-optimizer.ts @@ -47,7 +47,7 @@ const BLUR_QUALITY = 70 // should match `next-image-loader` let _sharp: typeof import('sharp') -function getSharp(concurrency: number | null | undefined) { +export function getSharp(concurrency: number | null | undefined) { if (_sharp) { return _sharp } @@ -658,14 +658,18 @@ export async function imageOptimizer( 'dangerouslyAllowSVG' | 'minimumCacheTTL' > }, - isDev: boolean | undefined, - previousCacheEntry?: IncrementalCacheItem + opts: { + isDev?: boolean + silent?: boolean + previousCacheEntry?: IncrementalCacheItem + } ): Promise<{ buffer: Buffer contentType: string maxAge: number etag: string upstreamEtag: string + error?: unknown }> { const { href, quality, width, mimeType } = paramsResult const { buffer: upstreamBuffer, etag: upstreamEtag } = imageUpstream @@ -680,18 +684,22 @@ export async function imageOptimizer( upstreamType.startsWith('image/svg') && !nextConfig.images.dangerouslyAllowSVG ) { - Log.error( - `The requested resource "${href}" has type "${upstreamType}" but dangerouslyAllowSVG is disabled` - ) + if (!opts.silent) { + Log.error( + `The requested resource "${href}" has type "${upstreamType}" but dangerouslyAllowSVG is disabled` + ) + } throw new ImageError( 400, '"url" parameter is valid but image type is not allowed' ) } if (ANIMATABLE_TYPES.includes(upstreamType) && isAnimated(upstreamBuffer)) { - Log.warnOnce( - `The requested resource "${href}" is an animated image so it will 
not be optimized. Consider adding the "unoptimized" property to the .` - ) + if (!opts.silent) { + Log.warnOnce( + `The requested resource "${href}" is an animated image so it will not be optimized. Consider adding the "unoptimized" property to the .` + ) + } return { buffer: upstreamBuffer, contentType: upstreamType, @@ -713,12 +721,14 @@ export async function imageOptimizer( } } if (!upstreamType.startsWith('image/') || upstreamType.includes(',')) { - Log.error( - "The requested resource isn't a valid image for", - href, - 'received', - upstreamType - ) + if (!opts.silent) { + Log.error( + "The requested resource isn't a valid image for", + href, + 'received', + upstreamType + ) + } throw new ImageError(400, "The requested resource isn't a valid image.") } } @@ -739,13 +749,13 @@ export async function imageOptimizer( } const previouslyCachedImage = getPreviouslyCachedImageOrNull( imageUpstream, - previousCacheEntry + opts.previousCacheEntry ) if (previouslyCachedImage) { return { buffer: previouslyCachedImage.buffer, contentType, - maxAge: previousCacheEntry?.curRevalidate || maxAge, + maxAge: opts?.previousCacheEntry?.curRevalidate || maxAge, etag: previouslyCachedImage.etag, upstreamEtag: previouslyCachedImage.upstreamEtag, } @@ -762,34 +772,30 @@ export async function imageOptimizer( sequentialRead: nextConfig.experimental.imgOptSequentialRead, timeoutInSeconds: nextConfig.experimental.imgOptTimeoutInSeconds, }) - if (optimizedBuffer) { - if (isDev && width <= BLUR_IMG_SIZE && quality === BLUR_QUALITY) { - // During `next dev`, we don't want to generate blur placeholders with webpack - // because it can delay starting the dev server. Instead, `next-image-loader.js` - // will inline a special url to lazily generate the blur placeholder at request time. - const meta = await getImageSize(optimizedBuffer) - const opts = { - blurWidth: meta.width, - blurHeight: meta.height, - blurDataURL: `data:${contentType};base64,${optimizedBuffer.toString( - 'base64' - )}`, - } - optimizedBuffer = Buffer.from(unescape(getImageBlurSvg(opts))) - contentType = 'image/svg+xml' - } - return { - buffer: optimizedBuffer, - contentType, - maxAge: Math.max(maxAge, nextConfig.images.minimumCacheTTL), - etag: getImageEtag(optimizedBuffer), - upstreamEtag, + if (opts.isDev && width <= BLUR_IMG_SIZE && quality === BLUR_QUALITY) { + // During `next dev`, we don't want to generate blur placeholders with webpack + // because it can delay starting the dev server. Instead, `next-image-loader.js` + // will inline a special url to lazily generate the blur placeholder at request time. 
+ const meta = await getImageSize(optimizedBuffer) + const blurOpts = { + blurWidth: meta.width, + blurHeight: meta.height, + blurDataURL: `data:${contentType};base64,${optimizedBuffer.toString( + 'base64' + )}`, } - } else { - throw new ImageError(500, 'Unable to optimize buffer') + optimizedBuffer = Buffer.from(unescape(getImageBlurSvg(blurOpts))) + contentType = 'image/svg+xml' + } + return { + buffer: optimizedBuffer, + contentType, + maxAge: Math.max(maxAge, nextConfig.images.minimumCacheTTL), + etag: getImageEtag(optimizedBuffer), + upstreamEtag, } } catch (error) { - if (upstreamBuffer && upstreamType) { + if (upstreamType) { // If we fail to optimize, fallback to the original image return { buffer: upstreamBuffer, @@ -797,6 +803,7 @@ export async function imageOptimizer( maxAge: nextConfig.images.minimumCacheTTL, etag: upstreamEtag, upstreamEtag, + error, } } else { throw new ImageError( diff --git a/packages/next/src/server/next-server.ts b/packages/next/src/server/next-server.ts index aa6bc14eeffa1..a56dc40a77d81 100644 --- a/packages/next/src/server/next-server.ts +++ b/packages/next/src/server/next-server.ts @@ -668,13 +668,10 @@ export default class NextNodeServer extends BaseServer< handleInternalReq ) - return imageOptimizer( - imageUpstream, - paramsResult, - this.nextConfig, - this.renderOpts.dev, - previousCacheEntry - ) + return imageOptimizer(imageUpstream, paramsResult, this.nextConfig, { + isDev: this.renderOpts.dev, + previousCacheEntry, + }) } } From 1a7a180a6626fd3cdf9da76c0e6dd52f23b1f0fe Mon Sep 17 00:00:00 2001 From: JJ Kasper Date: Mon, 2 Dec 2024 14:38:46 -0800 Subject: [PATCH 10/16] Update max tag items limit in docs (#73444) x-ref: https://github.com/vercel/next.js/pull/73124 --- docs/01-app/03-api-reference/04-functions/fetch.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/01-app/03-api-reference/04-functions/fetch.mdx b/docs/01-app/03-api-reference/04-functions/fetch.mdx index a0878a140bafb..11a95e99ddc3b 100644 --- a/docs/01-app/03-api-reference/04-functions/fetch.mdx +++ b/docs/01-app/03-api-reference/04-functions/fetch.mdx @@ -80,7 +80,7 @@ Set the cache lifetime of a resource (in seconds). fetch(`https://...`, { next: { tags: ['collection'] } }) ``` -Set the cache tags of a resource. Data can then be revalidated on-demand using [`revalidateTag`](https://nextjs.org/docs/app/api-reference/functions/revalidateTag). The max length for a custom tag is 256 characters and the max tag items is 64. +Set the cache tags of a resource. Data can then be revalidated on-demand using [`revalidateTag`](https://nextjs.org/docs/app/api-reference/functions/revalidateTag). The max length for a custom tag is 256 characters and the max tag items is 128. ## Troubleshooting From 05c101b9d84e777adf99b77d7764e4dcbc6ba821 Mon Sep 17 00:00:00 2001 From: Jude Gao Date: Mon, 2 Dec 2024 17:49:15 -0500 Subject: [PATCH 11/16] Retire replay-io (#73282) We had this wired up a long time ago but disabled it since it wasn't helping much. So cleaning it up now. 
--- contributing/core/testing.md | 11 - jest.config.js | 4 - jest.replay.config.js | 18 -- package.json | 3 - pnpm-lock.yaml | 606 ----------------------------------- run-tests.js | 7 - test/lib/browsers/replay.ts | 23 -- test/lib/next-webdriver.ts | 16 +- 8 files changed, 3 insertions(+), 685 deletions(-) delete mode 100644 jest.replay.config.js delete mode 100644 test/lib/browsers/replay.ts diff --git a/contributing/core/testing.md b/contributing/core/testing.md index c8cf5c32e02c5..7223d318294a2 100644 --- a/contributing/core/testing.md +++ b/contributing/core/testing.md @@ -107,17 +107,6 @@ and then inspected with `pnpm playwright show-trace ./path/to/trace` Add `NEXT_TEST_TRACE=1` to enable test profiling. It's useful for improving our testing infrastructure. -### Recording the browser using Replay.io - -Using [Replay.io](https://www.replay.io/) you can record and time-travel debug the browser. - -1. Clear all local replays using `pnpm replay rm-all` -2. Run the test locally using the `RECORD_REPLAY=1` environment variables. - (e.g. `RECORD_REPLAY=1 pnpm test-dev test/e2e/app-dir/app/index.test.ts`) -3. Upload all the replays to your workspace using your API key: - `RECORD_REPLAY_API_KEY=addkeyhere pnpm replay upload-all` -4. Check the uploaded replays in your workspace, while uploading it provides the URLs. - ### Testing Turbopack To run the test suite using Turbopack, you can use the `TURBOPACK=1` environment variable: diff --git a/jest.config.js b/jest.config.js index 4fd273751b1ef..50bdfe93fd642 100644 --- a/jest.config.js +++ b/jest.config.js @@ -25,10 +25,6 @@ const customJestConfig = { prettierPath: require.resolve('prettier-2'), } -if (process.env.RECORD_REPLAY) { - customJestConfig.testRunner = '@replayio/jest/runner' -} - // Check if the environment variable is set to enable test report, // Insert a reporter to generate a junit report to upload. 
// diff --git a/jest.replay.config.js b/jest.replay.config.js deleted file mode 100644 index 23a8c1c76f945..0000000000000 --- a/jest.replay.config.js +++ /dev/null @@ -1,18 +0,0 @@ -const nextJest = require('next/jest') - -const createJestConfig = nextJest() - -// Any custom config you want to pass to Jest -const customJestConfig = { - testMatch: ['**/*.test.js', '**/*.test.ts', '**/*.test.tsx'], - setupFilesAfterEnv: ['/jest-setup-after-env.ts'], - verbose: true, - rootDir: 'test', - modulePaths: ['/lib'], - transformIgnorePatterns: ['/next[/\\\\]dist/', '/\\.next/'], - testTimeout: 60000, - testRunner: '@replayio/jest/runner', -} - -// createJestConfig is exported in this way to ensure that next/jest can load the Next.js config which is async -module.exports = createJestConfig(customJestConfig) diff --git a/package.json b/package.json index 09ca4af8dc4e1..19d70b0db46ea 100644 --- a/package.json +++ b/package.json @@ -94,9 +94,6 @@ "@next/third-parties": "workspace:*", "@opentelemetry/api": "1.4.1", "@picocss/pico": "1.5.10", - "@replayio/jest": "27.2.35", - "@replayio/playwright": "1.1.8", - "@replayio/replay": "0.20.1", "@svgr/webpack": "5.5.0", "@swc/cli": "0.1.55", "@swc/core": "1.6.13", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 38f58b7ac1e60..43fa82a03e311 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -110,15 +110,6 @@ importers: '@picocss/pico': specifier: 1.5.10 version: 1.5.10 - '@replayio/jest': - specifier: 27.2.35 - version: 27.2.35(encoding@0.1.13) - '@replayio/playwright': - specifier: 1.1.8 - version: 1.1.8(@playwright/test@1.45.1)(encoding@0.1.13) - '@replayio/replay': - specifier: 0.20.1 - version: 0.20.1(encoding@0.1.13) '@svgr/webpack': specifier: 5.5.0 version: 5.5.0 @@ -4011,10 +4002,6 @@ packages: resolution: {integrity: sha512-tsAQNx32a8CoFhjhijUIhI4kccIAgmGhy8LZMZgGfmXcpMbPRUqn5LWmgRttILi6yeGmBJd2xsPkFMs0PzgPCw==} engines: {node: '>=8'} - '@jest/console@27.5.1': - resolution: {integrity: sha512-kZ/tNpS3NXn0mlXXXPNuDZnb4c0oZ20r4K5eemM2k30ZC3G0T02nXUvyhf5YdbXWHPEJLc9qGLxEZ216MdL+Zg==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/console@29.7.0': resolution: {integrity: sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4028,10 +4015,6 @@ packages: node-notifier: optional: true - '@jest/environment@27.5.1': - resolution: {integrity: sha512-/WQjhPJe3/ghaol/4Bq480JKXV/Rfw8nQdN7f41fM8VDHLcxKXou6QyXAh3EFr9/bVG3x74z1NWDkP87EiY8gA==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/environment@29.5.0': resolution: {integrity: sha512-5FXw2+wD29YU1d4I2htpRX7jYnAyTRjP2CsXQdo9SAM8g3ifxWPSV0HnClSn71xwctr0U3oZIIH+dtbfmnbXVQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4048,10 +4031,6 @@ packages: resolution: {integrity: sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jest/fake-timers@27.5.1': - resolution: {integrity: sha512-/aPowoolwa07k7/oM3aASneNeBGCmGQsc3ugN4u6s4C/+s5M64MFo/+djTdiwcbQlRfFElGuDXWzaWj6QgKObQ==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/fake-timers@29.5.0': resolution: {integrity: sha512-9ARvuAAQcBwDAqOnglWq2zwNIRUDtk/SCkp/ToGEhFv5r86K21l+VEs0qNTaXtyiY0lEePl3kylijSYJQqdbDg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4060,10 +4039,6 @@ packages: resolution: {integrity: 
sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jest/globals@27.5.1': - resolution: {integrity: sha512-ZEJNB41OBQQgGzgyInAv0UUfDDj3upmHydjieSxFvTRuZElrx7tXg/uVQ5hYVEwiXs3+aMsAeEc9X7xiSKCm4Q==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/globals@29.7.0': resolution: {integrity: sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4085,18 +4060,10 @@ packages: resolution: {integrity: sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jest/source-map@27.5.1': - resolution: {integrity: sha512-y9NIHUYF3PJRlHk98NdC/N1gl88BL08aQQgu4k4ZopQkCw9t9cV8mtl3TV8b/YCB8XaVTFrmUTAJvjsntDireg==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/source-map@29.6.3': resolution: {integrity: sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jest/test-result@27.5.1': - resolution: {integrity: sha512-EW35l2RYFUcUQxFJz5Cv5MTOxlJIQs4I7gxzi2zVU7PJhOwfYq1MdC5nhSmYjX1gmMmLPvB3sIaC+BkcHRBfag==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/test-result@29.7.0': resolution: {integrity: sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4105,10 +4072,6 @@ packages: resolution: {integrity: sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - '@jest/transform@27.5.1': - resolution: {integrity: sha512-ipON6WtYgl/1329g5AIJVbUuEh0wZVbdpGwC99Jw4LwuoBNS95MVphU6zOeD9pDkon+LLbFL7lOQRapbB8SCHw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/transform@29.5.0': resolution: {integrity: sha512-8vbeZWqLJOvHaDfeMuoHITGKSz5qWc9u04lnWrQE3VyuSw604PzQM824ZeX9XSjUCeDiE3GuxZe5UKa8J61NQw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4121,10 +4084,6 @@ packages: resolution: {integrity: sha512-XKK7ze1apu5JWQ5eZjHITP66AX+QsLlbaJRBGYr8pNzwcAE2JVkwnf0yqjHTsDRcjR0mujy/NmZMXw5kl+kGBw==} engines: {node: '>= 6'} - '@jest/types@27.5.1': - resolution: {integrity: sha512-Cx46iJ9QpwQTjIdq5VJu2QTMMs3QlEjI0x1QbBP5W1+nMzyc2XmimiRR/CbX9TO0cPTeUlxWMOu8mslYsJ8DEw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - '@jest/types@29.5.0': resolution: {integrity: sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -4756,11 +4715,6 @@ packages: engines: {node: '>=16'} hasBin: true - '@playwright/test@1.45.1': - resolution: {integrity: sha512-Wo1bWTzQvGA7LyKGIZc8nFSTFf2TkthGIFBR+QVNilvwouGzFd4PYukZe3rvf5PSqjHi1+1NyKSDZKcQWETzaA==} - engines: {node: '>=18'} - hasBin: true - '@polka/url@1.0.0-next.11': resolution: {integrity: sha512-3NsZsJIA/22P3QUyrEDNA2D133H4j224twJrdipXN38dpnIOzAbUDtOwkcJ5pXmn75w7LSQDjA4tO9dm1XlqlA==} @@ -4797,27 +4751,6 @@ packages: '@protobufjs/utf8@1.1.0': resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} - '@replayio/jest@27.2.35': - resolution: {integrity: sha512-4PgaL4tZG9BpvBPxnFoRKbFxpgghfkWdXeIqo5yIroHFBhkKtfSZSC4JWiVhzznclKQ4EHZCJgMQhUsGuGTEVw==} - hasBin: 
true - - '@replayio/playwright@1.1.8': - resolution: {integrity: sha512-1TvHKlQOC4peHvhquZ2uwoN7lk8JFa2G9OtFcvh2ebTUWl7JF/DZARR5NdO7swTs79FrpyBXJwqUIzAtlcpb4Q==} - hasBin: true - peerDependencies: - '@playwright/test': 1.19.x - - '@replayio/replay@0.20.1': - resolution: {integrity: sha512-sgbH7iDApXRQkdv6bcDVDACNQ0GQ9IUcZcMo/72g6hGe8YZG1JQbsUDd71EWMEfbjpoJI7bpkKe0VuS8vPybNw==} - hasBin: true - - '@replayio/sourcemap-upload@1.1.1': - resolution: {integrity: sha512-XcJmyi2lxVUv/OuCC4GS7SFDrwViObw5czLy2pz7i2AAHFdFQJxTSqnNnw00m7ocUBdgcn/P0DzKHeRvTgPx4w==} - engines: {node: '>=10.13'} - - '@replayio/test-utils@1.3.8': - resolution: {integrity: sha512-gzLWLxgzzqJU5l0Z+2K2eRusl7vhd+mAkiSdvI5kCxkw3l/3+n4R6vc2Kq5VIc1x8KlPf3oeTziW1Eojwuh8bQ==} - '@resvg/resvg-wasm@2.4.0': resolution: {integrity: sha512-C7c51Nn4yTxXFKvgh2txJFNweaVcfUPQxwEUFw4aWsCmfiBDJsTSwviIF8EcwjQ6k8bPyMWCl1vw4BdxE569Cg==} engines: {node: '>= 10'} @@ -4893,9 +4826,6 @@ packages: resolution: {integrity: sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==} engines: {node: '>=6'} - '@sinonjs/commons@1.8.6': - resolution: {integrity: sha512-Ky+XkAkqPZSm3NLBeUng77EBQl3cmeJhITaGHdYH8kjVB+aun3S4XBRti2zt17mtt0mIUDiNxYeoJm6drVvBJQ==} - '@sinonjs/commons@3.0.0': resolution: {integrity: sha512-jXBtWAF4vmdNmZgD5FoKsVLv3rPgDnLgPbU84LIJ3otV44vJlDRokVng5v8NFJdCf/da9legHcKaRuZs4L7faA==} @@ -4903,9 +4833,6 @@ packages: resolution: {integrity: sha512-OPwQlEdg40HAj5KNF8WW6q2KG4Z+cBCZb3m4ninfTZKaBmbIJodviQsDBoYMPHkOyJJMHnOJo5j2+LKDOhOACg==} deprecated: Use version 10.1.0. Version 10.2.0 has potential breaking issues - '@sinonjs/fake-timers@8.1.0': - resolution: {integrity: sha512-OAPJUAtgeINhh/TAlUID4QTs53Njm7xzddaVlEs/SXwgtiD1tW22zAB/W1wdqfrpmikgaWQ9Fw6Ws+hsiRm5Vg==} - '@surma/rollup-plugin-off-main-thread@2.2.3': resolution: {integrity: sha512-lR8q/9W7hZpMWweNiAKU7NQerBnzQQLvi8qnTDU/fxItPhtZVMbPV3lbCwjhIlNBe9Bbr5V+KHshvWmVSG9cxQ==} @@ -5449,9 +5376,6 @@ packages: '@types/platform@1.3.4': resolution: {integrity: sha512-U0o4K+GNiK0PNxoDwd8xRnvLVe4kzei6opn3/FCjAriqaP+rfrDdSl1kP/hLL6Y3/Y3hhGnBwD4dCkkAqs1W/Q==} - '@types/prettier@2.7.3': - resolution: {integrity: sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA==} - '@types/prompts@2.4.2': resolution: {integrity: sha512-TwNx7qsjvRIUv/BCx583tqF5IINEVjCNqg9ofKHRlSoUHE62WBHrem4B1HGXcIrG511v29d1kJ9a/t2Esz7MIg==} @@ -5554,9 +5478,6 @@ packages: '@types/yargs@13.0.12': resolution: {integrity: sha512-qCxJE1qgz2y0hA4pIxjBR+PelCH0U5CK1XJXFwCNqfmliatKp47UCXXE9Dyk1OXBDLvsCF57TqQEJaeLfDYEOQ==} - '@types/yargs@16.0.9': - resolution: {integrity: sha512-tHhzvkFXZQeTECenFoRljLBYPZJ7jAVxqqtEI0qTLOmuultnFp4I9yKE17vTuhf7BkhCu7I4XuemPgikDVuYqA==} - '@types/yargs@17.0.10': resolution: {integrity: sha512-gmEaFwpj/7f/ROdtIlci1R1VYU1J4j95m8T+Tj3iBgiBFKg1foE/PSl93bBd5T9LDXNPo8UlNN6W0qwD8O5OaA==} @@ -6487,9 +6408,6 @@ packages: builtins@1.0.3: resolution: {integrity: sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==} - builtins@5.0.1: - resolution: {integrity: sha512-qwVpFEHNfhYJIzNRBvd2C1kyo6jz3ZSMPyyuR47OPdiKWlbYnZNyDWuyR175qDnAJLiCo5fBBqPb3RiXgWlkOQ==} - busboy@1.6.0: resolution: {integrity: sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==} engines: {node: '>=10.16.0'} @@ -7788,10 +7706,6 @@ packages: didyoumean@1.2.2: resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} - 
diff-sequences@27.5.1: - resolution: {integrity: sha512-k1gCAXAsNgLwEL+Y8Wvl+M6oEFj5bgazfZULpS5CneoPPXRaCCW7dm+q21Ky2VEE5X+VeRDBVg1Pcvvsr4TtNQ==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - diff-sequences@29.6.3: resolution: {integrity: sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -8462,10 +8376,6 @@ packages: expect-type@0.14.2: resolution: {integrity: sha512-ed3+tr5ujbIYXZ8Pl/VgIphwJQ0q5tBLGGdn7Zvwt1WyPBRX83xjT5pT77P/GkuQbctx0K2ZNSSan7eruJqTCQ==} - expect@27.5.1: - resolution: {integrity: sha512-E1q5hSUG2AmYQwQJ041nvgpkODHQvB+RKlB4IYdru6uJsyFTRyZAP463M+1lINorwbqAmUggi6+WwkD8lCS/Dw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - expect@29.5.0: resolution: {integrity: sha512-yM7xqUrCO2JdpFo4XpM82t+PJBFybdqoQuJLDGeDX2ij8NZzqRHyu3Hp188/JX7SWqud+7t4MUdvcgGBICMHZg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -9838,9 +9748,6 @@ packages: is-utf8@0.2.1: resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} - is-uuid@1.0.2: - resolution: {integrity: sha512-tCByphFcJgf2qmiMo5hMCgNAquNSagOetVetDvBXswGkNfoyEMvGH1yDlF8cbZbKnbVBr4Y5/rlpMz9umxyBkQ==} - is-weakmap@2.0.2: resolution: {integrity: sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w==} engines: {node: '>= 0.4'} @@ -9899,9 +9806,6 @@ packages: isomorphic-unfetch@3.0.0: resolution: {integrity: sha512-V0tmJSYfkKokZ5mgl0cmfQMTb7MLHsBMngTkbLY0eXvKqiVRRoZP04Ly+KhKrJfKtzC9E6Pp15Jo+bwh7Vi2XQ==} - isomorphic-unfetch@3.1.0: - resolution: {integrity: sha512-geDJjpoZ8N0kWexiwkX8F9NkTsXhetLPVbZFQ+JTW239QNOwvB0gniuR1Wc6f0AMTn7/mFGyXvHTifrCp/GH8Q==} - isstream@0.1.2: resolution: {integrity: sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} @@ -9949,10 +9853,6 @@ packages: resolution: {integrity: sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-circus@27.5.1: - resolution: {integrity: sha512-D95R7x5UtlMA5iBYsOHFFbMD/GVA4R/Kdq15f7xYWUfWHBto9NYRsOvnSauTgdF+ogCpJ4tyKOXhUifxS65gdw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-circus@29.5.0: resolution: {integrity: sha512-gq/ongqeQKAplVxqJmbeUOJJKkW3dDNPY8PjhJ5G0lBRvu0e3EWGxGy5cI4LAGA7gV2UHCtWBI4EMXK8c9nQKA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -9983,10 +9883,6 @@ packages: ts-node: optional: true - jest-diff@27.5.1: - resolution: {integrity: sha512-m0NvkX55LDt9T4mctTEgnZk3fmEg3NRYutvMPWM/0iPnkFj2wIeF45O1718cMSOFO1vINkqmxqD8vE37uTEbqw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-diff@29.7.0: resolution: {integrity: sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -9995,10 +9891,6 @@ packages: resolution: {integrity: sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-each@27.5.1: - resolution: {integrity: sha512-1Ff6p+FbhT/bXQnEouYy00bkNSY7OUpfIcmdl8vZ31A1UUaurOLPA8a8BbJOF2RDUElwJhmeaV7LnagI+5UwNQ==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-each@29.7.0: resolution: {integrity: sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ==} engines: {node: ^14.15.0 
|| ^16.10.0 || >=18.0.0} @@ -10012,10 +9904,6 @@ packages: canvas: optional: true - jest-environment-node@27.5.1: - resolution: {integrity: sha512-Jt4ZUnxdOsTGwSRAfKEnE6BcwsSPNOijjwifq5sDFSA2kesnXTvNqKHYgM0hDq3549Uf/KzdXNYn4wMZJPlFLw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-environment-node@29.7.0: resolution: {integrity: sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10029,18 +9917,10 @@ packages: jest: optional: true - jest-get-type@27.5.1: - resolution: {integrity: sha512-2KY95ksYSaK7DMBWQn6dQz3kqAf3BB64y2udeG+hv4KfSOb9qwcYQstTJc1KCbsix+wLZWZYN8t7nwX3GOBLRw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-get-type@29.6.3: resolution: {integrity: sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-haste-map@27.5.1: - resolution: {integrity: sha512-7GgkZ4Fw4NFbMSDSpZwXeBiIbx+t/46nJ2QitkOjvwPYyZmqttu2TDSimMHP1EkPOi4xUZAN1doE5Vd25H4Jng==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-haste-map@29.5.0: resolution: {integrity: sha512-IspOPnnBro8YfVYSw6yDRKh/TiCdRngjxeacCps1cQ9cgVN6+10JUcuJ1EabrgYLOATsIAigxA0rLR9x/YlrSA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10057,26 +9937,14 @@ packages: resolution: {integrity: sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-matcher-utils@27.5.1: - resolution: {integrity: sha512-z2uTx/T6LBaCoNWNFWwChLBKYxTMcGBRjAt+2SbP929/Fflb9aa5LGma654Rz8z9HLxsrUaYzxE9T/EFIL/PAw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-matcher-utils@29.7.0: resolution: {integrity: sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-message-util@27.5.1: - resolution: {integrity: sha512-rMyFe1+jnyAAf+NHwTclDz0eAaLkVDdKVHHBFWsBWHnnh5YeJMNWWsv7AbFYXfK3oTqvL7VTWkhNLu1jX24D+g==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-message-util@29.7.0: resolution: {integrity: sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-mock@27.5.1: - resolution: {integrity: sha512-K4jKbY1d4ENhbrG2zuPWaQBvDly+iZ2yAW+T1fATN78hc0sInwn7wZB8XtlNnvHug5RMwV897Xm4LqmPM4e2Og==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-mock@29.5.0: resolution: {integrity: sha512-GqOzvdWDE4fAV2bWQLQCkujxYWL7RxjCnj71b5VhDAGOevB3qj3Ovg26A5NI84ZpODxyzaozXLOh2NCgkbvyaw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10094,10 +9962,6 @@ packages: jest-resolve: optional: true - jest-regex-util@27.5.1: - resolution: {integrity: sha512-4bfKq2zie+x16okqDXjXn9ql2B0dScQu+vcwe4TvFVhkVyuWLqpZrZtXxLLWoXYgn0E87I6r6GRYHF7wFZBUvg==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-regex-util@29.4.3: resolution: {integrity: sha512-O4FglZaMmWXbGHSQInfXewIsd1LMn9p3ZXB/6r4FOkyhX2/iP/soMG98jGvk/A3HAN78+5VWcBGO0BJAPRh4kg==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10110,10 +9974,6 @@ packages: resolution: {integrity: sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-resolve@27.5.1: - resolution: {integrity: 
sha512-FFDy8/9E6CV83IMbDpcjOhumAQPDyETnU2KZ1O98DwTnz8AOBsW/Xv3GySr1mOZdItLR+zDZ7I/UdTFbgSOVCw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-resolve@29.7.0: resolution: {integrity: sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10122,30 +9982,14 @@ packages: resolution: {integrity: sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-runtime@27.5.1: - resolution: {integrity: sha512-o7gxw3Gf+H2IGt8fv0RiyE1+r83FJBRruoA+FXrlHw6xEyBsU8ugA6IPfTdVyA0w8HClpbK+DGJxH59UrNMx8A==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-runtime@29.7.0: resolution: {integrity: sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-serializer@27.5.1: - resolution: {integrity: sha512-jZCyo6iIxO1aqUxpuBlwTDMkzOAJS4a3eYz3YzgxxVQFwLeSA7Jfq5cbqCY+JLvTDrWirgusI/0KwxKMgrdf7w==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - - jest-snapshot@27.5.1: - resolution: {integrity: sha512-yYykXI5a0I31xX67mgeLw1DZ0bJB+gpq5IpSuCAoyDi0+BhgU/RIrL+RTzDmkNTchvDFWKP8lp+w/42Z3us5sA==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-snapshot@29.7.0: resolution: {integrity: sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-util@27.5.1: - resolution: {integrity: sha512-Kv2o/8jNvX1MQ0KGtw480E/w4fBCDOnH6+6DmeKi6LZUIlKA5kwY0YNdlzaWTiVgxqAqik11QyxDOKk543aKXw==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-util@29.5.0: resolution: {integrity: sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10154,10 +9998,6 @@ packages: resolution: {integrity: sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} - jest-validate@27.5.1: - resolution: {integrity: sha512-thkNli0LYTmOI1tDB3FI1S1RTp/Bqyd9pTarJwL87OIBFuqEb5Apv5EaApEudYg4g86e3CT6kM0RowkhtEnCBQ==} - engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} - jest-validate@29.7.0: resolution: {integrity: sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -10291,10 +10131,6 @@ packages: engines: {node: '>=6'} hasBin: true - jsonata@1.8.6: - resolution: {integrity: sha512-ZH2TPYdNP2JecOl/HvrH47Xc+9imibEMQ4YqKy/F/FrM+2a6vfbGxeCX23dB9Fr6uvGwv+ghf1KxWB3iZk09wA==} - engines: {node: '>= 8'} - jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} @@ -10711,9 +10547,6 @@ packages: resolution: {integrity: sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==} engines: {node: '>=8'} - make-error@1.3.6: - resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} - make-fetch-happen@8.0.13: resolution: {integrity: sha512-rQ5NijwwdU8tIaBrpTtSVrNCcAJfyDRcKBC76vOQlyJX588/88+TE+UpjWl4BgG7gCkp29wER7xcRqkeg+x64Q==} engines: {node: '>= 10'} @@ -11955,11 +11788,6 @@ packages: engines: {node: '>=16'} hasBin: true - 
playwright-core@1.45.1: - resolution: {integrity: sha512-LF4CUUtrUu2TCpDw4mcrAIuYrEjVDfT1cHbJMfwnE2+1b8PZcFzPNgvZCvq2JfQ4aTjRCCHw5EJ2tmr2NSzdPg==} - engines: {node: '>=18'} - hasBin: true - playwright-core@1.48.0: resolution: {integrity: sha512-RBvzjM9rdpP7UUFrQzRwR8L/xR4HyC1QXMzGYTbf1vjw25/ya9NRAVnXi/0fvFopjebvyPzsmoK58xxeEOaVvA==} engines: {node: '>=18'} @@ -11970,11 +11798,6 @@ packages: engines: {node: '>=16'} hasBin: true - playwright@1.45.1: - resolution: {integrity: sha512-Hjrgae4kpSQBr98nhCj3IScxVeVUixqj+5oyif8TdIn2opTCPEzqAqNMeK42i3cWDCVu9MI+ZsGWw+gVR4ISBg==} - engines: {node: '>=18'} - hasBin: true - playwright@1.48.0: resolution: {integrity: sha512-qPqFaMEHuY/ug8o0uteYJSRfMGFikhUysk8ZvAtfKmUK3kc/6oNl/y3EczF8OFGYIi/Ex2HspMfzYArk6+XQSA==} engines: {node: '>=18'} @@ -12841,10 +12664,6 @@ packages: resolution: {integrity: sha512-VCdBRNFTX1fyE7Nb6FYoURo/SPe62QCaAyzJvUjwRaIsc+NePBEniHlvxFmmX56+HZphIGtV0XeCirBtpDrTyQ==} engines: {node: '>=0.6'} - query-registry@2.6.0: - resolution: {integrity: sha512-Z5oNq7qH0g96qBTx2jAvS0X71hKP4tETtSJKEl6BdihzYqh9QKiJQBMT7qIQuzxR9lxfiso+aXCFhZ+EcAoppQ==} - engines: {node: '>=12'} - querystring-es3@0.2.1: resolution: {integrity: sha512-773xhDQnZBMFobEiztv8LIl70ch5MSF/jUQVlhwFyBILqq96anmoctVIYz+ZRp0qbCKATTn6ev02M3r7Ga5vqA==} engines: {node: '>=0.4.x'} @@ -13326,10 +13145,6 @@ packages: resolve-pkg-maps@1.0.0: resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} - resolve.exports@1.1.1: - resolution: {integrity: sha512-/NtpHNDN7jWhAaQ9BvBUYZ6YTXsRBgfqWFWP7BZBaoMJO/I3G5OFzvTuWNlZC3aPjins1F+TNrLKsGbH4rfsRQ==} - engines: {node: '>=10'} - resolve.exports@2.0.2: resolution: {integrity: sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg==} engines: {node: '>=10'} @@ -14153,9 +13968,6 @@ packages: engines: {node: '>= 4.0'} deprecated: Please upgrade to v9.0.0+ as we have fixed a public vulnerability with formidable dependency. Note that v9.0.0+ requires Node.js v14.18.0+. See https://github.com/ladjs/superagent/pull/1800 for insight. 
This project is supported and maintained by the team at Forward Email @ https://forwardemail.net - superstruct@0.15.5: - resolution: {integrity: sha512-4AOeU+P5UuE/4nOUkmcQdW5y7i9ndt1cQd/3iUe+LTz3RxESf/W/5lg4B74HbDMMv8PHnPnGCQFH45kBcrQYoQ==} - superstruct@1.0.3: resolution: {integrity: sha512-8iTn3oSS8nRGn+C2pgXSKPI3jmpm6FExNazNpjvqS6ZUJQCej3PUXEKM8NjHBOs54ExM+LPW/FBRhymrdcCiSg==} engines: {node: '>=14.0.0'} @@ -14332,9 +14144,6 @@ packages: third-party-capital@1.0.20: resolution: {integrity: sha512-oB7yIimd8SuGptespDAZnNkzIz+NWaJCu2RMsbs4Wmp9zSDUM8Nhi3s2OOcqYuv3mN4hitXc8DVx+LyUmbUDiA==} - throat@6.0.2: - resolution: {integrity: sha512-WKexMoJj3vEuK0yFEapj8y64V0A6xcuPuK9Gt1d0R+dzCSJc0lHqQytAbSB4cDAK0dWh4T0E2ETkoLE2WZ41OQ==} - through2@0.4.2: resolution: {integrity: sha512-45Llu+EwHKtAZYTPPVn3XZHBgakWMN3rokhEv5hu596XP+cNgplMg+Gj+1nmAvj+L0K7+N49zBKx5rah5u0QIQ==} @@ -14374,10 +14183,6 @@ packages: tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} - tiny-lru@8.0.2: - resolution: {integrity: sha512-ApGvZ6vVvTNdsmt676grvCkUCGwzG9IqXma5Z07xJgiC5L7akUMof5U8G2JTI9Rz/ovtVhJBlY6mNhEvtjzOIg==} - engines: {node: '>=6'} - tinydate@1.3.0: resolution: {integrity: sha512-7cR8rLy2QhYHpsBDBVYnnWXm8uRTr38RoZakFSW7Bs7PzfMPNZthuMLkwqZv7MTu8lhQ91cOFYS5a7iFj2oR3w==} engines: {node: '>=4'} @@ -14897,9 +14702,6 @@ packages: uri-js@4.4.1: resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} - url-join@4.0.1: - resolution: {integrity: sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==} - url-parse-lax@1.0.0: resolution: {integrity: sha512-BVA4lR5PIviy2PMseNd2jbFQ+jwSwQGdJejf5ctd1rEXt0Ypd7yanUK9+lYechVlN5VaTJGsu2U/3MDDu6KgBA==} engines: {node: '>=0.10.0'} @@ -15016,10 +14818,6 @@ packages: validate-npm-package-name@3.0.0: resolution: {integrity: sha512-M6w37eVCMMouJ9V/sdPGnC5H4uDr73/+xdq0FBLO3TFFX1+7wiUY6Es328NN+y43tmY+doUdN9g9J21vqB7iLw==} - validate-npm-package-name@4.0.0: - resolution: {integrity: sha512-mzR0L8ZDktZjpX4OB46KT+56MAhl4EIazWP/+G/HPGuvfdaqg4YsCdtOm6U9+LOFyYDoh4dpnpxZRB9MQQns5Q==} - engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} - validate-npm-package-name@5.0.1: resolution: {integrity: sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ==} engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} @@ -18195,15 +17993,6 @@ snapshots: '@istanbuljs/schema@0.1.2': {} - '@jest/console@27.5.1': - dependencies: - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - chalk: 4.1.2 - jest-message-util: 27.5.1 - jest-util: 27.5.1 - slash: 3.0.0 - '@jest/console@29.7.0': dependencies: '@jest/types': 29.6.3 @@ -18248,13 +18037,6 @@ snapshots: - supports-color - ts-node - '@jest/environment@27.5.1': - dependencies: - '@jest/fake-timers': 27.5.1 - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - jest-mock: 27.5.1 - '@jest/environment@29.5.0': dependencies: '@jest/fake-timers': 29.7.0 @@ -18280,15 +18062,6 @@ snapshots: transitivePeerDependencies: - supports-color - '@jest/fake-timers@27.5.1': - dependencies: - '@jest/types': 27.5.1 - '@sinonjs/fake-timers': 8.1.0 - '@types/node': 20.17.6 - jest-message-util: 27.5.1 - jest-mock: 27.5.1 - jest-util: 27.5.1 - '@jest/fake-timers@29.5.0': dependencies: '@jest/types': 29.5.0 @@ -18307,12 +18080,6 @@ snapshots: jest-mock: 29.7.0 jest-util: 29.7.0 - '@jest/globals@27.5.1': - dependencies: - '@jest/environment': 27.5.1 - '@jest/types': 27.5.1 
- expect: 27.5.1 - '@jest/globals@29.7.0': dependencies: '@jest/environment': 29.7.0 @@ -18359,25 +18126,12 @@ snapshots: dependencies: '@sinclair/typebox': 0.27.8 - '@jest/source-map@27.5.1': - dependencies: - callsites: 3.1.0 - graceful-fs: 4.2.11 - source-map: 0.6.1 - '@jest/source-map@29.6.3': dependencies: '@jridgewell/trace-mapping': 0.3.22 callsites: 3.1.0 graceful-fs: 4.2.11 - '@jest/test-result@27.5.1': - dependencies: - '@jest/console': 27.5.1 - '@jest/types': 27.5.1 - '@types/istanbul-lib-coverage': 2.0.4 - collect-v8-coverage: 1.0.1 - '@jest/test-result@29.7.0': dependencies: '@jest/console': 29.7.0 @@ -18392,26 +18146,6 @@ snapshots: jest-haste-map: 29.7.0 slash: 3.0.0 - '@jest/transform@27.5.1': - dependencies: - '@babel/core': 7.22.5 - '@jest/types': 27.5.1 - babel-plugin-istanbul: 6.1.1 - chalk: 4.1.2 - convert-source-map: 1.9.0 - fast-json-stable-stringify: 2.1.0 - graceful-fs: 4.2.11 - jest-haste-map: 27.5.1 - jest-regex-util: 27.5.1 - jest-util: 27.5.1 - micromatch: 4.0.8 - pirates: 4.0.6 - slash: 3.0.0 - source-map: 0.6.1 - write-file-atomic: 3.0.3 - transitivePeerDependencies: - - supports-color - '@jest/transform@29.5.0': dependencies: '@babel/core': 7.22.5 @@ -18458,14 +18192,6 @@ snapshots: '@types/istanbul-reports': 1.1.2 '@types/yargs': 13.0.12 - '@jest/types@27.5.1': - dependencies: - '@types/istanbul-lib-coverage': 2.0.4 - '@types/istanbul-reports': 3.0.1 - '@types/node': 20.17.6 - '@types/yargs': 16.0.9 - chalk: 4.1.2 - '@jest/types@29.5.0': dependencies: '@jest/schemas': 29.4.3 @@ -19484,10 +19210,6 @@ snapshots: dependencies: playwright: 1.41.2 - '@playwright/test@1.45.1': - dependencies: - playwright: 1.45.1 - '@polka/url@1.0.0-next.11': {} '@polka/url@1.0.0-next.24': {} @@ -19515,75 +19237,6 @@ snapshots: '@protobufjs/utf8@1.1.0': {} - '@replayio/jest@27.2.35(encoding@0.1.13)': - dependencies: - '@replayio/replay': 0.20.1(encoding@0.1.13) - '@replayio/test-utils': 1.3.8(encoding@0.1.13) - jest-circus: 27.5.1 - jest-environment-node: 27.5.1 - uuid: 8.3.2 - transitivePeerDependencies: - - bufferutil - - encoding - - supports-color - - utf-8-validate - - '@replayio/playwright@1.1.8(@playwright/test@1.45.1)(encoding@0.1.13)': - dependencies: - '@playwright/test': 1.45.1 - '@replayio/replay': 0.20.1(encoding@0.1.13) - '@replayio/test-utils': 1.3.8(encoding@0.1.13) - uuid: 8.3.2 - transitivePeerDependencies: - - bufferutil - - encoding - - supports-color - - utf-8-validate - - '@replayio/replay@0.20.1(encoding@0.1.13)': - dependencies: - '@replayio/sourcemap-upload': 1.1.1(encoding@0.1.13) - '@types/semver': 7.5.6 - commander: 7.2.0 - debug: 4.3.4 - is-uuid: 1.0.2 - jsonata: 1.8.6 - node-fetch: 2.7.0(encoding@0.1.13) - p-map: 4.0.0 - query-registry: 2.6.0(encoding@0.1.13) - semver: 7.6.3 - superstruct: 0.15.5 - text-table: 0.2.0 - ws: 7.5.3 - transitivePeerDependencies: - - bufferutil - - encoding - - supports-color - - utf-8-validate - - '@replayio/sourcemap-upload@1.1.1(encoding@0.1.13)': - dependencies: - commander: 7.2.0 - debug: 4.3.7 - glob: 7.1.7 - node-fetch: 2.6.7(encoding@0.1.13) - string.prototype.matchall: 4.0.11 - transitivePeerDependencies: - - encoding - - supports-color - - '@replayio/test-utils@1.3.8(encoding@0.1.13)': - dependencies: - '@replayio/replay': 0.20.1(encoding@0.1.13) - debug: 4.3.7 - node-fetch: 2.6.7(encoding@0.1.13) - uuid: 8.3.2 - transitivePeerDependencies: - - bufferutil - - encoding - - supports-color - - utf-8-validate - '@resvg/resvg-wasm@2.4.0': {} '@rollup/plugin-alias@3.1.1(rollup@2.35.1)': @@ -19664,10 +19317,6 @@ 
snapshots: '@sindresorhus/is@0.14.0': {} - '@sinonjs/commons@1.8.6': - dependencies: - type-detect: 4.0.8 - '@sinonjs/commons@3.0.0': dependencies: type-detect: 4.0.8 @@ -19676,10 +19325,6 @@ snapshots: dependencies: '@sinonjs/commons': 3.0.0 - '@sinonjs/fake-timers@8.1.0': - dependencies: - '@sinonjs/commons': 1.8.6 - '@surma/rollup-plugin-off-main-thread@2.2.3': dependencies: ejs: 3.1.8 @@ -20234,8 +19879,6 @@ snapshots: '@types/platform@1.3.4': {} - '@types/prettier@2.7.3': {} - '@types/prompts@2.4.2': dependencies: '@types/node': 20.17.6 @@ -20348,10 +19991,6 @@ snapshots: dependencies: '@types/yargs-parser': 21.0.0 - '@types/yargs@16.0.9': - dependencies: - '@types/yargs-parser': 21.0.0 - '@types/yargs@17.0.10': dependencies: '@types/yargs-parser': 21.0.0 @@ -21499,10 +21138,6 @@ snapshots: builtins@1.0.3: {} - builtins@5.0.1: - dependencies: - semver: 7.6.3 - busboy@1.6.0: dependencies: streamsearch: 1.1.0 @@ -23010,8 +22645,6 @@ snapshots: didyoumean@1.2.2: {} - diff-sequences@27.5.1: {} - diff-sequences@29.6.3: {} diff@5.1.0: {} @@ -24101,13 +23734,6 @@ snapshots: expect-type@0.14.2: {} - expect@27.5.1: - dependencies: - '@jest/types': 27.5.1 - jest-get-type: 27.5.1 - jest-matcher-utils: 27.5.1 - jest-message-util: 27.5.1 - expect@29.5.0: dependencies: '@jest/expect-utils': 29.7.0 @@ -25703,8 +25329,6 @@ snapshots: is-utf8@0.2.1: {} - is-uuid@1.0.2: {} - is-weakmap@2.0.2: {} is-weakref@1.0.2: @@ -25754,13 +25378,6 @@ snapshots: transitivePeerDependencies: - encoding - isomorphic-unfetch@3.1.0(encoding@0.1.13): - dependencies: - node-fetch: 2.6.7(encoding@0.1.13) - unfetch: 4.2.0 - transitivePeerDependencies: - - encoding - isstream@0.1.2: {} istanbul-lib-coverage@3.2.0: {} @@ -25836,30 +25453,6 @@ snapshots: jest-util: 29.7.0 p-limit: 3.1.0 - jest-circus@27.5.1: - dependencies: - '@jest/environment': 27.5.1 - '@jest/test-result': 27.5.1 - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - chalk: 4.1.2 - co: 4.6.0 - dedent: 0.7.0 - expect: 27.5.1 - is-generator-fn: 2.1.0 - jest-each: 27.5.1 - jest-matcher-utils: 27.5.1 - jest-message-util: 27.5.1 - jest-runtime: 27.5.1 - jest-snapshot: 27.5.1 - jest-util: 27.5.1 - pretty-format: 27.5.1 - slash: 3.0.0 - stack-utils: 2.0.6 - throat: 6.0.2 - transitivePeerDependencies: - - supports-color - jest-circus@29.5.0: dependencies: '@jest/environment': 29.7.0 @@ -25960,13 +25553,6 @@ snapshots: - babel-plugin-macros - supports-color - jest-diff@27.5.1: - dependencies: - chalk: 4.1.2 - diff-sequences: 27.5.1 - jest-get-type: 27.5.1 - pretty-format: 27.5.1 - jest-diff@29.7.0: dependencies: chalk: 4.1.2 @@ -25978,14 +25564,6 @@ snapshots: dependencies: detect-newline: 3.1.0 - jest-each@27.5.1: - dependencies: - '@jest/types': 27.5.1 - chalk: 4.1.2 - jest-get-type: 27.5.1 - jest-util: 27.5.1 - pretty-format: 27.5.1 - jest-each@29.7.0: dependencies: '@jest/types': 29.6.3 @@ -26009,15 +25587,6 @@ snapshots: - supports-color - utf-8-validate - jest-environment-node@27.5.1: - dependencies: - '@jest/environment': 27.5.1 - '@jest/fake-timers': 27.5.1 - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - jest-mock: 27.5.1 - jest-util: 27.5.1 - jest-environment-node@29.7.0: dependencies: '@jest/environment': 29.7.0 @@ -26034,27 +25603,8 @@ snapshots: optionalDependencies: jest: 29.7.0(@types/node@20.17.6)(babel-plugin-macros@3.1.0) - jest-get-type@27.5.1: {} - jest-get-type@29.6.3: {} - jest-haste-map@27.5.1: - dependencies: - '@jest/types': 27.5.1 - '@types/graceful-fs': 4.1.9 - '@types/node': 20.17.6 - anymatch: 3.1.3 - fb-watchman: 2.0.1 - graceful-fs: 
4.2.11 - jest-regex-util: 27.5.1 - jest-serializer: 27.5.1 - jest-util: 27.5.1 - jest-worker: 27.5.1 - micromatch: 4.0.8 - walker: 1.0.8 - optionalDependencies: - fsevents: 2.3.3 - jest-haste-map@29.5.0: dependencies: '@jest/types': 29.5.0 @@ -26099,13 +25649,6 @@ snapshots: jest-get-type: 29.6.3 pretty-format: 29.7.0 - jest-matcher-utils@27.5.1: - dependencies: - chalk: 4.1.2 - jest-diff: 27.5.1 - jest-get-type: 27.5.1 - pretty-format: 27.5.1 - jest-matcher-utils@29.7.0: dependencies: chalk: 4.1.2 @@ -26113,18 +25656,6 @@ snapshots: jest-get-type: 29.6.3 pretty-format: 29.7.0 - jest-message-util@27.5.1: - dependencies: - '@babel/code-frame': 7.22.5 - '@jest/types': 27.5.1 - '@types/stack-utils': 2.0.1 - chalk: 4.1.2 - graceful-fs: 4.2.11 - micromatch: 4.0.8 - pretty-format: 27.5.1 - slash: 3.0.0 - stack-utils: 2.0.6 - jest-message-util@29.7.0: dependencies: '@babel/code-frame': 7.22.5 @@ -26137,11 +25668,6 @@ snapshots: slash: 3.0.0 stack-utils: 2.0.6 - jest-mock@27.5.1: - dependencies: - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - jest-mock@29.5.0: dependencies: '@jest/types': 29.5.0 @@ -26154,16 +25680,10 @@ snapshots: '@types/node': 20.17.6 jest-util: 29.7.0 - jest-pnp-resolver@1.2.2(jest-resolve@27.5.1): - optionalDependencies: - jest-resolve: 27.5.1 - jest-pnp-resolver@1.2.2(jest-resolve@29.7.0): optionalDependencies: jest-resolve: 29.7.0 - jest-regex-util@27.5.1: {} - jest-regex-util@29.4.3: {} jest-regex-util@29.6.3: {} @@ -26175,19 +25695,6 @@ snapshots: transitivePeerDependencies: - supports-color - jest-resolve@27.5.1: - dependencies: - '@jest/types': 27.5.1 - chalk: 4.1.2 - graceful-fs: 4.2.11 - jest-haste-map: 27.5.1 - jest-pnp-resolver: 1.2.2(jest-resolve@27.5.1) - jest-util: 27.5.1 - jest-validate: 27.5.1 - resolve: 1.22.8 - resolve.exports: 1.1.1 - slash: 3.0.0 - jest-resolve@29.7.0: dependencies: chalk: 4.1.2 @@ -26226,33 +25733,6 @@ snapshots: transitivePeerDependencies: - supports-color - jest-runtime@27.5.1: - dependencies: - '@jest/environment': 27.5.1 - '@jest/fake-timers': 27.5.1 - '@jest/globals': 27.5.1 - '@jest/source-map': 27.5.1 - '@jest/test-result': 27.5.1 - '@jest/transform': 27.5.1 - '@jest/types': 27.5.1 - chalk: 4.1.2 - cjs-module-lexer: 1.2.2 - collect-v8-coverage: 1.0.1 - execa: 5.0.0 - glob: 7.1.7 - graceful-fs: 4.2.11 - jest-haste-map: 27.5.1 - jest-message-util: 27.5.1 - jest-mock: 27.5.1 - jest-regex-util: 27.5.1 - jest-resolve: 27.5.1 - jest-snapshot: 27.5.1 - jest-util: 27.5.1 - slash: 3.0.0 - strip-bom: 4.0.0 - transitivePeerDependencies: - - supports-color - jest-runtime@29.7.0: dependencies: '@jest/environment': 29.7.0 @@ -26280,38 +25760,6 @@ snapshots: transitivePeerDependencies: - supports-color - jest-serializer@27.5.1: - dependencies: - '@types/node': 20.17.6 - graceful-fs: 4.2.11 - - jest-snapshot@27.5.1: - dependencies: - '@babel/core': 7.22.5 - '@babel/generator': 7.22.5 - '@babel/plugin-syntax-typescript': 7.22.5(@babel/core@7.22.5) - '@babel/traverse': 7.22.5 - '@babel/types': 7.22.5 - '@jest/transform': 27.5.1 - '@jest/types': 27.5.1 - '@types/babel__traverse': 7.11.0 - '@types/prettier': 2.7.3 - babel-preset-current-node-syntax: 1.0.1(@babel/core@7.22.5) - chalk: 4.1.2 - expect: 27.5.1 - graceful-fs: 4.2.11 - jest-diff: 27.5.1 - jest-get-type: 27.5.1 - jest-haste-map: 27.5.1 - jest-matcher-utils: 27.5.1 - jest-message-util: 27.5.1 - jest-util: 27.5.1 - natural-compare: 1.4.0 - pretty-format: 27.5.1 - semver: 7.6.3 - transitivePeerDependencies: - - supports-color - jest-snapshot@29.7.0: dependencies: '@babel/core': 7.22.5 @@ 
-26337,15 +25785,6 @@ snapshots: transitivePeerDependencies: - supports-color - jest-util@27.5.1: - dependencies: - '@jest/types': 27.5.1 - '@types/node': 20.17.6 - chalk: 4.1.2 - ci-info: 3.8.0 - graceful-fs: 4.2.11 - picomatch: 2.3.1 - jest-util@29.5.0: dependencies: '@jest/types': 29.5.0 @@ -26364,15 +25803,6 @@ snapshots: graceful-fs: 4.2.11 picomatch: 2.3.1 - jest-validate@27.5.1: - dependencies: - '@jest/types': 27.5.1 - camelcase: 6.2.0 - chalk: 4.1.2 - jest-get-type: 27.5.1 - leven: 3.1.0 - pretty-format: 27.5.1 - jest-validate@29.7.0: dependencies: '@jest/types': 29.6.3 @@ -26543,8 +25973,6 @@ snapshots: json5@2.2.3: {} - jsonata@1.8.6: {} - jsonfile@4.0.0: optionalDependencies: graceful-fs: 4.2.11 @@ -26991,8 +26419,6 @@ snapshots: dependencies: semver: 6.3.1 - make-error@1.3.6: {} - make-fetch-happen@8.0.13: dependencies: agentkeepalive: 4.1.4 @@ -28630,8 +28056,6 @@ snapshots: playwright-core@1.41.2: {} - playwright-core@1.45.1: {} - playwright-core@1.48.0: {} playwright@1.41.2: @@ -28640,12 +28064,6 @@ snapshots: optionalDependencies: fsevents: 2.3.2 - playwright@1.45.1: - dependencies: - playwright-core: 1.45.1 - optionalDependencies: - fsevents: 2.3.2 - playwright@1.48.0: dependencies: playwright-core: 1.48.0 @@ -29531,16 +28949,6 @@ snapshots: qs@6.7.0: {} - query-registry@2.6.0(encoding@0.1.13): - dependencies: - isomorphic-unfetch: 3.1.0(encoding@0.1.13) - make-error: 1.3.6 - tiny-lru: 8.0.2 - url-join: 4.0.1 - validate-npm-package-name: 4.0.0 - transitivePeerDependencies: - - encoding - querystring-es3@0.2.1: {} querystring@0.2.0: {} @@ -30174,8 +29582,6 @@ snapshots: resolve-pkg-maps@1.0.0: {} - resolve.exports@1.1.1: {} - resolve.exports@2.0.2: {} resolve@1.17.0: @@ -31118,8 +30524,6 @@ snapshots: transitivePeerDependencies: - supports-color - superstruct@0.15.5: {} - superstruct@1.0.3: {} supports-color@2.0.0: {} @@ -31346,8 +30750,6 @@ snapshots: third-party-capital@1.0.20: {} - throat@6.0.2: {} - through2@0.4.2: dependencies: readable-stream: 1.0.34 @@ -31390,8 +30792,6 @@ snapshots: tiny-invariant@1.3.3: {} - tiny-lru@8.0.2: {} - tinydate@1.3.0: {} title-case@3.0.3: @@ -31910,8 +31310,6 @@ snapshots: dependencies: punycode: 2.3.1 - url-join@4.0.1: {} - url-parse-lax@1.0.0: dependencies: prepend-http: 1.0.4 @@ -32019,10 +31417,6 @@ snapshots: dependencies: builtins: 1.0.3 - validate-npm-package-name@4.0.0: - dependencies: - builtins: 5.0.1 - validate-npm-package-name@5.0.1: {} vary@1.1.2: {} diff --git a/run-tests.js b/run-tests.js index a65acf45d40db..5bfd9aa365cf9 100644 --- a/run-tests.js +++ b/run-tests.js @@ -451,12 +451,7 @@ ${ENDGROUP}`) const start = new Date().getTime() let outputChunks = [] - const shouldRecordTestWithReplay = process.env.RECORD_REPLAY && isRetry - const args = [ - ...(shouldRecordTestWithReplay - ? [`--config=jest.replay.config.js`] - : []), ...(process.env.CI ? ['--ci'] : []), '--runInBand', '--forceExit', @@ -489,8 +484,6 @@ ${ENDGROUP}`) ? {} : { IS_RETRY: isRetry ? 'true' : undefined, - RECORD_REPLAY: shouldRecordTestWithReplay, - TRACE_PLAYWRIGHT: process.env.NEXT_TEST_MODE === 'deploy' ? 
undefined : 'true', CIRCLECI: '', diff --git a/test/lib/browsers/replay.ts b/test/lib/browsers/replay.ts deleted file mode 100644 index 5e865fad864a0..0000000000000 --- a/test/lib/browsers/replay.ts +++ /dev/null @@ -1,23 +0,0 @@ -import { getExecutablePath } from '@replayio/playwright' -import { Playwright } from './playwright' -export { quit } from './playwright' - -export class Replay extends Playwright { - async launchBrowser(browserName: string, launchOptions: Record) { - const browser: any = browserName === 'chrome' ? 'chromium' : browserName - const executablePath = getExecutablePath(browser) - - if (!executablePath) { - throw new Error(`No replay.io executable for browser \`${browserName}\``) - } - - return super.launchBrowser(browserName, { - ...launchOptions, - executablePath, - env: { - ...process.env, - RECORD_ALL_CONTENT: 1, - }, - }) - } -} diff --git a/test/lib/next-webdriver.ts b/test/lib/next-webdriver.ts index 7cc6540da4bf4..11c8637c87220 100644 --- a/test/lib/next-webdriver.ts +++ b/test/lib/next-webdriver.ts @@ -109,19 +109,9 @@ export default async function webdriver( pushErrorAsConsoleLog, } = options - // we import only the needed interface - if ( - process.env.RECORD_REPLAY === 'true' || - process.env.RECORD_REPLAY === '1' - ) { - const { Replay, quit } = await require('./browsers/replay') - CurrentInterface = Replay - browserQuit = quit - } else { - const { Playwright, quit } = await import('./browsers/playwright') - CurrentInterface = Playwright - browserQuit = quit - } + const { Playwright, quit } = await import('./browsers/playwright') + CurrentInterface = Playwright + browserQuit = quit const browser = new CurrentInterface() const browserName = process.env.BROWSER_NAME || 'chrome' From d31d6dd2384820af270cc5958ef6d70dd1fdf1b4 Mon Sep 17 00:00:00 2001 From: PapatMayuri <40386398+PapatMayuri@users.noreply.github.com> Date: Tue, 3 Dec 2024 04:34:46 +0530 Subject: [PATCH 12/16] updating with-linaria example to utilize the App Router. (#73419) This PR updates the with-linaria example for using the App Router. Here are the changes that have been made: - I renamed the `pages` folder and moved it to the `app` folder. - Added the `layout.tsx` file as part of the App Router. - Updated the package.json file. 
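
With the App Router, the root layout takes over the document shell that `pages/_document.tsx` used to own, so it must render the `<html>` and `<body>` tags itself. A minimal sketch of the shape the new `app/layout.tsx` takes — the `metadata` values and the `RootLayout` signature match the diff below, while the `lang="en"` attribute is an assumption not confirmed by this patch:

```tsx
import type { Metadata } from "next";

export const metadata: Metadata = {
  title: "With Linaria",
  description: "Next.js example with Linaria.",
};

export default function RootLayout({
  children,
}: Readonly<{
  children: React.ReactNode;
}>) {
  // The root layout must render <html> and <body> itself; the page
  // component renders into `children`. The lang attribute is assumed.
  return (
    <html lang="en">
      <body>{children}</body>
    </html>
  );
}
```
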
CC: @samcx

---------

Co-authored-by: samcx
---
 examples/with-linaria/.gitignore | 16 +++++++++---
 examples/with-linaria/app/layout.tsx | 18 +++++++++++++
 examples/with-linaria/app/page.tsx | 5 ++++
 examples/with-linaria/app/styles.css | 21 +++++++++++++++
 examples/with-linaria/next-env.d.ts | 2 +-
 examples/with-linaria/package.json | 19 ++++++++------
 examples/with-linaria/pages/index.tsx | 37 ---------------------------
 examples/with-linaria/tsconfig.json | 21 ++++++++++-----
 8 files changed, 82 insertions(+), 57 deletions(-)
 create mode 100644 examples/with-linaria/app/layout.tsx
 create mode 100644 examples/with-linaria/app/page.tsx
 create mode 100644 examples/with-linaria/app/styles.css
 delete mode 100644 examples/with-linaria/pages/index.tsx

diff --git a/examples/with-linaria/.gitignore b/examples/with-linaria/.gitignore
index fd3dbb571a12a..f6d4e403e865c 100644
--- a/examples/with-linaria/.gitignore
+++ b/examples/with-linaria/.gitignore
@@ -3,8 +3,12 @@
 # dependencies
 /node_modules
 /.pnp
-.pnp.js
-.yarn/install-state.gz
+.pnp.*
+.yarn/*
+!.yarn/patches
+!.yarn/plugins
+!.yarn/releases
+!.yarn/versions

 # testing
 /coverage
@@ -24,9 +28,10 @@ npm-debug.log*
 yarn-debug.log*
 yarn-error.log*
+.pnpm-debug.log*

-# local env files
-.env*.local
+# env files (can opt-in for committing if needed)
+.env*

 # vercel
 .vercel
@@ -34,3 +39,6 @@ yarn-error.log*
 # typescript
 *.tsbuildinfo
 next-env.d.ts
+
+# turbo
+.turbo
diff --git a/examples/with-linaria/app/layout.tsx b/examples/with-linaria/app/layout.tsx
new file mode 100644
index 0000000000000..9b37b0bd13999
--- /dev/null
+++ b/examples/with-linaria/app/layout.tsx
@@ -0,0 +1,18 @@
+import type { Metadata } from "next";
+
+export const metadata: Metadata = {
+  title: "With Linaria",
+  description: "Next.js example with Linaria.",
+};
+
+export default function RootLayout({
+  children,
+}: Readonly<{
+  children: React.ReactNode;
+}>) {
+  return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+  );
+}
diff --git a/examples/with-linaria/app/page.tsx b/examples/with-linaria/app/page.tsx
new file mode 100644
index 0000000000000..3a354cf6f81e1
--- /dev/null
+++ b/examples/with-linaria/app/page.tsx
@@ -0,0 +1,5 @@
+import "./styles.css";
+
+export default function Home() {
+  return <div className="box">Zero runtime CSS in JS</div>;
+}
diff --git a/examples/with-linaria/app/styles.css b/examples/with-linaria/app/styles.css
new file mode 100644
index 0000000000000..3661b63d5dfc6
--- /dev/null
+++ b/examples/with-linaria/app/styles.css
@@ -0,0 +1,21 @@
+@keyframes spin {
+  from {
+    transform: rotate(0deg);
+  }
+  to {
+    transform: rotate(360deg);
+  }
+}
+
+.box {
+  margin-top: 40px;
+  margin-left: 40px;
+  height: 200px;
+  width: 200px;
+  background-color: tomato;
+  animation: spin 2s linear infinite;
+}
+
+.anotherClass {
+  color: blue;
+}
diff --git a/examples/with-linaria/next-env.d.ts b/examples/with-linaria/next-env.d.ts
index 52e831b434248..1b3be0840f3f6 100644
--- a/examples/with-linaria/next-env.d.ts
+++ b/examples/with-linaria/next-env.d.ts
@@ -2,4 +2,4 @@
 /// <reference types="next/image-types/global" />

 // NOTE: This file should not be edited
-// see https://nextjs.org/docs/pages/api-reference/config/typescript for more information.
+// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
diff --git a/examples/with-linaria/package.json b/examples/with-linaria/package.json
index 9b64d59acf754..2f854eb78e5b9 100644
--- a/examples/with-linaria/package.json
+++ b/examples/with-linaria/package.json
@@ -1,20 +1,23 @@
 {
   "private": true,
   "scripts": {
-    "dev": "next",
+    "dev": "next dev",
     "build": "next build",
     "start": "next start"
   },
   "dependencies": {
-    "@linaria/core": "^4.1.2",
-    "@linaria/react": "^4.1.4",
+    "@linaria/core": "^6.2.0",
+    "@linaria/react": "^6.2.1",
     "next": "latest",
-    "next-linaria": "^1.0.1-beta",
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0"
+    "next-linaria": "1.0.1-beta",
+    "react": "^18.3.1",
+    "react-dom": "^18.3.1"
   },
   "devDependencies": {
-    "@linaria/babel-preset": "^4.2.1",
-    "@linaria/webpack-loader": "^4.1.4"
+    "@linaria/babel-preset": "^5.0.4",
+    "@linaria/webpack-loader": "^5.0.4",
+    "@types/node": "^22.10.1",
+    "@types/react": "^18.3.12",
+    "typescript": "^5.7.2"
   }
 }
diff --git a/examples/with-linaria/pages/index.tsx b/examples/with-linaria/pages/index.tsx
deleted file mode 100644
index 707aadde01732..0000000000000
--- a/examples/with-linaria/pages/index.tsx
+++ /dev/null
@@ -1,37 +0,0 @@
-import Head from "next/head";
-import { styled } from "@linaria/react";
-import { css } from "@linaria/core";
-
-const Box = styled.div`
-  margin-top: 40px;
-  margin-left: 40px;
-  height: 200px;
-  width: 200px;
-  background-color: tomato;
-  animation: spin 2s linear infinite;
-
-  @keyframes spin {
-    from {
-      transform: rotate(0deg);
-    }
-
-    to {
-      transform: rotate(360deg);
-    }
-  }
-`;
-
-const anotherClass = css`
-  color: blue;
-`;
-
-export default function Home() {
-  return (
-    <>
-      <Head>
-        <title>With Linaria</title>
-      </Head>
-      <Box className={anotherClass}>Zero runtime CSS in JS</Box>
-    </>
-  );
-}
diff --git a/examples/with-linaria/tsconfig.json b/examples/with-linaria/tsconfig.json
index 1563f3e878573..d8b93235f205e 100644
--- a/examples/with-linaria/tsconfig.json
+++ b/examples/with-linaria/tsconfig.json
@@ -1,20 +1,27 @@
 {
   "compilerOptions": {
-    "target": "es5",
+    "target": "ES2017",
     "lib": ["dom", "dom.iterable", "esnext"],
     "allowJs": true,
     "skipLibCheck": true,
-    "strict": false,
-    "forceConsistentCasingInFileNames": true,
+    "strict": true,
     "noEmit": true,
-    "incremental": true,
     "esModuleInterop": true,
     "module": "esnext",
-    "moduleResolution": "node",
+    "moduleResolution": "bundler",
     "resolveJsonModule": true,
     "isolatedModules": true,
-    "jsx": "preserve"
+    "jsx": "preserve",
+    "incremental": true,
+    "plugins": [
+      {
+        "name": "next"
+      }
+    ],
+    "paths": {
+      "@/*": ["./*"]
+    }
   },
-  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx"],
+  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
"**/*.ts", "**/*.tsx", ".next/types/**/*.ts"], "exclude": ["node_modules"] } From 08c74b8b211e2cdfcac977c5c95964b83d965593 Mon Sep 17 00:00:00 2001 From: PapatMayuri <40386398+PapatMayuri@users.noreply.github.com> Date: Tue, 3 Dec 2024 04:46:45 +0530 Subject: [PATCH 13/16] updated with-polyfills example to utilize the App Router. (#73425) This PR updates the with-linaria example for using the App Router. Here are the changes that have been made: - I renamed the `pages` folder and moved it to the `app` folder. - Added the `layout.tsx` file as part of the App Router. - Updated the `package.json` file. CC: @samcx --------- Co-authored-by: Sam Ko --- examples/with-polyfills/README.md | 4 +-- examples/with-polyfills/app/layout.tsx | 22 +++++++++++++++ .../{pages/index.js => app/page.tsx} | 0 examples/with-polyfills/package.json | 9 +++++-- examples/with-polyfills/pages/_app.js | 9 ------- examples/with-polyfills/tsconfig.json | 27 +++++++++++++++++++ 6 files changed, 58 insertions(+), 13 deletions(-) create mode 100644 examples/with-polyfills/app/layout.tsx rename examples/with-polyfills/{pages/index.js => app/page.tsx} (100%) delete mode 100644 examples/with-polyfills/pages/_app.js create mode 100644 examples/with-polyfills/tsconfig.json diff --git a/examples/with-polyfills/README.md b/examples/with-polyfills/README.md index 6c6b21a1cd11d..2cec39b4d2d0b 100644 --- a/examples/with-polyfills/README.md +++ b/examples/with-polyfills/README.md @@ -1,10 +1,10 @@ # With Polyfills -Next.js supports IE11 and all modern browsers (Edge, Firefox, Chrome, Safari, Opera, et al) with no required configuration. It also adds [some polyfills](https://nextjs.org/docs/basic-features/supported-browsers-features#polyfills) by default. +Next.js supports IE11 and all modern browsers (Edge, Firefox, Chrome, Safari, Opera, et al) with no required configuration. It also adds [some polyfills](https://nextjs.org/docs/architecture/supported-browsers#polyfills) by default. If your own code or any external npm dependencies require features not supported by your target browsers, you need to add polyfills yourself. -In this case, you should add a top-level import for the specific polyfill you need in your Custom `` or the individual component. +In this case, you should add a top-level import for the specific polyfill you need in your [root layout](https://nextjs.org/docs/app/building-your-application/routing/layouts-and-templates#root-layout-required) or the individual component. ## Deploy your own diff --git a/examples/with-polyfills/app/layout.tsx b/examples/with-polyfills/app/layout.tsx new file mode 100644 index 0000000000000..3b63a860caad2 --- /dev/null +++ b/examples/with-polyfills/app/layout.tsx @@ -0,0 +1,22 @@ +import type { Metadata } from "next"; + +// Add your polyfills here or at the component level. +// For example... 
+// import 'resize-observer-polyfill'
+
+export const metadata: Metadata = {
+  title: "With Polyfills",
+  description: "Next.js example with polyfills.",
+};
+
+export default function RootLayout({
+  children,
+}: Readonly<{
+  children: React.ReactNode;
+}>) {
+  return (
+    <html lang="en">
+      <body>{children}</body>
+    </html>
+  );
+}
diff --git a/examples/with-polyfills/pages/index.js b/examples/with-polyfills/app/page.tsx
similarity index 100%
rename from examples/with-polyfills/pages/index.js
rename to examples/with-polyfills/app/page.tsx
diff --git a/examples/with-polyfills/package.json b/examples/with-polyfills/package.json
index bf29745bfe271..365d0d9b79bb5 100644
--- a/examples/with-polyfills/package.json
+++ b/examples/with-polyfills/package.json
@@ -7,7 +7,12 @@
   },
   "dependencies": {
     "next": "latest",
-    "react": "^18.2.0",
-    "react-dom": "^18.2.0"
+    "react": "^18.3.1",
+    "react-dom": "^18.3.1"
+  },
+  "devDependencies": {
+    "@types/node": "^22.10.1",
+    "@types/react": "^18.3.12",
+    "typescript": "^5.7.2"
   }
 }
diff --git a/examples/with-polyfills/pages/_app.js b/examples/with-polyfills/pages/_app.js
deleted file mode 100644
index e0a32060e18c1..0000000000000
--- a/examples/with-polyfills/pages/_app.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Add your polyfills here or at the component level.
-// For example...
-// import 'resize-observer-polyfill'
-
-function MyApp({ Component, pageProps }) {
-  return <Component {...pageProps} />;
-}
-
-export default MyApp;
diff --git a/examples/with-polyfills/tsconfig.json b/examples/with-polyfills/tsconfig.json
new file mode 100644
index 0000000000000..d8b93235f205e
--- /dev/null
+++ b/examples/with-polyfills/tsconfig.json
@@ -0,0 +1,27 @@
+{
+  "compilerOptions": {
+    "target": "ES2017",
+    "lib": ["dom", "dom.iterable", "esnext"],
+    "allowJs": true,
+    "skipLibCheck": true,
+    "strict": true,
+    "noEmit": true,
+    "esModuleInterop": true,
+    "module": "esnext",
+    "moduleResolution": "bundler",
+    "resolveJsonModule": true,
+    "isolatedModules": true,
+    "jsx": "preserve",
+    "incremental": true,
+    "plugins": [
+      {
+        "name": "next"
+      }
+    ],
+    "paths": {
+      "@/*": ["./*"]
+    }
+  },
+  "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
+  "exclude": ["node_modules"]
+}

From 9f8c5c14926901228f543c071eabd86a68570429 Mon Sep 17 00:00:00 2001
From: vercel-release-bot
Date: Mon, 2 Dec 2024 23:24:37 +0000
Subject: [PATCH 14/16] v15.0.4-canary.35

---
 lerna.json | 2 +-
 packages/create-next-app/package.json | 2 +-
 packages/eslint-config-next/package.json | 4 ++--
 packages/eslint-plugin-next/package.json | 2 +-
 packages/font/package.json | 2 +-
 packages/next-bundle-analyzer/package.json | 2 +-
 packages/next-codemod/package.json | 2 +-
 packages/next-env/package.json | 2 +-
 packages/next-mdx/package.json | 2 +-
 packages/next-plugin-storybook/package.json | 2 +-
 packages/next-polyfill-module/package.json | 2 +-
 packages/next-polyfill-nomodule/package.json | 2 +-
 packages/next-swc/package.json | 2 +-
 packages/next/package.json | 14 +++++++-------
 packages/react-refresh-utils/package.json | 2 +-
 packages/third-parties/package.json | 4 ++--
 pnpm-lock.yaml | 16 ++++++++--------
 17 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/lerna.json b/lerna.json
index da567605812cb..5e1974ea2f4a3 100644
--- a/lerna.json
+++ b/lerna.json
@@ -16,5 +16,5 @@
       "registry": "https://registry.npmjs.org/"
     }
   },
-  "version": "15.0.4-canary.34"
+  "version": "15.0.4-canary.35"
 }
diff --git a/packages/create-next-app/package.json b/packages/create-next-app/package.json
index cbba3ea335c9f..b2296c8910639 100644
--- 
a/packages/create-next-app/package.json +++ b/packages/create-next-app/package.json @@ -1,6 +1,6 @@ { "name": "create-next-app", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "keywords": [ "react", "next", diff --git a/packages/eslint-config-next/package.json b/packages/eslint-config-next/package.json index e54cd245603e6..f776941ecdfbf 100644 --- a/packages/eslint-config-next/package.json +++ b/packages/eslint-config-next/package.json @@ -1,6 +1,6 @@ { "name": "eslint-config-next", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "ESLint configuration used by Next.js.", "main": "index.js", "license": "MIT", @@ -10,7 +10,7 @@ }, "homepage": "https://nextjs.org/docs/app/api-reference/config/eslint#eslint-config", "dependencies": { - "@next/eslint-plugin-next": "15.0.4-canary.34", + "@next/eslint-plugin-next": "15.0.4-canary.35", "@rushstack/eslint-patch": "^1.10.3", "@typescript-eslint/eslint-plugin": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", "@typescript-eslint/parser": "^5.4.2 || ^6.0.0 || ^7.0.0 || ^8.0.0", diff --git a/packages/eslint-plugin-next/package.json b/packages/eslint-plugin-next/package.json index c1a1604dcef01..f4428da2a1dfb 100644 --- a/packages/eslint-plugin-next/package.json +++ b/packages/eslint-plugin-next/package.json @@ -1,6 +1,6 @@ { "name": "@next/eslint-plugin-next", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "ESLint plugin for Next.js.", "main": "dist/index.js", "license": "MIT", diff --git a/packages/font/package.json b/packages/font/package.json index e69b563f5883c..1d4e55cf34ac2 100644 --- a/packages/font/package.json +++ b/packages/font/package.json @@ -1,7 +1,7 @@ { "name": "@next/font", "private": true, - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "repository": { "url": "vercel/next.js", "directory": "packages/font" diff --git a/packages/next-bundle-analyzer/package.json b/packages/next-bundle-analyzer/package.json index d4b7dedb6786d..285524c6f7852 100644 --- a/packages/next-bundle-analyzer/package.json +++ b/packages/next-bundle-analyzer/package.json @@ -1,6 +1,6 @@ { "name": "@next/bundle-analyzer", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "main": "index.js", "types": "index.d.ts", "license": "MIT", diff --git a/packages/next-codemod/package.json b/packages/next-codemod/package.json index d678f82269e94..ce616ce084fb3 100644 --- a/packages/next-codemod/package.json +++ b/packages/next-codemod/package.json @@ -1,6 +1,6 @@ { "name": "@next/codemod", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "license": "MIT", "repository": { "type": "git", diff --git a/packages/next-env/package.json b/packages/next-env/package.json index 37075900404c6..0e44207c4a60e 100644 --- a/packages/next-env/package.json +++ b/packages/next-env/package.json @@ -1,6 +1,6 @@ { "name": "@next/env", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "keywords": [ "react", "next", diff --git a/packages/next-mdx/package.json b/packages/next-mdx/package.json index f149e67f85b6b..d45e83a215499 100644 --- a/packages/next-mdx/package.json +++ b/packages/next-mdx/package.json @@ -1,6 +1,6 @@ { "name": "@next/mdx", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "main": "index.js", "license": "MIT", "repository": { diff --git a/packages/next-plugin-storybook/package.json b/packages/next-plugin-storybook/package.json index 4ddeec70cb2a3..a803d03a974f7 100644 --- a/packages/next-plugin-storybook/package.json +++ 
b/packages/next-plugin-storybook/package.json @@ -1,6 +1,6 @@ { "name": "@next/plugin-storybook", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "repository": { "url": "vercel/next.js", "directory": "packages/next-plugin-storybook" diff --git a/packages/next-polyfill-module/package.json b/packages/next-polyfill-module/package.json index 687f5deb81239..1c57e42bccc7e 100644 --- a/packages/next-polyfill-module/package.json +++ b/packages/next-polyfill-module/package.json @@ -1,6 +1,6 @@ { "name": "@next/polyfill-module", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "A standard library polyfill for ES Modules supporting browsers (Edge 16+, Firefox 60+, Chrome 61+, Safari 10.1+)", "main": "dist/polyfill-module.js", "license": "MIT", diff --git a/packages/next-polyfill-nomodule/package.json b/packages/next-polyfill-nomodule/package.json index eaf35ab089c38..6d565dcaa80b1 100644 --- a/packages/next-polyfill-nomodule/package.json +++ b/packages/next-polyfill-nomodule/package.json @@ -1,6 +1,6 @@ { "name": "@next/polyfill-nomodule", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "A polyfill for non-dead, nomodule browsers.", "main": "dist/polyfill-nomodule.js", "license": "MIT", diff --git a/packages/next-swc/package.json b/packages/next-swc/package.json index 9423495f0ea56..6d3f7b54999ab 100644 --- a/packages/next-swc/package.json +++ b/packages/next-swc/package.json @@ -1,6 +1,6 @@ { "name": "@next/swc", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "private": true, "scripts": { "clean": "node ../../scripts/rm.mjs native", diff --git a/packages/next/package.json b/packages/next/package.json index 95ae5b7488c59..21d5f3bbfbab3 100644 --- a/packages/next/package.json +++ b/packages/next/package.json @@ -1,6 +1,6 @@ { "name": "next", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "The React Framework", "main": "./dist/server/next.js", "license": "MIT", @@ -97,7 +97,7 @@ ] }, "dependencies": { - "@next/env": "15.0.4-canary.34", + "@next/env": "15.0.4-canary.35", "@swc/counter": "0.1.3", "@swc/helpers": "0.5.13", "busboy": "1.6.0", @@ -161,11 +161,11 @@ "@jest/types": "29.5.0", "@mswjs/interceptors": "0.23.0", "@napi-rs/triples": "1.2.0", - "@next/font": "15.0.4-canary.34", - "@next/polyfill-module": "15.0.4-canary.34", - "@next/polyfill-nomodule": "15.0.4-canary.34", - "@next/react-refresh-utils": "15.0.4-canary.34", - "@next/swc": "15.0.4-canary.34", + "@next/font": "15.0.4-canary.35", + "@next/polyfill-module": "15.0.4-canary.35", + "@next/polyfill-nomodule": "15.0.4-canary.35", + "@next/react-refresh-utils": "15.0.4-canary.35", + "@next/swc": "15.0.4-canary.35", "@opentelemetry/api": "1.6.0", "@playwright/test": "1.41.2", "@swc/core": "1.9.2-nightly-20241111.1", diff --git a/packages/react-refresh-utils/package.json b/packages/react-refresh-utils/package.json index 5a5e105b92cb4..343d99100b445 100644 --- a/packages/react-refresh-utils/package.json +++ b/packages/react-refresh-utils/package.json @@ -1,6 +1,6 @@ { "name": "@next/react-refresh-utils", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "description": "An experimental package providing utilities for React Refresh.", "repository": { "url": "vercel/next.js", diff --git a/packages/third-parties/package.json b/packages/third-parties/package.json index 8359ee5e20b3f..7f4a7e85742e4 100644 --- a/packages/third-parties/package.json +++ b/packages/third-parties/package.json @@ -1,6 +1,6 @@ 
{ "name": "@next/third-parties", - "version": "15.0.4-canary.34", + "version": "15.0.4-canary.35", "repository": { "url": "vercel/next.js", "directory": "packages/third-parties" @@ -26,7 +26,7 @@ "third-party-capital": "1.0.20" }, "devDependencies": { - "next": "15.0.4-canary.34", + "next": "15.0.4-canary.35", "outdent": "0.8.0", "prettier": "2.5.1", "typescript": "5.6.3" diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 43fa82a03e311..d7053291ce68c 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -786,7 +786,7 @@ importers: packages/eslint-config-next: dependencies: '@next/eslint-plugin-next': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../eslint-plugin-next '@rushstack/eslint-patch': specifier: ^1.10.3 @@ -850,7 +850,7 @@ importers: packages/next: dependencies: '@next/env': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../next-env '@swc/counter': specifier: 0.1.3 @@ -978,19 +978,19 @@ importers: specifier: 1.2.0 version: 1.2.0 '@next/font': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../font '@next/polyfill-module': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../next-polyfill-module '@next/polyfill-nomodule': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../next-polyfill-nomodule '@next/react-refresh-utils': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../react-refresh-utils '@next/swc': - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../next-swc '@opentelemetry/api': specifier: 1.6.0 @@ -1624,7 +1624,7 @@ importers: version: 1.0.20 devDependencies: next: - specifier: 15.0.4-canary.34 + specifier: 15.0.4-canary.35 version: link:../next outdent: specifier: 0.8.0 From 18e8bfbbdb836bb9165c8a80706b50b1cd794d22 Mon Sep 17 00:00:00 2001 From: Sam Ko Date: Mon, 2 Dec 2024 15:29:29 -0800 Subject: [PATCH 15/16] chore(cna): add .pnpm-debug.log* to all cna gitignore (#73446) ## Why? We may as well include a `.pnpm-debug.log` since it's possible to create Create Next App with [pnpm](https://pnpm.io/). 
--- packages/create-next-app/templates/app-empty/js/gitignore | 1 + packages/create-next-app/templates/app-empty/ts/gitignore | 1 + packages/create-next-app/templates/app-tw-empty/js/gitignore | 1 + packages/create-next-app/templates/app-tw-empty/ts/gitignore | 1 + packages/create-next-app/templates/app-tw/js/gitignore | 1 + packages/create-next-app/templates/app-tw/ts/gitignore | 1 + packages/create-next-app/templates/app/js/gitignore | 1 + packages/create-next-app/templates/app/ts/gitignore | 1 + packages/create-next-app/templates/default-empty/js/gitignore | 1 + packages/create-next-app/templates/default-empty/ts/gitignore | 1 + packages/create-next-app/templates/default-tw-empty/js/gitignore | 1 + packages/create-next-app/templates/default-tw-empty/ts/gitignore | 1 + packages/create-next-app/templates/default-tw/js/gitignore | 1 + packages/create-next-app/templates/default-tw/ts/gitignore | 1 + packages/create-next-app/templates/default/js/gitignore | 1 + packages/create-next-app/templates/default/ts/gitignore | 1 + 16 files changed, 16 insertions(+) diff --git a/packages/create-next-app/templates/app-empty/js/gitignore b/packages/create-next-app/templates/app-empty/js/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/app-empty/js/gitignore +++ b/packages/create-next-app/templates/app-empty/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/app-empty/ts/gitignore b/packages/create-next-app/templates/app-empty/ts/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/app-empty/ts/gitignore +++ b/packages/create-next-app/templates/app-empty/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/app-tw-empty/js/gitignore b/packages/create-next-app/templates/app-tw-empty/js/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/app-tw-empty/js/gitignore +++ b/packages/create-next-app/templates/app-tw-empty/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/app-tw-empty/ts/gitignore b/packages/create-next-app/templates/app-tw-empty/ts/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/app-tw-empty/ts/gitignore +++ b/packages/create-next-app/templates/app-tw-empty/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/app-tw/js/gitignore b/packages/create-next-app/templates/app-tw/js/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/app-tw/js/gitignore +++ b/packages/create-next-app/templates/app-tw/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/app-tw/ts/gitignore b/packages/create-next-app/templates/app-tw/ts/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/app-tw/ts/gitignore +++ b/packages/create-next-app/templates/app-tw/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # 
env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/app/js/gitignore b/packages/create-next-app/templates/app/js/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/app/js/gitignore +++ b/packages/create-next-app/templates/app/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/app/ts/gitignore b/packages/create-next-app/templates/app/ts/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/app/ts/gitignore +++ b/packages/create-next-app/templates/app/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/default-empty/js/gitignore b/packages/create-next-app/templates/default-empty/js/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/default-empty/js/gitignore +++ b/packages/create-next-app/templates/default-empty/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/default-empty/ts/gitignore b/packages/create-next-app/templates/default-empty/ts/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/default-empty/ts/gitignore +++ b/packages/create-next-app/templates/default-empty/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/default-tw-empty/js/gitignore b/packages/create-next-app/templates/default-tw-empty/js/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/default-tw-empty/js/gitignore +++ b/packages/create-next-app/templates/default-tw-empty/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/default-tw-empty/ts/gitignore b/packages/create-next-app/templates/default-tw-empty/ts/gitignore index 8777267507c0e..6fc1b4d95a8ac 100644 --- a/packages/create-next-app/templates/default-tw-empty/ts/gitignore +++ b/packages/create-next-app/templates/default-tw-empty/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # local env files .env*.local diff --git a/packages/create-next-app/templates/default-tw/js/gitignore b/packages/create-next-app/templates/default-tw/js/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/default-tw/js/gitignore +++ b/packages/create-next-app/templates/default-tw/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/default-tw/ts/gitignore b/packages/create-next-app/templates/default-tw/ts/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/default-tw/ts/gitignore +++ b/packages/create-next-app/templates/default-tw/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git 
a/packages/create-next-app/templates/default/js/gitignore b/packages/create-next-app/templates/default/js/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/default/js/gitignore +++ b/packages/create-next-app/templates/default/js/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* diff --git a/packages/create-next-app/templates/default/ts/gitignore b/packages/create-next-app/templates/default/ts/gitignore index d32cc78b89fc9..5ef6a52078020 100644 --- a/packages/create-next-app/templates/default/ts/gitignore +++ b/packages/create-next-app/templates/default/ts/gitignore @@ -28,6 +28,7 @@ npm-debug.log* yarn-debug.log* yarn-error.log* +.pnpm-debug.log* # env files (can opt-in for committing if needed) .env* From 616bcb3729cfc2ae8540b2e952a1a1cb6ed0ff3f Mon Sep 17 00:00:00 2001 From: Janka Uryga Date: Tue, 3 Dec 2024 00:55:13 +0100 Subject: [PATCH 16/16] refactor(after): remove unnecessary conditionals (#73447) These should've been removed in #73190 because we're always creating a `closeController`. --- packages/next/src/server/web/adapter.ts | 12 +++---- .../server/web/edge-route-module-wrapper.ts | 34 ++++++++----------- 2 files changed, 20 insertions(+), 26 deletions(-) diff --git a/packages/next/src/server/web/adapter.ts b/packages/next/src/server/web/adapter.ts index 045ae4354ed02..1e589d4bd91ae 100644 --- a/packages/next/src/server/web/adapter.ts +++ b/packages/next/src/server/web/adapter.ts @@ -289,13 +289,11 @@ export async function adapter( } finally { // middleware cannot stream, so we can consider the response closed // as soon as the handler returns. - if (closeController) { - // we can delay running it until a bit later -- - // if it's needed, we'll have a `waitUntil` lock anyway. - setTimeout(() => { - closeController!.dispatchClose() - }, 0) - } + // we can delay running it until a bit later -- + // if it's needed, we'll have a `waitUntil` lock anyway. + setTimeout(() => { + closeController.dispatchClose() + }, 0) } } ) diff --git a/packages/next/src/server/web/edge-route-module-wrapper.ts b/packages/next/src/server/web/edge-route-module-wrapper.ts index d5c16926d7e6f..59e6879d1c6a5 100644 --- a/packages/next/src/server/web/edge-route-module-wrapper.ts +++ b/packages/next/src/server/web/edge-route-module-wrapper.ts @@ -127,25 +127,21 @@ export class EdgeRouteModuleWrapper { } evt.waitUntil(Promise.all(waitUntilPromises)) - if (closeController) { - const _closeController = closeController // TS annoyance - "possibly undefined" in callbacks - - if (!res.body) { - // we can delay running it until a bit later -- - // if it's needed, we'll have a `waitUntil` lock anyway. - setTimeout(() => _closeController.dispatchClose(), 0) - } else { - // NOTE: if this is a streaming response, onClose may be called later, - // so we can't rely on `closeController.listeners` -- it might be 0 at this point. - const trackedBody = trackStreamConsumed(res.body, () => - _closeController.dispatchClose() - ) - res = new Response(trackedBody, { - status: res.status, - statusText: res.statusText, - headers: res.headers, - }) - } + if (!res.body) { + // we can delay running it until a bit later -- + // if it's needed, we'll have a `waitUntil` lock anyway. 
+ setTimeout(() => closeController.dispatchClose(), 0) + } else { + // NOTE: if this is a streaming response, onClose may be called later, + // so we can't rely on `closeController.listeners` -- it might be 0 at this point. + const trackedBody = trackStreamConsumed(res.body, () => + closeController.dispatchClose() + ) + res = new Response(trackedBody, { + status: res.status, + statusText: res.statusText, + headers: res.headers, + }) } return res
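
For context on the pattern this last patch cleans up: `closeController` and `trackStreamConsumed` are only used in the diff, not defined in it. The sketch below is a minimal, hypothetical reconstruction of how such a pair could fit together. The names come from the patch, but the implementations are assumptions for illustration, not the actual Next.js source.

```ts
// Hypothetical sketch -- inferred from the names used in the patch above,
// not copied from the Next.js codebase.

type CloseListener = () => void

class CloseController {
  private listeners: CloseListener[] = []
  private closed = false

  // Register a callback to run once the response is considered closed
  // (e.g. to flush work scheduled via `after()`).
  onClose(listener: CloseListener): void {
    if (this.closed) throw new Error('response already closed')
    this.listeners.push(listener)
  }

  // Notify every listener exactly once; subsequent calls are no-ops.
  dispatchClose(): void {
    if (this.closed) return
    this.closed = true
    for (const listener of this.listeners) listener()
  }
}

// Wrap a response body so that `onEnd` fires only after the stream has been
// fully read. `flush` runs when the source stream finishes normally, which
// is the "stream consumed" moment the patch comments describe.
function trackStreamConsumed(
  stream: ReadableStream<Uint8Array>,
  onEnd: () => void
): ReadableStream<Uint8Array> {
  return stream.pipeThrough(
    new TransformStream<Uint8Array, Uint8Array>({
      flush() {
        onEnd()
      },
    })
  )
}
```

Read this way, the refactor is straightforward: since the controller is now unconditionally constructed, the `if (closeController)` guards and the `_closeController` alias (a workaround for TypeScript narrowing inside callbacks) were dead code. The `setTimeout(..., 0)` in the non-streaming branch defers `dispatchClose()` by a tick; per the patch's own comments this is safe because, if the close event is needed, a `waitUntil` lock is already held.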